repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
zamattiac/osf.io | scripts/annotate_rsvps.py | 60 | 2256 | """Utilities for annotating workshop RSVP data.
Example ::
import pandas as pd
from scripts import annotate_rsvps
frame = pd.read_csv('workshop.csv')
annotated = annotate_rsvps.process(frame)
annotated.to_csv('workshop-annotated.csv')
"""
import re
import logging
from dateutil.parser import parse as parse_date
from modularodm import Q
from modularodm.exceptions import ModularOdmException
from website.models import User, Node, NodeLog
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def find_by_email(email):
try:
return User.find_one(Q('username', 'iexact', email))
except ModularOdmException:
return None
def find_by_name(name):
try:
parts = re.split(r'\s+', name.strip())
except:
return None
if len(parts) < 2:
return None
users = User.find(
reduce(
lambda acc, value: acc & value,
[
Q('fullname', 'icontains', part.decode('utf-8', 'ignore'))
for part in parts
]
)
).sort('-date_created')
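# The query ANDs together one case-insensitive substring filter per name part;
# sorting by '-date_created' means users[0] below is the most recently created match.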
if not users:
return None
if len(users) > 1:
logger.warning('Multiple users found for name {}'.format(name))
return users[0]
def logs_since(user, date):
return NodeLog.find(
Q('user', 'eq', user._id) &
Q('date', 'gt', date)
)
def nodes_since(user, date):
return Node.find(
Q('creator', 'eq', user._id) &
Q('date_created', 'gt', date)
)
def process(frame):
frame = frame.copy()
frame['user_id'] = ''
frame['user_logs'] = ''
frame['user_nodes'] = ''
frame['last_log'] = ''
for idx, row in frame.iterrows():
user = (
find_by_email(row['Email address'].strip()) or
find_by_name(row['Name'])
)
if user:
date = parse_date(row['Workshop_date'])
frame.loc[idx, 'user_id'] = user._id
logs = logs_since(user, date)
frame.loc[idx, 'user_logs'] = logs.count()
frame.loc[idx, 'user_nodes'] = nodes_since(user, date).count()
if logs:
frame.loc[idx, 'last_log'] = logs.sort('-date')[0].date.strftime('%c')
return frame
| apache-2.0 |
hschovanec-usgs/magpy | magpy/gui/monitorpage.py | 1 | 10276 | #!/usr/bin/env python
from __future__ import print_function
try:
from magpy.stream import *
from magpy.absolutes import *
from magpy.transfer import *
from magpy.database import *
except:
from magpy.stream import *
from magpy.absolutes import *
from magpy.transfer import *
from magpy.database import *
import wx
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
from matplotlib.figure import Figure
import multiprocessing
class MonitorPage(wx.Panel):
def __init__(self, *args, **kwds):
wx.Panel.__init__(self, *args, **kwds)
self.createControls()
self.doLayout()
# Widgets
def createControls(self):
# all buttons open dlg to add parameters (e.g. IP,
self.getMARTASButton = wx.Button(self,-1,"Connect to MARTAS", size=(160,30))
self.getMARCOSButton = wx.Button(self,-1,"Connect to MARCOS", size=(160,30))
self.getMQTTButton = wx.Button(self,-1,"Connect to MQTT", size=(160,30))
self.martasLabel = wx.TextCtrl(self, value="not connected", size=(160,30), style=wx.TE_RICH) # red bg
self.marcosLabel = wx.TextCtrl(self, value="not connected", size=(160,30), style=wx.TE_RICH) # red bg
self.mqttLabel = wx.TextCtrl(self, value="not connected", size=(160,30), style=wx.TE_RICH) # red bg
self.marcosLabel.SetEditable(False)
self.martasLabel.SetEditable(False)
self.mqttLabel.SetEditable(False)
# Parameters if connection is established
#
self.coverageLabel = wx.StaticText(self, label="Plot coverage (sec):", size=(160,30))
self.coverageTextCtrl = wx.TextCtrl(self, value="600", size=(160,30))
self.sliderLabel = wx.StaticText(self, label="Update period (sec):", size=(160,30))
self.frequSlider = wx.Slider(self, -1, 10, 1, 60, (-1, -1), (100, -1),
wx.SL_AUTOTICKS | wx.SL_HORIZONTAL | wx.SL_LABELS)
self.startMonitorButton = wx.Button(self,-1,"Start Monitor", size=(160,30)) # if started then everything else will be disabled ..... except save monitor
self.stopMonitorButton = wx.Button(self,-1,"Stop Monitor", size=(160,30))
self.saveMonitorButton = wx.Button(self,-1,"Log data", size=(160,30)) # produces a bin file
#self.startMonitorButton.Disable()
#self.stopMonitorButton.Disable()
# Connection Log
#
self.connectionLogLabel = wx.StaticText(self, label="Connection Log:")
self.connectionLogTextCtrl = wx.TextCtrl(self, wx.ID_ANY, size=(330,300),
style = wx.TE_MULTILINE|wx.TE_READONLY|wx.HSCROLL|wx.VSCROLL)
def doLayout(self):
mainSizer = wx.BoxSizer(wx.VERTICAL)
# A horizontal BoxSizer will contain the GridSizer (on the left)
# and the logger text control (on the right):
boxSizer = wx.BoxSizer(orient=wx.HORIZONTAL)
# A GridSizer will contain the other controls:
gridSizer = wx.FlexGridSizer(rows=20, cols=2, vgap=10, hgap=10)
# Prepare some reusable arguments for calling sizer.Add():
expandOption = dict(flag=wx.EXPAND)
noOptions = dict()
emptySpace = ((0, 0), noOptions)
# Add the controls to the sizers:
for control, options in \
[(self.getMARTASButton, dict(flag=wx.ALIGN_CENTER)),
(self.martasLabel, noOptions),
(self.getMARCOSButton, dict(flag=wx.ALIGN_CENTER)),
(self.marcosLabel, noOptions),
(self.getMQTTButton, dict(flag=wx.ALIGN_CENTER)),
(self.mqttLabel, noOptions),
emptySpace,
emptySpace,
(self.coverageLabel, noOptions),
(self.coverageTextCtrl, expandOption),
(self.sliderLabel, noOptions),
(self.frequSlider, expandOption),
emptySpace,
emptySpace,
(self.startMonitorButton, dict(flag=wx.ALIGN_CENTER)),
(self.stopMonitorButton, dict(flag=wx.ALIGN_CENTER)),
(self.saveMonitorButton, dict(flag=wx.ALIGN_CENTER)),
emptySpace]:
gridSizer.Add(control, **options)
for control, options in \
[(gridSizer, dict(border=5, flag=wx.ALL))]:
boxSizer.Add(control, **options)
#self.SetSizerAndFit(boxSizer)
mainSizer.Add(boxSizer, 1, wx.EXPAND)
mainSizer.Add(self.connectionLogLabel, 0, wx.ALIGN_LEFT | wx.ALL, 3)
mainSizer.Add(self.connectionLogTextCtrl, 0, wx.ALIGN_LEFT | wx.ALL, 3)
self.SetSizerAndFit(mainSizer)
def logMsg(self, message):
''' Private method to append a string to the logger text
control. '''
#print message
self.connectionLogTextCtrl.AppendText('%s\n'%message)
def collector(self):
"""
A copy of the collector moon function
To be called using multiprocessing
This way the buffer should be accessible for displaying data
"""
# To be defined
clientname = 'europa'
clientip = '192.168.178.21'
martaspath = '/home/cobs/MARTAS'
# To be defined in options
destpath = "/tmp"
homedir = '/home/leon/CronScripts/MagPyAnalysis/Subscribing2MARTAS'
defaultuser = 'cobs'
stationid = 'MyHome'
dbname = 'mydb'
# Select destination (options: 'file' or 'db') - Files are saved in .../MARCOS/MartasFiles/
dest = 'file'
# For Testing purposes - Print received data to screen:
printdata = True
# Please make sure that the db and scp connection data is stored
# within the credential file -otherwise provide this data directly
martasname = 'europa'
dbhost = mpcred.lc(dbname,'host')
dbuser = mpcred.lc(dbname,'user')
dbpasswd = mpcred.lc(dbname,'passwd')
dbname = mpcred.lc(dbname,'db')
scpuser = mpcred.lc(martasname,'user')
scppasswd = mpcred.lc(martasname,'passwd')
logdir = os.path.join(homedir,'MARCOS','Logs')
logfile = os.path.join(logdir,'marcos.log')
if not os.path.exists(logdir):
os.makedirs(logdir)
log.startLogging(open(logfile,'a'))
#log.startLogging(sys.stdout)
sshcredlst = [scpuser,scppasswd]
# ----------------------------------------------------------
# 2. connect to database and check availability and version
# ----------------------------------------------------------
# The following should only be required in case of db
if dest == 'db':
try:
db = mysql.connect (host=dbhost,user=dbuser,passwd=dbpasswd,db=dbname)
dbcredlst = [dbhost,dbuser,dbpasswd,dbname]
except:
print("Create a credential file first or provide login info for database directly")
raise
cursor = db.cursor ()
cursor.execute ("SELECT VERSION()")
row = cursor.fetchone ()
print("MySQL server version:", row[0])
cursor.close ()
db.close ()
else:
dbcredlst = []
# Please note: scp does not work from root
# Therefore the following processes are performed as defaultuser (ideally as a subprocess)
uid=pwd.getpwnam(defaultuser)[2]
os.setuid(uid)
sensfile = os.path.join(martaspath,'sensors.txt')
owfile = os.path.join(martaspath,'owlist.csv')
if not os.path.exists(os.path.join(destpath,'MartasSensors')):
os.makedirs(os.path.join(destpath,'MartasSensors'))
destsensfile = os.path.join(destpath,'MartasSensors',clientname+'_sensors.txt')
destowfile = os.path.join(destpath,'MartasSensors',clientname+'_owlist.csv')
print("Getting sensor information from ", clientname)
try:
scptransfer(scpuser+'@'+clientip+':'+sensfile,destsensfile,scppasswd)
except:
print("Could not connect to/get sensor info of client %s - aborting" % clientname)
print("Please make sure that you connected at least once to the client by ssh")
print(" from your defaultuser %s " % defaultuser)
print(" This way the essential key data is established.")
sys.exit()
print("Searching for onewire data from ", clientname)
try:
scptransfer(scpuser+'@'+clientip+':'+owfile,destowfile,scppasswd)
except:
print("No one wire info available on client %s - proceeding" % clientname)
pass
s,o = [],[]
if os.path.exists(destsensfile):
with open(destsensfile,'rb') as f:
reader = csv.reader(f)
s = []
for line in reader:
print(line)
if len(line) < 2:
try:
s.append(line[0].split())
except:
# Empty line for example
pass
else:
s.append(line)
print(s)
else:
print("Apparently no sensors defined on client %s - aborting" % clientname)
sys.exit()
if os.path.exists(destowfile):
with open(destowfile,'rb') as f:
reader = csv.reader(f)
o = [line for line in reader]
print(o)
factory = WampClientFactory("ws://"+clientip+":9100", debugWamp = False)
cl.sendparameter(clientname,clientip,destpath,dest,stationid,sshcredlst,s,o,printdata,dbcredlst)
factory.protocol = cl.PubSubClient
connectWS(factory)
reactor.run()
def run(self):
"""
Now start the collection process
"""
# Start collection as process one
p1 = multiprocessing.Process(target=self.collector)
p1.start()
p1.join()
# Start visualization as process two
# google search, how to access a gradually filling array
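# A hedged sketch (not part of the original code): one way to read a gradually
# filling buffer from the collector is to hand it a multiprocessing.Queue and
# poll that queue from a wx.Timer in the GUI process, e.g.:
#
#     queue = multiprocessing.Queue()
#     p1 = multiprocessing.Process(target=self.collector, args=(queue,))
#     p1.start()
#     # inside a wx.Timer handler:
#     while not queue.empty():
#         self.logMsg(str(queue.get_nowait()))
#
# The queue argument to collector() is hypothetical - the method would need to be
# extended to push received data onto it.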
| gpl-3.0 |
abinashpanda/pgmpy | pgmpy/sampling/NUTS.py | 1 | 28385 | # -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
from pgmpy.sampling import HamiltonianMCDA
from pgmpy.sampling import LeapFrog
from pgmpy.utils import _check_1d_array_object, _check_length_equal
class NoUTurnSampler(HamiltonianMCDA):
"""
Class for performing sampling in continuous models
using the No-U-Turn Sampler (a variant of Hamiltonian Monte Carlo)
Parameters:
-----------
model: An instance of pgmpy.models
Model from which sampling has to be done
grad_log_pdf: A subclass of pgmpy.inference.continuous.GradientLogPDF
Class to compute the log and gradient log of distribution
simulate_dynamics: A subclass of pgmpy.inference.continuous.BaseSimulateHamiltonianDynamics
Class to propose future states of position and momentum in time by simulating
HamiltonianDynamics
Public Methods:
---------------
sample()
generate_sample()
Example:
--------
>>> from pgmpy.inference.continuous import NoUTurnSampler as NUTS, LeapFrog, GradLogPDFGaussian
>>> from pgmpy.factors import JointGaussianDistribution as JGD
>>> import numpy as np
>>> mean = np.array([1, 2, 3])
>>> covariance = np.array([[4, 0.1, 0.2], [0.1, 1, 0.3], [0.2, 0.3, 8]])
>>> model = JGD(['x', 'y', 'z'], mean, covariance)
>>> sampler = NUTS(model=model, grad_log_pdf=GradLogPDFGaussian, simulate_dynamics=LeapFrog)
>>> samples = sampler.sample(initial_pos=np.array([0.1, 0.9, 0.3]), num_samples=20000,stepsize=0.4)
>>> samples
rec.array([(0.1, 0.9, 0.3),
(-0.27303886844752756, 0.5028580705249155, 0.2895768065049909),
(1.7139810571103862, 2.809135711303245, 5.690811523613858), ...,
(-0.7742669710786649, 2.092867703984895, 6.139480724333439),
(1.3916152816323692, 1.394952482021687, 3.446906546649354),
(-0.2726336476939125, 2.6230854954595357, 2.923948403903159)],
dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8')])
References
----------
Matthew D. Hoffman, Andrew Gelman, The No-U-Turn Sampler: Adaptively
Setting Path Lengths in Hamiltonian Monte Carlo. Journal of
Machine Learning Research 15 (2014) 1351-1381
Algorithm 3 : Efficient No-U-Turn Sampler
"""
def __init__(self, model, grad_log_pdf, simulate_dynamics=LeapFrog):
super(NoUTurnSampler, self).__init__(model=model, grad_log_pdf=grad_log_pdf,
simulate_dynamics=simulate_dynamics)
def _initalize_tree(self, position, momentum, slice_var, stepsize):
"""
Initializes the root node of the tree, i.e. depth = 0
"""
position_bar, momentum_bar, _ = self.simulate_dynamics(self.model, position, momentum, stepsize,
self.grad_log_pdf).get_proposed_values()
_, logp_bar = self.grad_log_pdf(position_bar, self.model).get_gradient_log_pdf()
hamiltonian = logp_bar - 0.5 * np.dot(momentum_bar, momentum_bar)
candidate_set_size = slice_var < np.exp(hamiltonian)
accept_set_bool = hamiltonian > np.log(slice_var) - 10000 # delta_max = 10000
return position_bar, momentum_bar, candidate_set_size, accept_set_bool
def _update_acceptance_criteria(self, position_forward, position_backward, momentum_forward, momentum_backward,
accept_set_bool, candidate_set_size, candidate_set_size2):
# criteria1 = I[(θ+ − θ−)·r− ≥ 0]
criteria1 = np.dot((position_forward - position_backward), momentum_backward) >= 0
# criteria2 = I[(θ+ − θ−)·r+ ≥ 0]
criteria2 = np.dot((position_forward - position_backward), momentum_forward) >= 0
accept_set_bool = accept_set_bool and criteria1 and criteria2
candidate_set_size += candidate_set_size2
return accept_set_bool, candidate_set_size
def _build_tree(self, position, momentum, slice_var, direction, depth, stepsize):
"""
Recursively builds a tree for proposing new position and momentum
"""
# Parameter names in algorithm (here -> representation in algorithm)
# position -> theta, momentum -> r, slice_var -> u, direction -> v, depth ->j, stepsize -> epsilon
# candidate_set_size -> n, accept_set_bool -> s
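# Note: at tree depth j this recursion performs 2**j leapfrog steps, so each
# outer iteration of _sample() doubles the length of the explored trajectory.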
if depth == 0:
# Take single leapfrog step in the given direction (direction * stepsize)
position_bar, momentum_bar, candidate_set_size, accept_set_bool =\
self._initalize_tree(position, momentum, slice_var, direction * stepsize)
return (position_bar, momentum_bar, position_bar, momentum_bar, position_bar,
candidate_set_size, accept_set_bool)
else:
# Build left and right subtrees
(position_backward, momentum_backward, position_forward, momentum_forward, position_bar,
candidate_set_size, accept_set_bool) = self._build_tree(position, momentum,
slice_var, direction, depth - 1, stepsize)
if accept_set_bool == 1:
if direction == -1:
# Build tree in backward direction
(position_backward, momentum_backward, _, _, position_bar2, candidate_set_size2,
accept_set_bool2) = self._build_tree(position_backward, momentum_backward,
slice_var, direction, depth - 1, stepsize)
else:
# Build tree in forward direction
(_, _, position_forward, momentum_forward, position_bar2, candidate_set_size2,
accept_set_bool2) = self._build_tree(position_forward, momentum_forward,
slice_var, direction, depth - 1, stepsize)
if np.random.rand() < candidate_set_size2 / (candidate_set_size2 + candidate_set_size):
position_bar = position_bar2
accept_set_bool, candidate_set_size =\
self._update_acceptance_criteria(position_forward, position_backward, momentum_forward,
momentum_backward, accept_set_bool2, candidate_set_size,
candidate_set_size2)
return (position_backward, momentum_backward, position_forward, momentum_forward,
position_bar, candidate_set_size, accept_set_bool)
def _sample(self, position, stepsize):
"""
Returns a sample using a single iteration of NUTS
"""
# Re-sampling momentum
momentum = np.random.normal(0, 1, len(position))
# Initializations
depth = 0
position_backward, position_forward = position, position
momentum_backward, momentum_forward = momentum, momentum
candidate_set_size = accept_set_bool = 1
_, log_pdf = self.grad_log_pdf(position, self.model).get_gradient_log_pdf()
# Resample slice variable `u`
slice_var = np.random.uniform(0, np.exp(log_pdf - 0.5 * np.dot(momentum, momentum)))
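# slice_var plays the role of u ~ Uniform(0, exp(log p(theta) - 0.5 * r.r)) in
# Algorithm 3; only states whose joint density exceeds u stay in the candidate set.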
while accept_set_bool == 1:
direction = np.random.choice([-1, 1], p=[0.5, 0.5])
if direction == -1:
# Build a tree in backward direction
(position_backward, momentum_backward, _, _, position_bar,
candidate_set_size2, accept_set_bool2) = self._build_tree(position_backward, momentum_backward,
slice_var, direction, depth, stepsize)
else:
# Build tree in forward direction
(_, _, position_forward, momentum_forward, position_bar,
candidate_set_size2, accept_set_bool2) = self._build_tree(position_forward, momentum_forward,
slice_var, direction, depth, stepsize)
if accept_set_bool2 == 1:
if np.random.rand() < candidate_set_size2 / candidate_set_size:
position = position_bar.copy()
accept_set_bool, candidate_set_size = self._update_acceptance_criteria(position_forward, position_backward,
momentum_forward, momentum_backward,
accept_set_bool2, candidate_set_size,
candidate_set_size2)
depth += 1
return position
def sample(self, initial_pos, num_samples, stepsize=None):
"""
Method to return samples using No U Turn Sampler
Parameters
----------
initial_pos: A 1d array like object
Vector representing values of parameter position, the starting
state in the Markov chain.
num_samples: int
Number of samples to be generated
stepsize: float , defaults to None
The stepsize for proposing new values of position and momentum in simulate_dynamics
If None, it will be chosen suitably
Returns
-------
Returns two different types (based on installations)
pandas.DataFrame: Returns samples as pandas.DataFrame if the environment has an installation of pandas
numpy.recarray: Returns samples in form of numpy recorded arrays (numpy.recarray)
Examples
---------
>>> # If the environment has an installation of pandas
>>> from pgmpy.inference.continuous import NoUTurnSampler as NUTS, GradLogPDFGaussian, LeapFrog
>>> from pgmpy.factors import JointGaussianDistribution as JGD
>>> import numpy as np
>>> mean = np.array([0, 0, 0])
>>> covariance = np.array([[6, 0.7, 0.2], [0.7, 3, 0.9], [0.2, 0.9, 1]])
>>> model = JGD(['x', 'y', 'z'], mean, covariance)
>>> sampler = NUTS(model=model, grad_log_pdf=GradLogPDFGaussian, simulate_dynamics=LeapFrog)
>>> samples = sampler.sample(initial_pos=np.array([1, 1, 1]), num_samples=10, stepsize=0.4)
>>> samples
x y z
0 1.000000 1.000000 1.000000
1 1.760756 0.271543 -0.613309
2 1.883387 0.990745 -0.611720
3 0.980812 0.340336 -0.916283
4 0.781338 0.647220 -0.948640
5 0.040308 -1.391406 0.412201
6 1.179549 -1.450552 1.105216
7 1.100320 -1.313926 1.207815
8 1.484520 -1.349247 0.768599
9 0.934942 -1.894589 0.471772
"""
initial_pos = _check_1d_array_object(initial_pos, 'initial_pos')
_check_length_equal(initial_pos, self.model.variables, 'initial_pos', 'model.variables')
if stepsize is None:
stepsize = self._find_reasonable_stepsize(initial_pos)
types = [(var_name, 'float') for var_name in self.model.variables]
samples = np.zeros(num_samples, dtype=types).view(np.recarray)
samples[0] = tuple(initial_pos)
position_m = initial_pos
for i in range(1, num_samples):
# Generating sample
position_m = self._sample(position_m, stepsize)
samples[i] = position_m
if HAS_PANDAS is True:
return pd.DataFrame.from_records(samples)
return samples
def generate_sample(self, initial_pos, num_samples, stepsize=None):
"""
Returns a generator type object whose each iteration yields a sample
Parameters
----------
initial_pos: A 1d array like object
Vector representing values of parameter position, the starting
state in the Markov chain.
num_samples: int
Number of samples to be generated
stepsize: float , defaults to None
The stepsize for proposing new values of position and momentum in simulate_dynamics
If None, it will be chosen suitably
Returns
-------
generator: yields a numpy.array type object for each sample
Examples
---------
>>> from pgmpy.inference.continuous import NoUTurnSampler as NUTS, GradLogPDFGaussian
>>> from pgmpy.factors import JointGaussianDistribution as JGD
>>> import numpy as np
>>> mean = np.array([11, -6])
>>> covariance = np.array([[0.7, 0.2], [0.2, 14]])
>>> model = JGD(['x', 'y'], mean, covariance)
>>> sampler = NUTS(model=model, grad_log_pdf=GradLogPDFGaussian)
>>> samples = sampler.generate_sample(initial_pos=np.array([1, 1]), num_samples=10, stepsize=0.4)
>>> samples = np.array([sample for sample in samples])
>>> samples
array([[ 10.26357538, 0.10062725],
[ 12.70600336, 0.63392499],
[ 10.95523217, -0.62079273],
[ 10.66263031, -4.08135962],
[ 10.59255762, -8.48085076],
[ 9.99860242, -9.47096032],
[ 10.5733564 , -9.83504745],
[ 11.51302059, -9.49919523],
[ 11.31892143, -8.5873259 ],
[ 11.29008667, -0.43809674]])
"""
initial_pos = _check_1d_array_object(initial_pos, 'initial_pos')
_check_length_equal(initial_pos, self.model.variables, 'initial_pos', 'model.variables')
if stepsize is None:
stepsize = self._find_reasonable_stepsize(initial_pos)
position_m = initial_pos
for _ in range(0, num_samples):
position_m = self._sample(position_m, stepsize)
yield position_m
class NoUTurnSamplerDA(NoUTurnSampler):
"""
Class for performing sampling in continuous models
using the No-U-Turn Sampler with dual averaging for
adaptation of the stepsize parameter.
Parameters:
-----------
model: An instance of pgmpy.models
Model from which sampling has to be done
grad_log_pdf: A subclass of pgmpy.inference.continuous.GradientLogPDF
Class to compute the log and gradient log of distribution
simulate_dynamics: A subclass of pgmpy.inference.continuous.BaseSimulateHamiltonianDynamics
Class to propose future states of position and momentum in time by simulating
HamiltonianDynamics
delta: float (in between 0 and 1), defaults to 0.65
The target HMC acceptance probability
Public Methods:
---------------
sample()
generate_sample()
Example:
--------
>>> from pgmpy.inference.continuous import NoUTurnSamplerDA as NUTSda, GradLogPDFGaussian
>>> from pgmpy.factors import JointGaussianDistribution as JGD
>>> import numpy as np
>>> mean = np.array([-1, 12, -3])
>>> covariance = np.array([[-2, 7, 2], [7, 14, 4], [2, 4, -1]])
>>> model = JGD(['x', 'v', 't'], mean, covariance)
>>> sampler = NUTSda(model=model, grad_log_pdf=GradLogPDFGaussian)
>>> samples = sampler.sample(initial_pos=np.array([0, 0, 0]), num_adapt=10, num_samples=10, stepsize=0.25)
>>> samples
rec.array([(0.0, 0.0, 0.0),
(0.06100992691638076, -0.17118088764170125, 0.14048470935160887),
(0.06100992691638076, -0.17118088764170125, 0.14048470935160887),
(-0.7451883138013118, 1.7975387358691155, 2.3090698721374436),
(-0.6207457594500309, 1.4611049498441024, 2.5890867012835574),
(0.24043604780911487, 1.8660976216530618, 3.2508715592645347),
(0.21509819341468212, 2.157760225367607, 3.5749582768731476),
(0.20699150582681913, 2.0605044285377305, 3.8588980251618135),
(0.20699150582681913, 2.0605044285377305, 3.8588980251618135),
(0.085332419611991, 1.7556171374575567, 4.49985082288814)],
dtype=[('x', '<f8'), ('v', '<f8'), ('t', '<f8')])
References
----------
Matthew D. Hoffman, Andrew Gelman, The No-U-Turn Sampler: Adaptively
Setting Path Lengths in Hamiltonian Monte Carlo. Journal of
Machine Learning Research 15 (2014) 1351-1381
Algorithm 6 : No-U-Turn Sampler with Dual Averaging
"""
def __init__(self, model, grad_log_pdf, simulate_dynamics=LeapFrog, delta=0.65):
if not isinstance(delta, float) or delta > 1.0 or delta < 0.0:
raise ValueError(
"delta should be a floating value in between 0 and 1")
self.delta = delta
super(NoUTurnSamplerDA, self).__init__(model=model, grad_log_pdf=grad_log_pdf,
simulate_dynamics=simulate_dynamics)
def _build_tree(self, position, momentum, slice_var, direction, depth, stepsize, position0, momentum0):
"""
Recursively builds a tree for proposing new position and momentum
"""
if depth == 0:
position_bar, momentum_bar, candidate_set_size, accept_set_bool =\
self._initalize_tree(position, momentum, slice_var, direction * stepsize)
alpha = min(1, self._acceptance_prob(position, position_bar, momentum, momentum_bar))
return (position_bar, momentum_bar, position_bar, momentum_bar, position_bar,
candidate_set_size, accept_set_bool, alpha, 1)
else:
(position_backward, momentum_backward, position_forward, momentum_forward, position_bar,
candidate_set_size, accept_set_bool, alpha, n_alpha) =\
self._build_tree(position, momentum, slice_var,
direction, depth - 1, stepsize, position0, momentum0)
if accept_set_bool == 1:
if direction == -1:
# Build tree in backward direction
(position_backward, momentum_backward, _, _, position_bar2, candidate_set_size2, accept_set_bool2,
alpha2, n_alpha2) = self._build_tree(position_backward, momentum_backward, slice_var, direction,
depth - 1, stepsize, position0, momentum0)
else:
# Build tree in forward direction
(_, _, position_forward, momentum_forward, position_bar2, candidate_set_size2, accept_set_bool2,
alpha2, n_alpha2) = self._build_tree(position_forward, momentum_forward, slice_var, direction,
depth - 1, stepsize, position0, momentum0)
if np.random.rand() < candidate_set_size2 / (candidate_set_size2 + candidate_set_size):
position_bar = position_bar2
alpha += alpha2
n_alpha += n_alpha2
accept_set_bool, candidate_set_size =\
self._update_acceptance_criteria(position_forward, position_backward, momentum_forward,
momentum_backward, accept_set_bool2, candidate_set_size,
candidate_set_size2)
return (position_backward, momentum_backward, position_forward, momentum_forward, position_bar,
candidate_set_size, accept_set_bool, alpha, n_alpha)
def _sample(self, position, stepsize):
"""
Returns a sample using a single iteration of NUTS with dual averaging
"""
# Re-sampling momentum
momentum = np.random.normal(0, 1, len(position))
# Initializations
depth = 0
position_backward, position_forward = position, position
momentum_backward, momentum_forward = momentum, momentum
candidate_set_size = accept_set_bool = 1
position_m_1 = position
_, log_pdf = self.grad_log_pdf(position, self.model).get_gradient_log_pdf()
# Resample slice variable `u`
slice_var = np.random.uniform(0, np.exp(log_pdf - 0.5 * np.dot(momentum, momentum)))
while accept_set_bool == 1:
direction = np.random.choice([-1, 1], p=[0.5, 0.5])
if direction == -1:
# Build a tree in backward direction
(position_backward, momentum_backward, _, _, position_bar, candidate_set_size2, accept_set_bool2,
alpha, n_alpha) = self._build_tree(position_backward, momentum_backward, slice_var, direction,
depth, stepsize, position_m_1, momentum)
else:
# Build tree in forward direction
(_, _, position_forward, momentum_forward, position_bar, candidate_set_size2, accept_set_bool2,
alpha, n_alpha) = self._build_tree(position_forward, momentum_forward, slice_var, direction,
depth, stepsize, position_m_1, momentum)
if accept_set_bool2 == 1:
if np.random.rand() < candidate_set_size2 / candidate_set_size:
position = position_bar
accept_set_bool, candidate_set_size = self._update_acceptance_criteria(position_forward, position_backward,
momentum_forward, momentum_backward,
accept_set_bool2, candidate_set_size,
candidate_set_size2)
depth += 1
return position, alpha, n_alpha
def sample(self, initial_pos, num_adapt, num_samples, stepsize=None):
"""
Returns samples using No U Turn Sampler with dual averaging
Parameters
----------
initial_pos: A 1d array like object
Vector representing values of parameter position, the starting
state in the Markov chain.
num_adapt: int
The number of iterations to run the adaptation of stepsize
num_samples: int
Number of samples to be generated
stepsize: float , defaults to None
The stepsize for proposing new values of position and momentum in simulate_dynamics
If None, it will be chosen suitably
Returns
-------
Returns two different types (based on installations)
pandas.DataFrame: Returns samples as pandas.DataFrame if the environment has an installation of pandas
numpy.recarray: Returns samples in form of numpy recorded arrays (numpy.recarray)
Examples
---------
>>> # If the environment has an installation of pandas
>>> from pgmpy.inference.continuous import NoUTurnSamplerDA as NUTSda, GradLogPDFGaussian, LeapFrog
>>> from pgmpy.factors import JointGaussianDistribution as JGD
>>> import numpy as np
>>> mean = np.array([10, -13])
>>> covariance = np.array([[16, -3], [-3, 13]])
>>> model = JGD(['x', 'y'], mean, covariance)
>>> sampler = NUTSda(model=model, grad_log_pdf=GradLogPDFGaussian, simulate_dynamics=LeapFrog)
>>> samples = sampler.sample(initial_pos=np.array([12, -4]), num_adapt=10, num_samples=10, stepsize=0.1)
>>> samples
x y
0 12.000000 -4.000000
1 11.864821 -3.696109
2 10.546986 -4.892169
3 8.526596 -21.555793
4 8.526596 -21.555793
5 11.343194 -6.353789
6 -1.583269 -12.802931
7 12.411957 -11.704859
8 13.253336 -20.169492
9 11.295901 -7.665058
"""
initial_pos = _check_1d_array_object(initial_pos, 'initial_pos')
_check_length_equal(initial_pos, self.model.variables, 'initial_pos', 'model.variables')
if stepsize is None:
stepsize = self._find_reasonable_stepsize(initial_pos)
if num_adapt <= 1:
return NoUTurnSampler(self.model, self.grad_log_pdf,
self.simulate_dynamics).sample(initial_pos, num_samples, stepsize)
mu = np.log(10.0 * stepsize)
stepsize_bar = 1.0
h_bar = 0.0
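# Dual-averaging state (Algorithm 6): mu is the shrinkage target log(10 * initial
# stepsize), while stepsize_bar and h_bar are the running averages that
# _adapt_params updates during the first num_adapt iterations.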
types = [(var_name, 'float') for var_name in self.model.variables]
samples = np.zeros(num_samples, dtype=types).view(np.recarray)
samples[0] = tuple(initial_pos)
position_m = initial_pos
for i in range(1, num_samples):
position_m, alpha, n_alpha = self._sample(position_m, stepsize)
samples[i] = position_m
if i <= num_adapt:
stepsize, stepsize_bar, h_bar = self._adapt_params(stepsize, stepsize_bar, h_bar, mu,
i, alpha, n_alpha)
else:
stepsize = stepsize_bar
if HAS_PANDAS is True:
return pd.DataFrame.from_records(samples)
return samples
def generate_sample(self, initial_pos, num_adapt, num_samples, stepsize=None):
"""
Returns a generator type object whose each iteration yields a sample
Parameters
----------
initial_pos: A 1d array like object
Vector representing values of parameter position, the starting
state in the Markov chain.
num_adapt: int
The number of iterations to run the adaptation of stepsize
num_samples: int
Number of samples to be generated
stepsize: float , defaults to None
The stepsize for proposing new values of position and momentum in simulate_dynamics
If None, it will be chosen suitably
Returns
-------
generator: yields a numpy.array type object for each sample
Examples
--------
>>> from pgmpy.inference.continuous import NoUTurnSamplerDA as NUTSda, GradLogPDFGaussian
>>> from pgmpy.factors import JointGaussianDistribution as JGD
>>> import numpy as np
>>> mean = np.array([1, -100])
>>> covariance = np.array([[-12, 45], [45, -10]])
>>> model = JGD(['a', 'b'], mean, covariance)
>>> sampler = NUTSda(model=model, grad_log_pdf=GradLogPDFGaussian, simulate_dynamics=LeapFrog)
>>> samples = sampler.generate_sample(initial_pos=np.array([12, -4]), num_adapt=10,
... num_samples=10, stepsize=0.1)
>>> samples
<generator object NoUTurnSamplerDA.generate_sample at 0x7f4fed46a4c0>
>>> samples_array = np.array([sample for sample in samples])
>>> samples_array
array([[ 11.89963386, -4.06572636],
[ 10.3453755 , -7.5700289 ],
[-26.56899659, -15.3920684 ],
[-29.97143077, -12.0801625 ],
[-29.97143077, -12.0801625 ],
[-33.07960829, -8.90440347],
[-55.28263496, -17.31718524],
[-55.28263496, -17.31718524],
[-56.63440044, -16.03309364],
[-63.880094 , -19.19981944]])
"""
initial_pos = _check_1d_array_object(initial_pos, 'initial_pos')
_check_length_equal(initial_pos, self.model.variables, 'initial_pos', 'model.variables')
if stepsize is None:
stepsize = self._find_reasonable_stepsize(initial_pos)
if num_adapt <= 1: # yield samples generated using plain NUTS (no dual averaging)
for sample in NoUTurnSampler(self.model, self.grad_log_pdf,
self.simulate_dynamics).generate_sample(initial_pos, num_samples, stepsize):
yield sample
return
mu = np.log(10.0 * stepsize)
stepsize_bar = 1.0
h_bar = 0.0
position_m = initial_pos.copy()
num_adapt += 1
for i in range(1, num_samples + 1):
position_m, alpha, n_alpha = self._sample(position_m, stepsize)
if i <= num_adapt:
stepsize, stepsize_bar, h_bar = self._adapt_params(stepsize, stepsize_bar, h_bar, mu,
i, alpha, n_alpha)
else:
stepsize = stepsize_bar
yield position_m
| mit |
openmrslab/suspect | tests/test_mrs/test_io.py | 1 | 4519 | import suspect
import suspect.io.tarquin
import pytest
import unittest.mock
import builtins
from unittest.mock import patch
import os
from suspect.io._common import complex_array_from_iter
import numpy
def test_complex_from_iter():
float_list = [1.0, 0.0, 0.0, 1.0]
array = complex_array_from_iter(iter(float_list))
assert array.shape == (2,)
assert array[0] == 1
assert array[1] == 1j
def test_shaped_complex_from_iter():
float_list = [1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0]
array = complex_array_from_iter(iter(float_list), shape=[2, 2])
assert array.shape == (2, 2)
def test_write_dpt():
data = suspect.MRSData(numpy.zeros(1), 1e-3, 123.456)
mock = unittest.mock.mock_open()
with patch.object(builtins, 'open', mock):
suspect.io.tarquin.save_dpt("/home/ben/test_dpt.dpt", data)
#print(mock.mock_calls)
#handle = mock()
#print(handle.write.call_args())
def test_read_tarquin_results():
fitting_result = suspect.io.tarquin.read_output("tests/test_data/tarquin/tarquin_results.txt")
assert "metabolite_fits" in fitting_result
assert "quality" in fitting_result
assert fitting_result["quality"]["Metab FWHM (PPM)"] == 0.04754
assert fitting_result["quality"]["Q"] == 4.048
def test_write_raw():
# lcmodel needs to know the transform properties
transform = suspect.transformation_matrix([1, 0, 0], [0, 1, 0], [0, 0, 0], [10, 10, 10])
data = suspect.MRSData(numpy.zeros(1, 'complex'),
1e-3,
123.456,
transform=transform)
mock = unittest.mock.mock_open()
with patch.object(builtins, 'open', mock):
suspect.io.lcmodel.save_raw("/home/ben/test_raw.raw", data)
#print(mock().write.mock_calls)
#handle = mock()
#print(handle.write.call_args())
def test_lcmodel_all_files():
# lcmodel needs to know the transform properties
transform = suspect.transformation_matrix([1, 0, 0], [0, 1, 0], [0, 0, 0], [10, 10, 10])
data = suspect.MRSData(numpy.zeros(1, 'complex'),
1e-3,
123.456,
transform=transform)
mock = unittest.mock.mock_open()
with patch.object(builtins, 'open', mock):
suspect.io.lcmodel.write_all_files(os.path.join(os.getcwd(), "lcmodel"),
data)
#print(mock.call_args)
#print(mock().write.mock_calls)
def test_lcmodel_read_coord():
fitting_result = suspect.io.lcmodel.read_coord("tests/test_data/lcmodel/svs_97.COORD")
assert len(fitting_result["metabolite_fits"]) == 41
def test_lcmodel_read_liver_coord():
fitting_result = suspect.io.lcmodel.read_coord("tests/test_data/lcmodel/liver.COORD")
def test_lcmodel_read_basis():
basis = suspect.io.lcmodel.read_basis("tests/test_data/lcmodel/press_30ms_3T.basis")
#print(basis)
#from matplotlib import pyplot
#met = "NAA"
#sw = 1.0 / basis["BASIS1"]["BADELT"]
#fa = numpy.linspace(0, sw, len(basis[met]["data"]))
#pyplot.plot(fa, numpy.abs(numpy.roll(basis[met]["data"], -basis[met]["ISHIFT"])))
#pyplot.show()
assert basis["BASIS1"]["BADELT"] == 0.000207357807
assert basis["BASIS1"]["NDATAB"] == 4944
assert "NAA" in basis["SPECTRA"]
def test_lcmodel_write_basis():
basis = suspect.io.lcmodel.read_basis("tests/test_data/lcmodel/press_30ms_3T.basis")
mock = unittest.mock.mock_open()
with patch.object(builtins, 'open', mock):
suspect.io.lcmodel.save_basis("/home/ben/test_raw.raw", basis)
#print(mock().write.mock_calls)
# handle = mock()
# print(handle.write.call_args())
#def test_extract_csi_fid():
# data = suspect.io.rda.load_rda("suspect/tests/test_data/CSITEST_20151028_97_1.rda")
# single_voxel = data[0, 8, 8]
# suspect.io.tarquin.save_dpt("/home/ben/test_dpt2.dpt", single_voxel)
#def test_load_sdat():
# data = suspect.io.load_sdat("suspect/tests/test_data/SS0044_214-SS0044_214-WIP_SV_P40_LOC_R1-601_act.sdat")
# assert data.te == 30
# assert data.f0 == 127.769903
# assert data.shape == (192, 2048)
def test_felix_save_mat():
data = suspect.MRSData(numpy.zeros((16, 32), dtype='complex'), 1e-3, 123.456)
mock = unittest.mock.mock_open()
with patch.object(builtins, 'open', mock):
suspect.io.felix.save_mat("test.mat", data)
#print(mock.mock_calls)
# handle = mock()
# print(handle.write.call_args()) | mit |
elkingtonmcb/scikit-learn | examples/linear_model/plot_lasso_model_selection.py | 311 | 5431 | """
===================================================
Lasso model selection: Cross-Validation / AIC / BIC
===================================================
Use the Akaike information criterion (AIC), the Bayes Information
criterion (BIC) and cross-validation to select an optimal value
of the regularization parameter alpha of the :ref:`lasso` estimator.
Results obtained with LassoLarsIC are based on AIC/BIC criteria.
Information-criterion based model selection is very fast, but it
relies on a proper estimation of degrees of freedom, are
derived for large samples (asymptotic results) and assume the model
is correct, i.e. that the data are actually generated by this model.
They also tend to break when the problem is badly conditioned
(more features than samples).
For cross-validation, we use 20-fold with 2 algorithms to compute the
Lasso path: coordinate descent, as implemented by the LassoCV class, and
Lars (least angle regression) as implemented by the LassoLarsCV class.
Both algorithms give roughly the same results. They differ with regard
to their execution speed and sources of numerical errors.
Lars computes a path solution only for each kink in the path. As a
result, it is very efficient when there are only a few kinks, which is
the case if there are few features or samples. Also, it is able to
compute the full path without setting any meta parameter. In
contrast, coordinate descent computes the path points on a pre-specified
grid (here we use the default). Thus it is more efficient if the number
of grid points is smaller than the number of kinks in the path. Such a
strategy can be interesting if the number of features is really large
and there are enough samples to select a large amount. In terms of
numerical errors, for heavily correlated variables, Lars will accumulate
more errors, while the coordinate descent algorithm will only sample the
path on a grid.
Note how the optimal value of alpha varies for each fold. This
illustrates why nested-cross validation is necessary when trying to
evaluate the performance of a method for which a parameter is chosen by
cross-validation: this choice of parameter may not be optimal for unseen
data.
"""
print(__doc__)
# Author: Olivier Grisel, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LassoCV, LassoLarsCV, LassoLarsIC
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
rng = np.random.RandomState(42)
X = np.c_[X, rng.randn(X.shape[0], 14)] # add some bad features
# normalize data as done by Lars to allow for comparison
X /= np.sqrt(np.sum(X ** 2, axis=0))
##############################################################################
# LassoLarsIC: least angle regression with BIC/AIC criterion
model_bic = LassoLarsIC(criterion='bic')
t1 = time.time()
model_bic.fit(X, y)
t_bic = time.time() - t1
alpha_bic_ = model_bic.alpha_
model_aic = LassoLarsIC(criterion='aic')
model_aic.fit(X, y)
alpha_aic_ = model_aic.alpha_
def plot_ic_criterion(model, name, color):
alpha_ = model.alpha_
alphas_ = model.alphas_
criterion_ = model.criterion_
plt.plot(-np.log10(alphas_), criterion_, '--', color=color,
linewidth=3, label='%s criterion' % name)
plt.axvline(-np.log10(alpha_), color=color, linewidth=3,
label='alpha: %s estimate' % name)
plt.xlabel('-log(alpha)')
plt.ylabel('criterion')
plt.figure()
plot_ic_criterion(model_aic, 'AIC', 'b')
plot_ic_criterion(model_bic, 'BIC', 'r')
plt.legend()
plt.title('Information-criterion for model selection (training time %.3fs)'
% t_bic)
##############################################################################
# LassoCV: coordinate descent
# Compute paths
print("Computing regularization path using the coordinate descent lasso...")
t1 = time.time()
model = LassoCV(cv=20).fit(X, y)
t_lasso_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.alphas_)
plt.figure()
ymin, ymax = 2300, 3800
plt.plot(m_log_alphas, model.mse_path_, ':')
plt.plot(m_log_alphas, model.mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha: CV estimate')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: coordinate descent '
'(train time: %.2fs)' % t_lasso_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
##############################################################################
# LassoLarsCV: least angle regression
# Compute paths
print("Computing regularization path using the Lars lasso...")
t1 = time.time()
model = LassoLarsCV(cv=20).fit(X, y)
t_lasso_lars_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.cv_alphas_)
plt.figure()
plt.plot(m_log_alphas, model.cv_mse_path_, ':')
plt.plot(m_log_alphas, model.cv_mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha CV')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: Lars (train time: %.2fs)'
% t_lasso_lars_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
plt.show()
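# Optional summary (not part of the original example): print the alpha selected by
# each criterion; `model` here is the LassoLarsCV fit from just above.
print("alpha chosen by AIC:         %f" % alpha_aic_)
print("alpha chosen by BIC:         %f" % alpha_bic_)
print("alpha chosen by LassoLarsCV: %f" % model.alpha_)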
| bsd-3-clause |
FESOM/pyfesom | pyfesom/tools/showme.py | 1 | 10117 | import click
from netCDF4 import Dataset, MFDataset, num2date
import matplotlib as mpl
mpl.use('Qt5Agg')
#%matplotlib inline
import matplotlib.pylab as plt
import numpy as np
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from cmocean import cm as cmo
from matplotlib import cm
import sys, os
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "../../"))
print(sys.path)
import pyfesom as pf
from cartopy.util import add_cyclic_point
from scipy.interpolate import griddata
import scipy.spatial.qhull as qhull
from scipy.interpolate import LinearNDInterpolator, CloughTocher2DInterpolator
from cartopy.util import add_cyclic_point
@click.command()
@click.argument('meshpath', type=click.Path(exists=True))
@click.argument('ifile', type=click.Path(exists=True))
@click.argument('variable', default='temp', required=False)
@click.option('--depth', '-d', default=0, type=click.FLOAT, show_default=True,
help='Depth in meters.')
@click.option('--box', '-b',
nargs=4,
type=(click.IntRange(-180, 180),
click.IntRange(-180, 180),
click.IntRange(-90, 90),
click.IntRange(-90, 90)),
default=(-180,180,-80,90), show_default=True,
help='Map boundaries in -180 180 -90 90 format.')
@click.option('--res', '-r', nargs=2,
type=(click.INT, click.INT),
default=(360, 170), show_default=True,
help='Number of points along each axis (for lon and lat).')
@click.option('--influence','-i', default=80000, show_default=True,
help='Radius of influence for interpolation, in meters.')
@click.option('--timestep', '-t', default=0, show_default=True,
help='Timestep from the netCDF variable, starts with 0.')
@click.option('--levels', '-l', nargs=3, type=click.FLOAT,
help='Levels for contour plot in format min max numberOfLevels.\
If not provided min/max values from data will be used with 40 levels.')
@click.option('--quiet', '-q', is_flag=True,
help='If present additional information will not be printed.')
@click.option('--ofile', '-o', type=click.Path(exists=False),
help='Path to the output figure. If present the image\
will be saved to the file instead of showing it. ')
@click.option('--mapproj','-m', type=click.Choice(['merc', 'pc', 'np', 'sp', 'rob']),
default='rob', show_default=True,
help = 'Map projection. Options are Mercator (merc), Plate Carree (pc), North Polar Stereo (np), South Polar Stereo (sp), Robinson (rob)')
@click.option('--abg', nargs=3, type=(click.FLOAT,
click.FLOAT,
click.FLOAT), default=(50, 15, -90), show_default=True,
help='Alpha, beta and gamma Euler angles. If your plots look rotated, you are using wrong abg values. Usually necessary only during the first use of the mesh.')
@click.option('--clim','-c', type=click.Choice(['phc', 'woa05', 'gdem']),
help='Select climatology to compare to. If option is set the model bias to climatology will be shown.')
@click.option('--cmap', help='Name of the colormap from cmocean package or from the standard matplotlib set. By default `Spectral_r` will be used for property plots and `balance` for bias plots.')
@click.option('--interp', type=click.Choice(['nn', 'idist', 'linear', 'cubic']),
default='nn', show_default=True,
help = 'Interpolation method. Options are nn - nearest neighbor (KDTree implementation, fast), idist - inverse distance (KDTree implementation, decent speed), linear (scipy implementation, slow) and cubic (scipy implementation, slowest and gives strange results on coarse meshes).')
@click.option('--ptype', type=click.Choice(['cf', 'pcm']), default = 'cf', show_default=True,
help = 'Plot type. Options are contourf (\'cf\') and pcolormesh (\'pcm\')')
@click.option('-k', type=click.INT, default = 5, show_default=True,
help ='k-th nearest neighbors to use. Only used when interpolation method (--interp) is idist')
def showfile(ifile, variable, depth,
meshpath, box, res, influence,
timestep, levels, quiet, ofile,
mapproj, abg, clim, cmap, interp,
ptype, k):
'''
meshpath - Path to the folder with FESOM1.4 mesh files.
ifile - Path to FESOM1.4 netCDF file.
variable - The netCDF variable to be plotted.
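Hypothetical invocation (paths are placeholders, shown only to illustrate the call signature):
python showme.py /path/to/mesh /path/to/fesom_output.nc temp -d 100 -t 0 -m np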
'''
if not quiet:
click.secho('Mesh: {}'.format(meshpath))
click.secho('File: {}'.format(ifile))
click.secho('Variable: {}'.format(variable), fg='red')
click.secho('Depth: {}'.format(depth), fg='red')
click.secho('BOX: {}'.format(box))
click.secho('Resolution: {}'.format(res))
click.secho('Influence radius: {} meters'.format(influence), fg='red')
click.secho('Timestep: {}'.format(timestep))
if levels:
click.secho('Levels: {}'.format(levels), fg='red')
else:
click.secho('Levels: auto', fg='red')
mesh = loadmeshdata(meshpath, abg)
showme(mesh, ifile, variable, depth,
box, res, influence,
timestep, levels, quiet, ofile,
mapproj, abg, clim, cmap, interp,
ptype, k)
def loadmeshdata(meshpath, abg):
mesh = pf.load_mesh(meshpath, abg=abg, usepickle=False, usejoblib=True)
return mesh
def showme(mesh, ifile, variable='temp', depth=0,
box=[-180, 180, -90, 90], res=[360, 180], influence=80000,
timestep=0, levels=None, quiet=None, ofile=None,
mapproj='rob', abg=(50, 15, -90), clim=None, cmap=None, interp='nn',
ptype='cf', k=5):
if cmap:
if cmap in cmo.cmapnames:
colormap = cmo.cmap_d[cmap]
elif cmap in plt.cm.datad:
colormap = plt.get_cmap(cmap)
else:
raise ValueError('Got unrecognised colormap name `{}`. Colormaps should be from the standard matplotlib set or from the cmocean package.'.format(cmap))
else:
if clim:
colormap = cmo.cmap_d['balance']
else:
colormap = plt.get_cmap('Spectral_r')
sstep = timestep
radius_of_influence = influence
left, right, down, up = box
lonNumber, latNumber = res
print(ifile)
flf = Dataset(ifile)
lonreg = np.linspace(left, right, lonNumber)
latreg = np.linspace(down, up, latNumber)
lonreg2, latreg2 = np.meshgrid(lonreg, latreg)
dind=(abs(mesh.zlevs-depth)).argmin()
realdepth = mesh.zlevs[dind]
level_data, nnn = pf.get_data(flf.variables[variable][sstep], mesh, realdepth)
if interp =='nn':
ofesom = pf.fesom2regular(level_data, mesh, lonreg2, latreg2, radius_of_influence=radius_of_influence)
elif interp == 'idist':
ofesom = pf.fesom2regular(level_data, mesh, lonreg2, latreg2, radius_of_influence=radius_of_influence, how = 'idist', k = k)
elif interp == 'linear':
points = np.vstack((mesh.x2, mesh.y2)).T
qh = qhull.Delaunay(points)
ofesom = LinearNDInterpolator(qh, level_data)((lonreg2, latreg2))
elif interp == 'cubic':
points = np.vstack((mesh.x2, mesh.y2)).T
qh = qhull.Delaunay(points)
ofesom = CloughTocher2DInterpolator(qh, level_data)((lonreg2, latreg2))
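# For the 'linear' and 'cubic' branches above, the Delaunay triangulation of the
# scattered mesh nodes is built once and reused by the scipy interpolator; the
# 'nn' and 'idist' branches rely on pyfesom's KDTree-based fesom2regular instead.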
if clim:
if variable=='temp':
climvar = 'T'
elif variable == 'salt':
climvar = 'S'
else:
raise ValueError('You have selected --clim/-c option, but variable `{}` is not in climatology. Acceptable values are `temp` and `salt` only.'.format(variable))
#os.path.join(os.path.dirname(__file__), "../")
pathToClim = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../../data/")
print(pathToClim)
w = pf.climatology(pathToClim, clim)
xx, yy, oclim = pf.clim2regular(w, climvar, lonreg2, latreg2, levels=[realdepth],
radius_of_influence=radius_of_influence)
oclim = oclim[0, :, :]
data = ofesom - oclim
else:
data = ofesom
if mapproj == 'merc':
ax = plt.subplot(111, projection=ccrs.Mercator())
elif mapproj == 'pc':
ax = plt.subplot(111, projection=ccrs.PlateCarree())
elif mapproj == 'np':
ax = plt.subplot(111, projection=ccrs.NorthPolarStereo())
elif mapproj == 'sp':
ax = plt.subplot(111, projection=ccrs.SouthPolarStereo())
elif mapproj == 'rob':
ax = plt.subplot(111, projection=ccrs.Robinson())
ax.set_extent([left, right, down, up], crs=ccrs.PlateCarree())
if levels:
mmin, mmax, nnum = levels
nnum = int(nnum)
else:
mmin = np.nanmin(data)
mmax = np.nanmax(data)
nnum = 40
data_levels = np.linspace(mmin, mmax, nnum)
if ptype == 'cf':
mm = ax.contourf(lonreg,\
latreg,\
data,
levels = data_levels,
transform=ccrs.PlateCarree(),
cmap=colormap,
extend='both')
elif ptype == 'pcm':
data_cyc, lon_cyc = add_cyclic_point(data, coord=lonreg)
mm = ax.pcolormesh(lon_cyc,\
latreg,\
data_cyc,
vmin = mmin,
vmax = mmax,
transform=ccrs.PlateCarree(),
cmap=colormap,
)
else:
raise ValueError('Unknown plot type {}'.format(ptype))
ax.coastlines(resolution = '50m',lw=0.5)
ax.add_feature(cfeature.GSHHSFeature(levels=[1], scale='low', facecolor='lightgray'))
cb = plt.colorbar(mm, orientation='horizontal', pad=0.03)
cb.set_label(flf.variables[variable].units)
plt.title('{} at {}m.'.format(variable, realdepth))
plt.tight_layout()
if ofile:
plt.savefig(ofile, dpi=100)
else:
plt.show()
if __name__ == '__main__':
showfile()
| mit |
walid-ahmad/TieDecay | fast_process.py | 1 | 2952 | #!/usr/bin/env python
"""\
Fast-process data set by applying decay and summation directly at discrete
time points
Record only PageRank scores (not iterations)
"""
import sys
import operator
import numpy as np
import pandas as pd
import networkx as nx
from scipy import sparse
from tqdm import *
import tieDecayMat
import storage
import prcust
if __name__ == "__main__":
try:
half_lives = sys.argv[1:]
half_lives = [int(h) for h in half_lives]
half_lives = np.array(half_lives)
print "Half lives: ", half_lives
except IndexError:
print "Please provide the half life (or half lives) as argument(s)"
sys.exit()
dataPath = '/Users/walid/Dropbox/Tie_Decay_Centrality/Data/NHSadjList'
usersPath = '/Users/walid/Dropbox/Tie_Decay_Centrality/Data/NHSusersDict'
print "Loading data..."
dataAdjList = storage.load_obj(dataPath)
usersDict = storage.load_obj(usersPath)
print "Data loaded!"
nb_users = len(usersDict)
# sort list by time to get start and end times
print "Sorting data..."
dataAdjList = sorted(dataAdjList, key=operator.itemgetter(2))
print "Data sorted!"
t_initial = dataAdjList[0][2]
t_final = dataAdjList[-1][2]
# convert to dictionary for O(1) lookup
print "Converting to dictionary..."
dataAdjDict = tieDecayMat.convert_List_to_Dict(dataAdjList)
print "Converting to dictionary done!"
# specify the number of timepoints we'll sample the data at
nb_timepoints = 1000
total_seconds = (pd.to_datetime(t_final) - pd.to_datetime(t_initial)).total_seconds()
seconds_per_sample = int(total_seconds) / nb_timepoints
sampling_range = pd.date_range(start=t_initial, end=t_final,
freq=str(seconds_per_sample)+'s')
sampling_range_plus = sampling_range[1:]
# set threshold for eliminating small values
threshold = 10**(-7)
# set associated decay values
alphas = np.log(2)/half_lives/3600
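# Convert half-lives (interpreted here as hours) into per-second decay rates:
# a tie's weight should halve after half_life hours, so alpha = ln(2) / (half_life * 3600).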
# create storage variable for centrality scores
TD_PRs = np.zeros(shape=(nb_users, nb_timepoints), dtype=np.float32)
B = sparse.csr_matrix((nb_users,nb_users), dtype=np.float32)
# now iterate through the time range
print "Alphas progress: "
for hl,alpha in tqdm(zip(half_lives, alphas)):
print "\n Sampling range progress: "
for i,t in tqdm(enumerate(sampling_range_plus)):
t1 = t-pd.Timedelta(str(seconds_per_sample)+'s')
t2 = t
B = tieDecayMat.getDecayAdjBatch(dataAdjDict, t1, t2, B, nb_users, alpha)
B = B.multiply(B>=threshold)
# create network with B_t as adj matrix
G = nx.from_scipy_sparse_matrix(B, create_using=nx.DiGraph())
pr_t = nx.pagerank(G)
for u, score in pr_t.items():
TD_PRs[u][i] = float(score)
storage.save_obj(TD_PRs, 'TD_PRs'+'_alpha_'+str(hl))
| mit |
CKPalk/SeattleCrime_DM | DataMining/Stats/Weather/append_weather.py | 1 | 1706 |
from urllib.request import urlopen
from urllib.request import Request
import json
import sys
import pandas as pd
import numpy as np
date_format = '%Y-%m-%d'
datetime_format = date_format + ' %H:%M:%S'
def appendAttrsIfNeeded( df ):
if 'Temp' not in df.columns:
df['Temp'] = np.nan
if 'Rain' not in df.columns:
df['Rain'] = np.nan
return df
def findTempAndRain( json, date ):
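# Assumed (Weather-Underground-style) JSON layout:
#   json[<YYYY-MM-DD>]['history']['observations'][hour - 1]
# with an imperial temperature under 'tempi' and a 0/1 rain flag under 'rain';
# any lookup failure falls through to the (NaN, NaN) return below.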
date_str = date.strftime( date_format )
try:
loc1 = json[date_str]
loc2 = loc1['history']
loc3 = loc2['observations']
observation = loc3[ date.hour - 1]
temperature = float( observation[ 'tempi' ] )
rain = int( observation[ 'rain' ] )
return ( temperature, rain )
except:
return ( np.nan, np.nan )
def addWeatherData( json_filename, csv_filename ):
js = json.loads( open( json_filename, 'r' ).read() )
df = pd.read_csv( csv_filename, parse_dates=['Dates'], date_parser=lambda
x: pd.datetime.strptime(x, datetime_format ) )
df = appendAttrsIfNeeded( df )
for idx, row in df.iterrows():
if pd.isnull( row['Temp'] ) and pd.isnull( row['Rain'] ):
temp_rain = findTempAndRain( js, row['Dates'] )
print( "Setting temperature {} and rain {} for row {}".format( str(temp_rain[0]), str(temp_rain[1]), str(idx) ) )
df.set_value( idx, 'Temp', temp_rain[0] )
df.set_value( idx, 'Rain', temp_rain[1] )
else:
print( "Found temperature", row['Temp'] )
print( "Found rain", row['Rain'] )
print()
df.to_csv( csv_filename, index=False )
print( "File saved" )
#response = getWeatherJSON( 9, 6, 1994 )
#print( response['history']['dailysummary'][0]['meantempi'] )
def main( argv ):
addWeatherData( argv[1], argv[2] )
if __name__ == '__main__':
main( sys.argv )
| mit |
jayhetee/BDA_py_demos | demos_pystan/pystan_demo.py | 19 | 12220 | """Bayesian Data Analysis, 3rd ed
PyStan demo
Demo for using Stan with Python interface PyStan.
"""
import numpy as np
import pystan
import matplotlib.pyplot as plt
# edit default plot settings (colours from colorbrewer2.org)
plt.rc('font', size=14)
plt.rc('lines', color='#377eb8', linewidth=2)
plt.rc('axes', color_cycle=('#377eb8','#e41a1c','#4daf4a',
'#984ea3','#ff7f00','#ffff33'))
# ====== Bernoulli model =======================================================
bernoulli_code = """
data {
int<lower=0> N;
int<lower=0,upper=1> y[N];
}
parameters {
real<lower=0,upper=1> theta;
}
model {
theta ~ beta(1,1);
for (n in 1:N)
y[n] ~ bernoulli(theta);
}
"""
data = dict(N=10, y=[0,1,0,0,1,1,1,0,1,0])
fit = pystan.stan(model_code=bernoulli_code, data=data)
print(fit)
samples = fit.extract(permuted=True)
plt.hist(samples['theta'], 50)
plt.show()
# ====== Vectorized Bernoulli model ============================================
bernoulli_code = """
data {
int<lower=0> N;
int<lower=0,upper=1> y[N];
}
parameters {
real<lower=0,upper=1> theta;
}
model {
theta ~ beta(1,1);
y ~ bernoulli(theta);
}
"""
data = dict(N=10, y=[1,1,1,0,1,1,1,0,1,1])
fit = pystan.stan(model_code=bernoulli_code, data=data)
# ====== Binomial model ========================================================
binomial_code = """
data {
int<lower=0> N;
int<lower=0> y;
}
parameters {
real<lower=0,upper=1> theta;
}
model {
theta ~ beta(1,1);
y ~ binomial(N,theta);
}
"""
data = dict(N=10, y=8)
fit = pystan.stan(model_code=binomial_code, data=data)
samples = fit.extract(permuted=True)
plt.hist(samples['theta'], 50)
plt.show()
# ====== Re-running Binomial model with new data ===============================
data = dict(N=10, y=10)
fit = pystan.stan(fit=fit, data=data)
samples = fit.extract(permuted=True)
plt.hist(samples['theta'], 50)
plt.show()
# ====== Comparison of two groups with Binomial ================================
binomial_code = """
data {
int<lower=0> N1;
int<lower=0> y1;
int<lower=0> N2;
int<lower=0> y2;
}
parameters {
real<lower=0,upper=1> theta1;
real<lower=0,upper=1> theta2;
}
transformed parameters {
real oddsratio;
oddsratio <- (theta2/(1-theta2))/(theta1/(1-theta1));
}
model {
theta1 ~ beta(1,1);
theta2 ~ beta(1,1);
y1 ~ binomial(N1,theta1);
y2 ~ binomial(N2,theta2);
}
"""
data = dict(N1=674, y1=39, N2=680, y2=22)
fit = pystan.stan(model_code=binomial_code, data=data)
samples = fit.extract(permuted=True)
plt.hist(samples['oddsratio'], 50)
plt.show()
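# Illustrative follow-up (not part of the original demo output): the posterior
# probability that group 2 has higher odds than group 1, straight from the draws.
print('Pr(oddsratio > 1) = {}'.format(np.mean(samples['oddsratio'] > 1)))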
# ====== Gaussian linear model =================================================
linear_code = """
data {
int<lower=0> N; // number of data points
vector[N] x; //
vector[N] y; //
}
parameters {
real alpha;
real beta;
real<lower=0> sigma;
}
transformed parameters {
vector[N] mu;
mu <- alpha + beta*x;
}
model {
y ~ normal(mu, sigma);
}
"""
# Data for Stan
d = np.loadtxt('kilpisjarvi-summer-temp.csv', dtype=np.double, delimiter=';',
skiprows=1)
x = np.repeat(d[:,0], 4)
y = d[:,1:5].ravel()
N = len(x)
data = dict(N=N, x=x, y=y)
# Compile and fit the model
fit = pystan.stan(model_code=linear_code, data=data)
# Plot
samples = fit.extract(permuted=True)
plt.figure(figsize=(8,10))
plt.subplot(3,1,1)
plt.plot(x,
np.percentile(samples['mu'], 50, axis=0),
color='#e41a1c',
linewidth=1
)
plt.plot(
x,
np.asarray(np.percentile(samples['mu'], [5, 95], axis=0)).T,
color='#e41a1c',
linestyle='--',
linewidth=1,
)
plt.scatter(x, y, 5, color='#377eb8')
plt.xlabel('Year')
plt.ylabel('Summer temperature at Kilpisjarvi')
plt.xlim((1952,2013))
plt.subplot(3,1,2)
plt.hist(samples['beta'], 50)
plt.xlabel('beta')
print('Pr(beta > 0) = {}'.format(np.mean(samples['beta']>0)))
plt.subplot(3,1,3)
plt.hist(samples['sigma'], 50)
plt.xlabel('sigma')
plt.tight_layout()
plt.show()
# ====== Gaussian linear model with adjustable priors ==========================
linear_code = """
data {
int<lower=0> N; // number of data points
vector[N] x; //
vector[N] y; //
real pmualpha; // prior mean for alpha
real psalpha; // prior std for alpha
real pmubeta; // prior mean for beta
real psbeta; // prior std for beta
}
parameters {
real alpha;
real beta;
real<lower=0> sigma;
}
transformed parameters {
vector[N] mu;
mu <- alpha + beta*x;
}
model {
alpha ~ normal(pmualpha,psalpha);
beta ~ normal(pmubeta,psbeta);
y ~ normal(mu, sigma);
}
"""
# Data for Stan
d = np.loadtxt('kilpisjarvi-summer-temp.csv', dtype=np.double, delimiter=';',
skiprows=1)
x = np.repeat(d[:,0], 4)
y = d[:,1:5].ravel()
N = len(x)
data = dict(
N = N,
x = x,
y = y,
pmualpha = y.mean(), # Centered
psalpha = (14-4)/6.0, # avg temp between 4-14
    pmubeta = 0,          # a priori, an increase and a decrease are equally likely
    psbeta = (0.1 - (-0.1))/6.0 # avg temp probably does not increase more than 1
# degree per 10 years
)
# Compile and fit the model
fit = pystan.stan(model_code=linear_code, data=data)
# Plot
samples = fit.extract(permuted=True)
plt.figure(figsize=(8,10))
plt.subplot(3,1,1)
plt.plot(x,
np.percentile(samples['mu'], 50, axis=0),
color='#e41a1c',
linewidth=1
)
plt.plot(
x,
np.asarray(np.percentile(samples['mu'], [5, 95], axis=0)).T,
color='#e41a1c',
linestyle='--',
linewidth=1,
)
plt.scatter(x, y, 5, color='#377eb8')
plt.xlabel('Year')
plt.ylabel('Summer temperature at Kilpisjarvi')
plt.xlim((1952,2013))
plt.subplot(3,1,2)
plt.hist(samples['beta'], 50)
plt.xlabel('beta')
print('Pr(beta > 0) = {}'.format(np.mean(samples['beta']>0)))
plt.subplot(3,1,3)
plt.hist(samples['sigma'], 50)
plt.xlabel('sigma')
plt.tight_layout()
plt.show()
# ====== Gaussian linear model with standardized data ==========================
linear_code = """
data {
int<lower=0> N; // number of data points
vector[N] x; //
vector[N] y; //
}
transformed data {
vector[N] x_std;
vector[N] y_std;
x_std <- (x - mean(x)) / sd(x);
y_std <- (y - mean(y)) / sd(y);
}
parameters {
real alpha;
real beta;
real<lower=0> sigma_std;
}
transformed parameters {
vector[N] mu_std;
mu_std <- alpha + beta*x_std;
}
model {
alpha ~ normal(0,1);
beta ~ normal(0,1);
y_std ~ normal(mu_std, sigma_std);
}
generated quantities {
vector[N] mu;
real<lower=0> sigma;
mu <- mean(y) + mu_std*sd(y);
sigma <- sigma_std*sd(y);
}
"""
# Data for Stan
data_path = '../utilities_and_data/kilpisjarvi-summer-temp.csv'
d = np.loadtxt(data_path, dtype=np.double, delimiter=';', skiprows=1)
x = np.repeat(d[:,0], 4)
y = d[:,1:5].ravel()
N = len(x)
data = dict(N = N, x = x, y = y)
# Compile and fit the model
fit = pystan.stan(model_code=linear_code, data=data)
# Plot
samples = fit.extract(permuted=True)
plt.figure(figsize=(8,10))
plt.subplot(3,1,1)
plt.plot(x,
np.percentile(samples['mu'], 50, axis=0),
color='#e41a1c',
linewidth=1
)
plt.plot(
x,
np.asarray(np.percentile(samples['mu'], [5, 95], axis=0)).T,
color='#e41a1c',
linestyle='--',
linewidth=1,
)
plt.scatter(x, y, 5, color='#377eb8')
plt.xlabel('Year')
plt.ylabel('Summer temperature at Kilpisjarvi')
plt.xlim((1952,2013))
plt.subplot(3,1,2)
plt.hist(samples['beta'], 50)
plt.xlabel('beta')
print('Pr(beta > 0) = {}'.format(np.mean(samples['beta']>0)))
plt.subplot(3,1,3)
plt.hist(samples['sigma'], 50)
plt.xlabel('sigma')
plt.tight_layout()
plt.show()
# ====== Gaussian linear student-t model =======================================
linear_code = """
data {
int<lower=0> N; // number of data points
vector[N] x; //
vector[N] y; //
}
parameters {
real alpha;
real beta;
real<lower=0> sigma;
real<lower=1,upper=80> nu;
}
transformed parameters {
vector[N] mu;
mu <- alpha + beta*x;
}
model {
nu ~ gamma(2,0.1); // Juarez and Steel (2010)
y ~ student_t(nu, mu, sigma);
}
"""
# Data for Stan
data_path = '../utilities_and_data/kilpisjarvi-summer-temp.csv'
d = np.loadtxt(data_path, dtype=np.double, delimiter=';', skiprows=1)
x = np.repeat(d[:,0], 4)
y = d[:,1:5].ravel()
N = len(x)
data = dict(N = N, x = x, y = y)
# Compile and fit the model
fit = pystan.stan(model_code=linear_code, data=data)
# Plot
samples = fit.extract(permuted=True)
plt.figure(figsize=(8,12))
plt.subplot(4,1,1)
plt.plot(x,
np.percentile(samples['mu'], 50, axis=0),
color='#e41a1c',
linewidth=1
)
plt.plot(
x,
np.asarray(np.percentile(samples['mu'], [5, 95], axis=0)).T,
color='#e41a1c',
linestyle='--',
linewidth=1,
)
plt.scatter(x, y, 5, color='#377eb8')
plt.xlabel('Year')
plt.ylabel('Summer temperature at Kilpisjarvi')
plt.xlim((1952,2013))
plt.subplot(4,1,2)
plt.hist(samples['beta'], 50)
plt.xlabel('beta')
print('Pr(beta > 0) = {}'.format(np.mean(samples['beta']>0)))
plt.subplot(4,1,3)
plt.hist(samples['sigma'], 50)
plt.xlabel('sigma')
plt.subplot(4,1,4)
plt.hist(samples['nu'], 50)
plt.xlabel('nu')
plt.tight_layout()
plt.show()
# ====== Comparison of k groups (ANOVA) ========================================
group_code = """
data {
int<lower=0> N; // number of data points
int<lower=0> K; // number of groups
int<lower=1,upper=K> x[N]; // group indicator
vector[N] y; //
}
parameters {
vector[K] mu; // group means
vector<lower=0>[K] sigma; // group stds
}
model {
for (n in 1:N)
y[n] ~ normal(mu[x[n]], sigma[x[n]]);
}
"""
# Data for Stan
data_path = '../utilities_and_data/kilpisjarvi-summer-temp.csv'
d = np.loadtxt(data_path, dtype=np.double, delimiter=';', skiprows=1)
# Is there difference between different summer months?
x = np.tile(np.arange(1,5), d.shape[0]) # summer months are numbered from 1 to 4
y = d[:,1:5].ravel()
N = len(x)
data = dict(
N = N,
K = 4, # 4 groups
x = x, # group indicators
y = y # observations
)
# Compile and fit the model
fit = pystan.stan(model_code=group_code, data=data)
# Analyse results
mu = fit.extract(permuted=True)['mu']
# Matrix of probabilities that one mu is larger than other
ps = np.zeros((4,4))
for k1 in range(4):
for k2 in range(k1+1,4):
ps[k1,k2] = np.mean(mu[:,k1]>mu[:,k2])
ps[k2,k1] = 1 - ps[k1,k2]
print("Matrix of probabilities that one mu is larger than other:")
print(ps)
# Plot
plt.boxplot(mu)
plt.show()
# ====== Hierarchical prior model for comparison of k groups (ANOVA) ===========
# results do not differ much from the previous ones, because there are only a
# few groups and quite a lot of data per group, but this works as an example anyway
hier_code = """
data {
int<lower=0> N; // number of data points
int<lower=0> K; // number of groups
int<lower=1,upper=K> x[N]; // group indicator
vector[N] y; //
}
parameters {
real mu0; // prior mean
real<lower=0> sigma0; // prior std
vector[K] mu; // group means
vector<lower=0>[K] sigma; // group stds
}
model {
mu0 ~ normal(10,10); // weakly informative prior
sigma0 ~ cauchy(0,4); // weakly informative prior
mu ~ normal(mu0, sigma0); // population prior with unknown parameters
for (n in 1:N)
y[n] ~ normal(mu[x[n]], sigma[x[n]]);
}
"""
# Data for Stan
data_path = '../utilities_and_data/kilpisjarvi-summer-temp.csv'
d = np.loadtxt(data_path, dtype=np.double, delimiter=';', skiprows=1)
# Is there difference between different summer months?
x = np.tile(np.arange(1,5), d.shape[0]) # summer months are numbered from 1 to 4
y = d[:,1:5].ravel()
N = len(x)
data = dict(
N = N,
K = 4, # 4 groups
x = x, # group indicators
y = y # observations
)
# Compile and fit the model
fit = pystan.stan(model_code=hier_code, data=data)
# Analyse results
samples = fit.extract(permuted=True)
print("std(mu0): {}".format(np.std(samples['mu0'])))
mu = samples['mu']
# Matrix of probabilities that one mu is larger than other
ps = np.zeros((4,4))
for k1 in range(4):
for k2 in range(k1+1,4):
ps[k1,k2] = np.mean(mu[:,k1]>mu[:,k2])
ps[k2,k1] = 1 - ps[k1,k2]
print("Matrix of probabilities that one mu is larger than other:")
print(ps)
# Plot
plt.boxplot(mu)
plt.show()
| gpl-3.0 |
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/pandas/computation/expressions.py | 7 | 7788 | """
Expressions
-----------
Offer fast expression evaluation through numexpr
"""
import warnings
import numpy as np
from pandas.core.common import _values_from_object
from pandas.computation import _NUMEXPR_INSTALLED
if _NUMEXPR_INSTALLED:
import numexpr as ne
_TEST_MODE = None
_TEST_RESULT = None
_USE_NUMEXPR = _NUMEXPR_INSTALLED
_evaluate = None
_where = None
# the set of dtypes that we will allow pass to numexpr
_ALLOWED_DTYPES = {
'evaluate': set(['int64', 'int32', 'float64', 'float32', 'bool']),
'where': set(['int64', 'float64', 'bool'])
}
# the minimum prod shape that we will use numexpr
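# (below this size, the overhead of dispatching to numexpr tends to outweigh its benefit)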
_MIN_ELEMENTS = 10000
def set_use_numexpr(v=True):
# set/unset to use numexpr
global _USE_NUMEXPR
if _NUMEXPR_INSTALLED:
_USE_NUMEXPR = v
# choose what we are going to do
global _evaluate, _where
if not _USE_NUMEXPR:
_evaluate = _evaluate_standard
_where = _where_standard
else:
_evaluate = _evaluate_numexpr
_where = _where_numexpr
def set_numexpr_threads(n=None):
# if we are using numexpr, set the threads to n
# otherwise reset
if _NUMEXPR_INSTALLED and _USE_NUMEXPR:
if n is None:
n = ne.detect_number_of_cores()
ne.set_num_threads(n)
def _evaluate_standard(op, op_str, a, b, raise_on_error=True, **eval_kwargs):
""" standard evaluation """
if _TEST_MODE:
_store_test_result(False)
with np.errstate(all='ignore'):
return op(a, b)
def _can_use_numexpr(op, op_str, a, b, dtype_check):
""" return a boolean if we WILL be using numexpr """
if op_str is not None:
# required min elements (otherwise we are adding overhead)
if np.prod(a.shape) > _MIN_ELEMENTS:
# check for dtype compatiblity
dtypes = set()
for o in [a, b]:
if hasattr(o, 'get_dtype_counts'):
s = o.get_dtype_counts()
if len(s) > 1:
return False
dtypes |= set(s.index)
elif isinstance(o, np.ndarray):
dtypes |= set([o.dtype.name])
# allowed are a superset
if not len(dtypes) or _ALLOWED_DTYPES[dtype_check] >= dtypes:
return True
return False
def _evaluate_numexpr(op, op_str, a, b, raise_on_error=False, truediv=True,
reversed=False, **eval_kwargs):
result = None
if _can_use_numexpr(op, op_str, a, b, 'evaluate'):
try:
# we were originally called by a reversed op
# method
if reversed:
a, b = b, a
a_value = getattr(a, "values", a)
b_value = getattr(b, "values", b)
result = ne.evaluate('a_value %s b_value' % op_str,
local_dict={'a_value': a_value,
'b_value': b_value},
casting='safe', truediv=truediv,
**eval_kwargs)
except ValueError as detail:
if 'unknown type object' in str(detail):
pass
except Exception as detail:
if raise_on_error:
raise
if _TEST_MODE:
_store_test_result(result is not None)
if result is None:
result = _evaluate_standard(op, op_str, a, b, raise_on_error)
return result
def _where_standard(cond, a, b, raise_on_error=True):
return np.where(_values_from_object(cond), _values_from_object(a),
_values_from_object(b))
def _where_numexpr(cond, a, b, raise_on_error=False):
result = None
if _can_use_numexpr(None, 'where', a, b, 'where'):
try:
cond_value = getattr(cond, 'values', cond)
a_value = getattr(a, 'values', a)
b_value = getattr(b, 'values', b)
result = ne.evaluate('where(cond_value, a_value, b_value)',
local_dict={'cond_value': cond_value,
'a_value': a_value,
'b_value': b_value},
casting='safe')
except ValueError as detail:
if 'unknown type object' in str(detail):
pass
except Exception as detail:
if raise_on_error:
raise TypeError(str(detail))
if result is None:
result = _where_standard(cond, a, b, raise_on_error)
return result
# turn myself on
set_use_numexpr(True)
def _has_bool_dtype(x):
try:
return x.dtype == bool
except AttributeError:
try:
return 'bool' in x.blocks
except AttributeError:
return isinstance(x, (bool, np.bool_))
def _bool_arith_check(op_str, a, b, not_allowed=frozenset(('/', '//', '**')),
unsupported=None):
if unsupported is None:
unsupported = {'+': '|', '*': '&', '-': '^'}
if _has_bool_dtype(a) and _has_bool_dtype(b):
if op_str in unsupported:
warnings.warn("evaluating in Python space because the %r operator"
" is not supported by numexpr for the bool "
"dtype, use %r instead" % (op_str,
unsupported[op_str]))
return False
if op_str in not_allowed:
raise NotImplementedError("operator %r not implemented for bool "
"dtypes" % op_str)
return True
def evaluate(op, op_str, a, b, raise_on_error=False, use_numexpr=True,
**eval_kwargs):
""" evaluate and return the expression of the op on a and b
Parameters
----------
op : the actual operand
op_str: the string version of the op
a : left operand
b : right operand
raise_on_error : pass the error to the higher level if indicated
(default is False), otherwise evaluate the op with and
return the results
use_numexpr : whether to try to use numexpr (default True)
"""
use_numexpr = use_numexpr and _bool_arith_check(op_str, a, b)
if use_numexpr:
return _evaluate(op, op_str, a, b, raise_on_error=raise_on_error,
**eval_kwargs)
return _evaluate_standard(op, op_str, a, b, raise_on_error=raise_on_error)
def where(cond, a, b, raise_on_error=False, use_numexpr=True):
""" evaluate the where condition cond on a and b
Parameters
----------
cond : a boolean array
a : return if cond is True
b : return if cond is False
raise_on_error : pass the error to the higher level if indicated
(default is False), otherwise evaluate the op with and
return the results
use_numexpr : whether to try to use numexpr (default True)
"""
if use_numexpr:
return _where(cond, a, b, raise_on_error=raise_on_error)
return _where_standard(cond, a, b, raise_on_error=raise_on_error)
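# Rough usage sketch, for orientation only (pandas calls these helpers
# internally; `df1` and `df2` below are hypothetical operands):
#
#   import operator
#   summed = evaluate(operator.add, '+', df1, df2)  # numexpr when large enough
#   picked = where(df1 > 0, df1, df2)               # elementwise selection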
def set_test_mode(v=True):
"""
Keeps track of whether numexpr was used. Stores an additional ``True``
for every successful use of evaluate with numexpr since the last
``get_test_result``
"""
global _TEST_MODE, _TEST_RESULT
_TEST_MODE = v
_TEST_RESULT = []
def _store_test_result(used_numexpr):
global _TEST_RESULT
if used_numexpr:
_TEST_RESULT.append(used_numexpr)
def get_test_result():
"""get test result and reset test_results"""
global _TEST_RESULT
res = _TEST_RESULT
_TEST_RESULT = []
return res
| apache-2.0 |
PythonProgramming/Support-Vector-Machines---Basics-and-Fundamental-Investing-Project | p12.py | 1 | 8235 | import pandas as pd
import os
import time
from datetime import datetime
import re
from time import mktime
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import style
style.use("dark_background")
# path = "X:/Backups/intraQuarter" # for Windows with X files :)
# if git clone'ed then use relative path,
# assuming you extracted the downloaded zip into this project's folder:
path = "intraQuarter"
def Key_Stats(
gather=[
"Total Debt/Equity",
'Trailing P/E',
'Price/Sales',
'Price/Book',
'Profit Margin',
'Operating Margin',
'Return on Assets',
'Return on Equity',
'Revenue Per Share',
'Market Cap',
'Enterprise Value',
'Forward P/E',
'PEG Ratio',
'Enterprise Value/Revenue',
'Enterprise Value/EBITDA',
'Revenue',
'Gross Profit',
'EBITDA',
'Net Income Avl to Common ',
'Diluted EPS',
'Earnings Growth',
'Revenue Growth',
'Total Cash',
'Total Cash Per Share',
'Total Debt',
'Current Ratio',
'Book Value Per Share',
'Cash Flow',
'Beta',
'Held by Insiders',
'Held by Institutions',
'Shares Short (as of',
'Short Ratio',
'Short % of Float',
'Shares Short (prior '
]
):
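    '''Walk the saved key-statistics HTML snapshots under intraQuarter/_KeyStats
    (one folder per ticker), extract the features listed in `gather`, compare
    each stock's price change with the S&P 500 over the same period, and write
    the combined table to key_stats.csv.'''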
statspath = path+'/_KeyStats'
stock_list = [x[0] for x in os.walk(statspath)]
df = pd.DataFrame(
columns = [
'Date',
'Unix',
'Ticker',
'Price',
'stock_p_change',
'SP500',
'sp500_p_change',
'Difference',
##############
'DE Ratio',
'Trailing P/E',
'Price/Sales',
'Price/Book',
'Profit Margin',
'Operating Margin',
'Return on Assets',
'Return on Equity',
'Revenue Per Share',
'Market Cap',
'Enterprise Value',
'Forward P/E',
'PEG Ratio',
'Enterprise Value/Revenue',
'Enterprise Value/EBITDA',
'Revenue',
'Gross Profit',
'EBITDA',
'Net Income Avl to Common ',
'Diluted EPS',
'Earnings Growth',
'Revenue Growth',
'Total Cash',
'Total Cash Per Share',
'Total Debt',
'Current Ratio',
'Book Value Per Share',
'Cash Flow',
'Beta',
'Held by Insiders',
'Held by Institutions',
'Shares Short (as of',
'Short Ratio',
'Short % of Float',
'Shares Short (prior ',
##############
'Status'
]
)
sp500_df = pd.DataFrame.from_csv("YAHOO-INDEX_GSPC.csv")
ticker_list = []
for each_dir in stock_list[1:]:
each_file = os.listdir(each_dir)
# ticker = each_dir.split("\\")[1] # Windows only
# ticker = each_dir.split("/")[1] # this didn't work so do this:
ticker = os.path.basename(os.path.normpath(each_dir))
# print(ticker) # uncomment to verify
ticker_list.append(ticker)
starting_stock_value = False
starting_sp500_value = False
if len(each_file) > 0:
for file in each_file:
date_stamp = datetime.strptime(file, '%Y%m%d%H%M%S.html')
unix_time = time.mktime(date_stamp.timetuple())
full_file_path = each_dir+'/'+file
source = open(full_file_path,'r').read()
try:
value_list = []
for each_data in gather:
try:
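                            # grab the first numeric value (optionally with an
                            # M/B suffix, or N/A) in the table cell after this label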
regex = re.escape(each_data) + r'.*?(\d{1,8}\.\d{1,8}M?B?|N/A)%?</td>'
value = re.search(regex, source)
value = (value.group(1))
if "B" in value:
value = float(value.replace("B",''))*1000000000
elif "M" in value:
value = float(value.replace("M",''))*1000000
value_list.append(value)
except Exception as e:
value = "N/A"
value_list.append(value)
try:
sp500_date = datetime.fromtimestamp(unix_time).strftime('%Y-%m-%d')
row = sp500_df[(sp500_df.index == sp500_date)]
sp500_value = float(row["Adjusted Close"])
except:
sp500_date = datetime.fromtimestamp(unix_time-259200).strftime('%Y-%m-%d')
row = sp500_df[(sp500_df.index == sp500_date)]
sp500_value = float(row["Adjusted Close"])
try:
stock_price = float(source.split('</small><big><b>')[1].split('</b></big>')[0])
except Exception as e:
# <span id="yfs_l10_afl">43.27</span>
try:
stock_price = (source.split('</small><big><b>')[1].split('</b></big>')[0])
stock_price = re.search(r'(\d{1,8}\.\d{1,8})',stock_price)
stock_price = float(stock_price.group(1))
#print(stock_price)
except Exception as e:
try:
stock_price = (source.split('<span class="time_rtq_ticker">')[1].split('</span>')[0])
stock_price = re.search(r'(\d{1,8}\.\d{1,8})',stock_price)
stock_price = float(stock_price.group(1))
except Exception as e:
print(str(e),'stock_price 3rd try+except',file,ticker)
#print('Latest:',stock_price)
#print('stock price',str(e),ticker,file)
#time.sleep(15)
#print("stock_price:",stock_price,"ticker:", ticker)
if not starting_stock_value:
starting_stock_value = stock_price
if not starting_sp500_value:
starting_sp500_value = sp500_value
stock_p_change = ((stock_price - starting_stock_value) / starting_stock_value) * 100
sp500_p_change = ((sp500_value - starting_sp500_value) / starting_sp500_value) * 100
difference = stock_p_change - sp500_p_change
if difference > 0:
status = "outperform"
else:
status = "underperform"
if value_list.count("N/A") > 0:
pass
else:
df = df.append(
{
'Date':date_stamp,
'Unix':unix_time,
'Ticker':ticker,
'Price':stock_price,
'stock_p_change':stock_p_change,
'SP500':sp500_value,
'sp500_p_change':sp500_p_change,
'Difference':difference,
'DE Ratio':value_list[0],
#'Market Cap':value_list[1],
'Trailing P/E':value_list[1],
'Price/Sales':value_list[2],
'Price/Book':value_list[3],
'Profit Margin':value_list[4],
'Operating Margin':value_list[5],
'Return on Assets':value_list[6],
'Return on Equity':value_list[7],
'Revenue Per Share':value_list[8],
'Market Cap':value_list[9],
'Enterprise Value':value_list[10],
'Forward P/E':value_list[11],
'PEG Ratio':value_list[12],
'Enterprise Value/Revenue':value_list[13],
'Enterprise Value/EBITDA':value_list[14],
'Revenue':value_list[15],
'Gross Profit':value_list[16],
'EBITDA':value_list[17],
'Net Income Avl to Common ':value_list[18],
'Diluted EPS':value_list[19],
'Earnings Growth':value_list[20],
'Revenue Growth':value_list[21],
'Total Cash':value_list[22],
'Total Cash Per Share':value_list[23],
'Total Debt':value_list[24],
'Current Ratio':value_list[25],
'Book Value Per Share':value_list[26],
'Cash Flow':value_list[27],
'Beta':value_list[28],
'Held by Insiders':value_list[29],
'Held by Institutions':value_list[30],
'Shares Short (as of':value_list[31],
'Short Ratio':value_list[32],
'Short % of Float':value_list[33],
'Shares Short (prior ':value_list[34],
'Status':status
},
ignore_index=True)
except Exception as e:
pass
df.to_csv("key_stats.csv")
Key_Stats()
| mit |
harisbal/pandas | pandas/tests/tseries/test_holiday.py | 16 | 16104 | import pytest
from datetime import datetime
import pandas.util.testing as tm
from pandas import compat
from pandas import DatetimeIndex
from pandas.tseries.holiday import (USFederalHolidayCalendar, USMemorialDay,
USThanksgivingDay, nearest_workday,
next_monday_or_tuesday, next_monday,
previous_friday, sunday_to_monday, Holiday,
DateOffset, MO, SA, Timestamp,
AbstractHolidayCalendar, get_calendar,
HolidayCalendarFactory, next_workday,
previous_workday, before_nearest_workday,
EasterMonday, GoodFriday,
after_nearest_workday, weekend_to_monday,
USLaborDay, USColumbusDay,
USMartinLutherKingJr, USPresidentsDay)
from pytz import utc
class TestCalendar(object):
def setup_method(self, method):
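        # observed dates of the ten US federal holidays in 2012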
self.holiday_list = [
datetime(2012, 1, 2),
datetime(2012, 1, 16),
datetime(2012, 2, 20),
datetime(2012, 5, 28),
datetime(2012, 7, 4),
datetime(2012, 9, 3),
datetime(2012, 10, 8),
datetime(2012, 11, 12),
datetime(2012, 11, 22),
datetime(2012, 12, 25)]
self.start_date = datetime(2012, 1, 1)
self.end_date = datetime(2012, 12, 31)
def test_calendar(self):
calendar = USFederalHolidayCalendar()
holidays = calendar.holidays(self.start_date, self.end_date)
holidays_1 = calendar.holidays(
self.start_date.strftime('%Y-%m-%d'),
self.end_date.strftime('%Y-%m-%d'))
holidays_2 = calendar.holidays(
Timestamp(self.start_date),
Timestamp(self.end_date))
assert list(holidays.to_pydatetime()) == self.holiday_list
assert list(holidays_1.to_pydatetime()) == self.holiday_list
assert list(holidays_2.to_pydatetime()) == self.holiday_list
def test_calendar_caching(self):
# Test for issue #9552
class TestCalendar(AbstractHolidayCalendar):
def __init__(self, name=None, rules=None):
super(TestCalendar, self).__init__(name=name, rules=rules)
jan1 = TestCalendar(rules=[Holiday('jan1', year=2015, month=1, day=1)])
jan2 = TestCalendar(rules=[Holiday('jan2', year=2015, month=1, day=2)])
tm.assert_index_equal(jan1.holidays(), DatetimeIndex(['01-Jan-2015']))
tm.assert_index_equal(jan2.holidays(), DatetimeIndex(['02-Jan-2015']))
def test_calendar_observance_dates(self):
# Test for issue 11477
USFedCal = get_calendar('USFederalHolidayCalendar')
holidays0 = USFedCal.holidays(datetime(2015, 7, 3), datetime(
2015, 7, 3)) # <-- same start and end dates
holidays1 = USFedCal.holidays(datetime(2015, 7, 3), datetime(
2015, 7, 6)) # <-- different start and end dates
holidays2 = USFedCal.holidays(datetime(2015, 7, 3), datetime(
2015, 7, 3)) # <-- same start and end dates
tm.assert_index_equal(holidays0, holidays1)
tm.assert_index_equal(holidays0, holidays2)
def test_rule_from_name(self):
USFedCal = get_calendar('USFederalHolidayCalendar')
assert USFedCal.rule_from_name('Thanksgiving') == USThanksgivingDay
class TestHoliday(object):
def setup_method(self, method):
self.start_date = datetime(2011, 1, 1)
self.end_date = datetime(2020, 12, 31)
def check_results(self, holiday, start, end, expected):
assert list(holiday.dates(start, end)) == expected
# Verify that timezone info is preserved.
assert (list(holiday.dates(utc.localize(Timestamp(start)),
utc.localize(Timestamp(end)))) ==
[utc.localize(dt) for dt in expected])
def test_usmemorialday(self):
self.check_results(holiday=USMemorialDay,
start=self.start_date,
end=self.end_date,
expected=[
datetime(2011, 5, 30),
datetime(2012, 5, 28),
datetime(2013, 5, 27),
datetime(2014, 5, 26),
datetime(2015, 5, 25),
datetime(2016, 5, 30),
datetime(2017, 5, 29),
datetime(2018, 5, 28),
datetime(2019, 5, 27),
datetime(2020, 5, 25),
], )
def test_non_observed_holiday(self):
self.check_results(
Holiday('July 4th Eve', month=7, day=3),
start="2001-01-01",
end="2003-03-03",
expected=[
Timestamp('2001-07-03 00:00:00'),
Timestamp('2002-07-03 00:00:00')
]
)
self.check_results(
Holiday('July 4th Eve', month=7, day=3, days_of_week=(0, 1, 2, 3)),
start="2001-01-01",
end="2008-03-03",
expected=[
Timestamp('2001-07-03 00:00:00'),
Timestamp('2002-07-03 00:00:00'),
Timestamp('2003-07-03 00:00:00'),
Timestamp('2006-07-03 00:00:00'),
Timestamp('2007-07-03 00:00:00'),
]
)
def test_easter(self):
self.check_results(EasterMonday,
start=self.start_date,
end=self.end_date,
expected=[
Timestamp('2011-04-25 00:00:00'),
Timestamp('2012-04-09 00:00:00'),
Timestamp('2013-04-01 00:00:00'),
Timestamp('2014-04-21 00:00:00'),
Timestamp('2015-04-06 00:00:00'),
Timestamp('2016-03-28 00:00:00'),
Timestamp('2017-04-17 00:00:00'),
Timestamp('2018-04-02 00:00:00'),
Timestamp('2019-04-22 00:00:00'),
Timestamp('2020-04-13 00:00:00'),
], )
self.check_results(GoodFriday,
start=self.start_date,
end=self.end_date,
expected=[
Timestamp('2011-04-22 00:00:00'),
Timestamp('2012-04-06 00:00:00'),
Timestamp('2013-03-29 00:00:00'),
Timestamp('2014-04-18 00:00:00'),
Timestamp('2015-04-03 00:00:00'),
Timestamp('2016-03-25 00:00:00'),
Timestamp('2017-04-14 00:00:00'),
Timestamp('2018-03-30 00:00:00'),
Timestamp('2019-04-19 00:00:00'),
Timestamp('2020-04-10 00:00:00'),
], )
def test_usthanksgivingday(self):
self.check_results(USThanksgivingDay,
start=self.start_date,
end=self.end_date,
expected=[
datetime(2011, 11, 24),
datetime(2012, 11, 22),
datetime(2013, 11, 28),
datetime(2014, 11, 27),
datetime(2015, 11, 26),
datetime(2016, 11, 24),
datetime(2017, 11, 23),
datetime(2018, 11, 22),
datetime(2019, 11, 28),
datetime(2020, 11, 26),
], )
def test_holidays_within_dates(self):
# Fix holiday behavior found in #11477
# where holiday.dates returned dates outside start/end date
# or observed rules could not be applied as the holiday
# was not in the original date range (e.g., 7/4/2015 -> 7/3/2015)
start_date = datetime(2015, 7, 1)
end_date = datetime(2015, 7, 1)
calendar = get_calendar('USFederalHolidayCalendar')
new_years = calendar.rule_from_name('New Years Day')
july_4th = calendar.rule_from_name('July 4th')
veterans_day = calendar.rule_from_name('Veterans Day')
christmas = calendar.rule_from_name('Christmas')
# Holiday: (start/end date, holiday)
holidays = {USMemorialDay: ("2015-05-25", "2015-05-25"),
USLaborDay: ("2015-09-07", "2015-09-07"),
USColumbusDay: ("2015-10-12", "2015-10-12"),
USThanksgivingDay: ("2015-11-26", "2015-11-26"),
USMartinLutherKingJr: ("2015-01-19", "2015-01-19"),
USPresidentsDay: ("2015-02-16", "2015-02-16"),
GoodFriday: ("2015-04-03", "2015-04-03"),
EasterMonday: [("2015-04-06", "2015-04-06"),
("2015-04-05", [])],
new_years: [("2015-01-01", "2015-01-01"),
("2011-01-01", []),
("2010-12-31", "2010-12-31")],
july_4th: [("2015-07-03", "2015-07-03"),
("2015-07-04", [])],
veterans_day: [("2012-11-11", []),
("2012-11-12", "2012-11-12")],
christmas: [("2011-12-25", []),
("2011-12-26", "2011-12-26")]}
for rule, dates in compat.iteritems(holidays):
empty_dates = rule.dates(start_date, end_date)
assert empty_dates.tolist() == []
if isinstance(dates, tuple):
dates = [dates]
for start, expected in dates:
if len(expected):
expected = [Timestamp(expected)]
self.check_results(rule, start, start, expected)
def test_argument_types(self):
holidays = USThanksgivingDay.dates(self.start_date, self.end_date)
holidays_1 = USThanksgivingDay.dates(
self.start_date.strftime('%Y-%m-%d'),
self.end_date.strftime('%Y-%m-%d'))
holidays_2 = USThanksgivingDay.dates(
Timestamp(self.start_date),
Timestamp(self.end_date))
tm.assert_index_equal(holidays, holidays_1)
tm.assert_index_equal(holidays, holidays_2)
def test_special_holidays(self):
base_date = [datetime(2012, 5, 28)]
holiday_1 = Holiday('One-Time', year=2012, month=5, day=28)
holiday_2 = Holiday('Range', month=5, day=28,
start_date=datetime(2012, 1, 1),
end_date=datetime(2012, 12, 31),
offset=DateOffset(weekday=MO(1)))
assert base_date == holiday_1.dates(self.start_date, self.end_date)
assert base_date == holiday_2.dates(self.start_date, self.end_date)
def test_get_calendar(self):
class TestCalendar(AbstractHolidayCalendar):
rules = []
calendar = get_calendar('TestCalendar')
assert TestCalendar == calendar.__class__
def test_factory(self):
class_1 = HolidayCalendarFactory('MemorialDay',
AbstractHolidayCalendar,
USMemorialDay)
class_2 = HolidayCalendarFactory('Thansksgiving',
AbstractHolidayCalendar,
USThanksgivingDay)
class_3 = HolidayCalendarFactory('Combined', class_1, class_2)
assert len(class_1.rules) == 1
assert len(class_2.rules) == 1
assert len(class_3.rules) == 2
class TestObservanceRules(object):
def setup_method(self, method):
self.we = datetime(2014, 4, 9)
self.th = datetime(2014, 4, 10)
self.fr = datetime(2014, 4, 11)
self.sa = datetime(2014, 4, 12)
self.su = datetime(2014, 4, 13)
self.mo = datetime(2014, 4, 14)
self.tu = datetime(2014, 4, 15)
def test_next_monday(self):
assert next_monday(self.sa) == self.mo
assert next_monday(self.su) == self.mo
def test_next_monday_or_tuesday(self):
assert next_monday_or_tuesday(self.sa) == self.mo
assert next_monday_or_tuesday(self.su) == self.tu
assert next_monday_or_tuesday(self.mo) == self.tu
def test_previous_friday(self):
assert previous_friday(self.sa) == self.fr
assert previous_friday(self.su) == self.fr
def test_sunday_to_monday(self):
assert sunday_to_monday(self.su) == self.mo
def test_nearest_workday(self):
assert nearest_workday(self.sa) == self.fr
assert nearest_workday(self.su) == self.mo
assert nearest_workday(self.mo) == self.mo
def test_weekend_to_monday(self):
assert weekend_to_monday(self.sa) == self.mo
assert weekend_to_monday(self.su) == self.mo
assert weekend_to_monday(self.mo) == self.mo
def test_next_workday(self):
assert next_workday(self.sa) == self.mo
assert next_workday(self.su) == self.mo
assert next_workday(self.mo) == self.tu
def test_previous_workday(self):
assert previous_workday(self.sa) == self.fr
assert previous_workday(self.su) == self.fr
assert previous_workday(self.tu) == self.mo
def test_before_nearest_workday(self):
assert before_nearest_workday(self.sa) == self.th
assert before_nearest_workday(self.su) == self.fr
assert before_nearest_workday(self.tu) == self.mo
def test_after_nearest_workday(self):
assert after_nearest_workday(self.sa) == self.mo
assert after_nearest_workday(self.su) == self.tu
assert after_nearest_workday(self.fr) == self.mo
class TestFederalHolidayCalendar(object):
def test_no_mlk_before_1986(self):
# see gh-10278
class MLKCalendar(AbstractHolidayCalendar):
rules = [USMartinLutherKingJr]
holidays = MLKCalendar().holidays(start='1984',
end='1988').to_pydatetime().tolist()
# Testing to make sure holiday is not incorrectly observed before 1986
assert holidays == [datetime(1986, 1, 20, 0, 0),
datetime(1987, 1, 19, 0, 0)]
def test_memorial_day(self):
class MemorialDay(AbstractHolidayCalendar):
rules = [USMemorialDay]
holidays = MemorialDay().holidays(start='1971',
end='1980').to_pydatetime().tolist()
# Fixes 5/31 error and checked manually against Wikipedia
assert holidays == [datetime(1971, 5, 31, 0, 0),
datetime(1972, 5, 29, 0, 0),
datetime(1973, 5, 28, 0, 0),
datetime(1974, 5, 27, 0, 0),
datetime(1975, 5, 26, 0, 0),
datetime(1976, 5, 31, 0, 0),
datetime(1977, 5, 30, 0, 0),
datetime(1978, 5, 29, 0, 0),
datetime(1979, 5, 28, 0, 0)]
class TestHolidayConflictingArguments(object):
def test_both_offset_observance_raises(self):
# see gh-10217
with pytest.raises(NotImplementedError):
Holiday("Cyber Monday", month=11, day=1,
offset=[DateOffset(weekday=SA(4))],
observance=next_monday)
| bsd-3-clause |
barentsen/exoplanet-charts | k2-planets/k2-planets-for-atmospheric-characterization.py | 1 | 2685 | import matplotlib.pyplot as pl
from matplotlib.ticker import MultipleLocator
from matplotlib import style
import seaborn as sns
import pandas as pd
SHOW_KEPLER = True #True
K_MAGNITUDE_CUT = 11
OUTPUT_PREFIX = 'k2-planets-for-atmospheric-characterization'
OUTPUT_SUFFIX = '.png'
if SHOW_KEPLER:
OUTPUT_PREFIX += '-with-kepler'
palette = sns.color_palette(['#f1c40f', '#2980b9'])
style.use('../styles/black.mplstyle')
k2_df = pd.read_csv('../data/k2-candidate-planets.csv')
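# keep only candidates around bright host stars (the Ks <= 11 cut quoted in the chart title)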
k2_df = k2_df[k2_df.st_k2 <= K_MAGNITUDE_CUT]
print('Plotting {} points.'.format(len(k2_df)))
fig = pl.figure(figsize=(8, 4.5))
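# shaded box: 0.5-2.5 Earth radii around stars cooler than 4000 K
# (the region called out by the annotation further down)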
pl.fill_between([.5, .5, 2.5, 2.5, .5],
[2000, 4000, 4000, 2000, 2000],
zorder=-1, alpha=0.3,
facecolor=palette[1],
lw=0)
pl.plot([.5, .5, 2.5, 2.5, .5],
[2000, 4000, 4000, 2000, 2000],
zorder=-1, alpha=1,
color='white',
lw=1.5, linestyle='dotted',
dashes=[2, 4])
grp = k2_df.groupby('epic_candname')
pl.scatter(grp.pl_rade.mean(), grp.st_teff.mean(),
lw=0.4, s=35, label='K2',
facecolor=palette[0], edgecolor='black',
zorder=30)
if SHOW_KEPLER:
kepler_df = pd.read_csv('../data/kepler-candidate-planets.csv')
kepler_df = kepler_df[kepler_df.koi_kmag <= K_MAGNITUDE_CUT]
pl.scatter(kepler_df.koi_prad, kepler_df.koi_steff,
lw=0.4, s=35,
label='Kepler',
facecolor='#2980b9',
edgecolor='black',
zorder=20)
pl.legend(bbox_to_anchor=(0., 1., 1., 0.),
loc=8,
ncol=2,
borderaxespad=0.,
handlelength=0.8,
frameon=False,
scatterpoints=3)
# Annotations
pl.annotate("Earth and Super Earth-size Candidates\n"
"Orbiting Cool Dwarfs",
style='italic',
xy=(2.5, 2900), xycoords='data',
xytext=(2.8, 2900), textcoords='data',
va="center", size=12,
arrowprops=dict(arrowstyle="-", lw=1)
)
pl.suptitle("Planet Candidates for Atmospheric Characterization (Ks < 11)")
# Axes
pl.yticks([3000, 4000, 5000, 6000, 7000],
['3,000 K', '4,000 K', '5,000 K', '6,000 K', '7,000 K'])
pl.axes().xaxis.set_minor_locator(MultipleLocator(0.5))
pl.axes().yaxis.set_minor_locator(MultipleLocator(500))
pl.xlim(0.0, 5)
pl.ylim(2250, 7000)
pl.xlabel('Planet Size Relative to Earth (Radius)')
pl.ylabel('Host Star Temperature')
pl.tight_layout(rect=(0, 0, 1, 0.92))
output_fn = OUTPUT_PREFIX + OUTPUT_SUFFIX
print('Writing {}'.format(output_fn))
pl.savefig(output_fn, dpi=200)
pl.close()
| mit |
themrmax/scikit-learn | examples/preprocessing/plot_robust_scaling.py | 85 | 2698 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Robust Scaling on Toy Data
=========================================================
Making sure that each Feature has approximately the same scale can be a
crucial preprocessing step. However, when data contains outliers,
:class:`StandardScaler <sklearn.preprocessing.StandardScaler>` can often
be misled. In such cases, it is better to use a scaler that is robust
against outliers.
Here, we demonstrate this on a toy dataset, where one single datapoint
is a large outlier.
"""
from __future__ import print_function
print(__doc__)
# Code source: Thomas Unterthiner
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import StandardScaler, RobustScaler
# Create training and test data
np.random.seed(42)
n_datapoints = 100
Cov = [[0.9, 0.0], [0.0, 20.0]]
mu1 = [100.0, -3.0]
mu2 = [101.0, -3.0]
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_train = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_train = np.vstack([X1, X2])
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_test = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_test = np.vstack([X1, X2])
X_train[0, 0] = -1000 # a fairly large outlier
# Scale data
standard_scaler = StandardScaler()
Xtr_s = standard_scaler.fit_transform(X_train)
Xte_s = standard_scaler.transform(X_test)
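# RobustScaler centers on the median and scales by the IQR, so the single
# extreme point has little influence on the transform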
robust_scaler = RobustScaler()
Xtr_r = robust_scaler.fit_transform(X_train)
Xte_r = robust_scaler.transform(X_test)
# Plot data
fig, ax = plt.subplots(1, 3, figsize=(12, 4))
ax[0].scatter(X_train[:, 0], X_train[:, 1],
color=np.where(Y_train > 0, 'r', 'b'))
ax[1].scatter(Xtr_s[:, 0], Xtr_s[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[2].scatter(Xtr_r[:, 0], Xtr_r[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[0].set_title("Unscaled data")
ax[1].set_title("After standard scaling (zoomed in)")
ax[2].set_title("After robust scaling (zoomed in)")
# for the scaled data, we zoom in to the data center (outlier can't be seen!)
for a in ax[1:]:
a.set_xlim(-3, 3)
a.set_ylim(-3, 3)
plt.tight_layout()
plt.show()
# Classify using k-NN
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(Xtr_s, Y_train)
acc_s = knn.score(Xte_s, Y_test)
print("Testset accuracy using standard scaler: %.3f" % acc_s)
knn.fit(Xtr_r, Y_train)
acc_r = knn.score(Xte_r, Y_test)
print("Testset accuracy using robust scaler: %.3f" % acc_r)
| bsd-3-clause |
openelections/openelections-data-nv | scripts/nv-parser.py | 1 | 3163 | from __future__ import division, print_function
""" Parse Nevada election results into OpenElection format
available here:
http://nvsos.gov/sos/elections/election-information/precinct-level-results
"""
import pandas as pd
import requests
from nameparser import HumanName
positions = ['President', 'U.S. Senate', 'U.S. House',
'State Senate','State House', 'Governor', 'Secretary of State',
'Attorney General']
header_map = {'jurisdiction':'county', 'precinct':'precinct',
'contest':'office', 'district':'district', 'party':'party',
'selection':'candidate', 'votes':'votes'}
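# maps the source file's column headers onto the OpenElections output schema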
def parser(file, **kwargs):
""" Generic parser for NV election results
"""
# read in the file
df = pd.read_csv(file, **kwargs)
# assign headers in lowercase
df.columns = [x.lower() for x in df.columns]
# extract district information from the contest column
df['district'] = df['contest'].str.extract('(\d{1,3})', expand=True)
df['precinct'] = df['precinct'].str.extract('(\d{1,3})', expand=False)
df.loc[df['contest'].str.contains(
'(Republican)', case=False), 'party'] = 'Republican'
df.loc[df['contest'].str.contains(
'(Democratic)', case=False), 'party'] = 'Democratic'
df.contest.unique()
# clean up office descriptions
df.loc[df['contest'].str.contains(
'PRESIDENT AND VICE', case=False), 'contest'] = 'President'
df.loc[df['contest'].str.contains(
'Governor', case=False), 'contest'] = 'Governor'
df.loc[df['contest'].str.contains(
'UNITED STATES SENATOR', case=False), 'contest'] = 'U.S. Senate'
df.loc[df['contest'].str.contains(
'U.S. REPRESENTATIVE', case=False), 'contest'] = 'U.S. House'
df.loc[df['contest'].str.contains(
'Lieutenant Governor', case=False), 'contest'] = 'Lieutenant Governor'
df.loc[df['contest'].str.contains(
'Governor', case=False), 'contest'] = 'Governor'
df.loc[df['contest'].str.contains(
'Attorney General', case=False), 'contest'] = 'Attorney General'
df.loc[df['contest'].str.contains(
'Secretary Of State', case=False), 'contest'] = 'Secretary of State'
df.loc[df['contest'].str.contains(
'STATE ASSEMBLY', case=False), 'contest'] = 'State House'
df.loc[df['contest'].str.contains(
'STATE SENATE', case=False), 'contest'] = 'State Senate'
# select only the positions of interest
df = df[df['contest'].isin(positions)].copy()
df.votes = pd.to_numeric(df.votes, errors='coerce')
# reverse naming convention from last, first to first last
names = []
for index, row in df.iterrows():
name = HumanName(row['selection'])
names.append(" ".join([name.first, name.last, name.suffix]))
names = pd.Series(names)
df.selection = names.values
df.selection = df.selection.str.title()
df.selection = df.selection.str.strip()
df = df.rename(columns = header_map)
return df
if __name__ == '__main__':
df = parser('scripts/primary_2018.csv')
df.to_csv('2018/20180612__nv__primary__precinct.csv',
index=False, float_format='%.0f')
| mit |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/indexes/timedeltas/test_tools.py | 2 | 8150 | from datetime import time, timedelta
import numpy as np
import pytest
from pandas._libs.tslib import iNaT
import pandas as pd
from pandas import Series, TimedeltaIndex, isna, to_timedelta
import pandas.util.testing as tm
from pandas.util.testing import assert_series_equal
class TestTimedeltas:
def test_to_timedelta(self):
def conv(v):
return v.astype("m8[ns]")
d1 = np.timedelta64(1, "D")
with tm.assert_produces_warning(FutureWarning):
assert to_timedelta("1 days 06:05:01.00003", box=False) == conv(
d1
+ np.timedelta64(6 * 3600 + 5 * 60 + 1, "s")
+ np.timedelta64(30, "us")
)
with tm.assert_produces_warning(FutureWarning):
assert to_timedelta("15.5us", box=False) == conv(
np.timedelta64(15500, "ns")
)
# empty string
result = to_timedelta("", box=False)
assert result.astype("int64") == iNaT
result = to_timedelta(["", ""])
assert isna(result).all()
# pass thru
result = to_timedelta(np.array([np.timedelta64(1, "s")]))
expected = pd.Index(np.array([np.timedelta64(1, "s")]))
tm.assert_index_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
# ints
result = np.timedelta64(0, "ns")
expected = to_timedelta(0, box=False)
assert result == expected
# Series
expected = Series([timedelta(days=1), timedelta(days=1, seconds=1)])
result = to_timedelta(Series(["1d", "1days 00:00:01"]))
tm.assert_series_equal(result, expected)
# with units
result = TimedeltaIndex(
[np.timedelta64(0, "ns"), np.timedelta64(10, "s").astype("m8[ns]")]
)
expected = to_timedelta([0, 10], unit="s")
tm.assert_index_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
# single element conversion
v = timedelta(seconds=1)
result = to_timedelta(v, box=False)
expected = np.timedelta64(timedelta(seconds=1))
assert result == expected
with tm.assert_produces_warning(FutureWarning):
v = np.timedelta64(timedelta(seconds=1))
result = to_timedelta(v, box=False)
expected = np.timedelta64(timedelta(seconds=1))
assert result == expected
# arrays of various dtypes
arr = np.array([1] * 5, dtype="int64")
result = to_timedelta(arr, unit="s")
expected = TimedeltaIndex([np.timedelta64(1, "s")] * 5)
tm.assert_index_equal(result, expected)
arr = np.array([1] * 5, dtype="int64")
result = to_timedelta(arr, unit="m")
expected = TimedeltaIndex([np.timedelta64(1, "m")] * 5)
tm.assert_index_equal(result, expected)
arr = np.array([1] * 5, dtype="int64")
result = to_timedelta(arr, unit="h")
expected = TimedeltaIndex([np.timedelta64(1, "h")] * 5)
tm.assert_index_equal(result, expected)
arr = np.array([1] * 5, dtype="timedelta64[s]")
result = to_timedelta(arr)
expected = TimedeltaIndex([np.timedelta64(1, "s")] * 5)
tm.assert_index_equal(result, expected)
arr = np.array([1] * 5, dtype="timedelta64[D]")
result = to_timedelta(arr)
expected = TimedeltaIndex([np.timedelta64(1, "D")] * 5)
tm.assert_index_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
# Test with lists as input when box=false
expected = np.array(np.arange(3) * 1000000000, dtype="timedelta64[ns]")
result = to_timedelta(range(3), unit="s", box=False)
tm.assert_numpy_array_equal(expected, result)
with tm.assert_produces_warning(FutureWarning):
result = to_timedelta(np.arange(3), unit="s", box=False)
tm.assert_numpy_array_equal(expected, result)
with tm.assert_produces_warning(FutureWarning):
result = to_timedelta([0, 1, 2], unit="s", box=False)
tm.assert_numpy_array_equal(expected, result)
with tm.assert_produces_warning(FutureWarning):
# Tests with fractional seconds as input:
expected = np.array(
[0, 500000000, 800000000, 1200000000], dtype="timedelta64[ns]"
)
result = to_timedelta([0.0, 0.5, 0.8, 1.2], unit="s", box=False)
tm.assert_numpy_array_equal(expected, result)
def test_to_timedelta_invalid(self):
# bad value for errors parameter
msg = "errors must be one of"
with pytest.raises(ValueError, match=msg):
to_timedelta(["foo"], errors="never")
# these will error
msg = "invalid unit abbreviation: foo"
with pytest.raises(ValueError, match=msg):
to_timedelta([1, 2], unit="foo")
with pytest.raises(ValueError, match=msg):
to_timedelta(1, unit="foo")
# time not supported ATM
msg = (
"Value must be Timedelta, string, integer, float, timedelta or"
" convertible"
)
with pytest.raises(ValueError, match=msg):
to_timedelta(time(second=1))
assert to_timedelta(time(second=1), errors="coerce") is pd.NaT
msg = "unit abbreviation w/o a number"
with pytest.raises(ValueError, match=msg):
to_timedelta(["foo", "bar"])
tm.assert_index_equal(
TimedeltaIndex([pd.NaT, pd.NaT]),
to_timedelta(["foo", "bar"], errors="coerce"),
)
tm.assert_index_equal(
TimedeltaIndex(["1 day", pd.NaT, "1 min"]),
to_timedelta(["1 day", "bar", "1 min"], errors="coerce"),
)
# gh-13613: these should not error because errors='ignore'
invalid_data = "apple"
assert invalid_data == to_timedelta(invalid_data, errors="ignore")
invalid_data = ["apple", "1 days"]
tm.assert_numpy_array_equal(
np.array(invalid_data, dtype=object),
to_timedelta(invalid_data, errors="ignore"),
)
invalid_data = pd.Index(["apple", "1 days"])
tm.assert_index_equal(invalid_data, to_timedelta(invalid_data, errors="ignore"))
invalid_data = Series(["apple", "1 days"])
tm.assert_series_equal(
invalid_data, to_timedelta(invalid_data, errors="ignore")
)
def test_to_timedelta_via_apply(self):
# GH 5458
expected = Series([np.timedelta64(1, "s")])
result = Series(["00:00:01"]).apply(to_timedelta)
tm.assert_series_equal(result, expected)
result = Series([to_timedelta("00:00:01")])
tm.assert_series_equal(result, expected)
def test_to_timedelta_on_missing_values(self):
# GH5438
timedelta_NaT = np.timedelta64("NaT")
actual = pd.to_timedelta(Series(["00:00:01", np.nan]))
expected = Series(
[np.timedelta64(1000000000, "ns"), timedelta_NaT], dtype="<m8[ns]"
)
assert_series_equal(actual, expected)
actual = pd.to_timedelta(Series(["00:00:01", pd.NaT]))
assert_series_equal(actual, expected)
actual = pd.to_timedelta(np.nan)
assert actual.value == timedelta_NaT.astype("int64")
actual = pd.to_timedelta(pd.NaT)
assert actual.value == timedelta_NaT.astype("int64")
def test_to_timedelta_float(self):
# https://github.com/pandas-dev/pandas/issues/25077
arr = np.arange(0, 1, 1e-6)[-10:]
result = pd.to_timedelta(arr, unit="s")
expected_asi8 = np.arange(999990000, int(1e9), 1000, dtype="int64")
tm.assert_numpy_array_equal(result.asi8, expected_asi8)
def test_to_timedelta_box_deprecated(self):
result = np.timedelta64(0, "ns")
# Deprecated - see GH24416
with tm.assert_produces_warning(FutureWarning):
to_timedelta(0, box=False)
expected = to_timedelta(0).to_timedelta64()
assert result == expected
| apache-2.0 |
aroth85/aroth85.github.io | markdown_generator/publications.py | 197 | 3887 |
# coding: utf-8
# # Publications markdown generator for academicpages
#
# Takes a TSV of publications with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook, with the core python code in publications.py. Run either from the `markdown_generator` folder after replacing `publications.tsv` with one that fits your format.
#
# TODO: Make this work with BibTex and other databases of citations, rather than Stuart's non-standard TSV format and citation style.
#
# ## Data format
#
# The TSV needs to have the following columns: pub_date, title, venue, excerpt, citation, site_url, and paper_url, with a header at the top.
#
# - `excerpt` and `paper_url` can be blank, but the others must have values.
# - `pub_date` must be formatted as YYYY-MM-DD.
# - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper. The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/publications/YYYY-MM-DD-[url_slug]`
# ## Import pandas
#
# We are using the very handy pandas library for dataframes.
# In[2]:
import pandas as pd
# ## Import TSV
#
# Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`.
#
# I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.
# In[3]:
publications = pd.read_csv("publications.tsv", sep="\t", header=0)
publications
# ## Escape special characters
#
# YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML encoded equivalents. This makes them less readable in raw format, but they are parsed and rendered nicely.
# In[4]:
html_escape_table = {
    "&": "&amp;",
    '"': "&quot;",
    "'": "&apos;"
    }
def html_escape(text):
"""Produce entities within text."""
return "".join(html_escape_table.get(c,c) for c in text)
# ## Creating the markdown files
#
# This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatenate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page. If you don't want something to appear (like the "Recommended citation"), just comment out or delete the corresponding lines below.
# In[5]:
import os
for row, item in publications.iterrows():
md_filename = str(item.pub_date) + "-" + item.url_slug + ".md"
html_filename = str(item.pub_date) + "-" + item.url_slug
year = item.pub_date[:4]
## YAML variables
md = "---\ntitle: \"" + item.title + '"\n'
md += """collection: publications"""
md += """\npermalink: /publication/""" + html_filename
if len(str(item.excerpt)) > 5:
md += "\nexcerpt: '" + html_escape(item.excerpt) + "'"
md += "\ndate: " + str(item.pub_date)
md += "\nvenue: '" + html_escape(item.venue) + "'"
if len(str(item.paper_url)) > 5:
md += "\npaperurl: '" + item.paper_url + "'"
md += "\ncitation: '" + html_escape(item.citation) + "'"
md += "\n---"
## Markdown description for individual page
if len(str(item.paper_url)) > 5:
md += "\n\n<a href='" + item.paper_url + "'>Download paper here</a>\n"
if len(str(item.excerpt)) > 5:
md += "\n" + html_escape(item.excerpt) + "\n"
md += "\nRecommended citation: " + item.citation
md_filename = os.path.basename(md_filename)
with open("../_publications/" + md_filename, 'w') as f:
f.write(md)
| mit |
deepesch/scikit-learn | examples/cluster/plot_segmentation_toy.py | 258 | 3336 | """
===========================================
Spectral clustering for image segmentation
===========================================
In this example, an image with connected circles is generated and
spectral clustering is used to separate the circles.
In these settings, the :ref:`spectral_clustering` approach solves the problem
know as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut, and the volume of the region.
As the algorithm tries to balance the volume (ie balance the region
sizes), if we take circles with different sizes, the segmentation fails.
In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.
In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
###############################################################################
l = 100
x, y = np.indices((l, l))
center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)
radius1, radius2, radius3, radius4 = 16, 14, 15, 14
circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2
###############################################################################
# 4 circles
img = circle1 + circle2 + circle3 + circle4
mask = img.astype(bool)
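# the mask limits the graph to the circles themselves, so the clustering
# separates the objects from each other rather than from the background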
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(img, mask=mask)
# Take a decreasing function of the gradient: we make it only weakly
# dependent on the gradient, so the segmentation is close to a Voronoi partition
graph.data = np.exp(-graph.data / graph.data.std())
# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
###############################################################################
# 2 circles
img = circle1 + circle2
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())
labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
plt.show()
| bsd-3-clause |
pianomania/scikit-learn | benchmarks/bench_plot_ward.py | 117 | 1283 | """
Benchmark scikit-learn's Ward implementation compared to SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import matplotlib.pyplot as plt
from sklearn.cluster import AgglomerativeClustering
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
for i, n in enumerate(n_samples):
for j, p in enumerate(n_features):
X = np.random.normal(size=(n, p))
t0 = time.time()
ward.fit(X)
scikits_time[j, i] = time.time() - t0
t0 = time.time()
hierarchy.ward(X)
scipy_time[j, i] = time.time() - t0
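# ratio > 1 means scikit-learn's Ward took longer than SciPy's for that grid point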
ratio = scikits_time / scipy_time
plt.figure("scikit-learn Ward's method benchmark results")
plt.imshow(np.log(ratio), aspect='auto', origin="lower")
plt.colorbar()
plt.contour(ratio, levels=[1, ], colors='k')
plt.yticks(range(len(n_features)), n_features.astype(np.int))
plt.ylabel('N features')
plt.xticks(range(len(n_samples)), n_samples.astype(np.int))
plt.xlabel('N samples')
plt.title("Scikit's time, in units of scipy time (log)")
plt.show()
| bsd-3-clause |
RedhawkSDR/integration-gnuhawk | gnuradio/gr-utils/src/python/plot_psd_base.py | 75 | 12725 | #!/usr/bin/env python
#
# Copyright 2007,2008,2010,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
try:
import scipy
from scipy import fftpack
except ImportError:
print "Please install SciPy to run this script (http://www.scipy.org/)"
raise SystemExit, 1
try:
from pylab import *
except ImportError:
print "Please install Matplotlib to run this script (http://matplotlib.sourceforge.net/)"
raise SystemExit, 1
from optparse import OptionParser
from scipy import log10
from gnuradio.eng_option import eng_option
class plot_psd_base:
def __init__(self, datatype, filename, options):
self.hfile = open(filename, "r")
self.block_length = options.block
self.start = options.start
self.sample_rate = options.sample_rate
self.psdfftsize = options.psd_size
self.specfftsize = options.spec_size
self.dospec = options.enable_spec # if we want to plot the spectrogram
self.datatype = getattr(scipy, datatype) #scipy.complex64
self.sizeof_data = self.datatype().nbytes # number of bytes per sample in file
self.axis_font_size = 16
self.label_font_size = 18
self.title_font_size = 20
self.text_size = 22
# Setup PLOT
self.fig = figure(1, figsize=(16, 12), facecolor='w')
rcParams['xtick.labelsize'] = self.axis_font_size
rcParams['ytick.labelsize'] = self.axis_font_size
self.text_file = figtext(0.10, 0.95, ("File: %s" % filename),
weight="heavy", size=self.text_size)
self.text_file_pos = figtext(0.10, 0.92, "File Position: ",
weight="heavy", size=self.text_size)
self.text_block = figtext(0.35, 0.92, ("Block Size: %d" % self.block_length),
weight="heavy", size=self.text_size)
self.text_sr = figtext(0.60, 0.915, ("Sample Rate: %.2f" % self.sample_rate),
weight="heavy", size=self.text_size)
self.make_plots()
self.button_left_axes = self.fig.add_axes([0.45, 0.01, 0.05, 0.05], frameon=True)
self.button_left = Button(self.button_left_axes, "<")
self.button_left_callback = self.button_left.on_clicked(self.button_left_click)
self.button_right_axes = self.fig.add_axes([0.50, 0.01, 0.05, 0.05], frameon=True)
self.button_right = Button(self.button_right_axes, ">")
self.button_right_callback = self.button_right.on_clicked(self.button_right_click)
self.xlim = scipy.array(self.sp_iq.get_xlim())
self.manager = get_current_fig_manager()
connect('draw_event', self.zoom)
connect('key_press_event', self.click)
show()
def get_data(self):
self.position = self.hfile.tell()/self.sizeof_data
self.text_file_pos.set_text("File Position: %d" % self.position)
try:
self.iq = scipy.fromfile(self.hfile, dtype=self.datatype, count=self.block_length)
except MemoryError:
print "End of File"
return False
else:
# retesting length here as newer version of scipy does not throw a MemoryError, just
# returns a zero-length array
if(len(self.iq) > 0):
tstep = 1.0 / self.sample_rate
#self.time = scipy.array([tstep*(self.position + i) for i in xrange(len(self.iq))])
self.time = scipy.array([tstep*(i) for i in xrange(len(self.iq))])
self.iq_psd, self.freq = self.dopsd(self.iq)
return True
else:
print "End of File"
return False
def dopsd(self, iq):
''' Need to do this here and plot later so we can do the fftshift '''
overlap = self.psdfftsize/4
winfunc = scipy.blackman
psd,freq = mlab.psd(iq, self.psdfftsize, self.sample_rate,
window = lambda d: d*winfunc(self.psdfftsize),
noverlap = overlap)
psd = 10.0*log10(abs(psd))
return (psd, freq)
def make_plots(self):
# if specified on the command-line, set file pointer
self.hfile.seek(self.sizeof_data*self.start, 1)
iqdims = [[0.075, 0.2, 0.4, 0.6], [0.075, 0.55, 0.4, 0.3]]
psddims = [[0.575, 0.2, 0.4, 0.6], [0.575, 0.55, 0.4, 0.3]]
specdims = [0.2, 0.125, 0.6, 0.3]
# Subplot for real and imaginary parts of signal
self.sp_iq = self.fig.add_subplot(2,2,1, position=iqdims[self.dospec])
self.sp_iq.set_title(("I&Q"), fontsize=self.title_font_size, fontweight="bold")
self.sp_iq.set_xlabel("Time (s)", fontsize=self.label_font_size, fontweight="bold")
self.sp_iq.set_ylabel("Amplitude (V)", fontsize=self.label_font_size, fontweight="bold")
# Subplot for PSD plot
self.sp_psd = self.fig.add_subplot(2,2,2, position=psddims[self.dospec])
self.sp_psd.set_title(("PSD"), fontsize=self.title_font_size, fontweight="bold")
self.sp_psd.set_xlabel("Frequency (Hz)", fontsize=self.label_font_size, fontweight="bold")
self.sp_psd.set_ylabel("Power Spectrum (dBm)", fontsize=self.label_font_size, fontweight="bold")
r = self.get_data()
self.plot_iq = self.sp_iq.plot([], 'bo-') # make plot for reals
self.plot_iq += self.sp_iq.plot([], 'ro-') # make plot for imags
self.draw_time(self.time, self.iq) # draw the plot
self.plot_psd = self.sp_psd.plot([], 'b') # make plot for PSD
self.draw_psd(self.freq, self.iq_psd) # draw the plot
if self.dospec:
# Subplot for spectrogram plot
self.sp_spec = self.fig.add_subplot(2,2,3, position=specdims)
self.sp_spec.set_title(("Spectrogram"), fontsize=self.title_font_size, fontweight="bold")
self.sp_spec.set_xlabel("Time (s)", fontsize=self.label_font_size, fontweight="bold")
self.sp_spec.set_ylabel("Frequency (Hz)", fontsize=self.label_font_size, fontweight="bold")
self.draw_spec(self.time, self.iq)
draw()
def draw_time(self, t, iq):
reals = iq.real
imags = iq.imag
self.plot_iq[0].set_data([t, reals])
self.plot_iq[1].set_data([t, imags])
self.sp_iq.set_xlim(t.min(), t.max())
self.sp_iq.set_ylim([1.5*min([reals.min(), imags.min()]),
1.5*max([reals.max(), imags.max()])])
def draw_psd(self, f, p):
self.plot_psd[0].set_data([f, p])
self.sp_psd.set_ylim([p.min()-10, p.max()+10])
self.sp_psd.set_xlim([f.min(), f.max()])
def draw_spec(self, t, s):
overlap = self.specfftsize/4
winfunc = scipy.blackman
self.sp_spec.clear()
self.sp_spec.specgram(s, self.specfftsize, self.sample_rate,
window = lambda d: d*winfunc(self.specfftsize),
noverlap = overlap, xextent=[t.min(), t.max()])
def update_plots(self):
self.draw_time(self.time, self.iq)
self.draw_psd(self.freq, self.iq_psd)
if self.dospec:
self.draw_spec(self.time, self.iq)
self.xlim = scipy.array(self.sp_iq.get_xlim()) # so zoom doesn't get called
draw()
def zoom(self, event):
newxlim = scipy.array(self.sp_iq.get_xlim())
curxlim = scipy.array(self.xlim)
if(newxlim[0] != curxlim[0] or newxlim[1] != curxlim[1]):
#xmin = max(0, int(ceil(self.sample_rate*(newxlim[0] - self.position))))
#xmax = min(int(ceil(self.sample_rate*(newxlim[1] - self.position))), len(self.iq))
xmin = max(0, int(ceil(self.sample_rate*(newxlim[0]))))
xmax = min(int(ceil(self.sample_rate*(newxlim[1]))), len(self.iq))
iq = scipy.array(self.iq[xmin : xmax])
time = scipy.array(self.time[xmin : xmax])
iq_psd, freq = self.dopsd(iq)
self.draw_psd(freq, iq_psd)
self.xlim = scipy.array(self.sp_iq.get_xlim())
draw()
def click(self, event):
forward_valid_keys = [" ", "down", "right"]
backward_valid_keys = ["up", "left"]
if(find(event.key, forward_valid_keys)):
self.step_forward()
elif(find(event.key, backward_valid_keys)):
self.step_backward()
def button_left_click(self, event):
self.step_backward()
def button_right_click(self, event):
self.step_forward()
def step_forward(self):
r = self.get_data()
if(r):
self.update_plots()
def step_backward(self):
# Step back in file position
if(self.hfile.tell() >= 2*self.sizeof_data*self.block_length ):
self.hfile.seek(-2*self.sizeof_data*self.block_length, 1)
else:
self.hfile.seek(-self.hfile.tell(),1)
r = self.get_data()
if(r):
self.update_plots()
@staticmethod
def setup_options():
usage="%prog: [options] input_filename"
description = "Takes a GNU Radio binary file (with specified data type using --data-type) and displays the I&Q data versus time as well as the power spectral density (PSD) plot. The y-axis values are plotted assuming volts as the amplitude of the I&Q streams and converted into dBm in the frequency domain (the 1/N power adjustment out of the FFT is performed internally). The script plots a certain block of data at a time, specified on the command line as -B or --block. The start position in the file can be set by specifying -s or --start and defaults to 0 (the start of the file). By default, the system assumes a sample rate of 1, so in time, each sample is plotted versus the sample number. To set a true time and frequency axis, set the sample rate (-R or --sample-rate) to the sample rate used when capturing the samples. Finally, the size of the FFT to use for the PSD and spectrogram plots can be set independently with --psd-size and --spec-size, respectively. The spectrogram plot does not display by default and is turned on with -S or --enable-spec."
parser = OptionParser(option_class=eng_option, conflict_handler="resolve",
usage=usage, description=description)
parser.add_option("-d", "--data-type", type="string", default="complex64",
help="Specify the data type (complex64, float32, (u)int32, (u)int16, (u)int8) [default=%default]")
parser.add_option("-B", "--block", type="int", default=8192,
help="Specify the block size [default=%default]")
parser.add_option("-s", "--start", type="int", default=0,
help="Specify where to start in the file [default=%default]")
parser.add_option("-R", "--sample-rate", type="eng_float", default=1.0,
help="Set the sampler rate of the data [default=%default]")
parser.add_option("", "--psd-size", type="int", default=1024,
help="Set the size of the PSD FFT [default=%default]")
parser.add_option("", "--spec-size", type="int", default=256,
help="Set the size of the spectrogram FFT [default=%default]")
parser.add_option("-S", "--enable-spec", action="store_true", default=False,
help="Turn on plotting the spectrogram [default=%default]")
return parser
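# A hypothetical invocation sketch (the capture file name and sample rate below
# are made up for illustration; see the option descriptions above):
#
#   ./plot_psd_base.py -d complex64 -B 8192 -R 250e3 -S capture.dat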
def find(item_in, list_search):
try:
return list_search.index(item_in) != None
except ValueError:
return False
def main():
parser = plot_psd_base.setup_options()
(options, args) = parser.parse_args ()
if len(args) != 1:
parser.print_help()
raise SystemExit, 1
filename = args[0]
dc = plot_psd_base(options.data_type, filename, options)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
wright-group/diagram_artist | examples/WMEL_examples.py | 1 | 11119 | import matplotlib.pyplot as plt
import diagram_artist.WMEL as WMEL
#off-diagonal TRIEE-------------------------------------------------------------
off_diagonal = WMEL.diagram(size = [6, 2],
energies = [0., 0.43, 0.57, 1.],
state_names = ['g', 'a', 'b', 'a+b'])
off_diagonal.label_rows([r'$\mathrm{\alpha}$', r'$\mathrm{\beta}$', r'$\mathrm{\gamma}$'])
off_diagonal.label_columns(['I', 'II', 'III', 'IV', 'V', 'VI'])
#pw1 alpha
off_diagonal.add_arrow([0, 0], 0, [0, 1], 'ket', '1')
off_diagonal.add_arrow([0, 0], 1, [0, 2], 'bra', '-2')
off_diagonal.add_arrow([0, 0], 2, [2, 0], 'bra', '2\'')
off_diagonal.add_arrow([0, 0], 3, [1, 0], 'out')
#pw1 beta
off_diagonal.add_arrow([0, 1], 0, [0, 1], 'ket', '1')
off_diagonal.add_arrow([0, 1], 1, [0, 2], 'bra', '-2')
off_diagonal.add_arrow([0, 1], 2, [1, 3], 'ket', '2\'')
off_diagonal.add_arrow([0, 1], 3, [3, 2], 'out')
#pw2 alpha
off_diagonal.add_arrow([1, 0], 0, [0, 1], 'ket', '1')
off_diagonal.add_arrow([1, 0], 1, [1, 3], 'ket', '2\'')
off_diagonal.add_arrow([1, 0], 2, [3, 1], 'ket', '-2')
off_diagonal.add_arrow([1, 0], 3, [1, 0], 'out')
#pw2 beta
off_diagonal.add_arrow([1, 1], 0, [0, 1], 'ket', '1')
off_diagonal.add_arrow([1, 1], 1, [1, 3], 'ket', '2\'')
off_diagonal.add_arrow([1, 1], 2, [0, 2], 'bra', '-2')
off_diagonal.add_arrow([1, 1], 3, [3, 2], 'out')
#pw3 alpha
off_diagonal.add_arrow([2, 0], 0, [0, 2], 'bra', '-2')
off_diagonal.add_arrow([2, 0], 1, [0, 1], 'ket', '1')
off_diagonal.add_arrow([2, 0], 2, [2, 0], 'bra', '2\'')
off_diagonal.add_arrow([2, 0], 3, [1, 0], 'out')
#pw3 beta
off_diagonal.add_arrow([2, 1], 0, [0, 2], 'ket', '-2')
off_diagonal.add_arrow([2, 1], 1, [0, 1], 'ket', '1')
off_diagonal.add_arrow([2, 1], 2, [1, 3], 'bra', '2\'')
off_diagonal.add_arrow([2, 1], 3, [3, 2], 'out')
#pw4 alpha
off_diagonal.add_arrow([3, 0], 0, [0, 2], 'ket', '2\'')
off_diagonal.add_arrow([3, 0], 1, [2, 3], 'ket', '1')
off_diagonal.add_arrow([3, 0], 2, [3, 1], 'ket', '-2')
off_diagonal.add_arrow([3, 0], 3, [1, 0], 'out')
#pw4 beta
off_diagonal.add_arrow([3, 1], 0, [0, 2], 'ket', '2\'')
off_diagonal.add_arrow([3, 1], 1, [2, 3], 'ket', '1')
off_diagonal.add_arrow([3, 1], 2, [0, 2], 'bra', '-2')
off_diagonal.add_arrow([3, 1], 3, [3, 2], 'out')
#pw5 alpha
off_diagonal.add_arrow([4, 0], 0, [0, 2], 'bra', '-2')
off_diagonal.add_arrow([4, 0], 1, [2, 0], 'bra', '2\'')
off_diagonal.add_arrow([4, 0], 2, [0, 1], 'ket', '1')
off_diagonal.add_arrow([4, 0], 3, [1, 0], 'out')
#pw5 beta
off_diagonal.add_arrow([4, 1], 0, [0, 2], 'bra', '-2')
off_diagonal.add_arrow([4, 1], 1, [0, 2], 'ket', '2\'')
off_diagonal.add_arrow([4, 1], 2, [2, 3], 'ket', '1')
off_diagonal.add_arrow([4, 1], 3, [3, 2], 'out')
#pw6 alpha
off_diagonal.add_arrow([5, 0], 0, [0, 2], 'ket', '2\'')
off_diagonal.add_arrow([5, 0], 1, [2, 0], 'ket', '-2')
off_diagonal.add_arrow([5, 0], 2, [0, 1], 'ket', '1')
off_diagonal.add_arrow([5, 0], 3, [1, 0], 'out')
#pw6 beta
off_diagonal.add_arrow([5, 1], 0, [0, 2], 'ket', '2\'')
off_diagonal.add_arrow([5, 1], 1, [0, 2], 'bra', '-2')
off_diagonal.add_arrow([5, 1], 2, [2, 3], 'ket', '1')
off_diagonal.add_arrow([5, 1], 3, [3, 2], 'out')
off_diagonal.plot('WMEL_off_diagonal.png')
plt.close()
#on-diagonal TRIEE--------------------------------------------------------------
on_diagonal = WMEL.diagram(size = [6, 3],
energies = [0., .5, 1.],
state_names = ['g', 'a', 'b', 'a+b'])
on_diagonal.label_rows([r'$\mathrm{\alpha}$', r'$\mathrm{\beta}$', r'$\mathrm{\gamma}$'])
on_diagonal.label_columns(['I', 'II', 'III', 'IV', 'V', 'VI'])
on_diagonal.clear_diagram([1, 2])
on_diagonal.clear_diagram([3, 2])
#pw1 alpha
on_diagonal.add_arrow([0, 0], 0, [0, 1], 'ket', '1')
on_diagonal.add_arrow([0, 0], 1, [0, 1], 'bra', '-2')
on_diagonal.add_arrow([0, 0], 2, [1, 0], 'bra', '2\'')
on_diagonal.add_arrow([0, 0], 3, [1, 0], 'out')
#pw1 beta
on_diagonal.add_arrow([0, 1], 0, [0, 1], 'ket', '1')
on_diagonal.add_arrow([0, 1], 1, [0, 1], 'bra', '-2')
on_diagonal.add_arrow([0, 1], 2, [1, 2], 'ket', '2\'')
on_diagonal.add_arrow([0, 1], 3, [2, 1], 'out')
#pw1 gamma
on_diagonal.add_arrow([0, 2], 0, [0, 1], 'ket', '1')
on_diagonal.add_arrow([0, 2], 1, [1, 0], 'ket', '-2')
on_diagonal.add_arrow([0, 2], 2, [0, 1], 'ket', '2\'')
on_diagonal.add_arrow([0, 2], 3, [1, 0], 'out')
#pw2 alpha
on_diagonal.add_arrow([1, 0], 0, [0, 1], 'ket', '1')
on_diagonal.add_arrow([1, 0], 1, [1, 2], 'ket', '2\'')
on_diagonal.add_arrow([1, 0], 2, [2, 1], 'ket', '-2')
on_diagonal.add_arrow([1, 0], 3, [1, 0], 'out')
#pw2 beta
on_diagonal.add_arrow([1, 1], 0, [0, 1], 'ket', '1')
on_diagonal.add_arrow([1, 1], 1, [1, 2], 'ket', '2\'')
on_diagonal.add_arrow([1, 1], 2, [0, 1], 'bra', '-2')
on_diagonal.add_arrow([1, 1], 3, [2, 1], 'out')
#pw3 alpha
on_diagonal.add_arrow([2, 0], 0, [0, 1], 'bra', '-2')
on_diagonal.add_arrow([2, 0], 1, [0, 1], 'ket', '1')
on_diagonal.add_arrow([2, 0], 2, [1, 0], 'bra', '2\'')
on_diagonal.add_arrow([2, 0], 3, [1, 0], 'out')
#pw3 beta
on_diagonal.add_arrow([2, 1], 0, [0, 1], 'bra', '-2')
on_diagonal.add_arrow([2, 1], 1, [0, 1], 'ket', '1')
on_diagonal.add_arrow([2, 1], 2, [1, 2], 'ket', '2\'')
on_diagonal.add_arrow([2, 1], 3, [2, 1], 'out')
#pw3 gamma
on_diagonal.add_arrow([2, 2], 0, [0, 1], 'bra', '-2')
on_diagonal.add_arrow([2, 2], 1, [1, 0], 'bra', '1')
on_diagonal.add_arrow([2, 2], 2, [0, 1], 'ket', '2\'')
on_diagonal.add_arrow([2, 2], 3, [1, 0], 'out')
#pw4 alpha
on_diagonal.add_arrow([3, 0], 0, [0, 1], 'ket', '2\'')
on_diagonal.add_arrow([3, 0], 1, [1, 2], 'ket', '1')
on_diagonal.add_arrow([3, 0], 2, [2, 1], 'ket', '-2')
on_diagonal.add_arrow([3, 0], 3, [1, 0], 'out')
#pw4 beta
on_diagonal.add_arrow([3, 1], 0, [0, 1], 'ket', '2\'')
on_diagonal.add_arrow([3, 1], 1, [1, 2], 'ket', '1')
on_diagonal.add_arrow([3, 1], 2, [0, 1], 'bra', '-2')
on_diagonal.add_arrow([3, 1], 3, [2, 1], 'out')
#pw5 alpha
on_diagonal.add_arrow([4, 0], 0, [0, 1], 'bra', '-2')
on_diagonal.add_arrow([4, 0], 1, [1, 0], 'bra', '2\'')
on_diagonal.add_arrow([4, 0], 2, [0, 1], 'ket', '1')
on_diagonal.add_arrow([4, 0], 3, [1, 0], 'out')
#pw5 beta
on_diagonal.add_arrow([4, 1], 0, [0, 1], 'bra', '-2')
on_diagonal.add_arrow([4, 1], 1, [0, 1], 'ket', '2\'')
on_diagonal.add_arrow([4, 1], 2, [1, 2], 'ket', '1')
on_diagonal.add_arrow([4, 1], 3, [2, 1], 'out')
#pw5 gamma
on_diagonal.add_arrow([4, 2], 0, [0, 1], 'bra', '-2')
on_diagonal.add_arrow([4, 2], 1, [0, 1], 'ket', '2\'')
on_diagonal.add_arrow([4, 2], 2, [1, 0], 'bra', '1')
on_diagonal.add_arrow([4, 2], 3, [0, 1], 'out')
#pw6 alpha
on_diagonal.add_arrow([5, 0], 0, [0, 1], 'ket', '2\'')
on_diagonal.add_arrow([5, 0], 1, [1, 0], 'ket', '-2')
on_diagonal.add_arrow([5, 0], 2, [0, 1], 'ket', '1')
on_diagonal.add_arrow([5, 0], 3, [1, 0], 'out')
#pw6 beta
on_diagonal.add_arrow([5, 1], 0, [0, 1], 'ket', '2\'')
on_diagonal.add_arrow([5, 1], 1, [0, 1], 'bra', '-2')
on_diagonal.add_arrow([5, 1], 2, [1, 2], 'ket', '1')
on_diagonal.add_arrow([5, 1], 3, [2, 1], 'out')
#pw6 gamma
on_diagonal.add_arrow([5, 2], 0, [0, 1], 'ket', '2\'')
on_diagonal.add_arrow([5, 2], 1, [0, 1], 'bra', '-2')
on_diagonal.add_arrow([5, 2], 2, [1, 0], 'bra', '1')
on_diagonal.add_arrow([5, 2], 3, [1, 0], 'out')
on_diagonal.plot('WMEL_on_diagonal.png')
plt.close()
#TSF----------------------------------------------------------------------------
tsf = WMEL.diagram(size = [1, 1],
energies = [0., 0.15, 0.25, 1.],
state_names = ['g', 'v', 'v+v\'', 'virt'],
virtual = [3])
#pw1 alpha
tsf.add_arrow([0, 0], 0, [0, 1], 'ket', '1')
tsf.add_arrow([0, 0], 1, [1, 2], 'ket', '2')
tsf.add_arrow([0, 0], 2, [2, 3], 'ket', '800')
tsf.add_arrow([0, 0], 3, [3, 0], 'out')
tsf.plot('TSF.png')
plt.close()
#population transfer------------------------------------------------------------
pop_transfer = WMEL.diagram(size = [4, 3],
energies = [0., 0.4, 0.5, 0.8, 0.9, 1.],
number_of_interactions = 6,
state_names = ['g', '1S', '1P', '2x 1S', '1S+1P', '2x 1P'])
pop_transfer.label_rows([r'$\mathrm{\alpha}$', r'$\mathrm{\beta}$', r'$\mathrm{\gamma}$'])
pop_transfer.label_columns(['diag before', 'cross before', 'diag after', 'cross after'], font_size = 8)
pop_transfer.clear_diagram([1, 2])
pop_transfer.clear_diagram([2, 2])
#diag before alpha
pop_transfer.add_arrow([0, 0], 0, [0, 2], 'ket', '-2')
pop_transfer.add_arrow([0, 0], 1, [2, 0], 'ket', '2\'')
pop_transfer.add_arrow([0, 0], 2, [0, 2], 'ket', '1')
pop_transfer.add_arrow([0, 0], 3, [2, 0], 'out')
#diag before beta
pop_transfer.add_arrow([0, 1], 0, [0, 2], 'ket', '-2')
pop_transfer.add_arrow([0, 1], 1, [0, 2], 'bra', '2\'')
pop_transfer.add_arrow([0, 1], 2, [2, 5], 'ket', '1')
pop_transfer.add_arrow([0, 1], 3, [5, 2], 'out')
#diag before gamma
pop_transfer.add_arrow([0, 2], 0, [0, 2], 'ket', '-2')
pop_transfer.add_arrow([0, 2], 1, [0, 2], 'bra', '2\'')
pop_transfer.add_arrow([0, 2], 2, [2, 0], 'bra', '1')
pop_transfer.add_arrow([0, 2], 3, [2, 0], 'out')
#cross before alpha
pop_transfer.add_arrow([1, 0], 0, [0, 2], 'ket', '-2')
pop_transfer.add_arrow([1, 0], 1, [2, 0], 'ket', '2\'')
pop_transfer.add_arrow([1, 0], 2, [0, 1], 'ket', '1')
pop_transfer.add_arrow([1, 0], 3, [1, 0], 'out')
#cross before beta
pop_transfer.add_arrow([1, 1], 0, [0, 2], 'ket', '-2')
pop_transfer.add_arrow([1, 1], 1, [0, 2], 'bra', '2\'')
pop_transfer.add_arrow([1, 1], 2, [2, 4], 'ket', '1')
pop_transfer.add_arrow([1, 1], 3, [4, 2], 'out')
#diag after alpha
pop_transfer.add_arrow([2, 0], 0, [0, 2], 'ket', '-2')
pop_transfer.add_arrow([2, 0], 1, [2, 0], 'ket', '2\'')
pop_transfer.add_arrow([2, 0], 4, [0, 2], 'ket', '1')
pop_transfer.add_arrow([2, 0], 5, [2, 0], 'out')
#diag after beta
pop_transfer.add_arrow([2, 1], 0, [0, 2], 'ket', '-2')
pop_transfer.add_arrow([2, 1], 1, [0, 2], 'bra', '2\'')
pop_transfer.add_arrow([2, 1], 2, [2, 1], 'ket')
pop_transfer.add_arrow([2, 1], 3, [2, 1], 'bra')
pop_transfer.add_arrow([2, 1], 4, [1, 4], 'ket', '1')
pop_transfer.add_arrow([2, 1], 5, [4, 1], 'out')
#cross after alpha
pop_transfer.add_arrow([3, 0], 0, [0, 2], 'ket', '-2')
pop_transfer.add_arrow([3, 0], 1, [2, 0], 'ket', '2\'')
pop_transfer.add_arrow([3, 0], 4, [0, 1], 'ket', '1')
pop_transfer.add_arrow([3, 0], 5, [1, 0], 'out')
#cross after beta
pop_transfer.add_arrow([3, 1], 0, [0, 2], 'ket', '-2')
pop_transfer.add_arrow([3, 1], 1, [0, 2], 'bra', '2\'')
pop_transfer.add_arrow([3, 1], 2, [2, 1], 'ket')
pop_transfer.add_arrow([3, 1], 3, [2, 1], 'bra')
pop_transfer.add_arrow([3, 1], 4, [1, 3], 'ket', '1')
pop_transfer.add_arrow([3, 1], 5, [3, 1], 'out')
#cross after gamma
pop_transfer.add_arrow([3, 2], 0, [0, 2], 'ket', '-2')
pop_transfer.add_arrow([3, 2], 1, [0, 2], 'bra', '2\'')
pop_transfer.add_arrow([3, 2], 2, [2, 1], 'ket')
pop_transfer.add_arrow([3, 2], 3, [2, 1], 'bra')
pop_transfer.add_arrow([3, 2], 4, [1, 0], 'bra', '1')
pop_transfer.add_arrow([3, 2], 5, [1, 0], 'out')
pop_transfer.plot('pop_transfer.png')
plt.close() | mit |
multigcs/quadfork | Libraries/Mavlink/pymavlink/tools/mavgpslag.py | 43 | 3446 | #!/usr/bin/env python
'''
calculate GPS lag from DF log
'''
import sys, time, os
from argparse import ArgumentParser
parser = ArgumentParser(description=__doc__)
parser.add_argument("--plot", action='store_true', default=False, help="plot errors")
parser.add_argument("--minspeed", type=float, default=6, help="minimum speed")
parser.add_argument("logs", metavar="LOG", nargs="+")
args = parser.parse_args()
from pymavlink import mavutil
from pymavlink.mavextra import *
from pymavlink.rotmat import Vector3, Matrix3
'''
Support having a $HOME/.pymavlink/mavextra.py for extra graphing functions
'''
home = os.getenv('HOME')
if home is not None:
extra = os.path.join(home, '.pymavlink', 'mavextra.py')
if os.path.exists(extra):
import imp
mavuser = imp.load_source('pymavlink.mavuser', extra)
from pymavlink.mavuser import *
def velocity_error(timestamps, vel, gaccel, accel_indexes, imu_dt, shift=0):
'''return summed velocity error'''
sum = 0
count = 0
for i in range(0, len(vel)-1):
dv = vel[i+1] - vel[i]
da = Vector3()
for idx in range(1+accel_indexes[i]-shift, 1+accel_indexes[i+1]-shift):
da += gaccel[idx]
dt1 = timestamps[i+1] - timestamps[i]
dt2 = (accel_indexes[i+1] - accel_indexes[i]) * imu_dt
da *= imu_dt
da *= dt1/dt2
#print(accel_indexes[i+1] - accel_indexes[i])
ex = abs(dv.x - da.x)
ey = abs(dv.y - da.y)
sum += 0.5*(ex+ey)
count += 1
if count == 0:
return None
return sum/count
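# Editor's note on the metric above (a sketch of the equation it implements):
# for consecutive GPS fixes i and i+1,
#   dv = v_gps[i+1] - v_gps[i]
#   da = (sum of earth-frame accel over the interval, shifted by `shift` IMU
#         samples) * imu_dt * dt1/dt2
# and the score is mean(0.5 * (|dv.x - da.x| + |dv.y - da.y|)); the `shift`
# (in IMU samples) that minimises this score is reported as the GPS lag.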
def gps_lag(logfile):
'''work out gps velocity lag times for a log file'''
print("Processing log %s" % filename)
mlog = mavutil.mavlink_connection(filename)
timestamps = []
vel = []
gaccel = []
accel_indexes = []
ATT = None
IMU = None
dtsum = 0
dtcount = 0
while True:
m = mlog.recv_match(type=['GPS','IMU','ATT'])
if m is None:
break
t = m.get_type()
if t == 'GPS' and m.Status==3 and m.Spd>args.minspeed:
v = Vector3(m.Spd*cos(radians(m.GCrs)), m.Spd*sin(radians(m.GCrs)), m.VZ)
vel.append(v)
timestamps.append(m._timestamp)
accel_indexes.append(max(len(gaccel)-1,0))
elif t == 'ATT':
ATT = m
elif t == 'IMU':
if ATT is not None:
gaccel.append(earth_accel_df(m, ATT))
if IMU is not None:
dt = m._timestamp - IMU._timestamp
dtsum += dt
dtcount += 1
IMU = m
imu_dt = dtsum / dtcount
print("Loaded %u samples imu_dt=%.3f" % (len(vel), imu_dt))
besti = -1
besterr = 0
delays = []
errors = []
for i in range(0,100):
err = velocity_error(timestamps, vel, gaccel, accel_indexes, imu_dt, shift=i)
if err is None:
break
errors.append(err)
delays.append(i*imu_dt)
if besti == -1 or err < besterr:
besti = i
besterr = err
print("Best %u (%.3fs) %f" % (besti, besti*imu_dt, besterr))
if args.plot:
import matplotlib.pyplot as plt
plt.plot(delays, errors, 'bo-')
x1,x2,y1,y2 = plt.axis()
plt.axis((x1,x2,0,y2))
plt.ylabel('Error')
plt.xlabel('Delay(s)')
plt.show()
for filename in args.logs:
gps_lag(filename)
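# Hypothetical usage (the log file name is illustrative only):
#   mavgpslag.py --plot --minspeed 6 flight1.bin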
| gpl-3.0 |
inside-track/pemi | jobs/simple_job.py | 1 | 7428 | from pathlib import Path
import pandas as pd
import pemi
import pemi.pipes.csv
import pemi.pipes.dask
class JoinSalesToBeersPipe(pemi.Pipe):
def config(self):
# Maybe we only need the *REQUIRED* fields in the schemas for data subjects?
# OR!!! We don't even really need the schemas if they're just "pass-through"
# Schemas are used to do data-validation and formatting. If we don't need to
        # do validation or formatting, we don't need the schema.
# Blarg..... but we do need the schemas in order to stub the data.
# So... here's what it might look like if we did default values....
self.source(
name='sales',
schema={
'beer_id': {'type': 'integer', 'required': True}
}
)
self.source(
name='beers',
schema={
'id': {'type': 'integer', 'required': True},
'style_id': {'type': 'string'}
}
)
self.target(
name='joined',
schema={
'beer_id': {'type': 'integer', 'required': True},
'style_id': {'type': 'string'}
}
)
self.target(
name='styles',
schema={
'style_id': {'type': 'string'}
}
)
def flow(self):
sales_df = self.sources['sales'].data
beers_df = self.sources['beers'].data
joined_df = pd.merge(sales_df, beers_df, how='inner', left_on='beer_id', right_on='id')
self.targets['joined'].data = joined_df
self.targets['styles'].data = joined_df[['style_id']]
class LookupStylePipe(pemi.Pipe):
def config(self):
self.source(
name='ids',
schema={
'style_id': {'type': 'string'}
}
)
self.target(
name='names',
schema={
'style': {'type': 'string'}
}
)
self.style_dict = {
'1': 'IPA',
'2': 'Pale',
'3': 'Stout'
}
def lookup(self, key):
result = self.style_dict.get(key)
        if result is None:
result = 'Unknown id {}'.format(key)
return result
def flow(self):
self.targets['names'].data = pd.DataFrame()
self.targets['names'].data['style'] = self.sources['ids'].data['style_id'].apply(self.lookup)
class AddLookupPipe(pemi.Pipe):
def config(self):
self.source(
name='joined',
schema={
'beer_id': {'type': 'integer', 'required': True},
'sold_at': {'type': 'date', 'in_format': '%d/%m/%Y', 'required': True},
}
)
self.source(
name='style',
schema={
'style': {'type': 'string'}
}
)
self.target(
name='beer_sales',
schema={
'beer_id': {'type': 'integer', 'required': True},
'sold_at': {'type': 'date', 'in_format': '%d/%m/%Y', 'required': True},
'style': {'type': 'string'}
}
)
def flow(self):
beer_sales_df = self.sources['joined'].data
beer_sales_df['style'] = self.sources['style'].data['style']
self.targets['beer_sales'].data = beer_sales_df
class MyJob(pemi.Pipe):
def config(self):
self.schemas = {
'sources': {
'sales_file': {
'beer_id': {'type': 'integer', 'required': True},
'sold_at': {'type': 'date', 'in_format': '%m/%d/%Y', 'required': True},
'quantity': {'type': 'integer', 'required': True}
},
'beers_file': {
'id': {'type': 'integer', 'required': True},
'name': {'type': 'string', 'required': True},
'style_id': {'type': 'string'},
'abv': {'type': 'float'},
'price': {'type': 'decimal', 'precision': 16, 'scale': 2}
}
},
'targets': {
'beer_sales_file': {
'beer_id': {'type': 'integer', 'required': True},
'name': {'type': 'string', 'required': True},
'style': {'type': 'string'},
'sold_at': {'type': 'date', 'in_format': '%m/%d/%Y', 'required': True},
'quantity': {'type': 'integer', 'required': True},
'unit_price': {'type': 'decimal', 'precision': 16, 'scale': 2},
'sell_price': {'type': 'decimal', 'precision': 16, 'scale': 2}
}
}
}
# This "job" doesn't really have sources/targets
# It's just a pipe connector
# So, how do I deal with schemas? Particularly the intermediate ones?
# We could put them in the parameters of the pipes
# Or make a distinction between required and inferred schemas.....
# Named pipes
# Call these pipes? Pipes in pipes
self.pipe(
name='sales_file',
pipe=pemi.pipes.csv.LocalCsvFileSourcePipe(
schema=self.schemas['sources']['sales_file'],
paths=[Path(__file__).parent / Path('fixtures') / Path('sales.csv')],
csv_opts={
'sep': '|'
}
)
)
self.pipe(
name='beers_file',
pipe=pemi.pipes.csv.LocalCsvFileSourcePipe(
schema=self.schemas['sources']['beers_file'],
paths=[Path(__file__).parent / Path('fixtures') / Path('beers.csv')],
csv_opts={
'sep': '|'
}
)
)
self.pipe(
name='join_sales_to_beers',
pipe=JoinSalesToBeersPipe()
)
self.pipe(
name='lookup_style',
pipe=LookupStylePipe()
)
self.pipe(
name='add_lookup',
pipe=AddLookupPipe()
)
self.pipe(
name='beer_sales_file',
pipe=pemi.pipes.csv.LocalCsvFileTargetPipe(
schema = self.schemas['targets']['beer_sales_file'],
path='beer_sales.csv'
)
)
# Connections
self.connect(
self.pipes['sales_file'], 'main'
).to(
self.pipes['join_sales_to_beers'], 'sales'
)
self.connect(
self.pipes['beers_file'], 'main'
).to(
self.pipes['join_sales_to_beers'], 'beers'
)
self.connect(
self.pipes['join_sales_to_beers'], 'joined'
).to(
self.pipes['add_lookup'], 'joined'
)
self.connect(
self.pipes['join_sales_to_beers'], 'styles'
).to(
self.pipes['lookup_style'], 'ids'
)
self.connect(
self.pipes['lookup_style'], 'names'
).to(
self.pipes['add_lookup'], 'style'
)
self.connect(
self.pipes['add_lookup'], 'beer_sales'
).to(
self.pipes['beer_sales_file'], 'main'
)
self.dask = pemi.pipes.dask.DaskFlow(self.connections)
def flow(self):
self.dask.flow()
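# A minimal run sketch (illustrative only; the exact constructor arguments
# depend on the installed pemi version):
if __name__ == '__main__':
    MyJob().flow()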
| mit |
youprofit/scikit-image | doc/examples/plot_brief.py | 32 | 1879 | """
=======================
BRIEF binary descriptor
=======================
This example demonstrates the BRIEF binary description algorithm.
The descriptor consists of relatively few bits and can be computed using
a set of intensity difference tests. The short binary descriptor results
in low memory footprint and very efficient matching based on the Hamming
distance metric.
BRIEF does not provide rotation-invariance. Scale-invariance can be achieved by
detecting and extracting features at different scales.
"""
from skimage import data
from skimage import transform as tf
from skimage.feature import (match_descriptors, corner_peaks, corner_harris,
plot_matches, BRIEF)
from skimage.color import rgb2gray
import matplotlib.pyplot as plt
img1 = rgb2gray(data.astronaut())
tform = tf.AffineTransform(scale=(1.2, 1.2), translation=(0, -100))
img2 = tf.warp(img1, tform)
img3 = tf.rotate(img1, 25)
keypoints1 = corner_peaks(corner_harris(img1), min_distance=5)
keypoints2 = corner_peaks(corner_harris(img2), min_distance=5)
keypoints3 = corner_peaks(corner_harris(img3), min_distance=5)
extractor = BRIEF()
extractor.extract(img1, keypoints1)
keypoints1 = keypoints1[extractor.mask]
descriptors1 = extractor.descriptors
extractor.extract(img2, keypoints2)
keypoints2 = keypoints2[extractor.mask]
descriptors2 = extractor.descriptors
extractor.extract(img3, keypoints3)
keypoints3 = keypoints3[extractor.mask]
descriptors3 = extractor.descriptors
matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
matches13 = match_descriptors(descriptors1, descriptors3, cross_check=True)
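# Illustrative check of the docstring's claim about Hamming-distance matching:
# BRIEF descriptors are boolean arrays, so the distance between two matched
# descriptors is simply the number of differing bits.
import numpy as np
idx1, idx2 = matches12[0]
print("Hamming distance of first matched pair: %d"
      % np.count_nonzero(descriptors1[idx1] != descriptors2[idx2]))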
fig, ax = plt.subplots(nrows=2, ncols=1)
plt.gray()
plot_matches(ax[0], img1, img2, keypoints1, keypoints2, matches12)
ax[0].axis('off')
plot_matches(ax[1], img1, img3, keypoints1, keypoints3, matches13)
ax[1].axis('off')
plt.show()
| bsd-3-clause |
mfjb/scikit-learn | examples/covariance/plot_robust_vs_empirical_covariance.py | 248 | 6359 | r"""
=======================================
Robust vs Empirical covariance estimate
=======================================
The usual covariance maximum likelihood estimate is very sensitive to the
presence of outliers in the data set. In such a case, it would be better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set.
Minimum Covariance Determinant Estimator
----------------------------------------
The Minimum Covariance Determinant estimator is a robust, high-breakdown point
(i.e. it can be used to estimate the covariance matrix of highly contaminated
datasets, up to
:math:`\frac{n_\text{samples} - n_\text{features}-1}{2}` outliers) estimator of
covariance. The idea is to find
:math:`\frac{n_\text{samples} + n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant, yielding
a "pure" subset of observations from which to compute standards estimates of
location and covariance. After a correction step aiming at compensating the
fact that the estimates were learned from only a portion of the initial data,
we end up with robust estimates of the data set location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced by
P. J. Rousseeuw in [1]_.
Evaluation
----------
In this example, we compare the estimation errors that are made when using
various types of location and covariance estimates on contaminated Gaussian
distributed data sets:
- The mean and the empirical covariance of the full dataset, which break
down as soon as there are outliers in the data set
- The robust MCD, that has a low error provided
:math:`n_\text{samples} > 5n_\text{features}`
- The mean and the empirical covariance of the observations that are known
to be good ones. This can be considered as a "perfect" MCD estimation,
so one can trust our implementation by comparing to this case.
References
----------
.. [1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
.. [2] Johanna Hardin, David M Rocke. Journal of Computational and
Graphical Statistics. December 1, 2005, 14(4): 928-946.
.. [3] Zoubir A., Koivunen V., Chakhchoukh Y. and Muma M. (2012). Robust
estimation in signal processing: A tutorial-style treatment of
fundamental concepts. IEEE Signal Processing Magazine 29(4), 61-80.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.covariance import EmpiricalCovariance, MinCovDet
# example settings
n_samples = 80
n_features = 5
repeat = 10
range_n_outliers = np.concatenate(
(np.linspace(0, n_samples / 8, 5),
np.linspace(n_samples / 8, n_samples / 2, 5)[1:-1]))
# definition of arrays to store results
err_loc_mcd = np.zeros((range_n_outliers.size, repeat))
err_cov_mcd = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_full = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_full = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_pure = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_pure = np.zeros((range_n_outliers.size, repeat))
# computation
for i, n_outliers in enumerate(range_n_outliers):
for j in range(repeat):
rng = np.random.RandomState(i * j)
# generate data
X = rng.randn(n_samples, n_features)
# add some outliers
outliers_index = rng.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(np.random.randint(2, size=(n_outliers, n_features)) - 0.5)
X[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
mcd = MinCovDet().fit(X)
# compare raw robust estimates with the true location and covariance
err_loc_mcd[i, j] = np.sum(mcd.location_ ** 2)
err_cov_mcd[i, j] = mcd.error_norm(np.eye(n_features))
# compare estimators learned from the full data set with true
# parameters
err_loc_emp_full[i, j] = np.sum(X.mean(0) ** 2)
err_cov_emp_full[i, j] = EmpiricalCovariance().fit(X).error_norm(
np.eye(n_features))
# compare with an empirical covariance learned from a pure data set
# (i.e. "perfect" mcd)
pure_X = X[inliers_mask]
pure_location = pure_X.mean(0)
pure_emp_cov = EmpiricalCovariance().fit(pure_X)
err_loc_emp_pure[i, j] = np.sum(pure_location ** 2)
err_cov_emp_pure[i, j] = pure_emp_cov.error_norm(np.eye(n_features))
# Display results
font_prop = matplotlib.font_manager.FontProperties(size=11)
plt.subplot(2, 1, 1)
plt.errorbar(range_n_outliers, err_loc_mcd.mean(1),
yerr=err_loc_mcd.std(1) / np.sqrt(repeat),
label="Robust location", color='m')
plt.errorbar(range_n_outliers, err_loc_emp_full.mean(1),
yerr=err_loc_emp_full.std(1) / np.sqrt(repeat),
label="Full data set mean", color='green')
plt.errorbar(range_n_outliers, err_loc_emp_pure.mean(1),
yerr=err_loc_emp_pure.std(1) / np.sqrt(repeat),
label="Pure data set mean", color='black')
plt.title("Influence of outliers on the location estimation")
plt.ylabel(r"Error ($||\mu - \hat{\mu}||_2^2$)")
plt.legend(loc="upper left", prop=font_prop)
plt.subplot(2, 1, 2)
x_size = range_n_outliers.size
plt.errorbar(range_n_outliers, err_cov_mcd.mean(1),
yerr=err_cov_mcd.std(1),
label="Robust covariance (mcd)", color='m')
plt.errorbar(range_n_outliers[:(x_size / 5 + 1)],
err_cov_emp_full.mean(1)[:(x_size / 5 + 1)],
yerr=err_cov_emp_full.std(1)[:(x_size / 5 + 1)],
label="Full data set empirical covariance", color='green')
plt.plot(range_n_outliers[(x_size / 5):(x_size / 2 - 1)],
err_cov_emp_full.mean(1)[(x_size / 5):(x_size / 2 - 1)], color='green',
ls='--')
plt.errorbar(range_n_outliers, err_cov_emp_pure.mean(1),
yerr=err_cov_emp_pure.std(1),
label="Pure data set empirical covariance", color='black')
plt.title("Influence of outliers on the covariance estimation")
plt.xlabel("Amount of contamination (%)")
plt.ylabel("RMSE")
plt.legend(loc="upper center", prop=font_prop)
plt.show()
| bsd-3-clause |
RobertABT/heightmap | build/matplotlib/examples/widgets/menu.py | 10 | 4936 | from __future__ import division, print_function
import numpy as np
import matplotlib
import matplotlib.colors as colors
import matplotlib.patches as patches
import matplotlib.mathtext as mathtext
import matplotlib.pyplot as plt
import matplotlib.artist as artist
import matplotlib.image as image
class ItemProperties:
def __init__(self, fontsize=14, labelcolor='black', bgcolor='yellow',
alpha=1.0):
self.fontsize = fontsize
self.labelcolor = labelcolor
self.bgcolor = bgcolor
self.alpha = alpha
self.labelcolor_rgb = colors.colorConverter.to_rgb(labelcolor)
self.bgcolor_rgb = colors.colorConverter.to_rgb(bgcolor)
class MenuItem(artist.Artist):
parser = mathtext.MathTextParser("Bitmap")
padx = 5
pady = 5
def __init__(self, fig, labelstr, props=None, hoverprops=None,
on_select=None):
artist.Artist.__init__(self)
self.set_figure(fig)
self.labelstr = labelstr
if props is None:
props = ItemProperties()
if hoverprops is None:
hoverprops = ItemProperties()
self.props = props
self.hoverprops = hoverprops
self.on_select = on_select
x, self.depth = self.parser.to_mask(
labelstr, fontsize=props.fontsize, dpi=fig.dpi)
if props.fontsize!=hoverprops.fontsize:
raise NotImplementedError(
'support for different font sizes not implemented')
self.labelwidth = x.shape[1]
self.labelheight = x.shape[0]
self.labelArray = np.zeros((x.shape[0], x.shape[1], 4))
self.labelArray[:, :, -1] = x/255.
self.label = image.FigureImage(fig, origin='upper')
self.label.set_array(self.labelArray)
# we'll update these later
self.rect = patches.Rectangle((0,0), 1,1)
self.set_hover_props(False)
fig.canvas.mpl_connect('button_release_event', self.check_select)
def check_select(self, event):
over, junk = self.rect.contains(event)
if not over:
return
if self.on_select is not None:
self.on_select(self)
def set_extent(self, x, y, w, h):
print(x, y, w, h)
self.rect.set_x(x)
self.rect.set_y(y)
self.rect.set_width(w)
self.rect.set_height(h)
self.label.ox = x+self.padx
self.label.oy = y-self.depth+self.pady/2.
self.rect._update_patch_transform()
self.hover = False
def draw(self, renderer):
self.rect.draw(renderer)
self.label.draw(renderer)
def set_hover_props(self, b):
if b:
props = self.hoverprops
else:
props = self.props
r, g, b = props.labelcolor_rgb
self.labelArray[:, :, 0] = r
self.labelArray[:, :, 1] = g
self.labelArray[:, :, 2] = b
self.label.set_array(self.labelArray)
self.rect.set(facecolor=props.bgcolor, alpha=props.alpha)
def set_hover(self, event):
'check the hover status of event and return true if status is changed'
b,junk = self.rect.contains(event)
changed = (b != self.hover)
if changed:
self.set_hover_props(b)
self.hover = b
return changed
class Menu:
def __init__(self, fig, menuitems):
self.figure = fig
fig.suppressComposite = True
self.menuitems = menuitems
self.numitems = len(menuitems)
maxw = max([item.labelwidth for item in menuitems])
maxh = max([item.labelheight for item in menuitems])
totalh = self.numitems*maxh + (self.numitems+1)*2*MenuItem.pady
x0 = 100
y0 = 400
width = maxw + 2*MenuItem.padx
height = maxh+MenuItem.pady
for item in menuitems:
left = x0
bottom = y0-maxh-MenuItem.pady
item.set_extent(left, bottom, width, height)
fig.artists.append(item)
y0 -= maxh + MenuItem.pady
fig.canvas.mpl_connect('motion_notify_event', self.on_move)
def on_move(self, event):
draw = False
for item in self.menuitems:
draw = item.set_hover(event)
if draw:
self.figure.canvas.draw()
break
fig = plt.figure()
fig.subplots_adjust(left=0.3)
props = ItemProperties(labelcolor='black', bgcolor='yellow',
fontsize=15, alpha=0.2)
hoverprops = ItemProperties(labelcolor='white', bgcolor='blue',
fontsize=15, alpha=0.2)
menuitems = []
for label in ('open', 'close', 'save', 'save as', 'quit'):
def on_select(item):
print('you selected %s' % item.labelstr)
item = MenuItem(fig, label, props=props, hoverprops=hoverprops,
on_select=on_select)
menuitems.append(item)
menu = Menu(fig, menuitems)
plt.show()
| mit |
RuthAngus/K2rotation | plots/k2_rotation_plots.py | 1 | 12290 | # This script creates the K2_rotation_poster_child.pdf figure
# it can also plot the FFT of the eigen light curves,
# the 2nd K2pgram (periodogram of a 2nd frequency),
# the data conditioned on the best freqency and the top
# eigen light curves.
import numpy as np
import matplotlib.pyplot as plt
import fitsio
from K2pgram import K2pgram, eval_freq
from rotation_poster_child import max_peak_detect
import h5py
from gatspy.periodic import LombScargle
import wget
import subprocess
import scipy.signal as sps
from plotstuff import colours
cols = colours()
def read_data(epid, nbases):
# read the data
try:
data = fitsio.read("../data/c1/ktwo%s-c01_lpd-lc.fits" % epid)
except:
e = str(int(epid))
base_url = "http://bbq.dfm.io/ketu/lightcurves/c1"
url = "%s/%s00000/%s000/ktwo%s-c01_lpd-lc.fits" \
% (base_url, e[:4], e[4:6], e)
print url
wget.download(url)
subprocess.call("mv ktwo%s-c01_lpd-lc.fits ../data/c1" % epid,
shell=True)
data = fitsio.read("../data/c1/ktwo%s-c01_lpd-lc.fits" % epid)
aps = fitsio.read("../data/c1/ktwo%s-c01_lpd-lc.fits" % epid, 2)
y = data["flux"][:, np.argmin(aps["cdpp6"])]
x = data["time"]
q = data["quality"]
l = np.isfinite(y) * np.isfinite(x) * (q==0)
y, x = y[l], x[l]
y /= np.median(y)
y -= 1
# load basis
with h5py.File("../data/c1.h5", "r") as f:
basis = f["basis"][:nbases, l]
return x, y, basis
# create file containing highest amplitude frequency for all elcs
def bases_FFT(eid, nbases):
x, y, basis = read_data(eid, nbases)
fs = np.linspace(1e-6, .7, 1000)
ps = 1./fs
plt.clf()
cols = np.linspace(.1, .99, len(basis))
freqs = []
for i in range(len(basis)):
model = LombScargle().fit(x, basis[i], np.ones_like(x)*1e-5)
pgram = model.periodogram(ps)
plt.plot(fs, pgram, color="%s" % cols[i])
freqs.append(fs[pgram==max(pgram)][0])
plt.savefig("all_bases")
freqs = np.array(freqs)
np.savetxt("elc_freqs.txt", np.transpose((np.arange(nbases), freqs)))
def K2pgram2(x, y, fs, basis):
# construct arrays
AT = np.concatenate((basis, np.ones((5, len(y)))), axis=0)
ATA = np.dot(AT, AT.T)
# compute 2nd k2pgram
s2n = K2pgram2(x, y, mx, fs, AT, ATA)
plt.clf()
plt.subplot(2, 1, 1)
plt.plot(x, y, "k")
plt.xlim(min(x), max(x))
plt.xlabel("$\mathrm{BJD-2454833~(days)}$")
plt.ylabel("$\mathrm{Normalized~Flux}$")
plt.subplot(2, 1, 2)
plt.plot(fs, s2n, "k")
plt.xlabel("$\mathrm{Frequency~(days}^{-1}\mathrm{)}$")
# plt.ylabel("$\mathrm{S/N}$")
plt.ylabel("$\mathrm{Power}$")
plt.subplots_adjust(hspace=.4)
mx, my = max_peak_detect(fs, s2n)
plt.axvline(mx, color=".5", linestyle="--", label="$P_{rot}=%.2f$" % mx)
plt.savefig("../documents/K22_rotation_poster_child.pdf")
# plot the best frequencies
def plot_best(x, y, fs, AT, ATA, mx):
plt.clf()
f = mx
plt.subplot(2, 1, 1)
s2n, trends = eval_freq(x, y, f, AT, ATA, compute_trends=True)
plt.plot(x, y-trends, "k")
plt.subplot(2, 1, 2)
s2n, trends = eval_freq(x, y, f, AT, ATA, compute_trends=True)
plt.plot(x, y, x, trends)
plt.savefig("test1")
plt.clf()
f = mx
plt.subplot(2, 1, 1)
s2n, trends = eval_freq(x, y, 1./10.52, AT, ATA, compute_trends=True)
plt.plot(x, y-trends, "k")
plt.subplot(2, 1, 2)
s2n, trends = eval_freq(x, y, 1./10.52, AT, ATA, compute_trends=True)
plt.plot(x, y, x, trends)
plt.savefig("test2")
def K2_poster_child_plot(x, y, fs, s2n, epid):
plotpar = {'axes.labelsize': 16,
'text.fontsize': 16,
'legend.fontsize': 16,
'xtick.labelsize': 14,
'ytick.labelsize': 16,
'text.usetex': True}
plt.rcParams.update(plotpar)
# find highest peak
mx, my = max_peak_detect(fs, s2n)
fname = "../data/c1/ktwo%s-c01_lpd-lc.fits" % epid
data = fitsio.read(fname)
aps = fitsio.read(fname, 2)
y = data["flux"][:, np.argmin(aps["cdpp6"])]
x = data["time"]
q = data["quality"]
l = np.isfinite(y) * np.isfinite(x) * (q==0)
y, x = y[l], x[l]
MAD = np.median(y - np.median(y))
# construct arrays
AT = np.concatenate((basis, np.ones((3, len(y)))), axis=0)
ATA = np.dot(AT, AT.T)
_, _, trends = eval_freq(x, y, mx, AT, ATA, compute_trends=True)
plt.clf()
plt.subplot(2, 1, 1)
l = x < 2016
m1 = np.median(y[l])
m2 = np.median(y[~l])
plt.plot(x[l], y[l], ".7")
plt.plot(x[l], y[l]-trends[l]+m1, "k")
plt.plot(x[~l], y[~l], ".7")
plt.plot(x[~l], y[~l]-trends[~l]+m1, "k")
plt.xlim(min(x), max(x))
plt.xlabel("$\mathrm{BJD-2454833~(days)}$")
plt.ylabel("$\mathrm{Relative~Flux}$")
plt.title("$\mathrm{EPIC~%s}$" % epid)
ps2, pgram = np.genfromtxt("lspgram_%s.txt" % epid).T
plt.subplot(2, 1, 2)
if MAD == 0.: MAD = 1
signal = s2n/MAD**2
oom = - int(np.log10(signal[signal==max(signal)]))
if abs(oom) > 1:
# if epid == 201142023:
# print "plotting both", "\n"
# plt.plot(ps2, pgram*2, "r")
plt.plot(1./fs, signal*10**oom, "k")
plt.ylabel("$\mathrm{Relative~(S/N}^2\mathrm{)~(} \\times 10^%s\mathrm{)}$" % oom)
else:
# if epid == 201142023:
# print "plotting both", "\n"
# plt.plot(ps2, pgram*2, "r")
plt.plot(1./fs, signal, "k")
plt.ylabel("$\mathrm{Relative~(S/N)}^2$")
plt.xlabel("$\mathrm{Period~(days)}$")
if epid == 201142023:
plt.xscale("log")
plt.xlim(10**.2, 10**2)
else:
plt.xlim(min(1./fs), 70)
plt.subplots_adjust(left=.13, hspace=.4)
plt.axvline(1./mx, color=".5", linestyle="--",
label="$P_{max}=%.2f ~\mathrm{days}$" % (1./mx))
plt.legend(loc="best")
print "../documents/K2_rotation_%s.pdf" % epid
plt.savefig("../documents/K2_rotation_%s.pdf" % epid)
plt.savefig("K2_rotation_%s" % epid, transparent=True)
return mx
def K2_conditioned_plot(fs, epid):
x, y, basis = read_data(epid, 150)
amps2, s2n, w = K2pgram(x, y, basis, fs)
# find highest peak
mx, my = max_peak_detect(fs, s2n)
# construct arrays
AT = np.concatenate((basis, np.ones((3, len(y)))), axis=0)
ATA = np.dot(AT, AT.T)
_, _, trends = eval_freq(x, y, mx, AT, ATA, compute_trends=True)
plt.clf()
plt.subplot(2, 1, 1)
l = x < 2016
plt.plot(x[l], y[l], "k")
plt.plot(x[l], y[l]-trends[l])
plt.plot(x[~l], y[~l], "k")
plt.plot(x[~l], y[~l]-trends[~l])
plt.xlim(min(x), max(x))
plt.xlabel("$\mathrm{BJD-2454833~(days)}$")
plt.ylabel("$\mathrm{Normalized~Flux}$")
plt.subplot(2, 1, 2)
plt.plot(fs, s2n*1e5, "k")
plt.xlabel("$\mathrm{Frequency~(days}^{-1}\mathrm{)}$")
plt.ylabel(r"$\mathrm{S/N~(} \\times 10^5\mathrm{)}$")
plt.ylim(0, my*1e5)
plt.subplots_adjust(hspace=.4, bottom=.2)
plt.axvline(mx, color=".5", linestyle="--",
label="$P_{rot}=%.2f ~\mathrm{days}$" % (1./mx))
plt.legend()
plt.savefig("K2_%s" % epid)
return mx
# plot the top 5 components
def top_5(x, basis, w):
b = 3
sw = np.sort(w)
l = np.arange(len(w))[w == sw[0]][0]
print l
plt.clf()
plt.subplot(5, 1, 1)
plt.plot(x[::b], basis[l, :][::b], "k")
plt.xticks(visible=False)
plt.yticks(visible=False)
plt.xlim(x[0], x[-1])
plt.subplot(5, 1, 2)
l = np.arange(len(w))[w == sw[1]][0]
print l
plt.plot(x[::b], basis[l, :][::b], "k")
plt.yticks(visible=False)
plt.xticks(visible=False)
plt.xlim(x[0], x[-1])
plt.subplot(5, 1, 3)
l = np.arange(len(w))[w == sw[2]][0]
print l
plt.plot(x[::b], basis[l, :][::b], "k")
plt.xticks(visible=False)
plt.yticks(visible=False)
plt.xlim(x[0], x[-1])
plt.subplot(5, 1, 4)
l = np.arange(len(w))[w == sw[3]][0]
print l
plt.plot(x[::b], basis[l, :][::b], "k")
plt.xticks(visible=False)
plt.yticks(visible=False)
plt.xlim(x[0], x[-1])
plt.subplot(5, 1, 5)
l = np.arange(len(w))[w == sw[4]][0]
print l
plt.plot(x[::b], basis[l, :][::b], "k")
plt.yticks(visible=False)
plt.xlabel("$\mathrm{BJD-2454833}$")
plt.subplots_adjust(hspace=0)
plt.xlim(x[0], x[-1])
plt.savefig("../documents/%s_top5.pdf" % epid)
# plot the top 5 components
def top_5_pgram(x, basis, w):
plotpar = {'axes.labelsize': 15,
'text.fontsize': 15,
'legend.fontsize': 15,
'xtick.labelsize': 12,
'ytick.labelsize': 14,
'text.usetex': True}
plt.rcParams.update(plotpar)
x = np.array([j.astype("float64") for j in x])
b = 3
ps = np.linspace(1., 70, 500)
fs = 1./ps
sw = np.sort(w)
l = np.arange(len(w))[w == sw[0]][0]
print l
plt.clf()
plt.subplot(5, 2, 1)
plt.plot(x[::b], basis[l, :][::b], "k")
plt.xticks(visible=False)
plt.yticks(visible=False)
plt.xlim(x[0], x[-1])
plt.title("$\mathrm{Top~5~Eigen~light~curves}$")
plt.subplot(5, 2, 2)
pgram = sps.lombscargle(x, basis[l, :], 2*np.pi*fs)
# plt.plot(fs, pgram, "k")
plt.plot(ps, pgram, "k")
plt.xticks(visible=False)
plt.yticks(visible=False)
plt.xlim(min(ps), max(ps))
plt.title("$\mathrm{LS~periodograms}$")
plt.subplot(5, 2, 3)
l = np.arange(len(w))[w == sw[1]][0]
print l
plt.plot(x[::b], basis[l, :][::b], "k")
plt.yticks(visible=False)
plt.xticks(visible=False)
plt.xlim(x[0], x[-1])
plt.subplot(5, 2, 4)
pgram = sps.lombscargle(x, basis[l, :], 2*np.pi*fs)
plt.plot(ps, pgram, "k")
plt.xticks(visible=False)
plt.yticks(visible=False)
plt.xlim(min(ps), max(ps))
plt.subplot(5, 2, 5)
l = np.arange(len(w))[w == sw[2]][0]
print l
plt.plot(x[::b], basis[l, :][::b], "k")
plt.xticks(visible=False)
plt.yticks(visible=False)
plt.xlim(x[0], x[-1])
plt.subplot(5, 2, 6)
pgram = sps.lombscargle(x, basis[l, :], 2*np.pi*fs)
plt.plot(ps, pgram, "k")
plt.xticks(visible=False)
plt.yticks(visible=False)
plt.ylabel("$\mathrm{Power}$")
plt.xlim(min(ps), max(ps))
plt.subplot(5, 2, 7)
l = np.arange(len(w))[w == sw[3]][0]
print l
plt.plot(x[::b], basis[l, :][::b], "k")
plt.xticks(visible=False)
plt.yticks(visible=False)
plt.xlim(x[0], x[-1])
plt.subplot(5, 2, 8)
pgram = sps.lombscargle(x, basis[l, :], 2*np.pi*fs)
plt.plot(ps, pgram, "k")
plt.xticks(visible=False)
plt.yticks(visible=False)
plt.xlim(min(ps), max(ps))
plt.subplot(5, 2, 9)
l = np.arange(len(w))[w == sw[4]][0]
print l
plt.plot(x[::b], basis[l, :][::b], "k")
plt.yticks(visible=False)
plt.xlabel("$\mathrm{BJD-2454833 (days)}$")
plt.subplots_adjust(hspace=0)
plt.xlim(x[0], x[-1])
plt.subplot(5, 2, 10)
pgram = sps.lombscargle(x, basis[l, :], 2*np.pi*fs)
plt.plot(ps, pgram, "k")
plt.yticks(visible=False)
plt.xlabel("$\mathrm{Period (days)}$")
plt.xlim(min(ps), max(ps))
plt.subplots_adjust(left=.01, right=.97, wspace=.1)
plt.savefig("../documents/top5pgram_%s.pdf" % epid)
if __name__ == "__main__":
# epid = "201317002" # original
# epid = "201129544" # slight discrepancy between ACF and pgrams
# epid = "201132518"
# eids = [201129544, 201132518, 201133037, 201133147, 201135311, 201138638,
# 201138849, 201142023, 201142127]
eids = [201133037, 201142023]
# eids = [201142023]
for epid in eids:
x, y, basis = read_data(epid, 150)
# compute K2 pgram
try:
fs, s2n, w = np.genfromtxt("%spgram_.txt" % epid).T
print "periodogram file found"
except:
print "calculating SIP"
fs = np.linspace(1e-6, .7, 1000)
            amp2s, s2n, w = K2pgram(x, y, basis, fs)
np.savetxt("%spgram.txt" % epid, np.transpose((fs, s2n)))
K2_poster_child_plot(x, y, fs, s2n, epid)
# top_5(x, basis, w)
# top_5_pgram(x, basis, w)
# K2_conditioned_plot(fs, epid)
| mit |
egoruss/phaseom | MainPhaseOM.py | 1 | 2356 | # !python.exe
# coding: cp1251
''' -*- coding: utf-8 -*- '''
from __future__ import with_statement
import os
from datetime import datetime, date, time
from time import *
from types import *
import sip
sip.setapi('QVariant', 2)
from PyQt4 import QtCore, QtGui
from pandas import read_csv, read_excel, DataFrame
from sklearn import preprocessing
from sklearn import metrics
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score
from sklearn.feature_selection import RFE
from sklearn.cross_validation import train_test_split
def ToPrintLog (sMess):
print str(datetime.now().strftime("%d.%m.%Y %H:%M:%S ")) + str(sMess)
dataset = read_excel("EnergyEfficiency\ENB2012_data.xls", sheet_name='Data1', index_col=None, na_values=['NA'])
#dataset.head()
ToPrintLog ("Êîëè÷åñòâî íàáëþäåíèé : " + str(dataset.Y1.count()))
print dataset.head()
# List of feature names
ListPRZ = dataset.columns.tolist()
# normalize the data attributes
X = dataset[ListPRZ]
Y1 = dataset[[u'Y1']]
# X = dataset[:,0:7]
# Y1 = dataset[:,8]
# Y1 = dataset[[8]].ravel([8])
Y2 = dataset[[u'Y2']]
Y1Y2 = dataset[[u'Y1',u'Y2']]
normalized_X = preprocessing.normalize(X)
# print normalized_X
normalized_Y1 = preprocessing.normalize(Y1)
normalized_Y2 = preprocessing.normalize(Y2)
# standardize the data attributes
standardized_X = preprocessing.scale(dataset[ListPRZ])
mcorr = dataset.corr()
print "-=:: Ìàòðèöà êîððåëÿöèé ::=-"
print mcorr
mcorr.to_excel("EnergyEfficiency\ENB2012_corr.xls", sheet_name=u'Êîððåëÿöèè')
model = ExtraTreesClassifier()
# model.fit(normalized_X, Y1)
model.fit(X,Y1Y2)
# Show the importance of each predictor feature
print "-=:: Feature importances ::=-"
print(model.feature_importances_)
# Remove columns with minimal correlations with the target variables
modeli = LogisticRegression()
# create the RFE model and select 3 attributes
rfe = RFE(model, 3)
rfe = rfe.fit(X, Y1Y2)
# summarize the selection of the attributes
print(rfe.support_)
print(rfe.ranking_)
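# Illustrative sketch (hedged): rfe.support_ is a boolean mask over the columns
# of X, so the RFE-selected feature names could be listed like this:
selected = [c for c, keep in zip(X.columns, rfe.support_) if keep]
print "RFE-selected features: " + str(selected)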
# dataset = dataset.drop(['X1','X4'], axis=1)
# print dataset.head()
print (" -= :: END - ÊÎÍÅÖ :: =-") | gpl-2.0 |
lukas/scikit-class | examples/scikit/embedding.py | 2 | 1038 |
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
#import gensim
# let X be a list of tokenized texts (i.e. list of lists of tokens)
#model = gensim.models.Word2Vec(X, size=100)
#w2v = dict(zip(model.wv.index2word, model.wv.syn0))
class MeanEmbeddingVectorizer(BaseEstimator, TransformerMixin):
def __init__(self, word2vec):
self.word2vec = word2vec
# pull out a single value from the dictionary and check its dimension
# this is a little ugly to make it work in python 2 and 3
self.dim = len(next(iter(word2vec.values())))
def fit(self, X, y):
return self
def transform(self, X):
mean_vects = []
for words in X:
word_vects = []
for w in words:
if w in self.word2vec:
word_vects.append(self.word2vec[w])
mean_vect = np.mean(word_vects, axis=0)
mean_vects.append(np.array(mean_vect))
mean_vects = np.array(mean_vects)
return mean_vects
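# A minimal usage sketch: the tiny ``w2v`` dict below is made up for
# illustration -- in practice it would come from a trained word2vec model,
# as in the commented-out gensim lines above.
if __name__ == "__main__":
    w2v = {"good": np.array([1.0, 0.0]), "movie": np.array([0.0, 1.0])}
    docs = [["good", "movie"], ["good"]]
    vectorizer = MeanEmbeddingVectorizer(w2v)
    print(vectorizer.fit(docs, None).transform(docs))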
| gpl-2.0 |
Winand/pandas | pandas/tests/indexing/test_floats.py | 7 | 27713 | # -*- coding: utf-8 -*-
import pytest
from warnings import catch_warnings
import numpy as np
from pandas import Series, DataFrame, Index, Float64Index
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
class TestFloatIndexers(object):
def check(self, result, original, indexer, getitem):
"""
comparator for results
we need to take care if we are indexing on a
Series or a frame
"""
if isinstance(original, Series):
expected = original.iloc[indexer]
else:
if getitem:
expected = original.iloc[:, indexer]
else:
expected = original.iloc[indexer]
assert_almost_equal(result, expected)
def test_scalar_error(self):
# GH 4892
# float_indexers should raise exceptions
# on appropriate Index types & accessors
# this duplicates the code below
# but is spefically testing for the error
# message
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeCategoricalIndex,
tm.makeDateIndex, tm.makeTimedeltaIndex,
tm.makePeriodIndex, tm.makeIntIndex,
tm.makeRangeIndex]:
i = index(5)
s = Series(np.arange(len(i)), index=i)
def f():
s.iloc[3.0]
tm.assert_raises_regex(TypeError,
'cannot do positional indexing',
f)
def f():
s.iloc[3.0] = 0
pytest.raises(TypeError, f)
def test_scalar_non_numeric(self):
# GH 4892
# float_indexers should raise exceptions
# on appropriate Index types & accessors
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeCategoricalIndex,
tm.makeDateIndex, tm.makeTimedeltaIndex,
tm.makePeriodIndex]:
i = index(5)
for s in [Series(
np.arange(len(i)), index=i), DataFrame(
np.random.randn(
len(i), len(i)), index=i, columns=i)]:
# getting
for idxr, getitem in [(lambda x: x.ix, False),
(lambda x: x.iloc, False),
(lambda x: x, True)]:
def f():
with catch_warnings(record=True):
idxr(s)[3.0]
                    # getitem on a DataFrame is a KeyError as it is indexing
# via labels on the columns
if getitem and isinstance(s, DataFrame):
error = KeyError
else:
error = TypeError
pytest.raises(error, f)
# label based can be a TypeError or KeyError
def f():
s.loc[3.0]
if s.index.inferred_type in ['string', 'unicode', 'mixed']:
error = KeyError
else:
error = TypeError
pytest.raises(error, f)
# contains
assert 3.0 not in s
# setting with a float fails with iloc
def f():
s.iloc[3.0] = 0
pytest.raises(TypeError, f)
# setting with an indexer
if s.index.inferred_type in ['categorical']:
# Value or Type Error
pass
elif s.index.inferred_type in ['datetime64', 'timedelta64',
'period']:
# these should prob work
                    # and are inconsistent between series/dataframe ATM
# for idxr in [lambda x: x.ix,
# lambda x: x]:
# s2 = s.copy()
# def f():
# idxr(s2)[3.0] = 0
# pytest.raises(TypeError, f)
pass
else:
s2 = s.copy()
s2.loc[3.0] = 10
assert s2.index.is_object()
for idxr in [lambda x: x.ix,
lambda x: x]:
s2 = s.copy()
with catch_warnings(record=True):
idxr(s2)[3.0] = 0
assert s2.index.is_object()
            # falls back to position selection, series only
s = Series(np.arange(len(i)), index=i)
s[3]
pytest.raises(TypeError, lambda: s[3.0])
def test_scalar_with_mixed(self):
s2 = Series([1, 2, 3], index=['a', 'b', 'c'])
s3 = Series([1, 2, 3], index=['a', 'b', 1.5])
# lookup in a pure string index
# with an invalid indexer
for idxr in [lambda x: x.ix,
lambda x: x,
lambda x: x.iloc]:
def f():
with catch_warnings(record=True):
idxr(s2)[1.0]
pytest.raises(TypeError, f)
pytest.raises(KeyError, lambda: s2.loc[1.0])
result = s2.loc['b']
expected = 2
assert result == expected
# mixed index so we have label
# indexing
for idxr in [lambda x: x.ix,
lambda x: x]:
def f():
with catch_warnings(record=True):
idxr(s3)[1.0]
pytest.raises(TypeError, f)
result = idxr(s3)[1]
expected = 2
assert result == expected
pytest.raises(TypeError, lambda: s3.iloc[1.0])
pytest.raises(KeyError, lambda: s3.loc[1.0])
result = s3.loc[1.5]
expected = 3
assert result == expected
def test_scalar_integer(self):
# test how scalar float indexers work on int indexes
# integer index
for index in [tm.makeIntIndex, tm.makeRangeIndex]:
i = index(5)
for s in [Series(np.arange(len(i))),
DataFrame(np.random.randn(len(i), len(i)),
index=i, columns=i)]:
# coerce to equal int
for idxr, getitem in [(lambda x: x.ix, False),
(lambda x: x.loc, False),
(lambda x: x, True)]:
with catch_warnings(record=True):
result = idxr(s)[3.0]
self.check(result, s, 3, getitem)
# coerce to equal int
for idxr, getitem in [(lambda x: x.ix, False),
(lambda x: x.loc, False),
(lambda x: x, True)]:
if isinstance(s, Series):
def compare(x, y):
assert x == y
expected = 100
else:
compare = tm.assert_series_equal
if getitem:
expected = Series(100,
index=range(len(s)), name=3)
else:
expected = Series(100.,
index=range(len(s)), name=3)
s2 = s.copy()
with catch_warnings(record=True):
idxr(s2)[3.0] = 100
result = idxr(s2)[3.0]
compare(result, expected)
result = idxr(s2)[3]
compare(result, expected)
# contains
# coerce to equal int
assert 3.0 in s
def test_scalar_float(self):
# scalar float indexers work on a float index
index = Index(np.arange(5.))
for s in [Series(np.arange(len(index)), index=index),
DataFrame(np.random.randn(len(index), len(index)),
index=index, columns=index)]:
# assert all operations except for iloc are ok
indexer = index[3]
for idxr, getitem in [(lambda x: x.ix, False),
(lambda x: x.loc, False),
(lambda x: x, True)]:
# getting
with catch_warnings(record=True):
result = idxr(s)[indexer]
self.check(result, s, 3, getitem)
# setting
s2 = s.copy()
def f():
with catch_warnings(record=True):
idxr(s2)[indexer] = expected
with catch_warnings(record=True):
result = idxr(s2)[indexer]
self.check(result, s, 3, getitem)
# random integer is a KeyError
with catch_warnings(record=True):
pytest.raises(KeyError, lambda: idxr(s)[3.5])
# contains
assert 3.0 in s
# iloc succeeds with an integer
expected = s.iloc[3]
s2 = s.copy()
s2.iloc[3] = expected
result = s2.iloc[3]
self.check(result, s, 3, False)
# iloc raises with a float
pytest.raises(TypeError, lambda: s.iloc[3.0])
def g():
s2.iloc[3.0] = 0
pytest.raises(TypeError, g)
def test_slice_non_numeric(self):
# GH 4892
# float_indexers should raise exceptions
# on appropriate Index types & accessors
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makeTimedeltaIndex,
tm.makePeriodIndex]:
index = index(5)
for s in [Series(range(5), index=index),
DataFrame(np.random.randn(5, 2), index=index)]:
# getitem
for l in [slice(3.0, 4),
slice(3, 4.0),
slice(3.0, 4.0)]:
def f():
s.iloc[l]
pytest.raises(TypeError, f)
for idxr in [lambda x: x.ix,
lambda x: x.loc,
lambda x: x.iloc,
lambda x: x]:
def f():
with catch_warnings(record=True):
idxr(s)[l]
pytest.raises(TypeError, f)
# setitem
for l in [slice(3.0, 4),
slice(3, 4.0),
slice(3.0, 4.0)]:
def f():
s.iloc[l] = 0
pytest.raises(TypeError, f)
for idxr in [lambda x: x.ix,
lambda x: x.loc,
lambda x: x.iloc,
lambda x: x]:
def f():
with catch_warnings(record=True):
idxr(s)[l] = 0
pytest.raises(TypeError, f)
def test_slice_integer(self):
# same as above, but for Integer based indexes
# these coerce to a like integer
        # oob indicates if we are out of bounds
# of positional indexing
for index, oob in [(tm.makeIntIndex(5), False),
(tm.makeRangeIndex(5), False),
(tm.makeIntIndex(5) + 10, True)]:
# s is an in-range index
s = Series(range(5), index=index)
# getitem
for l in [slice(3.0, 4),
slice(3, 4.0),
slice(3.0, 4.0)]:
for idxr in [lambda x: x.loc,
lambda x: x.ix]:
with catch_warnings(record=True):
result = idxr(s)[l]
# these are all label indexing
# except getitem which is positional
# empty
if oob:
indexer = slice(0, 0)
else:
indexer = slice(3, 5)
self.check(result, s, indexer, False)
# positional indexing
def f():
s[l]
pytest.raises(TypeError, f)
# getitem out-of-bounds
for l in [slice(-6, 6),
slice(-6.0, 6.0)]:
for idxr in [lambda x: x.loc,
lambda x: x.ix]:
with catch_warnings(record=True):
result = idxr(s)[l]
# these are all label indexing
# except getitem which is positional
# empty
if oob:
indexer = slice(0, 0)
else:
indexer = slice(-6, 6)
self.check(result, s, indexer, False)
# positional indexing
def f():
s[slice(-6.0, 6.0)]
pytest.raises(TypeError, f)
# getitem odd floats
for l, res1 in [(slice(2.5, 4), slice(3, 5)),
(slice(2, 3.5), slice(2, 4)),
(slice(2.5, 3.5), slice(3, 4))]:
for idxr in [lambda x: x.loc,
lambda x: x.ix]:
with catch_warnings(record=True):
result = idxr(s)[l]
if oob:
res = slice(0, 0)
else:
res = res1
self.check(result, s, res, False)
# positional indexing
def f():
s[l]
pytest.raises(TypeError, f)
# setitem
for l in [slice(3.0, 4),
slice(3, 4.0),
slice(3.0, 4.0)]:
for idxr in [lambda x: x.loc,
lambda x: x.ix]:
sc = s.copy()
with catch_warnings(record=True):
idxr(sc)[l] = 0
result = idxr(sc)[l].values.ravel()
assert (result == 0).all()
# positional indexing
def f():
s[l] = 0
pytest.raises(TypeError, f)
def test_integer_positional_indexing(self):
""" make sure that we are raising on positional indexing
w.r.t. an integer index """
s = Series(range(2, 6), index=range(2, 6))
result = s[2:4]
expected = s.iloc[2:4]
assert_series_equal(result, expected)
for idxr in [lambda x: x,
lambda x: x.iloc]:
for l in [slice(2, 4.0),
slice(2.0, 4),
slice(2.0, 4.0)]:
def f():
idxr(s)[l]
pytest.raises(TypeError, f)
def test_slice_integer_frame_getitem(self):
# similar to above, but on the getitem dim (of a DataFrame)
for index in [tm.makeIntIndex, tm.makeRangeIndex]:
index = index(5)
s = DataFrame(np.random.randn(5, 2), index=index)
for idxr in [lambda x: x.loc,
lambda x: x.ix]:
# getitem
for l in [slice(0.0, 1),
slice(0, 1.0),
slice(0.0, 1.0)]:
with catch_warnings(record=True):
result = idxr(s)[l]
indexer = slice(0, 2)
self.check(result, s, indexer, False)
# positional indexing
def f():
s[l]
pytest.raises(TypeError, f)
# getitem out-of-bounds
for l in [slice(-10, 10),
slice(-10.0, 10.0)]:
result = idxr(s)[l]
self.check(result, s, slice(-10, 10), True)
# positional indexing
def f():
s[slice(-10.0, 10.0)]
pytest.raises(TypeError, f)
# getitem odd floats
for l, res in [(slice(0.5, 1), slice(1, 2)),
(slice(0, 0.5), slice(0, 1)),
(slice(0.5, 1.5), slice(1, 2))]:
with catch_warnings(record=True):
result = idxr(s)[l]
self.check(result, s, res, False)
# positional indexing
def f():
s[l]
pytest.raises(TypeError, f)
# setitem
for l in [slice(3.0, 4),
slice(3, 4.0),
slice(3.0, 4.0)]:
sc = s.copy()
with catch_warnings(record=True):
idxr(sc)[l] = 0
result = idxr(sc)[l].values.ravel()
assert (result == 0).all()
# positional indexing
def f():
s[l] = 0
pytest.raises(TypeError, f)
def test_slice_float(self):
# same as above, but for floats
index = Index(np.arange(5.)) + 0.1
for s in [Series(range(5), index=index),
DataFrame(np.random.randn(5, 2), index=index)]:
for l in [slice(3.0, 4),
slice(3, 4.0),
slice(3.0, 4.0)]:
expected = s.iloc[3:4]
for idxr in [lambda x: x.ix,
lambda x: x.loc,
lambda x: x]:
# getitem
with catch_warnings(record=True):
result = idxr(s)[l]
if isinstance(s, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
# setitem
s2 = s.copy()
with catch_warnings(record=True):
idxr(s2)[l] = 0
result = idxr(s2)[l].values.ravel()
assert (result == 0).all()
def test_floating_index_doc_example(self):
index = Index([1.5, 2, 3, 4.5, 5])
s = Series(range(5), index=index)
assert s[3] == 2
assert s.loc[3] == 2
assert s.loc[3] == 2
assert s.iloc[3] == 3
def test_floating_misc(self):
# related 236
# scalar/slicing of a float index
s = Series(np.arange(5), index=np.arange(5) * 2.5, dtype=np.int64)
# label based slicing
result1 = s[1.0:3.0]
result2 = s.loc[1.0:3.0]
result3 = s.loc[1.0:3.0]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
# exact indexing when found
result1 = s[5.0]
result2 = s.loc[5.0]
result3 = s.loc[5.0]
assert result1 == result2
assert result1 == result3
result1 = s[5]
result2 = s.loc[5]
result3 = s.loc[5]
assert result1 == result2
assert result1 == result3
assert s[5.0] == s[5]
# value not found (and no fallbacking at all)
# scalar integers
pytest.raises(KeyError, lambda: s.loc[4])
pytest.raises(KeyError, lambda: s.loc[4])
pytest.raises(KeyError, lambda: s[4])
# fancy floats/integers create the correct entry (as nan)
# fancy tests
expected = Series([2, 0], index=Float64Index([5.0, 0.0]))
for fancy_idx in [[5.0, 0.0], np.array([5.0, 0.0])]: # float
assert_series_equal(s[fancy_idx], expected)
assert_series_equal(s.loc[fancy_idx], expected)
assert_series_equal(s.loc[fancy_idx], expected)
expected = Series([2, 0], index=Index([5, 0], dtype='int64'))
for fancy_idx in [[5, 0], np.array([5, 0])]: # int
assert_series_equal(s[fancy_idx], expected)
assert_series_equal(s.loc[fancy_idx], expected)
assert_series_equal(s.loc[fancy_idx], expected)
# all should return the same as we are slicing 'the same'
result1 = s.loc[2:5]
result2 = s.loc[2.0:5.0]
result3 = s.loc[2.0:5]
result4 = s.loc[2.1:5]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, result4)
# previously this did fallback indexing
result1 = s[2:5]
result2 = s[2.0:5.0]
result3 = s[2.0:5]
result4 = s[2.1:5]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, result4)
result1 = s.loc[2:5]
result2 = s.loc[2.0:5.0]
result3 = s.loc[2.0:5]
result4 = s.loc[2.1:5]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, result4)
# combined test
result1 = s.loc[2:5]
result2 = s.loc[2:5]
result3 = s[2:5]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
# list selection
result1 = s[[0.0, 5, 10]]
result2 = s.loc[[0.0, 5, 10]]
result3 = s.loc[[0.0, 5, 10]]
result4 = s.iloc[[0, 2, 4]]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, result4)
result1 = s[[1.6, 5, 10]]
result2 = s.loc[[1.6, 5, 10]]
result3 = s.loc[[1.6, 5, 10]]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, Series(
[np.nan, 2, 4], index=[1.6, 5, 10]))
result1 = s[[0, 1, 2]]
result2 = s.loc[[0, 1, 2]]
result3 = s.loc[[0, 1, 2]]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, Series(
[0.0, np.nan, np.nan], index=[0, 1, 2]))
result1 = s.loc[[2.5, 5]]
result2 = s.loc[[2.5, 5]]
assert_series_equal(result1, result2)
assert_series_equal(result1, Series([1, 2], index=[2.5, 5.0]))
result1 = s[[2.5]]
result2 = s.loc[[2.5]]
result3 = s.loc[[2.5]]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, Series([1], index=[2.5]))
def test_floating_tuples(self):
# see gh-13509
s = Series([(1, 1), (2, 2), (3, 3)], index=[0.0, 0.1, 0.2], name='foo')
result = s[0.0]
assert result == (1, 1)
expected = Series([(1, 1), (2, 2)], index=[0.0, 0.0], name='foo')
s = Series([(1, 1), (2, 2), (3, 3)], index=[0.0, 0.0, 0.2], name='foo')
result = s[0.0]
tm.assert_series_equal(result, expected)
def test_float64index_slicing_bug(self):
# GH 5557, related to slicing a float index
ser = {256: 2321.0,
1: 78.0,
2: 2716.0,
3: 0.0,
4: 369.0,
5: 0.0,
6: 269.0,
7: 0.0,
8: 0.0,
9: 0.0,
10: 3536.0,
11: 0.0,
12: 24.0,
13: 0.0,
14: 931.0,
15: 0.0,
16: 101.0,
17: 78.0,
18: 9643.0,
19: 0.0,
20: 0.0,
21: 0.0,
22: 63761.0,
23: 0.0,
24: 446.0,
25: 0.0,
26: 34773.0,
27: 0.0,
28: 729.0,
29: 78.0,
30: 0.0,
31: 0.0,
32: 3374.0,
33: 0.0,
34: 1391.0,
35: 0.0,
36: 361.0,
37: 0.0,
38: 61808.0,
39: 0.0,
40: 0.0,
41: 0.0,
42: 6677.0,
43: 0.0,
44: 802.0,
45: 0.0,
46: 2691.0,
47: 0.0,
48: 3582.0,
49: 0.0,
50: 734.0,
51: 0.0,
52: 627.0,
53: 70.0,
54: 2584.0,
55: 0.0,
56: 324.0,
57: 0.0,
58: 605.0,
59: 0.0,
60: 0.0,
61: 0.0,
62: 3989.0,
63: 10.0,
64: 42.0,
65: 0.0,
66: 904.0,
67: 0.0,
68: 88.0,
69: 70.0,
70: 8172.0,
71: 0.0,
72: 0.0,
73: 0.0,
74: 64902.0,
75: 0.0,
76: 347.0,
77: 0.0,
78: 36605.0,
79: 0.0,
80: 379.0,
81: 70.0,
82: 0.0,
83: 0.0,
84: 3001.0,
85: 0.0,
86: 1630.0,
87: 7.0,
88: 364.0,
89: 0.0,
90: 67404.0,
91: 9.0,
92: 0.0,
93: 0.0,
94: 7685.0,
95: 0.0,
96: 1017.0,
97: 0.0,
98: 2831.0,
99: 0.0,
100: 2963.0,
101: 0.0,
102: 854.0,
103: 0.0,
104: 0.0,
105: 0.0,
106: 0.0,
107: 0.0,
108: 0.0,
109: 0.0,
110: 0.0,
111: 0.0,
112: 0.0,
113: 0.0,
114: 0.0,
115: 0.0,
116: 0.0,
117: 0.0,
118: 0.0,
119: 0.0,
120: 0.0,
121: 0.0,
122: 0.0,
123: 0.0,
124: 0.0,
125: 0.0,
126: 67744.0,
127: 22.0,
128: 264.0,
129: 0.0,
260: 197.0,
268: 0.0,
265: 0.0,
269: 0.0,
261: 0.0,
266: 1198.0,
267: 0.0,
262: 2629.0,
258: 775.0,
257: 0.0,
263: 0.0,
259: 0.0,
264: 163.0,
250: 10326.0,
251: 0.0,
252: 1228.0,
253: 0.0,
254: 2769.0,
255: 0.0}
# smoke test for the repr
s = Series(ser)
result = s.value_counts()
str(result)
| bsd-3-clause |
secimTools/SECIMTools | src/scripts/modify_design_file.py | 1 | 5778 | #!/usr/bin/env python
################################################################################
# DATE: 2016/October/10
#
# MODULE: subset_data.py
#
# VERSION: 1.2
#
# AUTHOR: Miguel Ibarra ([email protected])
#
# DESCRIPTION: Subsets design file data based on the groups in the
# design file.
#
################################################################################
# Standard Libraries
import os
import re
import logging
import argparse
from itertools import chain
# AddOn Libraries
import numpy as np
import pandas as pd
# Local Libraries
from secimtools.dataManager import logger as sl
from secimtools.dataManager.interface import wideToDesign
def getOptions():
"""Function to pull arguments"""
    parser = argparse.ArgumentParser(description="Removes samples from the design file " \
                                     "belonging to the user-specified group(s).")
    # Standard Input
standar = parser.add_argument_group(title="Standard input",
description= "Standard input for SECIM tools.")
standar.add_argument("-i","--input",dest="input", action='store',
required=True, help="Input dataset in wide format.")
standar.add_argument("-d","--design",dest="design", action='store',
required=True, help="Design file.")
standar.add_argument("-id","--uniqID",dest="uniqID",action="store",
required=True, help="Name of the column with unique" \
"dentifiers.")
standar.add_argument("-g","--group", dest="group", action='store',
required=False, help="Name of column in design file" \
"with Group/treatment information.")
    # Tool Specific
    tool = parser.add_argument_group(title="Tool specific input",
                        description= "Input that is specific for this tool.")
tool.add_argument("-dp","--drops", dest="drops", action='store',
required=True, help="Name of the groups in your"\
"group/treatment column that you want to remove from the design file.")
# Output Paths
output = parser.add_argument_group(title='Output paths',
description="Paths for the output files")
output.add_argument("-o","--out",dest="out",action="store",
required=True,help="Output path for the new design file")
args = parser.parse_args()
# Standardize paths
args.out = os.path.abspath(args.out)
args.input = os.path.abspath(args.input)
args.design = os.path.abspath(args.design)
# Split groups/samples to drop
args.drops = args.drops.split(",")
return (args)
def cleanStr(x):
""" Clean strings so they behave.
For some modules, uniqIDs and groups cannot contain spaces, '-', '*',
'/', '+', or '()'. For example, statsmodel parses the strings and interprets
them in the model.
:Arguments:
x (str): A string that needs cleaning
:Returns:
x (str): The cleaned string.
self.origString (dict): A dictionary where the key is the new
string and the value is the original string. This will be useful
for reverting back to original values.
"""
if isinstance(x, str):
val = x
x = re.sub(r'^-([0-9].*)', r'__\1', x)
x = x.replace(' ', '_')
x = x.replace('.', '_')
x = x.replace('-', '_')
x = x.replace('*', '_')
x = x.replace('/', '_')
x = x.replace('+', '_')
x = x.replace('(', '_')
x = x.replace(')', '_')
x = x.replace('[', '_')
x = x.replace(']', '_')
x = x.replace('{', '_')
x = x.replace('}', '_')
x = x.replace('"', '_')
x = x.replace('\'', '_')
x = re.sub(r'^([0-9].*)', r'_\1', x)
return x
def main(args):
# Importing data trough
logger.info("Importing data through wideToDesign data manager")
dat = wideToDesign(args.input, args.design, args.uniqID,
logger=logger)
# Cleaning from missing data
dat.dropMissing()
# Making sure all the groups to drop actually exist on the design column
if args.group:
for todrop in args.drops:
if todrop in list(set(dat.design[args.group].values)):
pass
else:
logger.error("The group '{0}' is not located in the column '{1}' "\
"of your design file".format(todrop,args.group))
raise ValueError
    # If the subsetting is to be done by group, then select the sampleIDs
# from the design file
logger.info(u"Getting sampleNames to drop")
if args.group:
iToDrop = list()
for name,group in dat.design.groupby(args.group):
if name in args.drops:
iToDrop+=(group.index.tolist())
else:
iToDrop = args.drops
# Remove weird characters
iToDrop = [cleanStr(x) for x in iToDrop]
# Dropping elements
selectedDesign = dat.design.drop(iToDrop,axis=0, inplace=False)
# Output wide results
logger.info("Outputing design file")
selectedDesign.to_csv(args.out, sep='\t')
logger.info("Script Complete!")
if __name__ == '__main__':
#Import data
args = getOptions()
#Setting logger
logger = logging.getLogger()
sl.setLogger(logger)
logger.info("Importing data with following parameters:"
"\n\tInput: {0}"
"\n\tDesign: {1}"
"\n\tuniqID: {2}"
"\n\tgroup: {3}"
"\n\tToDrop: {4}".format(args.input, args.design, args.uniqID,
args.group, args.drops))
# Main script
main(args)
| mit |
npdoty/bigbang | bin/mail_to_activity.py | 3 | 1973 | import sys
import os
import os.path
import getopt
import bigbang.mailman as mailman
import bigbang.archive
import pandas as pd
import logging
import argparse
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter, description=r"""
Creates activity frames from mailing list archives.
Provide the path to a directory of archives, containing subdirectories for each mailing list.
For example:
python bin/mail_to_activity.py --archives ../archives
""")
parser.add_argument('--archives', type=str, help='Path to a specified directory of downloaded mail archives', required=True)
parser.add_argument('-f', '--force', action='store_true', help='Overwrite existing -activity.csv files; by default this is false and directories with an existing file are skipped.')
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
def main(args):
subdirectories = next(os.walk(args.archives))[1]
for subdirectory in subdirectories:
logging.info('Processing archives in %s' % subdirectory)
if not args.force:
out_path = os.path.join(args.archives, subdirectory, ('%s-activity.csv' % subdirectory))
if os.path.isfile(out_path): # if file already exists, skip
continue
try:
archives = mailman.open_list_archives(subdirectory, args.archives)
activity = bigbang.archive.Archive(archives).get_activity()
# sum the message count, rather than by date, to prevent enormous, sparse files
activity = pd.DataFrame(activity.sum(0), columns=['Message Count'])
out_path = os.path.join(args.archives, subdirectory, ('%s-activity.csv' % subdirectory))
with open(out_path, 'w') as f:
activity.to_csv(f, encoding='utf-8')
except Exception:
logging.warning(('Failed to produce activity frame export for %s.' % subdirectory), exc_info=True)
if __name__ == "__main__":
main(args)
| agpl-3.0 |
KU-CCB/PyPUG | pypug.py | 1 | 8083 | #!/bin/python
import sys
if sys.version_info[0] < 3:
from StringIO import StringIO
else:
from io import StringIO
import pandas as pd
import requests
# PUG URL Prolog
PROLOG = "https://pubchem.ncbi.nlm.nih.gov/rest/pug"
# default encoding for strings
ENCODING = "utf-8"
# Don't report errors
SILENT = False
""" ---------------------------------------------------------------------------
Error Handling
--------------------------------------------------------------------------- """
PugRestErrors = {
200: { "code": "(none)", "message": "Success"},
400: { "code": "PugRest.BadRequest", "message": "Request is improperly formed"},
404: { "code": "PugRest.NotFound", "message": "The input record was not found"},
405: { "code": "PugRest.MethodNotAllowed", "message": "Request not allowed (such as invalid MIME type in the HTTP Accept header)"},
504: { "code": "PugRest.Timeout", "message": "The request timed out, from server overload or too broad a request"},
501: { "code": "PugRest.Unimplemented", "message": "The requested operation has not (yet) been implemented by the server"},
500: { "code": "PugRest.ServerError", "message": "Some problem on the server side (such as a database server down, etc.)"},
414: { "code": "Unknown", "message": "Unknown"},
#500: { "code": "PugRest.Unknown", "message": "An unknown error occurred"}
}
# This should be split into many different classes of errors based on PugRestErrors
class PugRestException(Exception):
"""
Exception thrown when a request to PUG returns with a status code other than
200 (assuming the status code is in PugRestErrors)
"""
def __init__(self, *args, **kwargs):
self.url = kwargs.pop('url', None)
self.code = kwargs.pop('code', None)
self.message = kwargs.pop('message', None)
self.response = kwargs.pop('response', None)
super(PugRestException, self).__init__(args, kwargs)
def __str__(self):
msg = (
"--url: "+self.url+"\n"
"--code: "+self.code+"\n"
"--message: "+self.message+"\n"
"--response: "+self.response+"\n")
return msg
def handleKeyError(error):
  sys.stderr.write("[pypug KeyError]\n")
  sys.stderr.write("--The PUG server returned an unhandled status code\n")
  sys.stderr.write("--This status code is not handled in pypug.py\n")
  sys.stderr.write(str(error) + "\n")
  sys.exit()
""" ---------------------------------------------------------------------------
Requests Wrapper
--------------------------------------------------------------------------- """
def get(url):
"""
  Make an HTTP GET request to the PubChem PUG REST server
"""
global SILENT
response = requests.get(url)
if str(response.status_code) != "200":
if SILENT:
return ""
else:
raise PugRestException(response.text, url=url,
code=PugRestErrors[response.status_code]["code"],
message=PugRestErrors[response.status_code]["message"],
response=response.text.strip().replace('\n', ','))
else:
return response.text.strip()
def post(url, payload):
"""
  Make an HTTP POST request to the PubChem PUG REST server
"""
global SILENT
headers = {'content-type': 'application/x-www-form-urlencoded'}
response = requests.post(url, data=payload, headers=headers)
if str(response.status_code) != "200":
if SILENT:
return ""
else:
try:
raise PugRestException(response.text, payload=payload, url=url,
code=PugRestErrors[response.status_code]["code"],
message=PugRestErrors[response.status_code]["message"],
response=response.text.strip().replace('\n', ','))
except KeyError as e:
handleKeyError(e)
else:
return response.text.strip()
""" ---------------------------------------------------------------------------
PyPUG API
--------------------------------------------------------------------------- """
def SetSilent(silent):
"""
Sets error reporting on or off. silent should be either True or False. Any
other values will be ignored
@param silent True or False
"""
global SILENT
if silent in (True, False):
SILENT = silent
def getAIDsFromGeneID(geneID, usepost=True):
"""
Returns a list of Assay IDs that have geneID as a target
@param geneID The geneID to search on.
@param usepost Boolean value indicating whether post or get should be used.
"""
response = ""
if usepost:
url = PROLOG + "/assay/target/GeneID/aids/TXT"
payload = {'geneid':geneID}
response = post(url, payload).split('\n')
else:
url = PROLOG + ("/assay/target/GeneID/%s/aids/TXT" % geneID)
response = get(url).split('\n')
return [id.encode(ENCODING) for id in response]
def getAssayFromSIDs(AID, SIDs=[]):
"""
Returns a pandas DataFrame containing the assay data for AID. This is useful
when an assay has more than 10000 associated SIDs and can't be retrieved with
getAssayFromAID due to PUG data restrictions. SIDs can be a list of the
prefetched SIDs for the assay or it can be an empty list, in which case the
SIDs for the given assay will be fetched automatically.
@param AID The AID to search on.
@param SIDs The SIDs for the given AID
"""
response = ""
pos = 0
groupSz = 9999
if len(SIDs) < 1:
SIDs = getSIDsFromAID(AID)
while pos < len(SIDs):
url = PROLOG + "/assay/aid/CSV"
payload = {'aid':AID, 'sid':",".join(SIDs[pos:pos+groupSz])}
pos = pos + groupSz + 1
if len(response) == 0:
response += post(url, payload)
else:
data = post(url, payload)
response += data[data.index('\n'):]
response = StringIO(response.encode(ENCODING))
return pd.DataFrame.from_csv(response, index_col=False, header=0)
def getAssayFromAID(AID, usepost=True):
"""
Returns a pandas DataFrame containing the assay data for AID.
@param AID The AID to search on.
@param usepost Boolean value indicating whether post or get should be used.
"""
response = ""
if usepost:
url = PROLOG + "/assay/aid/CSV"
payload = {'aid':AID}
response = StringIO(post(url, payload).encode(ENCODING))
else:
url = PROLOG + "/assay/aid/%s/CSV" % AID
response = StringIO(get(url).encode(ENCODING))
return pd.DataFrame.from_csv(response, index_col=False, header=0)
def getAssayDescriptionFromAID(AID):
"""
Return the assay description for a given AID
@param AID The AID to search on
"""
#url = PROLOG + ("/assay/aid/%s/description/ASNT" % AID)
url = PROLOG + ("/assay/aid/%s/summary/JSON" % AID) # simplified description
response = get(url) # needs to be parsed into an object
return response
def _getCompoundPropertiesFromCID(CID, properties):
"""
Return the compound and its properties identified by CID
@param CID The CID to search on
"""
url = PROLOG + ("/compound/cid/%s/property/%s/CSV" % (CID, ",".join(properties)))
response = get(url)
return response
def getCanonicalSMILESFromCID(CID):
response = _getCompoundPropertiesFromCID(CID, ["CanonicalSMILES"])
smiles = response.split('\n')[1].split(',')[1]
return smiles
def getSIDsFromAID(AID, usepost=True):
"""
Returns a list of SIDs associated with an AID.
@param geneID The geneID to search on
"""
response = ""
if usepost:
url = PROLOG + "/assay/aid/sids/TXT"
payload = {'aid':AID}
response = post(url, payload).split('\n')
else:
url = PROLOG + ("/assay/aid/%s/sids/TXT" % AID)
response = get(url).split('\n')
return [id.encode(ENCODING) for id in response]
def getCIDsFromAID(AID, usepost=True):
"""
Return a list of pubchem cids that correspond to an AID.
@param AID The AID to search on.
@param usepost Boolean value indicating whether post or get should be used.
"""
if usepost:
url = PROLOG + "/assay/aid/cids/TXT"
payload = {'aid':AID}
response = post(url, payload).split('\n')
else:
url = PROLOG + ("/assay/aid/%s/cids/TXT" % AID)
response = get(url).split('\n')
return [id.encode(ENCODING) for id in response]
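# Minimal usage sketch (requires live access to the PubChem PUG REST service;
# the geneID below is only an illustrative placeholder):
#
#   SetSilent(True)
#   aids = getAIDsFromGeneID(5594)
#   if aids:
#       assay_df = getAssayFromAID(aids[0])
#       print(assay_df.head())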
| apache-2.0 |
prheenan/Research | Perkins/Projects/Primers/Util/MeltingTemperatureUtil.py | 1 | 7466 | # force floating point division. Can still use integer with //
from __future__ import division
# This file is used for importing the common utilities classes.
import numpy as np
import matplotlib.pyplot as plt
import sys
from Bio.SeqUtils import MeltingTemp as mt
from Bio.Seq import Seq
# get better names for the common table we will use
# see:
# biopython.org/DIST/docs/api/Bio.SeqUtils.MeltingTemp-module.html#Tm_NN
NEAREST_N_ALLAWI_LUCIA_1997= mt.DNA_NN3
# see :
#biopython.org/DIST/docs/api/Bio.SeqUtils.MeltingTemp-pysrc.html#salt_correction
SALT_MONO_Owczarzy_2004 = 6
SALT_DIV_Owczarzy_2008 = 7
# tables
DEF_NN_TABLE = NEAREST_N_ALLAWI_LUCIA_1997
DEF_SALT=SALT_DIV_Owczarzy_2008
# default concentrations
DEF_K =0
# 10uL of 8mM (2mM per dNTP) in 100uL, converted to uM
DEF_dNTPs=10*2/100
# all concentrations below in mM, unless otherwise noted
DEF_NA=50
DEF_TRIS=0
# 6uL of 25mM in 100uL
DEF_Mg=(6*25/100.)
# oligo concentrations, in nM
DEF_OLIGO = 250
DEF_OTHER = 0
class MeltingParams:
"""
Class to encapsulate melting parameters
"""
def __init__(self,Na=DEF_NA,K=DEF_K,Tris=DEF_TRIS,Mg=DEF_Mg,dNTPs=DEF_dNTPs,
saltcorr=DEF_SALT,nn_table=DEF_NN_TABLE,dnac1=DEF_OLIGO,
dnac2=DEF_OTHER,**kwargs):
"""
Class to wrap up parameters for melting temperature
Args:
            Na: Sodium concentration [mM]
            K : potassium concentration [mM]
            Tris: tris concentration [mM]
            Mg: magnesium concentration [mM]
            dNTPs: dNTP concentration [mM]
            saltcorr: salt correction method (see the biopython links above)
            nn_table: the nearest neighbor table
            dnac1: concentration of strand 1 [nM]
            dnac2: concentration of strand 2 [nM]
kwargs: passed directly to melting temperature
"""
self.Na = Na
self.K = K
self.Tris = Tris
self.Mg = Mg
self.dNTPs = dNTPs
self.saltcorr = saltcorr
self.nn_table = nn_table
self.dnac1 = dnac1
self.dnac2 = dnac2
# anything else, also adds
for key,val in (kwargs.items()):
setattr(self,key,val)
def concdict(self):
"""
Returns all concentrations as a dict
"""
return dict(Na=self.Na,
K=self.K,
Tris=self.Tris,
Mg=self.Mg,
dNTPs=self.dNTPs)
# 3/16/2015:
# IDT (http://www.idtdna.com/calc/analyzer) uses
# (all biopython stuff from
# http://biopython.org/DIST/docs/api/Bio.SeqUtils.MeltingTemp-module.html )
# DNA/DNA +/- 1.4C (Allawi '97)
idtTable = NEAREST_N_ALLAWI_LUCIA_1997
# Divalent cation correction +/- 0.5C (Owczarzy '08)
# Triphosphate correction +/- 0.0C (Owczarzy '08)
# Note: monovalent must be done post; see GetIdtMeltingTemp, Below
idtSaltCorr = SALT_DIV_Owczarzy_2008
# default oligo concentration, nM
idtDnaOligoConc = 250
""" ibid:
"Oligo concentration [250nM] is assumed to be significantly larger
(at least 6x) than concentration of the complementary target."
"""
idtOtherConc = idtDnaOligoConc/6
IdtParams = MeltingParams(Na=50, # 50mM by default
Tris=0,
dNTPs=0,
Mg = 0,
K=0,
saltcorr=idtSaltCorr,
nn_table=idtTable,
dnac1=idtDnaOligoConc,
dnac2=idtOtherConc,selfcomp=True)
"""
2016/8/10: see http://www.idtdna.com/calc/analyzer,
select 'qPCR' parameter set
"""
IdtParams_qPCR = MeltingParams(Na=50,
Tris=0,
# 0.8 mM, for *all* dTNPS
dNTPs=0.8,
# mM
Mg=3,
K=0,
saltcorr=idtSaltCorr,
nn_table=idtTable,
# 200nM
dnac1=200,
dnac2=200/6, # see ibid
selfcomp=True)
# all possible salt corrections
# from
#biopython.org/DIST/docs/api/Bio.SeqUtils.MeltingTemp-module.html#salt_correction
# note that some of these are redundant, so I just pick the ones that aren't
# note that 5 through 7 are methods for something completely different
SaltCorrections = [i for i in range(8)]
# all possible NN correction tables
# from
# http://biopython.org/DIST/docs/api/Bio.SeqUtils.MeltingTemp-pysrc.html
NearestNeighborTablesDNA = [mt.DNA_NN1,mt.DNA_NN2,mt.DNA_NN3,
mt.DNA_NN4]
def MeltingTemperature(Sequence,
**kwargs):
"""
Gets the melting temperature of a sequence. All concentrations in mM
For saltcorr and nn_table (or just generally), see:
http://biopython.org/DIST/docs/api/Bio.SeqUtils.MeltingTemp-module.html
Args:
        Sequence: the string we care about
**kwargs: passed directly to melting temperature. See MeltingParams
"""
mParams = MeltingParams(**kwargs)
paramDict= mParams.__dict__
return mt.Tm_NN(Seq(Sequence),**paramDict)
def GetAllMeltingTemperatures(sequence,**kwargs):
"""
Gets the melting temperatures from *all possible* combinations of
NN tables and salt corrections
Args:
sequence: sequence to use
**kwargs: Arguments for MeltingTemperature, concentrations only
        (i.e. don't try passing in saltcorr or nn_table, this does *all* of them)
returns
2-D matrix, element [i,j] is using salt correction i, nn_table j
from the 'SaltCorrections' and 'NearestNeighborTablesDNA' tables
"""
numNN = len(NearestNeighborTablesDNA)
numSalt = len(SaltCorrections)
toRet = np.zeros((numSalt,numNN))
for i,saltcorr in enumerate(SaltCorrections):
for j,nn_table in enumerate(NearestNeighborTablesDNA):
toRet[i,j] = MeltingTemperature(sequence,saltcorr=saltcorr,
nn_table=nn_table,**kwargs)
return toRet
def GetCorrectedIdtTemperature(Sequence,Params):
"""
    Given a DNA sequence and a MeltingParams instance, computes the melting
    temperature using IDT-style salt-corrected parameters
Args:
Sequence: string to use. DNA
Params: for correction and getting hte melting temperature. instance
of MeltingParams
Returns:
float, corrected temperature
"""
mParamDict = Params.__dict__
melting_temp = MeltingTemperature(Sequence,**mParamDict)
return melting_temp
def GetIdtMeltingTemperature(sequence):
"""
    Gets the melting temperature of a primer, using what IDT does. This is
essentially just DNA in a (weak) salt solution
Args:
sequence: see GetAllMeltingTemperatures
Returns:
the melting temperature, according to IDT
"""
return GetCorrectedIdtTemperature(sequence,IdtParams)
def GetIdtMeltingTemperatureForPCR(sequence):
"""
Gets the melting temperature of a primer, using what IDT does
for its 'PCR' parameter set
Args:
sequence: see GetAllMeltingTemperatures
Returns:
the melting temperature, according to IDT
"""
return GetCorrectedIdtTemperature(sequence,IdtParams_qPCR)
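# Minimal usage sketch (the primer sequence below is a made-up example; the
# melting-temperature tables require biopython, imported at the top of this
# file):
if __name__ == "__main__":
    example_primer = "AGCGTAATCTGGAACATCGT"
    print(GetIdtMeltingTemperature(example_primer))
    print(GetIdtMeltingTemperatureForPCR(example_primer))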
| gpl-3.0 |
ishanic/scikit-learn | sklearn/utils/tests/test_utils.py | 215 | 8100 | import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import pinv2
from itertools import chain
from sklearn.utils.testing import (assert_equal, assert_raises, assert_true,
assert_almost_equal, assert_array_equal,
SkipTest, assert_raises_regex)
from sklearn.utils import check_random_state
from sklearn.utils import deprecated
from sklearn.utils import resample
from sklearn.utils import safe_mask
from sklearn.utils import column_or_1d
from sklearn.utils import safe_indexing
from sklearn.utils import shuffle
from sklearn.utils import gen_even_slices
from sklearn.utils.extmath import pinvh
from sklearn.utils.mocking import MockDataFrame
def test_make_rng():
# Check the check_random_state utility function behavior
assert_true(check_random_state(None) is np.random.mtrand._rand)
assert_true(check_random_state(np.random) is np.random.mtrand._rand)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(42).randint(100) == rng_42.randint(100))
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(rng_42) is rng_42)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(43).randint(100) != rng_42.randint(100))
assert_raises(ValueError, check_random_state, "some invalid seed")
def test_resample_noarg():
# Border case not worth mentioning in doctests
assert_true(resample() is None)
def test_deprecated():
# Test whether the deprecated decorator issues appropriate warnings
# Copied almost verbatim from http://docs.python.org/library/warnings.html
# First a function...
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated()
def ham():
return "spam"
spam = ham()
assert_equal(spam, "spam") # function must remain usable
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
# ... then a class.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated("don't use this")
class Ham(object):
SPAM = 1
ham = Ham()
assert_true(hasattr(ham, "SPAM"))
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
def test_resample_value_errors():
# Check that invalid arguments yield ValueError
assert_raises(ValueError, resample, [0], [0, 1])
assert_raises(ValueError, resample, [0, 1], [0, 1], n_samples=3)
assert_raises(ValueError, resample, [0, 1], [0, 1], meaning_of_life=42)
def test_safe_mask():
random_state = check_random_state(0)
X = random_state.rand(5, 4)
X_csr = sp.csr_matrix(X)
mask = [False, False, True, True, True]
mask = safe_mask(X, mask)
assert_equal(X[mask].shape[0], 3)
mask = safe_mask(X_csr, mask)
assert_equal(X_csr[mask].shape[0], 3)
def test_pinvh_simple_real():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=np.float64)
a = np.dot(a, a.T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_pinvh_nonpositive():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)
a = np.dot(a, a.T)
u, s, vt = np.linalg.svd(a)
s[0] *= -1
a = np.dot(u * s, vt) # a is now symmetric non-positive and singular
a_pinv = pinv2(a)
a_pinvh = pinvh(a)
assert_almost_equal(a_pinv, a_pinvh)
def test_pinvh_simple_complex():
a = (np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
+ 1j * np.array([[10, 8, 7], [6, 5, 4], [3, 2, 1]]))
a = np.dot(a, a.conj().T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_column_or_1d():
EXAMPLES = [
("binary", ["spam", "egg", "spam"]),
("binary", [0, 1, 0, 1]),
("continuous", np.arange(10) / 20.),
("multiclass", [1, 2, 3]),
("multiclass", [0, 1, 2, 2, 0]),
("multiclass", [[1], [2], [3]]),
("multilabel-indicator", [[0, 1, 0], [0, 0, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("multiclass-multioutput", [[1, 1], [2, 2], [3, 1]]),
("multiclass-multioutput", [[5, 1], [4, 2], [3, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("continuous-multioutput", np.arange(30).reshape((-1, 3))),
]
for y_type, y in EXAMPLES:
if y_type in ["binary", 'multiclass', "continuous"]:
assert_array_equal(column_or_1d(y), np.ravel(y))
else:
assert_raises(ValueError, column_or_1d, y)
def test_safe_indexing():
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
inds = np.array([1, 2])
X_inds = safe_indexing(X, inds)
X_arrays = safe_indexing(np.array(X), inds)
assert_array_equal(np.array(X_inds), X_arrays)
assert_array_equal(np.array(X_inds), np.array(X)[inds])
def test_safe_indexing_pandas():
try:
import pandas as pd
except ImportError:
raise SkipTest("Pandas not found")
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = pd.DataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
# fun with read-only data in dataframes
# this happens in joblib memmapping
X.setflags(write=False)
X_df_readonly = pd.DataFrame(X)
with warnings.catch_warnings(record=True):
X_df_ro_indexed = safe_indexing(X_df_readonly, inds)
assert_array_equal(np.array(X_df_ro_indexed), X_indexed)
def test_safe_indexing_mock_pandas():
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = MockDataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
def test_shuffle_on_ndim_equals_three():
def to_tuple(A): # to make the inner arrays hashable
return tuple(tuple(tuple(C) for C in B) for B in A)
A = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # A.shape = (2,2,2)
S = set(to_tuple(A))
shuffle(A) # shouldn't raise a ValueError for dim = 3
assert_equal(set(to_tuple(A)), S)
def test_shuffle_dont_convert_to_array():
# Check that shuffle does not try to convert to numpy arrays with float
# dtypes can let any indexable datastructure pass-through.
a = ['a', 'b', 'c']
b = np.array(['a', 'b', 'c'], dtype=object)
c = [1, 2, 3]
d = MockDataFrame(np.array([['a', 0],
['b', 1],
['c', 2]],
dtype=object))
e = sp.csc_matrix(np.arange(6).reshape(3, 2))
a_s, b_s, c_s, d_s, e_s = shuffle(a, b, c, d, e, random_state=0)
assert_equal(a_s, ['c', 'b', 'a'])
assert_equal(type(a_s), list)
assert_array_equal(b_s, ['c', 'b', 'a'])
assert_equal(b_s.dtype, object)
assert_equal(c_s, [3, 2, 1])
assert_equal(type(c_s), list)
assert_array_equal(d_s, np.array([['c', 2],
['b', 1],
['a', 0]],
dtype=object))
assert_equal(type(d_s), MockDataFrame)
assert_array_equal(e_s.toarray(), np.array([[4, 5],
[2, 3],
[0, 1]]))
def test_gen_even_slices():
# check that gen_even_slices contains all samples
some_range = range(10)
joined_range = list(chain(*[some_range[slice] for slice in gen_even_slices(10, 3)]))
assert_array_equal(some_range, joined_range)
# check that passing negative n_chunks raises an error
slices = gen_even_slices(10, -1)
assert_raises_regex(ValueError, "gen_even_slices got n_packs=-1, must be"
" >=1", next, slices)
| bsd-3-clause |
wzbozon/scikit-learn | sklearn/linear_model/passive_aggressive.py | 97 | 10879 | # Authors: Rob Zinkov, Mathieu Blondel
# License: BSD 3 clause
from .stochastic_gradient import BaseSGDClassifier
from .stochastic_gradient import BaseSGDRegressor
from .stochastic_gradient import DEFAULT_EPSILON
class PassiveAggressiveClassifier(BaseSGDClassifier):
"""Passive Aggressive Classifier
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float
Maximum step size (regularization). Defaults to 1.0.
fit_intercept : bool, default=False
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
loss : string, optional
The loss function to be used:
hinge: equivalent to PA-I in the reference paper.
squared_hinge: equivalent to PA-II in the reference paper.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
n_features]
Weights assigned to the features.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
SGDClassifier
Perceptron
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
    K. Crammer, O. Dekel, J. Keshet, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
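    Examples
    --------
    A minimal usage sketch (toy data; the values below are illustrative
    only)::

        import numpy as np
        from sklearn.linear_model import PassiveAggressiveClassifier
        X = np.array([[0., 0.], [1., 1.], [2., 2.], [3., 3.]])
        y = np.array([0, 0, 1, 1])
        clf = PassiveAggressiveClassifier(random_state=0).fit(X, y)
        clf.predict([[2.5, 2.5]])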
"""
def __init__(self, C=1.0, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, loss="hinge", n_jobs=1, random_state=None,
warm_start=False, class_weight=None):
BaseSGDClassifier.__init__(self,
penalty=None,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
eta0=1.0,
warm_start=warm_start,
class_weight=class_weight,
n_jobs=n_jobs)
self.C = C
self.loss = loss
def partial_fit(self, X, y, classes=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of the training data
y : numpy array of shape [n_samples]
Subset of the target values
classes : array, shape = [n_classes]
Classes across all calls to partial_fit.
Can be obtained by via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight == 'balanced':
raise ValueError("class_weight 'balanced' is not supported for "
"partial_fit. For 'balanced' weights, use "
"`sklearn.utils.compute_class_weight` with "
"`class_weight='balanced'`. In place of y you "
"can use a large enough subset of the full "
"training set target to properly estimate the "
"class frequency distributions. Pass the "
"resulting weights as the class_weight "
"parameter.")
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr, n_iter=1,
classes=classes, sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_classes,n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [n_classes]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr,
coef_init=coef_init, intercept_init=intercept_init)
class PassiveAggressiveRegressor(BaseSGDRegressor):
"""Passive Aggressive Regressor
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float
Maximum step size (regularization). Defaults to 1.0.
epsilon : float
If the difference between the current prediction and the correct label
is below this threshold, the model is not updated.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
loss : string, optional
The loss function to be used:
epsilon_insensitive: equivalent to PA-I in the reference paper.
squared_epsilon_insensitive: equivalent to PA-II in the reference
paper.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
n_features]
Weights assigned to the features.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
SGDRegressor
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
    K. Crammer, O. Dekel, J. Keshet, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
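    Examples
    --------
    A minimal usage sketch (toy data; the values below are illustrative
    only)::

        import numpy as np
        from sklearn.linear_model import PassiveAggressiveRegressor
        X = np.array([[0.], [1.], [2.], [3.]])
        y = np.array([0., 1., 2., 3.])
        reg = PassiveAggressiveRegressor(random_state=0).fit(X, y)
        reg.predict([[1.5]])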
"""
def __init__(self, C=1.0, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, loss="epsilon_insensitive",
epsilon=DEFAULT_EPSILON, random_state=None, warm_start=False):
BaseSGDRegressor.__init__(self,
penalty=None,
l1_ratio=0,
epsilon=epsilon,
eta0=1.0,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
warm_start=warm_start)
self.C = C
self.loss = loss
def partial_fit(self, X, y):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of training data
y : numpy array of shape [n_samples]
Subset of target values
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr, n_iter=1,
sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [1]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr,
coef_init=coef_init,
intercept_init=intercept_init)
| bsd-3-clause |
marfcg/fludashboard | tests/test_fludb.py | 2 | 1437 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_fludashboard
----------------------------------
Tests for `fludashboard` module.
"""
# local
from fludashboard.libs.flu_data import FluDB
import unittest
import pandas as pd
class TestFluDB(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.fludb = FluDB()
def tearDown(self):
pass
def test_get_territory_id_from_name(self):
territory_id = self.fludb.get_territory_id_from_name('Rio de Janeiro')
assert territory_id == 33
territory_id = self.fludb.get_territory_id_from_name('SÃO PAULO')
assert territory_id == 35
def test_read_data(self):
df = self.fludb.read_data(
table_name='current_estimated_values',
dataset_id=1, scale_id=1, territory_id=33,
year=2017, week=25, base_year=None, base_week=None,
historical_week=None
)
assert not df.empty
assert df.loc[0, 'epiyear'] == 2017
assert df.loc[0, 'epiweek'] == 25
assert df.loc[0, 'territory_id'] == 33
def test_get_data(self):
df = self.fludb.get_data(
dataset_id=1, scale_id=1, year=2017,
territory_id=33, week=25, show_historical_weeks=True
)
# pandas configuration
pd.set_option('display.max_columns', 99)
assert not df.empty
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
ryfeus/lambda-packs | Tensorflow_Pandas_Numpy/source3.6/tensorflow/contrib/learn/python/learn/estimators/__init__.py | 34 | 12484 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An estimator is a rule for calculating an estimate of a given quantity.
# Estimators
* **Estimators** are used to train and evaluate TensorFlow models.
They support regression and classification problems.
* **Classifiers** are functions that have discrete outcomes.
* **Regressors** are functions that predict continuous values.
## Choosing the correct estimator
* For **Regression** problems use one of the following:
* `LinearRegressor`: Uses linear model.
* `DNNRegressor`: Uses DNN.
* `DNNLinearCombinedRegressor`: Uses Wide & Deep.
* `TensorForestEstimator`: Uses RandomForest.
See tf.contrib.tensor_forest.client.random_forest.TensorForestEstimator.
* `Estimator`: Use when you need a custom model.
* For **Classification** problems use one of the following:
* `LinearClassifier`: Multiclass classifier using Linear model.
* `DNNClassifier`: Multiclass classifier using DNN.
* `DNNLinearCombinedClassifier`: Multiclass classifier using Wide & Deep.
* `TensorForestEstimator`: Uses RandomForest.
See tf.contrib.tensor_forest.client.random_forest.TensorForestEstimator.
* `SVM`: Binary classifier using linear SVMs.
* `LogisticRegressor`: Use when you need custom model for binary
classification.
* `Estimator`: Use when you need custom model for N class classification.
## Pre-canned Estimators
Pre-canned estimators are machine learning estimators premade for general
purpose problems. If you need more customization, you can always write your
own custom estimator as described in the section below.
Pre-canned estimators are tested and optimized for speed and quality.
### Define the feature columns
Here are some possible types of feature columns used as inputs to a pre-canned
estimator.
Feature columns may vary based on the estimator used. So you can see which
feature columns are fed to each estimator in the below section.
```python
sparse_feature_a = sparse_column_with_keys(
column_name="sparse_feature_a", keys=["AB", "CD", ...])
embedding_feature_a = embedding_column(
sparse_id_column=sparse_feature_a, dimension=3, combiner="sum")
sparse_feature_b = sparse_column_with_hash_bucket(
column_name="sparse_feature_b", hash_bucket_size=1000)
embedding_feature_b = embedding_column(
sparse_id_column=sparse_feature_b, dimension=16, combiner="sum")
crossed_feature_a_x_b = crossed_column(
columns=[sparse_feature_a, sparse_feature_b], hash_bucket_size=10000)
real_feature = real_valued_column("real_feature")
real_feature_buckets = bucketized_column(
source_column=real_feature,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
```
### Create the pre-canned estimator
DNNClassifier, DNNRegressor, and DNNLinearCombinedClassifier are all pretty
similar to each other in how you use them. You can easily plug in an
optimizer and/or regularization to those estimators.
#### DNNClassifier
A classifier for TensorFlow DNN models.
```python
my_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNClassifier(
feature_columns=my_features,
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### DNNRegressor
A regressor for TensorFlow DNN models.
```python
my_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNRegressor(
feature_columns=my_features,
hidden_units=[1024, 512, 256])
# Or estimator using the ProximalAdagradOptimizer optimizer with
# regularization.
estimator = DNNRegressor(
feature_columns=my_features,
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### DNNLinearCombinedClassifier
A classifier for TensorFlow Linear and DNN joined training models.
* Wide and deep model
* Multi class (2 by default)
```python
my_linear_features = [crossed_feature_a_x_b]
my_deep_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNLinearCombinedClassifier(
# Common settings
n_classes=n_classes,
weight_column_name=weight_column_name,
# Wide settings
linear_feature_columns=my_linear_features,
linear_optimizer=tf.train.FtrlOptimizer(...),
# Deep settings
dnn_feature_columns=my_deep_features,
dnn_hidden_units=[1000, 500, 100],
dnn_optimizer=tf.train.AdagradOptimizer(...))
```
#### LinearClassifier
Train a linear model to classify instances into one of multiple possible
classes. When number of possible classes is 2, this is binary classification.
```python
my_features = [sparse_feature_b, crossed_feature_a_x_b]
estimator = LinearClassifier(
feature_columns=my_features,
optimizer=tf.train.FtrlOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### LinearRegressor
Train a linear regression model to predict a label value given observation of
feature values.
```python
my_features = [sparse_feature_b, crossed_feature_a_x_b]
estimator = LinearRegressor(
feature_columns=my_features)
```
### LogisticRegressor
Logistic regression estimator for binary classification.
```python
# See tf.contrib.learn.Estimator(...) for details on model_fn structure
def my_model_fn(...):
pass
estimator = LogisticRegressor(model_fn=my_model_fn)
# Input builders
def input_fn_train():
pass
estimator.fit(input_fn=input_fn_train)
estimator.predict(x=x)
```
#### SVM - Support Vector Machine
Support Vector Machine (SVM) model for binary classification.
Currently only linear SVMs are supported.
```python
my_features = [real_feature, sparse_feature_a]
estimator = SVM(
example_id_column='example_id',
feature_columns=my_features,
l2_regularization=10.0)
```
#### DynamicRnnEstimator
An `Estimator` that uses a recurrent neural network with dynamic unrolling.
```python
problem_type = ProblemType.CLASSIFICATION # or REGRESSION
prediction_type = PredictionType.SINGLE_VALUE # or MULTIPLE_VALUE
estimator = DynamicRnnEstimator(problem_type,
prediction_type,
my_feature_columns)
```
### Use the estimator
There are two main functions for using estimators: `fit` for training and
`evaluate` for evaluation.
You can pass a different input function to each one in order to use separate
datasets for training and evaluation.
```python
# Input builders
def input_fn_train(): # returns x, Y
...
estimator.fit(input_fn=input_fn_train)
def input_fn_eval(): # returns x, Y
...
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
## Creating Custom Estimator
To create a custom `Estimator`, provide a function to `Estimator`'s
constructor that builds your model (`model_fn`, below):
```python
estimator = tf.contrib.learn.Estimator(
model_fn=model_fn,
model_dir=model_dir) # Where the model's data (e.g., checkpoints)
# are saved.
```
Here is a skeleton of this function, with descriptions of its arguments and
return values in the accompanying tables:
```python
def model_fn(features, targets, mode, params):
# Logic to do the following:
# 1. Configure the model via TensorFlow operations
# 2. Define the loss function for training/evaluation
# 3. Define the training operation/optimizer
# 4. Generate predictions
return predictions, loss, train_op
```
You may use `mode` and check against
`tf.contrib.learn.ModeKeys.{TRAIN, EVAL, INFER}` to parameterize `model_fn`.
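For illustration, here is a minimal sketch of branching on `mode` inside a
custom `model_fn`; the helpers `build_predictions`, `build_loss`, and
`build_train_op` are placeholders introduced for this sketch, not part of the
API.
```python
def model_fn(features, targets, mode, params):
  predictions = build_predictions(features, params)   # placeholder helper
  if mode == tf.contrib.learn.ModeKeys.INFER:
    # Inference only needs predictions; no loss or training op is required.
    return predictions, None, None
  loss = build_loss(predictions, targets)              # placeholder helper
  train_op = None
  if mode == tf.contrib.learn.ModeKeys.TRAIN:
    train_op = build_train_op(loss, params)            # placeholder helper
  return predictions, loss, train_op
```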
In the Further Reading section below, there is an end-to-end TensorFlow
tutorial for building a custom estimator.
## Additional Estimators
There is an additional estimator under
`tensorflow.contrib.factorization.python.ops`:
* Gaussian mixture model (GMM) clustering (see the sketch below)
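A rough usage sketch is below; the import path and the `num_clusters`
argument are assumptions about that module rather than confirmed API details.
```python
# Assumed import path and constructor argument -- verify against your TF version.
from tensorflow.contrib.factorization.python.ops.gmm import GMM

gmm = GMM(num_clusters=10)        # number of mixture components (assumed argument name)
gmm.fit(input_fn=input_fn_train)  # same input_fn pattern as the estimators above
```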
## Further reading
For further reading, there are several tutorials with relevant topics,
including:
* [Overview of linear models](../../../tutorials/linear/overview.md)
* [Linear model tutorial](../../../tutorials/wide/index.md)
* [Wide and deep learning tutorial](../../../tutorials/wide_and_deep/index.md)
* [Custom estimator tutorial](../../../tutorials/estimators/index.md)
* [Building input functions](../../../tutorials/input_fn/index.md)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.estimators.constants import ProblemType
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNEstimator
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNRegressor
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedEstimator
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedRegressor
from tensorflow.contrib.learn.python.learn.estimators.dynamic_rnn_estimator import DynamicRnnEstimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import BaseEstimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import Estimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import GraphRewriteSpec
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input_fn
from tensorflow.contrib.learn.python.learn.estimators.estimator import SKCompat
from tensorflow.contrib.learn.python.learn.estimators.head import binary_svm_head
from tensorflow.contrib.learn.python.learn.estimators.head import Head
from tensorflow.contrib.learn.python.learn.estimators.head import loss_only_head
from tensorflow.contrib.learn.python.learn.estimators.head import multi_class_head
from tensorflow.contrib.learn.python.learn.estimators.head import multi_head
from tensorflow.contrib.learn.python.learn.estimators.head import multi_label_head
from tensorflow.contrib.learn.python.learn.estimators.head import no_op_train_fn
from tensorflow.contrib.learn.python.learn.estimators.head import poisson_regression_head
from tensorflow.contrib.learn.python.learn.estimators.head import regression_head
from tensorflow.contrib.learn.python.learn.estimators.kmeans import KMeansClustering
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearClassifier
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearEstimator
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearRegressor
from tensorflow.contrib.learn.python.learn.estimators.logistic_regressor import LogisticRegressor
from tensorflow.contrib.learn.python.learn.estimators.metric_key import MetricKey
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModeKeys
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModelFnOps
from tensorflow.contrib.learn.python.learn.estimators.prediction_key import PredictionKey
from tensorflow.contrib.learn.python.learn.estimators.rnn_common import PredictionType
from tensorflow.contrib.learn.python.learn.estimators.run_config import ClusterConfig
from tensorflow.contrib.learn.python.learn.estimators.run_config import Environment
from tensorflow.contrib.learn.python.learn.estimators.run_config import RunConfig
from tensorflow.contrib.learn.python.learn.estimators.run_config import TaskType
from tensorflow.contrib.learn.python.learn.estimators.svm import SVM
| mit |
untom/scikit-learn | examples/decomposition/plot_pca_vs_fa_model_selection.py | 78 | 4510 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
===============================================================
Model selection with Probabilistic PCA and Factor Analysis (FA)
===============================================================
Probabilistic PCA and Factor Analysis are probabilistic models.
The consequence is that the likelihood of new data can be used
for model selection and covariance estimation.
Here we compare PCA and FA with cross-validation on low rank data corrupted
with homoscedastic noise (noise variance
is the same for each feature) or heteroscedastic noise (noise variance
is different for each feature). In a second step we compare the model
likelihood to the likelihoods obtained from shrinkage covariance estimators.
One can observe that with homoscedastic noise both FA and PCA succeed
in recovering the size of the low rank subspace. The likelihood with PCA
is higher than FA in this case. However PCA fails and overestimates
the rank when heteroscedastic noise is present. Under appropriate
circumstances the low rank models are more likely than shrinkage models.
The automatic estimation from
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
by Thomas P. Minka is also compared.
"""
print(__doc__)
# Authors: Alexandre Gramfort
# Denis A. Engemann
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.covariance import ShrunkCovariance, LedoitWolf
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
###############################################################################
# Create the data
n_samples, n_features, rank = 1000, 50, 10
sigma = 1.
rng = np.random.RandomState(42)
U, _, _ = linalg.svd(rng.randn(n_features, n_features))
X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)
# Adding homoscedastic noise
X_homo = X + sigma * rng.randn(n_samples, n_features)
# Adding heteroscedastic noise
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas
###############################################################################
# Fit the models
n_components = np.arange(0, n_features, 5) # options for n_components
def compute_scores(X):
pca = PCA()
fa = FactorAnalysis()
pca_scores, fa_scores = [], []
for n in n_components:
pca.n_components = n
fa.n_components = n
pca_scores.append(np.mean(cross_val_score(pca, X)))
fa_scores.append(np.mean(cross_val_score(fa, X)))
return pca_scores, fa_scores
def shrunk_cov_score(X):
shrinkages = np.logspace(-2, 0, 30)
cv = GridSearchCV(ShrunkCovariance(), {'shrinkage': shrinkages})
return np.mean(cross_val_score(cv.fit(X).best_estimator_, X))
def lw_score(X):
return np.mean(cross_val_score(LedoitWolf(), X))
for X, title in [(X_homo, 'Homoscedastic Noise'),
(X_hetero, 'Heteroscedastic Noise')]:
pca_scores, fa_scores = compute_scores(X)
n_components_pca = n_components[np.argmax(pca_scores)]
n_components_fa = n_components[np.argmax(fa_scores)]
pca = PCA(n_components='mle')
pca.fit(X)
n_components_pca_mle = pca.n_components_
print("best n_components by PCA CV = %d" % n_components_pca)
print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
print("best n_components by PCA MLE = %d" % n_components_pca_mle)
plt.figure()
plt.plot(n_components, pca_scores, 'b', label='PCA scores')
plt.plot(n_components, fa_scores, 'r', label='FA scores')
plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-')
plt.axvline(n_components_pca, color='b',
label='PCA CV: %d' % n_components_pca, linestyle='--')
plt.axvline(n_components_fa, color='r',
label='FactorAnalysis CV: %d' % n_components_fa, linestyle='--')
plt.axvline(n_components_pca_mle, color='k',
label='PCA MLE: %d' % n_components_pca_mle, linestyle='--')
# compare with other covariance estimators
plt.axhline(shrunk_cov_score(X), color='violet',
label='Shrunk Covariance MLE', linestyle='-.')
plt.axhline(lw_score(X), color='orange',
                label='LedoitWolf MLE', linestyle='-.')
plt.xlabel('nb of components')
plt.ylabel('CV scores')
plt.legend(loc='lower right')
plt.title(title)
plt.show()
| bsd-3-clause |
andyh616/mne-python | examples/preprocessing/plot_find_ecg_artifacts.py | 19 | 1304 | """
==================
Find ECG artifacts
==================
Locate QRS component of ECG.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
# Setup for reading the raw data
raw = io.Raw(raw_fname)
event_id = 999
ecg_events, _, _ = mne.preprocessing.find_ecg_events(raw, event_id,
ch_name='MEG 1531')
# Read epochs
picks = mne.pick_types(raw.info, meg=False, eeg=False, stim=False, eog=False,
include=['MEG 1531'], exclude='bads')
tmin, tmax = -0.1, 0.1
epochs = mne.Epochs(raw, ecg_events, event_id, tmin, tmax, picks=picks,
proj=False)
data = epochs.get_data()
print("Number of detected ECG artifacts : %d" % len(data))
###############################################################################
# Plot ECG artifacts
plt.plot(1e3 * epochs.times, np.squeeze(data).T)
plt.xlabel('Times (ms)')
plt.ylabel('ECG')
plt.show()
| bsd-3-clause |
datapythonista/pandas | pandas/tests/indexes/test_common.py | 1 | 14359 | """
Collection of tests asserting things that should be true for
any index subclass. Makes use of the `indices` fixture defined
in pandas/tests/indexes/conftest.py.
"""
import re
import numpy as np
import pytest
from pandas._libs.tslibs import iNaT
from pandas.compat import IS64
from pandas.core.dtypes.common import (
is_period_dtype,
needs_i8_conversion,
)
import pandas as pd
from pandas import (
CategoricalIndex,
DatetimeIndex,
MultiIndex,
PeriodIndex,
RangeIndex,
TimedeltaIndex,
)
import pandas._testing as tm
class TestCommon:
def test_droplevel(self, index):
# GH 21115
if isinstance(index, MultiIndex):
# Tested separately in test_multi.py
return
assert index.droplevel([]).equals(index)
for level in index.name, [index.name]:
if isinstance(index.name, tuple) and level is index.name:
# GH 21121 : droplevel with tuple name
continue
msg = (
"Cannot remove 1 levels from an index with 1 levels: at least one "
"level must be left."
)
with pytest.raises(ValueError, match=msg):
index.droplevel(level)
for level in "wrong", ["wrong"]:
with pytest.raises(
KeyError,
match=r"'Requested level \(wrong\) does not match index name \(None\)'",
):
index.droplevel(level)
def test_constructor_non_hashable_name(self, index_flat):
# GH 20527
index = index_flat
message = "Index.name must be a hashable type"
renamed = [["1"]]
# With .rename()
with pytest.raises(TypeError, match=message):
index.rename(name=renamed)
# With .set_names()
with pytest.raises(TypeError, match=message):
index.set_names(names=renamed)
def test_constructor_unwraps_index(self, index_flat):
a = index_flat
b = type(a)(a)
tm.assert_equal(a._data, b._data)
def test_to_flat_index(self, index_flat):
# 22866
index = index_flat
result = index.to_flat_index()
tm.assert_index_equal(result, index)
def test_set_name_methods(self, index_flat):
# MultiIndex tested separately
index = index_flat
new_name = "This is the new name for this index"
original_name = index.name
new_ind = index.set_names([new_name])
assert new_ind.name == new_name
assert index.name == original_name
res = index.rename(new_name, inplace=True)
# should return None
assert res is None
assert index.name == new_name
assert index.names == [new_name]
        # FIXME: don't leave commented-out
# with pytest.raises(TypeError, match="list-like"):
# # should still fail even if it would be the right length
# ind.set_names("a")
with pytest.raises(ValueError, match="Level must be None"):
index.set_names("a", level=0)
# rename in place just leaves tuples and other containers alone
name = ("A", "B")
index.rename(name, inplace=True)
assert index.name == name
assert index.names == [name]
def test_copy_and_deepcopy(self, index_flat):
from copy import (
copy,
deepcopy,
)
index = index_flat
for func in (copy, deepcopy):
idx_copy = func(index)
assert idx_copy is not index
assert idx_copy.equals(index)
new_copy = index.copy(deep=True, name="banana")
assert new_copy.name == "banana"
def test_unique(self, index_flat):
# don't test a MultiIndex here (as its tested separated)
index = index_flat
# GH 17896
expected = index.drop_duplicates()
for level in 0, index.name, None:
result = index.unique(level=level)
tm.assert_index_equal(result, expected)
msg = "Too many levels: Index has only 1 level, not 4"
with pytest.raises(IndexError, match=msg):
index.unique(level=3)
msg = (
fr"Requested level \(wrong\) does not match index name "
fr"\({re.escape(index.name.__repr__())}\)"
)
with pytest.raises(KeyError, match=msg):
index.unique(level="wrong")
def test_get_unique_index(self, index_flat):
# MultiIndex tested separately
index = index_flat
if not len(index):
pytest.skip("Skip check for empty Index and MultiIndex")
idx = index[[0] * 5]
idx_unique = index[[0]]
# We test against `idx_unique`, so first we make sure it's unique
# and doesn't contain nans.
assert idx_unique.is_unique is True
try:
assert idx_unique.hasnans is False
except NotImplementedError:
pass
result = idx._get_unique_index()
tm.assert_index_equal(result, idx_unique)
# nans:
if not index._can_hold_na:
pytest.skip("Skip na-check if index cannot hold na")
if is_period_dtype(index.dtype):
vals = index[[0] * 5]._data
vals[0] = pd.NaT
elif needs_i8_conversion(index.dtype):
vals = index._data._ndarray[[0] * 5]
vals[0] = iNaT
else:
vals = index.values[[0] * 5]
vals[0] = np.nan
vals_unique = vals[:2]
if index.dtype.kind in ["m", "M"]:
# i.e. needs_i8_conversion but not period_dtype, as above
vals = type(index._data)(vals, dtype=index.dtype)
vals_unique = type(index._data)._simple_new(vals_unique, dtype=index.dtype)
idx_nan = index._shallow_copy(vals)
idx_unique_nan = index._shallow_copy(vals_unique)
assert idx_unique_nan.is_unique is True
assert idx_nan.dtype == index.dtype
assert idx_unique_nan.dtype == index.dtype
expected = idx_unique_nan
for i in [idx_nan, idx_unique_nan]:
result = i._get_unique_index()
tm.assert_index_equal(result, expected)
def test_searchsorted_monotonic(self, index_flat):
# GH17271
index = index_flat
# not implemented for tuple searches in MultiIndex
# or Intervals searches in IntervalIndex
if isinstance(index, pd.IntervalIndex):
pytest.skip("Skip check for MultiIndex/IntervalIndex")
# nothing to test if the index is empty
if index.empty:
pytest.skip("Skip check for empty Index")
value = index[0]
# determine the expected results (handle dupes for 'right')
expected_left, expected_right = 0, (index == value).argmin()
if expected_right == 0:
            # all values are the same, expected_right should be the index length
expected_right = len(index)
# test _searchsorted_monotonic in all cases
# test searchsorted only for increasing
if index.is_monotonic_increasing:
ssm_left = index._searchsorted_monotonic(value, side="left")
assert expected_left == ssm_left
ssm_right = index._searchsorted_monotonic(value, side="right")
assert expected_right == ssm_right
ss_left = index.searchsorted(value, side="left")
assert expected_left == ss_left
ss_right = index.searchsorted(value, side="right")
assert expected_right == ss_right
elif index.is_monotonic_decreasing:
ssm_left = index._searchsorted_monotonic(value, side="left")
assert expected_left == ssm_left
ssm_right = index._searchsorted_monotonic(value, side="right")
assert expected_right == ssm_right
else:
# non-monotonic should raise.
msg = "index must be monotonic increasing or decreasing"
with pytest.raises(ValueError, match=msg):
index._searchsorted_monotonic(value, side="left")
def test_drop_duplicates(self, index_flat, keep):
# MultiIndex is tested separately
index = index_flat
if isinstance(index, RangeIndex):
pytest.skip(
"RangeIndex is tested in test_drop_duplicates_no_duplicates "
"as it cannot hold duplicates"
)
if len(index) == 0:
pytest.skip(
"empty index is tested in test_drop_duplicates_no_duplicates "
"as it cannot hold duplicates"
)
# make unique index
holder = type(index)
unique_values = list(set(index))
unique_idx = holder(unique_values)
# make duplicated index
n = len(unique_idx)
duplicated_selection = np.random.choice(n, int(n * 1.5))
idx = holder(unique_idx.values[duplicated_selection])
# Series.duplicated is tested separately
expected_duplicated = (
pd.Series(duplicated_selection).duplicated(keep=keep).values
)
tm.assert_numpy_array_equal(idx.duplicated(keep=keep), expected_duplicated)
# Series.drop_duplicates is tested separately
expected_dropped = holder(pd.Series(idx).drop_duplicates(keep=keep))
tm.assert_index_equal(idx.drop_duplicates(keep=keep), expected_dropped)
def test_drop_duplicates_no_duplicates(self, index_flat):
# MultiIndex is tested separately
index = index_flat
# make unique index
if isinstance(index, RangeIndex):
# RangeIndex cannot have duplicates
unique_idx = index
else:
holder = type(index)
unique_values = list(set(index))
unique_idx = holder(unique_values)
# check on unique index
expected_duplicated = np.array([False] * len(unique_idx), dtype="bool")
tm.assert_numpy_array_equal(unique_idx.duplicated(), expected_duplicated)
result_dropped = unique_idx.drop_duplicates()
tm.assert_index_equal(result_dropped, unique_idx)
# validate shallow copy
assert result_dropped is not unique_idx
def test_drop_duplicates_inplace(self, index):
msg = r"drop_duplicates\(\) got an unexpected keyword argument"
with pytest.raises(TypeError, match=msg):
index.drop_duplicates(inplace=True)
def test_has_duplicates(self, index_flat):
# MultiIndex tested separately in:
# tests/indexes/multi/test_unique_and_duplicates.
index = index_flat
holder = type(index)
if not len(index) or isinstance(index, RangeIndex):
# MultiIndex tested separately in:
# tests/indexes/multi/test_unique_and_duplicates.
# RangeIndex is unique by definition.
pytest.skip("Skip check for empty Index, MultiIndex, and RangeIndex")
idx = holder([index[0]] * 5)
assert idx.is_unique is False
assert idx.has_duplicates is True
@pytest.mark.parametrize(
"dtype",
["int64", "uint64", "float64", "category", "datetime64[ns]", "timedelta64[ns]"],
)
def test_astype_preserves_name(self, index, dtype):
# https://github.com/pandas-dev/pandas/issues/32013
if isinstance(index, MultiIndex):
index.names = ["idx" + str(i) for i in range(index.nlevels)]
else:
index.name = "idx"
warn = None
if dtype in ["int64", "uint64"]:
if needs_i8_conversion(index.dtype):
warn = FutureWarning
elif (
isinstance(index, DatetimeIndex)
and index.tz is not None
and dtype == "datetime64[ns]"
):
# This astype is deprecated in favor of tz_localize
warn = FutureWarning
try:
# Some of these conversions cannot succeed so we use a try / except
with tm.assert_produces_warning(warn):
result = index.astype(dtype)
except (ValueError, TypeError, NotImplementedError, SystemError):
return
if isinstance(index, MultiIndex):
assert result.names == index.names
else:
assert result.name == index.name
def test_asi8_deprecation(self, index):
# GH#37877
if isinstance(index, (DatetimeIndex, TimedeltaIndex, PeriodIndex)):
warn = None
else:
warn = FutureWarning
with tm.assert_produces_warning(warn):
index.asi8
@pytest.mark.parametrize("na_position", [None, "middle"])
def test_sort_values_invalid_na_position(index_with_missing, na_position):
with pytest.raises(ValueError, match=f"invalid na_position: {na_position}"):
index_with_missing.sort_values(na_position=na_position)
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_sort_values_with_missing(index_with_missing, na_position):
# GH 35584. Test that sort_values works with missing values,
# sort non-missing and place missing according to na_position
if isinstance(index_with_missing, CategoricalIndex):
pytest.skip("missing value sorting order not well-defined")
missing_count = np.sum(index_with_missing.isna())
not_na_vals = index_with_missing[index_with_missing.notna()].values
sorted_values = np.sort(not_na_vals)
if na_position == "first":
sorted_values = np.concatenate([[None] * missing_count, sorted_values])
else:
sorted_values = np.concatenate([sorted_values, [None] * missing_count])
expected = type(index_with_missing)(sorted_values)
result = index_with_missing.sort_values(na_position=na_position)
tm.assert_index_equal(result, expected)
def test_ndarray_compat_properties(index):
if isinstance(index, PeriodIndex) and not IS64:
pytest.skip("Overflow")
idx = index
assert idx.T.equals(idx)
assert idx.transpose().equals(idx)
values = idx.values
assert idx.shape == values.shape
assert idx.ndim == values.ndim
assert idx.size == values.size
if not isinstance(index, (RangeIndex, MultiIndex)):
# These two are not backed by an ndarray
assert idx.nbytes == values.nbytes
# test for validity
idx.nbytes
idx.values.nbytes
| bsd-3-clause |
amolkahat/pandas | pandas/core/computation/align.py | 6 | 5618 | """Core eval alignment algorithms
"""
import warnings
from functools import partial, wraps
from pandas.compat import zip, range
import numpy as np
import pandas as pd
from pandas import compat
from pandas.errors import PerformanceWarning
import pandas.core.common as com
from pandas.core.computation.common import _result_type_many
def _align_core_single_unary_op(term):
if isinstance(term.value, np.ndarray):
typ = partial(np.asanyarray, dtype=term.value.dtype)
else:
typ = type(term.value)
ret = typ,
if not hasattr(term.value, 'axes'):
ret += None,
else:
ret += _zip_axes_from_type(typ, term.value.axes),
return ret
def _zip_axes_from_type(typ, new_axes):
axes = {}
for ax_ind, ax_name in compat.iteritems(typ._AXIS_NAMES):
axes[ax_name] = new_axes[ax_ind]
return axes
def _any_pandas_objects(terms):
"""Check a sequence of terms for instances of PandasObject."""
return any(isinstance(term.value, pd.core.generic.PandasObject)
for term in terms)
def _filter_special_cases(f):
@wraps(f)
def wrapper(terms):
# single unary operand
if len(terms) == 1:
return _align_core_single_unary_op(terms[0])
term_values = (term.value for term in terms)
# we don't have any pandas objects
if not _any_pandas_objects(terms):
return _result_type_many(*term_values), None
return f(terms)
return wrapper
@_filter_special_cases
def _align_core(terms):
term_index = [i for i, term in enumerate(terms)
if hasattr(term.value, 'axes')]
term_dims = [terms[i].value.ndim for i in term_index]
ndims = pd.Series(dict(zip(term_index, term_dims)))
# initial axes are the axes of the largest-axis'd term
biggest = terms[ndims.idxmax()].value
typ = biggest._constructor
axes = biggest.axes
naxes = len(axes)
gt_than_one_axis = naxes > 1
for value in (terms[i].value for i in term_index):
is_series = isinstance(value, pd.Series)
is_series_and_gt_one_axis = is_series and gt_than_one_axis
for axis, items in enumerate(value.axes):
if is_series_and_gt_one_axis:
ax, itm = naxes - 1, value.index
else:
ax, itm = axis, items
if not axes[ax].is_(itm):
axes[ax] = axes[ax].join(itm, how='outer')
for i, ndim in compat.iteritems(ndims):
for axis, items in zip(range(ndim), axes):
ti = terms[i].value
if hasattr(ti, 'reindex'):
transpose = isinstance(ti, pd.Series) and naxes > 1
reindexer = axes[naxes - 1] if transpose else items
term_axis_size = len(ti.axes[axis])
reindexer_size = len(reindexer)
ordm = np.log10(max(1, abs(reindexer_size - term_axis_size)))
if ordm >= 1 and reindexer_size >= 10000:
w = ('Alignment difference on axis {axis} is larger '
'than an order of magnitude on term {term!r}, by '
'more than {ordm:.4g}; performance may suffer'
).format(axis=axis, term=terms[i].name, ordm=ordm)
warnings.warn(w, category=PerformanceWarning, stacklevel=6)
f = partial(ti.reindex, reindexer, axis=axis, copy=False)
terms[i].update(f())
terms[i].update(terms[i].value.values)
return typ, _zip_axes_from_type(typ, axes)
def _align(terms):
"""Align a set of terms"""
try:
# flatten the parse tree (a nested list, really)
terms = list(com.flatten(terms))
except TypeError:
# can't iterate so it must just be a constant or single variable
if isinstance(terms.value, pd.core.generic.NDFrame):
typ = type(terms.value)
return typ, _zip_axes_from_type(typ, terms.value.axes)
return np.result_type(terms.type), None
# if all resolved variables are numeric scalars
if all(term.is_scalar for term in terms):
return _result_type_many(*(term.value for term in terms)).type, None
# perform the main alignment
typ, axes = _align_core(terms)
return typ, axes
def _reconstruct_object(typ, obj, axes, dtype):
"""Reconstruct an object given its type, raw value, and possibly empty
(None) axes.
Parameters
----------
typ : object
A type
obj : object
The value to use in the type constructor
axes : dict
        The axes to use to construct the resulting pandas object
    dtype : numpy dtype
        The dtype to combine with ``obj.dtype`` when determining the dtype of
        the result
Returns
-------
ret : typ
An object of type ``typ`` with the value `obj` and possible axes
`axes`.
"""
try:
typ = typ.type
except AttributeError:
pass
res_t = np.result_type(obj.dtype, dtype)
if (not isinstance(typ, partial) and
issubclass(typ, pd.core.generic.PandasObject)):
return typ(obj, dtype=res_t, **axes)
# special case for pathological things like ~True/~False
if hasattr(res_t, 'type') and typ == np.bool_ and res_t != np.bool_:
ret_value = res_t.type(obj)
else:
ret_value = typ(obj).astype(res_t)
# The condition is to distinguish 0-dim array (returned in case of
# scalar) and 1 element array
# e.g. np.array(0) and np.array([0])
if len(obj.shape) == 1 and len(obj) == 1:
if not isinstance(ret_value, np.ndarray):
ret_value = np.array([ret_value]).astype(res_t)
return ret_value
| bsd-3-clause |
shuiruge/nn4post | tests/mnist.py | 1 | 7886 | """
Description
-----------
Forked from Nielsen's repository (Python 3 version), with some modifications.
"""
import pickle
import gzip
import numpy as np
from sklearn.utils import shuffle
def load_data():
"""Return the MNIST data as a tuple containing the training data,
the validation data, and the test data.
The ``training_data`` is returned as a tuple with two entries.
The first entry contains the actual training images. This is a
numpy ndarray with 50,000 entries. Each entry is, in turn, a
numpy ndarray with 784 values, representing the 28 * 28 = 784
pixels in a single MNIST image.
The second entry in the ``training_data`` tuple is a numpy ndarray
containing 50,000 entries. Those entries are just the digit
values (0...9) for the corresponding images contained in the first
entry of the tuple.
The ``validation_data`` and ``test_data`` are similar, except
each contains only 10,000 images.
This is a nice data format, but for use in neural networks it's
helpful to modify the format of the ``training_data`` a little.
That's done in the wrapper function ``load_data_wrapper()``, see
below.
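    Illustrative example of the shapes described above (assumes the data
    file ``../dat/mnist.pkl.gz`` used below is available):

        >>> training_data, validation_data, test_data = load_data()
        >>> training_data[0].shape, training_data[1].shape
        ((50000, 784), (50000,))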
"""
f = gzip.open('../dat/mnist.pkl.gz', 'rb')
training_data, validation_data, test_data = pickle.load(f, encoding="latin1")
f.close()
return (training_data, validation_data, test_data)
def load_data_wrapper(one_hot_y=False):
"""Return a tuple containing ``(training_data, validation_data,
test_data)``. Based on ``load_data``, but the format is more
convenient for use in our implementation of neural networks.
    In particular, each training input is reshaped to a ``(784, 1)`` column
    vector and each training label is one-hot encoded via
    ``vectorized_result``; the validation and test labels are one-hot encoded
    only when ``one_hot_y`` is True.
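    Illustrative example of the reshaped entries (same data file as
    ``load_data``):

        >>> training_data, validation_data, test_data = load_data_wrapper()
        >>> training_data[0][0].shape, training_data[1][0].shape
        ((784, 1), (10, 1))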
"""
tr_d, va_d, te_d = load_data()
training_inputs = [np.reshape(x, (784, 1)) for x in tr_d[0]]
training_results = [vectorized_result(y) for y in tr_d[1]]
training_data = (training_inputs, training_results)
if one_hot_y:
validation_inputs = [np.reshape(x, (784, 1)) for x in va_d[0]]
validation_results = [vectorized_result(y) for y in va_d[1]]
validation_data = (validation_inputs, validation_results)
test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]]
test_results = [vectorized_result(y) for y in te_d[1]]
test_data = (test_inputs, test_results)
else:
validation_inputs = [np.reshape(x, (784, 1)) for x in va_d[0]]
validation_data = (validation_inputs, va_d[1])
test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]]
test_data = (test_inputs, te_d[1])
return (training_data, validation_data, test_data)
def vectorized_result(j):
"""Return a 10-dimensional unit vector with a 1.0 in the jth
position and zeroes elsewhere. This is used to convert a digit
(0...9) into a corresponding desired output from the neural
network."""
e = np.zeros((10, 1))
e[j] = 1.0
return e
class MNIST(object):
""" Utils of loading, processing, and batch-emitting of MNIST dataset.
The MNIST are (28, 28)-pixal images.
Args:
noise_std:
            `float`, as the standard deviation of the Gaussian noise that is
            to be added to the output `y`.
batch_size:
            `int`, as the size of each mini-batch of training data. We employ
            no mini-batching for test data.
dtype:
Numpy `Dtype` object of `float`, optional. As the dtype of output
data. Default is `np.float32`.
seed:
`int` or `None`, optional. If `int`, then set the random-seed of
the noise in the output data. If `None`, do nothing. This arg is
for debugging. Default is `None`.
Attributes:
training_data:
            Tuple of three numpy arrays, as `x`, `y`, and `y_error` for training
data, with shape `(50000, 784)`, `(50000, 10)`, and `(50000, 10)`
respectively.
validation_data:
            Tuple of three numpy arrays, as `x`, `y`, and `y_error` for
validation data, with shape `(10000, 784)`, `(10000,)`, and
`(10000,)` respectively.
test_data:
            Tuple of three numpy arrays, as `x`, `y`, and `y_error` for test
data, with shape `(10000, 784)`, `(10000)`, and `(10000)`
respectively.
n_data:
`int`, as the number of training_data.
batch_size:
`int`, as the batch-size of training_data, as the same argument in
`__init__`.
n_batches_per_epoch:
`int`, as the number of mini-batches per epoch.
Methods:
batch_generator:
            Used to define a generator that emits mini-batches of training
            data when `next()` is called on it.
"""
def __init__(self, noise_std, batch_size,
dtype=np.float32, seed=None,
verbose=True):
self._dtype = dtype
self._noise_std = noise_std
self.batch_size = batch_size
if seed is not None:
np.random.seed(seed)
self._get_data()
def _get_data(self):
""" Generate attributes: `training_data`, `validation_data`, and
`test_data`, as well as `n_data` and `n_batches_per_epoch`. """
training_data, validation_data, test_data = load_data_wrapper()
# Preprocess training data
x_tr, y_tr = training_data
x_tr = self._preprocess(x_tr)
y_tr = self._preprocess(y_tr)
y_err_tr = self._noise_std * np.ones(y_tr.shape, dtype=self._dtype)
self.training_data = (x_tr, y_tr, y_err_tr)
self.n_data = x_tr.shape[0]
self.n_batches_per_epoch = round(self.n_data/self.batch_size)
        # Preprocess validation data
x_va, y_va = validation_data
x_va = self._preprocess(x_va)
y_va = self._preprocess(y_va)
y_err_va = 0.0 * np.ones(y_va.shape, dtype=self._dtype)
self.validation_data = (x_va, y_va, y_err_va)
# Preprocess test data
x_te, y_te = test_data
x_te = self._preprocess(x_te)
y_te = self._preprocess(y_te)
y_err_te = 0.0 * np.ones(y_te.shape, dtype=self._dtype)
self.test_data = (x_te, y_te, y_err_te)
def _preprocess(self, data):
""" Preprocessing MNIST data, including converting to numpy array,
re-arrange the shape and dtype.
Args:
data:
                Any element of the tuple returned by calling
                `load_data_wrapper()` above.
Returns:
            The preprocessed data, as a numpy array. (This copies the input `data`,
so that the input `data` will not be altered.)
"""
data = np.asarray(data, dtype=self._dtype)
data = np.squeeze(data)
return data
def batch_generator(self):
""" Used to define a generator that emits mini-batch of training data,
by acting `next()`.
Example:
        ```python
mnist_ = MNIST(...)
batch_generator = mnist_.batch_generator()
x_batch, y_batch, y_error_batch = next(batch_generator)
        ```
Returns:
Tuple of three numpy arraies `(x, y, y_error)`, for the inputs of the
model, the observed outputs of the model , and the standard derivatives
of the observation, respectively. They are used for training only.
"""
x, y, y_err = self.training_data
batch_size = self.batch_size
n_data = self.n_data
while True:
x, y, y_err = shuffle(x, y, y_err) # XXX: copy ???
for k in range(0, n_data, batch_size):
mini_batch = (x[k:k+batch_size],
y[k:k+batch_size],
y_err[k:k+batch_size])
yield mini_batch
if __name__ == '__main__':
""" Test. """
mnist_ = MNIST(noise_std=0.1, batch_size=128)
| gpl-3.0 |
shusenl/scikit-learn | sklearn/metrics/tests/test_ranking.py | 127 | 40813 | from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn import svm
from sklearn import ensemble
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import coverage_error
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
    If binary is True, restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`."""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
            # i.e., percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
def test_roc_curve():
# Test Area under Receiver Operating Characteristic (ROC) curve
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
roc_auc = auc(fpr, tpr)
expected_auc = _auc(y_true, probas_pred)
assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
    # Make sure that roc_curve returns a curve starting at 0 and ending at
    # 1 even in corner cases
rng = np.random.RandomState(0)
y_true = np.array([0] * 50 + [1] * 50)
y_pred = rng.randint(3, size=100)
fpr, tpr, thr = roc_curve(y_true, y_pred)
assert_equal(fpr[0], 0)
assert_equal(fpr[-1], 1)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
# Test whether the returned threshold matches up with tpr
# make small toy dataset
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
# use the given thresholds to determine the tpr
tpr_correct = []
for t in thresholds:
tp = np.sum((probas_pred >= t) & y_true)
p = np.sum(y_true)
tpr_correct.append(1.0 * tp / p)
# compare tpr and tpr_correct to see if the thresholds' order was correct
assert_array_almost_equal(tpr, tpr_correct, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_nonrepeating_thresholds():
# Test to ensure that we don't return spurious repeating thresholds.
# Duplicated thresholds can arise due to machine precision issues.
dataset = datasets.load_digits()
X = dataset['data']
y = dataset['target']
# This random forest classifier can only return probabilities
# significant to two decimal places
clf = ensemble.RandomForestClassifier(n_estimators=100, random_state=0)
# How well can the classifier predict whether a digit is less than 5?
# This task contributes floating point roundoff errors to the probabilities
train, test = slice(None, None, 2), slice(1, None, 2)
probas_pred = clf.fit(X[train], y[train]).predict_proba(X[test])
y_score = probas_pred[:, :5].sum(axis=1) # roundoff errors begin here
y_true = [yy < 5 for yy in y[test]]
# Check for repeating values in the thresholds
fpr, tpr, thresholds = roc_curve(y_true, y_score)
assert_equal(thresholds.size, np.unique(np.round(thresholds, 2)).size)
def test_roc_curve_multi():
# roc_curve not applicable for multi-class problems
y_true, _, probas_pred = make_prediction(binary=False)
assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
# roc_curve for confidence scores
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.90, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
# roc_curve for hard decisions
y_true, pred, probas_pred = make_prediction(binary=True)
# always predict one
trivial_pred = np.ones(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# always predict zero
trivial_pred = np.zeros(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# hard decisions
fpr, tpr, thresholds = roc_curve(y_true, pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.78, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# assert there are warnings
w = UndefinedMetricWarning
fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
# all true labels, all fpr should be nan
assert_array_equal(fpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# assert there are warnings
fpr, tpr, thresholds = assert_warns(w, roc_curve,
[1 - x for x in y_true],
y_pred)
# all negative labels, all tpr should be nan
assert_array_equal(tpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [0, 1]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1, 1])
assert_array_almost_equal(fpr, [0, 0, 1])
assert_almost_equal(roc_auc, 0.)
y_true = [1, 0]
y_score = [1, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, 0.5)
y_true = [1, 0]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, .5)
y_true = [0, 0]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [0., 0.5, 1.])
assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])
y_true = [1, 1]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [np.nan, np.nan])
assert_array_almost_equal(fpr, [0.5, 1.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_auc():
# Test Area Under Curve (AUC) computation
x = [0, 1]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0, 0]
y = [0, 1, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [0, 1]
y = [1, 1]
assert_array_almost_equal(auc(x, y), 1)
x = [0, 0.5, 1]
y = [0, 0.5, 1]
assert_array_almost_equal(auc(x, y), 0.5)
def test_auc_duplicate_values():
# Test Area Under Curve (AUC) computation with duplicate values
# auc() was previously sorting the x and y arrays according to the indices
# from numpy.argsort(x), which was reordering the tied 0's in this example
# and resulting in an incorrect area computation. This test detects the
# error.
x = [-2.0, 0.0, 0.0, 0.0, 1.0]
y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
y3 = [2.0, 1.0, 0.5, 0.0, 1.0]
for y in (y1, y2, y3):
assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
# Incompatible shapes
assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
# Too few x values
assert_raises(ValueError, auc, [0.0], [0.1])
# x is not in order
assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
def test_auc_score_non_binary_class():
# Test that roc_auc_score function returns an error when trying
# to compute AUC for non-binary class values.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
clean_warning_registry()
with warnings.catch_warnings(record=True):
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
def test_precision_recall_curve():
y_true, _, probas_pred = make_prediction(binary=True)
_test_precision_recall_curve(y_true, probas_pred)
# Use {-1, 1} for labels; make sure original labels aren't modified
y_true[np.where(y_true == 0)] = -1
y_true_copy = y_true.copy()
_test_precision_recall_curve(y_true, probas_pred)
assert_array_equal(y_true_copy, y_true)
labels = [1, 0, 0, 1]
predict_probas = [1, 2, 3, 4]
p, r, t = precision_recall_curve(labels, predict_probas)
assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
assert_equal(p.size, r.size)
assert_equal(p.size, t.size + 1)
def test_precision_recall_curve_pos_label():
y_true, _, probas_pred = make_prediction(binary=False)
pos_label = 2
p, r, thresholds = precision_recall_curve(y_true,
probas_pred[:, pos_label],
pos_label=pos_label)
p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
probas_pred[:, pos_label])
assert_array_almost_equal(p, p2)
assert_array_almost_equal(r, r2)
assert_array_almost_equal(thresholds, thresholds2)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
    # Test Precision-Recall and area under PR curve
p, r, thresholds = precision_recall_curve(y_true, probas_pred)
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.85, 2)
assert_array_almost_equal(precision_recall_auc,
average_precision_score(y_true, probas_pred))
assert_almost_equal(_average_precision(y_true, probas_pred),
precision_recall_auc, 1)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
# Smoke test in the case of proba having only one value
p, r, thresholds = precision_recall_curve(y_true,
np.zeros_like(probas_pred))
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.75, 3)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
# Contains non-binary labels
assert_raises(ValueError, precision_recall_curve,
[0, 1, 2], [[0.0], [1.0], [1.0]])
def test_precision_recall_curve_toydata():
with np.errstate(all="raise"):
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [0, 1]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 0., 1.])
assert_array_almost_equal(r, [1., 0., 0.])
assert_almost_equal(auc_prc, 0.25)
y_true = [1, 0]
y_score = [1, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1., 0])
assert_almost_equal(auc_prc, .75)
y_true = [1, 0]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1, 0.])
assert_almost_equal(auc_prc, .75)
y_true = [0, 0]
y_score = [0.25, 0.75]
assert_raises(Exception, precision_recall_curve, y_true, y_score)
assert_raises(Exception, average_precision_score, y_true, y_score)
y_true = [1, 1]
y_score = [0.25, 0.75]
p, r, _ = precision_recall_curve(y_true, y_score)
assert_almost_equal(average_precision_score(y_true, y_score), 1.)
assert_array_almost_equal(p, [1., 1., 1.])
assert_array_almost_equal(r, [1, 0.5, 0.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 1.)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.625)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.625)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.25)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.75)
def test_score_scale_invariance():
# Test that average_precision_score and roc_auc_score are invariant by
# the scaling or shifting of probabilities
y_true, _, probas_pred = make_prediction(binary=True)
roc_auc = roc_auc_score(y_true, probas_pred)
roc_auc_scaled = roc_auc_score(y_true, 100 * probas_pred)
roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
assert_equal(roc_auc, roc_auc_scaled)
assert_equal(roc_auc, roc_auc_shifted)
pr_auc = average_precision_score(y_true, probas_pred)
pr_auc_scaled = average_precision_score(y_true, 100 * probas_pred)
pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
assert_equal(pr_auc, pr_auc_scaled)
assert_equal(pr_auc, pr_auc_shifted)
def check_lrap_toy(lrap_score):
    # Check on several small examples that it works
assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 1) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)
# Tie handling
assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
3 / 4)
def check_zero_or_all_relevant_labels(lrap_score):
random_state = check_random_state(0)
for n_labels in range(2, 5):
y_score = random_state.uniform(size=(1, n_labels))
y_score_ties = np.zeros_like(y_score)
# No relevant labels
y_true = np.zeros((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Only relevant labels
y_true = np.ones((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Degenerate case: only one label
assert_almost_equal(lrap_score([[1], [0], [1], [0]],
[[0.5], [0.5], [0.5], [0.5]]), 1.)
def check_lrap_error_raised(lrap_score):
    # Raise a ValueError if the input is not in an appropriate format
assert_raises(ValueError, lrap_score,
[0, 1, 0], [0.25, 0.3, 0.2])
assert_raises(ValueError, lrap_score, [0, 1, 2],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
assert_raises(ValueError, lrap_score, [(0), (1), (2)],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
    # Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
# Check tie handling in score
# Basic check with only ties and increasing label space
for n_labels in range(2, 10):
y_score = np.ones((1, n_labels))
        # Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
n_relevant / n_labels)
def check_lrap_without_tie_and_increasing_score(lrap_score):
    # Check that label ranking average precision works for various inputs:
# Basic check with increasing label space size and decreasing score
for n_labels in range(2, 10):
y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)
# First and last
y_true = np.zeros((1, n_labels))
y_true[0, 0] = 1
y_true[0, -1] = 1
assert_almost_equal(lrap_score(y_true, y_score),
(2 / n_labels + 1) / 2)
        # Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
            # Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
sum((r + 1) / ((pos + r + 1) * n_relevant)
for r in range(n_relevant)))
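                # The expected value follows from the decreasing scores: the
                # label at position p has rank p + 1, so the r-th relevant
                # label (at position pos + r) has rank pos + r + 1 with
                # r + 1 relevant labels ranked at or above it, contributing
                # precision (r + 1) / (pos + r + 1); averaging over the
                # n_relevant labels gives the sum above.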
def _my_lrap(y_true, y_score):
"""Simple implementation of label ranking average precision"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true)
y_score = check_array(y_score)
n_samples, n_labels = y_true.shape
score = np.empty((n_samples, ))
for i in range(n_samples):
        # The best rank corresponds to 1. Ranks higher than 1 are worse.
        # The best inverse ranking corresponds to n_labels.
unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
n_ranks = unique_rank.size
rank = n_ranks - inv_rank
        # Ranks need to be corrected to take ties into account,
        # e.g. two labels tied for rank 1 both end up with rank 2.
corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
rank = corr_rank[rank]
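        # Worked example of the tie correction (illustrative values):
        #   y_score[i] = [0.25, 0.5, 0.5] -> unique_rank = [0.25, 0.5],
        #   inv_rank = [0, 1, 1] and rank = n_ranks - inv_rank = [2, 1, 1].
        #   corr_rank = cumsum(bincount([2, 1, 1], minlength=3))
        #             = cumsum([0, 2, 1]) = [0, 2, 3], so
        #   rank = corr_rank[[2, 1, 1]] = [3, 2, 2]: the two tied labels both
        #   get rank 2 and the lowest-scoring label gets rank 3.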
relevant = y_true[i].nonzero()[0]
if relevant.size == 0 or relevant.size == n_labels:
score[i] = 1
continue
score[i] = 0.
for label in relevant:
            # Let's count the number of relevant labels with a better rank
            # (i.e. a smaller rank).
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
# Weight by the rank of the actual label
score[i] += n_ranked_above / rank[label]
score[i] /= relevant.size
return score.mean()
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
n_samples=20, random_state=0):
_, y_true = make_multilabel_classification(n_features=1,
allow_unlabeled=False,
random_state=random_state,
n_classes=n_classes,
n_samples=n_samples)
# Score with ties
y_score = sparse_random_matrix(n_components=y_true.shape[0],
n_features=y_true.shape[1],
random_state=random_state)
if hasattr(y_score, "toarray"):
y_score = y_score.toarray()
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
# Uniform score
random_state = check_random_state(random_state)
y_score = random_state.uniform(size=(n_samples, n_classes))
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
for fn in [label_ranking_average_precision_score, _my_lrap]:
yield check_lrap_toy, fn
yield check_lrap_without_tie_and_increasing_score, fn
yield check_lrap_only_ties, fn
yield check_zero_or_all_relevant_labels, fn
yield check_lrap_error_raised, label_ranking_average_precision_score
for n_samples, n_classes, random_state in product((1, 2, 8, 20),
(2, 5, 10),
range(1)):
yield (check_alternative_lrap_implementation,
label_ranking_average_precision_score,
n_classes, n_samples, random_state)
def test_coverage_error():
# Toy case
assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
    # Non-trivial case
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(1 + 3) / 2.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
def test_coverage_tie_handling():
assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3)
def test_label_ranking_loss():
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.25, 0.75]]), 0)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
# Undefined metrics - the ranking doesn't matter
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.25, 0.5, 0.5]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
    # Non-trivial case
assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(0 + 2 / 2) / 2.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
# Sparse csr matrices
assert_almost_equal(label_ranking_loss(
csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])),
[[0.1, 10, -3], [3, 1, 3]]),
(0 + 2 / 2) / 2.)
def test_ranking_appropriate_input_shape():
    # Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]])
def test_ranking_loss_ties_handling():
# Tie handling
assert_almost_equal(label_ranking_loss([[1, 0]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 1)
| bsd-3-clause |
dparks1134/PETs | scripts/plotSeqLenVsSupportedSplits.py | 1 | 2100 | #!/usr/bin/env python
__author__ = 'Donovan Parks'
__copyright__ = 'Copyright 2013'
__credits__ = ['Donovan Parks']
__license__ = 'GPL3'
__version__ = '1.0.0'
__maintainer__ = 'Donovan Parks'
__email__ = '[email protected]'
__status__ = 'Development'
import sys, argparse
from colorsys import hsv_to_rgb
from math import atan2, pi, sin, cos
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import numpy as np
from numpy import mean, min, cov, dot, linalg
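# Expected input formats (inferred from the parsing code below; not documented upstream):
#   cluster: tab-delimited, gene id in column 1 and supported-split count in column 3;
#            blank lines and lines starting with '%' are skipped
#   seqLen:  tab-delimited, gene id in column 1 and sequence length in column 2
# Example invocation (file names are illustrative):
#   python plotSeqLenVsSupportedSplits.py clusters.tsv gene_lengths.tsv scatter.png --dpi 300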
def doWork(args):
# read clustering data
geneIdToSupportedSplits = {}
for line in open(args.cluster):
if line.strip() == '' or line[0] == '%':
continue
lineSplit = line.split('\t')
geneIdToSupportedSplits[lineSplit[0]] = int(lineSplit[2])
# read sequence length
geneIdToLen = {}
for line in open(args.seqLen):
lineSplit = line.split('\t')
geneIdToLen[lineSplit[0]] = float(lineSplit[1])
# plot results
x = []
y = []
for geneId in geneIdToSupportedSplits:
x.append(geneIdToLen[geneId])
y.append(geneIdToSupportedSplits[geneId])
fig = plt.figure()
fig.set_size_inches(args.width, args.width)
ax = fig.add_axes([0.13,0.08,.82,.87])
ax.set_xlabel('Sequence Length', fontsize=10)
ax.set_ylabel('Supported Splits', fontsize=10)
ax.scatter(x, y, s = 18, lw=0.5)
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(8)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(8)
fig.savefig(args.output, dpi = args.dpi)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('cluster', help='File indicating clustering of genes.')
parser.add_argument('seqLen', help='File indicating length of genes.')
parser.add_argument('output', help='Output image file. Specify format with extension: .jpg, .png, .pdf, .svg.')
parser.add_argument('--dpi', help='Resolution of output image (default = 600).', type=int, default=600)
parser.add_argument('-w', '--width', help='Width of image in inches (default = 6).', type=float, default=6)
args = parser.parse_args()
	doWork(args)
| gpl-3.0 |
Gillu13/scipy | doc/source/conf.py | 17 | 10926 | # -*- coding: utf-8 -*-
import sys, os, re
# Check Sphinx version
import sphinx
if sphinx.__version__ < "1.1":
raise RuntimeError("Sphinx 1.1 or newer required")
needs_sphinx = '1.1'
# -----------------------------------------------------------------------------
# General configuration
# -----------------------------------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
sys.path.insert(0, os.path.abspath('../sphinxext'))
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.mathjax', 'numpydoc',
'sphinx.ext.intersphinx', 'sphinx.ext.coverage',
'sphinx.ext.autosummary', 'scipyoptdoc']
# Determine if the matplotlib has a recent enough version of the
# plot_directive.
try:
from matplotlib.sphinxext import plot_directive
except ImportError:
use_matplotlib_plot_directive = False
else:
try:
use_matplotlib_plot_directive = (plot_directive.__version__ >= 2)
except AttributeError:
use_matplotlib_plot_directive = False
if use_matplotlib_plot_directive:
extensions.append('matplotlib.sphinxext.plot_directive')
else:
raise RuntimeError("You need a recent enough version of matplotlib")
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'SciPy'
copyright = '2008-2016, The Scipy community'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
import scipy
version = re.sub(r'\.dev-.*$', r'.dev', scipy.__version__)
release = scipy.__version__
print "Scipy (VERSION %s)" % (version,)
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = "autolink"
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
exclude_dirs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -----------------------------------------------------------------------------
# HTML output
# -----------------------------------------------------------------------------
themedir = os.path.join(os.pardir, 'scipy-sphinx-theme', '_theme')
if os.path.isdir(themedir):
html_theme = 'scipy'
html_theme_path = [themedir]
if 'scipyorg' in tags:
# Build for the scipy.org website
html_theme_options = {
"edit_link": True,
"sidebar": "right",
"scipy_org_logo": True,
"rootlinks": [("https://scipy.org/", "Scipy.org"),
("https://docs.scipy.org/", "Docs")]
}
else:
# Default build
html_theme_options = {
"edit_link": False,
"sidebar": "left",
"scipy_org_logo": False,
"rootlinks": []
}
html_logo = '_static/scipyshiny_small.png'
html_sidebars = {'index': 'indexsidebar.html'}
else:
# Build without scipy.org sphinx theme present
if 'scipyorg' in tags:
raise RuntimeError("Get the scipy-sphinx-theme first, "
"via git submodule init & update")
else:
html_style = 'scipy_fallback.css'
html_logo = '_static/scipyshiny_small.png'
html_sidebars = {'index': 'indexsidebar.html'}
html_title = "%s v%s Reference Guide" % (project, version)
html_static_path = ['_static']
html_last_updated_fmt = '%b %d, %Y'
html_additional_pages = {}
html_use_modindex = True
html_copy_source = False
html_file_suffix = '.html'
htmlhelp_basename = 'scipy'
mathjax_path = "https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML"
# -----------------------------------------------------------------------------
# LaTeX output
# -----------------------------------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
_stdauthor = 'Written by the SciPy community'
latex_documents = [
('index', 'scipy-ref.tex', 'SciPy Reference Guide', _stdauthor, 'manual'),
# ('user/index', 'scipy-user.tex', 'SciPy User Guide',
# _stdauthor, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r'''
\usepackage{amsmath}
\DeclareUnicodeCharacter{00A0}{\nobreakspace}
% In the parameters etc. sections, align uniformly, and adjust label emphasis
\usepackage{expdlist}
\let\latexdescription=\description
\let\endlatexdescription=\enddescription
\renewenvironment{description}%
{\begin{latexdescription}[\setleftmargin{60pt}\breaklabel\setlabelstyle{\bfseries\itshape}]}%
{\end{latexdescription}}
% Make Examples/etc section headers smaller and more compact
\makeatletter
\titleformat{\paragraph}{\normalsize\normalfont\bfseries\itshape}%
{\py@NormalColor}{0em}{\py@NormalColor}{\py@NormalColor}
\titlespacing*{\paragraph}{0pt}{1ex}{0pt}
\makeatother
% Save vertical space in parameter lists and elsewhere
\makeatletter
\renewenvironment{quote}%
{\list{}{\topsep=0pt%
\parsep \z@ \@plus\p@}%
\item\relax}%
{\endlist}
\makeatother
% Fix footer/header
\renewcommand{\chaptermark}[1]{\markboth{\MakeUppercase{\thechapter.\ #1}}{}}
\renewcommand{\sectionmark}[1]{\markright{\MakeUppercase{\thesection.\ #1}}}
'''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = False
# -----------------------------------------------------------------------------
# Intersphinx configuration
# -----------------------------------------------------------------------------
intersphinx_mapping = {
'http://docs.python.org/dev': None,
'https://docs.scipy.org/doc/numpy': None,
}
# -----------------------------------------------------------------------------
# Numpy extensions
# -----------------------------------------------------------------------------
# If we want to do a phantom import from an XML file for all autodocs
phantom_import_file = 'dump.xml'
# Generate plots for example sections
numpydoc_use_plots = True
# -----------------------------------------------------------------------------
# Autosummary
# -----------------------------------------------------------------------------
if sphinx.__version__ >= "0.7":
import glob
autosummary_generate = glob.glob("*.rst")
# -----------------------------------------------------------------------------
# Coverage checker
# -----------------------------------------------------------------------------
coverage_ignore_modules = r"""
""".split()
coverage_ignore_functions = r"""
test($|_) (some|all)true bitwise_not cumproduct pkgload
generic\.
""".split()
coverage_ignore_classes = r"""
""".split()
coverage_c_path = []
coverage_c_regexes = {}
coverage_ignore_c_items = {}
#------------------------------------------------------------------------------
# Plot
#------------------------------------------------------------------------------
plot_pre_code = """
import numpy as np
np.random.seed(123)
"""
plot_include_source = True
plot_formats = [('png', 96), 'pdf']
plot_html_show_formats = False
import math
phi = (math.sqrt(5) + 1)/2
font_size = 13*72/96.0 # 13 px
plot_rcparams = {
'font.size': font_size,
'axes.titlesize': font_size,
'axes.labelsize': font_size,
'xtick.labelsize': font_size,
'ytick.labelsize': font_size,
'legend.fontsize': font_size,
'figure.figsize': (3*phi, 3),
'figure.subplot.bottom': 0.2,
'figure.subplot.left': 0.2,
'figure.subplot.right': 0.9,
'figure.subplot.top': 0.85,
'figure.subplot.wspace': 0.4,
'text.usetex': False,
}
if not use_matplotlib_plot_directive:
import matplotlib
matplotlib.rcParams.update(plot_rcparams)
# -----------------------------------------------------------------------------
# Source code links
# -----------------------------------------------------------------------------
import inspect
from os.path import relpath, dirname
for name in ['sphinx.ext.linkcode', 'linkcode', 'numpydoc.linkcode']:
try:
__import__(name)
extensions.append(name)
break
except ImportError:
pass
else:
print "NOTE: linkcode extension not found -- no links to source generated"
def linkcode_resolve(domain, info):
"""
Determine the URL corresponding to Python object
"""
if domain != 'py':
return None
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
except:
return None
try:
fn = inspect.getsourcefile(obj)
except:
fn = None
if not fn:
try:
fn = inspect.getsourcefile(sys.modules[obj.__module__])
except:
fn = None
if not fn:
return None
try:
source, lineno = inspect.getsourcelines(obj)
except:
lineno = None
if lineno:
linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1)
else:
linespec = ""
fn = relpath(fn, start=dirname(scipy.__file__))
if 'dev' in scipy.__version__:
return "http://github.com/scipy/scipy/blob/master/scipy/%s%s" % (
fn, linespec)
else:
return "http://github.com/scipy/scipy/blob/v%s/scipy/%s%s" % (
scipy.__version__, fn, linespec)
| bsd-3-clause |
wrshoemaker/ffpopsim | examples/neutral_coalescent.py | 2 | 1560 | import FFPopSim as h
import numpy as np
from matplotlib import pyplot as plt
import random as rd
L=100
pop=h.haploid_highd(L)
pop.outcrossing_rate=0.1
pop.crossover_rate=1.0/pop.L
pop.mutation_rate=5.0/pop.L
pop.carrying_capacity=30
#track the loci 10, 50 and 90
pop.track_locus_genealogy([10,50,90])
#initialize the population
pop.set_wildtype(pop.carrying_capacity)
pop.status()
#evolve population for several coalescent times
pop.evolve(4*pop.N)
#get tree at locus 10
tree = pop.genealogy.get_tree(10)
print "\nTree statistics:"
print "Branch length:", tree.total_branch_length()
print "External branch length:", tree.external_branch_length()
print "Number of leafs:", len(tree.leafs)
print "Number of internal nodes:", len(tree.nodes)-len(tree.leafs)
print "Time to MRCA:", pop.generation - tree.MRCA.age
#produce a subtree from a sample of leafs
n = 5
subsample = rd.sample(tree.leafs, n)
sub_tree = tree.create_subtree_from_keys(subsample)
print "\nSubtree with",n,"leafs"
print sub_tree.print_newick()
print "Each tree label is composed of the index of the individual and the size of the clone"
#trees can be exported as a BioPython tree structure
from Bio import Phylo as P
BPtree = tree.to_Biopython_tree()
plt.figure()
P.draw(BPtree)
#in the absence of recombination, trees at all three loci are identical.
#with crossovers, trees decouple with increasing outcrossing rate.
for locus in pop.genealogy.loci:
BPtree = pop.genealogy.get_tree(locus).to_Biopython_tree()
plt.figure()
plt.title('Tree at locus '+str(locus))
P.draw(BPtree)
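#display all figures (assuming a non-interactive matplotlib backend is in use)
plt.show()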
| gpl-3.0 |
ClusterWhisperer/clusterstats | clusterstats/stats.py | 1 | 1664 | """
stats.py
~~~~~~~~
"""
import json
import pandas as pd
FIELD_APPLICATION = 'Application'
FIELD_VERSION = 'Version'
FIELD_SUCCESS_COUNT = 'Success_Count'
OPERATOR_ADD = "+"
def calc_qos(total_queries, success_queries_cnt):
""" Calculate QoS """
return (float(success_queries_cnt)/float(total_queries)) * 100
def check_qos(threshold, total_queries, success_queries_cnt):
    """ Return True if the QoS meets or exceeds the threshold. """
return calc_qos(total_queries, success_queries_cnt) >= threshold
def calc_stats(data, group_by_fields, aggregate_field, aggregate_operator):
    """ Generic function to apply a group-by and an aggregation operation on the dataset.
Args:
data - list of dictionary
group_by_fields - fields that will be grouped by
aggregate_field - field on which aggreagation operator applied
aggregate_operator - aggregation operation, currently supports only sum.
Returns: DataFrame Object
"""
data_frame = pd.read_json(json.dumps(data))
if aggregate_operator != OPERATOR_ADD:
raise ValueError("Unsupported aggregate operator:{}".format(aggregate_operator))
return data_frame.groupby(group_by_fields).agg({aggregate_field : sum})
def write_stats(data_frame, output_dir):
    """Given the data_frame, write CSV output to the output directory, using
    the current timestamp in milliseconds as the filename.
Args:
- data_frame: DataFrame that to be serialized to the file.
- output_dir: File output directory.
Returns: the file path
"""
import time
import os.path
millis = int(round(time.time() * 1000))
path = os.path.join(output_dir, "{}.csv".format(millis))
data_frame.to_csv(path)
return path
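# Minimal usage sketch (illustrative; the record values and the use of the
# system temp directory are assumptions, not part of the original module):
if __name__ == "__main__":
    import tempfile
    results = [
        {FIELD_APPLICATION: "web", FIELD_VERSION: "1.0", FIELD_SUCCESS_COUNT: 3},
        {FIELD_APPLICATION: "web", FIELD_VERSION: "1.0", FIELD_SUCCESS_COUNT: 2},
        {FIELD_APPLICATION: "db", FIELD_VERSION: "2.1", FIELD_SUCCESS_COUNT: 4},
    ]
    # Sum success counts per (Application, Version) pair.
    frame = calc_stats(results, [FIELD_APPLICATION, FIELD_VERSION],
                       FIELD_SUCCESS_COUNT, OPERATOR_ADD)
    # 2 successful responses out of 3 queries, checked against a 60% threshold.
    print(check_qos(60, 3, 2))
    # Persist the aggregated stats as <timestamp>.csv and print the path.
    print(write_stats(frame, tempfile.gettempdir()))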
| mit |
poryfly/scikit-learn | sklearn/svm/classes.py | 126 | 40114 | import warnings
import numpy as np
from .base import _fit_liblinear, BaseSVC, BaseLibSVM
from ..base import BaseEstimator, RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin, \
LinearModel
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_X_y
from ..utils.validation import _num_samples
class LinearSVC(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Linear Support Vector Classification.
Similar to SVC with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input and the multiclass support
is handled according to a one-vs-the-rest scheme.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
loss : string, 'hinge' or 'squared_hinge' (default='squared_hinge')
Specifies the loss function. 'hinge' is the standard SVM loss
(used e.g. by the SVC class) while 'squared_hinge' is the
square of the hinge loss.
penalty : string, 'l1' or 'l2' (default='l2')
Specifies the norm used in the penalization. The 'l2'
penalty is the standard used in SVC. The 'l1' leads to `coef_`
vectors that are sparse.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
    multi_class : string, 'ovr' or 'crammer_singer' (default='ovr')
Determines the multi-class strategy if `y` contains more than
two classes.
`ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
optimizes a joint objective over all classes.
While `crammer_singer` is interesting from a theoretical perspective
as it is consistent, it is seldom used in practice as it rarely leads
to better accuracy and is more expensive to compute.
If `crammer_singer` is chosen, the options loss, penalty and dual will
be ignored.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equal to
        intercept_scaling is appended to the instance vector.
        The intercept becomes intercept_scaling * synthetic feature weight.
        Note that the synthetic feature weight is subject to l1/l2
        regularization like all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller ``tol`` parameter.
The underlying implementation (liblinear) uses a sparse internal
representation for the data that will incur a memory copy.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
**References:**
`LIBLINEAR: A Library for Large Linear Classification
<http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
See also
--------
SVC
Implementation of Support Vector Machine classifier using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
Furthermore SVC multi-class mode is implemented using one
vs one scheme while LinearSVC uses one vs the rest. It is
possible to implement one vs the rest with SVC by using the
:class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
Finally SVC can fit dense data without memory copy if the input
is C-contiguous. Sparse data will still incur memory copy though.
sklearn.linear_model.SGDClassifier
SGDClassifier can optimize the same cost function as LinearSVC
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, penalty='l2', loss='squared_hinge', dual=True, tol=1e-4,
C=1.0, multi_class='ovr', fit_intercept=True,
intercept_scaling=1, class_weight=None, verbose=0,
random_state=None, max_iter=1000):
self.dual = dual
self.tol = tol
self.C = C
self.multi_class = multi_class
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.penalty = penalty
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'hinge', 'l2': 'squared_hinge'}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
self.classes_ = np.unique(y)
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, self.multi_class,
self.loss)
if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
if self.fit_intercept:
intercept = self.intercept_[1] - self.intercept_[0]
self.intercept_ = np.array([intercept])
return self
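# Usage sketch for LinearSVC (illustrative; the toy data below are an assumption
# mirroring the SVC example in this module, not an official doctest):
#
#     import numpy as np
#     from sklearn.svm import LinearSVC
#     X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
#     y = np.array([1, 1, 2, 2])
#     clf = LinearSVC(random_state=0).fit(X, y)
#     clf.predict([[-0.8, -1]])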
class LinearSVR(LinearModel, RegressorMixin):
"""Linear Support Vector Regression.
Similar to SVR with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term. The penalty is a squared
l2 penalty. The bigger this parameter, the less regularization is used.
loss : string, 'epsilon_insensitive' or 'squared_epsilon_insensitive'
(default='epsilon_insensitive')
        Specifies the loss function. 'epsilon_insensitive' is the standard
        SVR loss while 'squared_epsilon_insensitive' is the squared
        epsilon-insensitive loss.
epsilon : float, optional (default=0.1)
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set epsilon=0.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equal to
        intercept_scaling is appended to the instance vector.
        The intercept becomes intercept_scaling * synthetic feature weight.
        Note that the synthetic feature weight is subject to l1/l2
        regularization like all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
LinearSVC
Implementation of Support Vector Machine classifier using the
same library as this class (liblinear).
SVR
Implementation of Support Vector Machine regression using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
sklearn.linear_model.SGDRegressor
SGDRegressor can optimize the same cost function as LinearSVR
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, epsilon=0.0, tol=1e-4, C=1.0,
loss='epsilon_insensitive', fit_intercept=True,
intercept_scaling=1., dual=True, verbose=0,
random_state=None, max_iter=1000):
self.tol = tol
self.C = C
self.epsilon = epsilon
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.dual = dual
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'epsilon_insensitive',
'l2': 'squared_epsilon_insensitive'
}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
penalty = 'l2' # SVR only accepts l2 penalty
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
None, penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, loss=self.loss,
epsilon=self.epsilon)
self.coef_ = self.coef_.ravel()
return self
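# Usage sketch for LinearSVR (illustrative; the random data are an assumption):
#
#     import numpy as np
#     from sklearn.svm import LinearSVR
#     rng = np.random.RandomState(0)
#     X, y = rng.randn(10, 5), rng.randn(10)
#     reg = LinearSVR(C=1.0, epsilon=0.1).fit(X, y)
#     reg.predict(X[:2])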
class SVC(BaseSVC):
"""C-Support Vector Classification.
The implementation is based on libsvm. The fit time complexity
is more than quadratic with the number of samples which makes it hard
to scale to dataset with more than a couple of 10000 samples.
The multiclass support is handled according to a one-vs-one scheme.
For details on the precise mathematical formulation of the provided
kernel functions and how `gamma`, `coef0` and `degree` affect each
other, see the corresponding section in the narrative documentation:
:ref:`svm_kernels`.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to pre-compute the kernel matrix from data matrices; that matrix
should be an array of shape ``(n_samples, n_samples)``.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
        Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
        compatibility and raise a deprecation warning, but will change to 'ovr'
in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in the
SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import SVC
>>> clf = SVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, probability=False, random_state=None, shrinking=True,
tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVR
Support Vector Machine for Regression implemented using libsvm.
LinearSVC
Scalable Linear Support Vector Machine for classification
implemented using liblinear. Check the See also section of
LinearSVC for more comparison element.
"""
def __init__(self, C=1.0, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None,
verbose=False, max_iter=-1, decision_function_shape=None,
random_state=None):
super(SVC, self).__init__(
impl='c_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class NuSVC(BaseSVC):
"""Nu-Support Vector Classification.
Similar to SVC but uses a parameter to control the number of support
vectors.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
nu : float, optional (default=0.5)
An upper bound on the fraction of training errors and a lower
bound of the fraction of support vectors. Should be in the
interval (0, 1].
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
        Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
        compatibility and raise a deprecation warning, but will change to 'ovr'
in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in
the SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import NuSVC
>>> clf = NuSVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVC(cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, nu=0.5, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVC
Support Vector Machine for classification using libsvm.
LinearSVC
Scalable linear Support Vector Machine for classification using
liblinear.
"""
def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None, verbose=False,
max_iter=-1, decision_function_shape=None, random_state=None):
super(NuSVC, self).__init__(
impl='nu_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=0., nu=nu, shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class SVR(BaseLibSVM, RegressorMixin):
"""Epsilon-Support Vector Regression.
The free parameters in the model are C and epsilon.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
epsilon : float, optional (default=0.1)
Epsilon in the epsilon-SVR model. It specifies the epsilon-tube
within which no penalty is associated in the training loss function
with points predicted within a distance epsilon from the actual
value.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import SVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = SVR(C=1.0, epsilon=0.2)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma='auto',
kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVR
Support Vector Machine for regression implemented using libsvm
using a parameter to control the number of support vectors.
LinearSVR
Scalable Linear Support Vector Machine for regression
implemented using liblinear.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, C=1.0, epsilon=0.1, shrinking=True,
cache_size=200, verbose=False, max_iter=-1):
super(SVR, self).__init__(
'epsilon_svr', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., epsilon=epsilon, verbose=verbose,
shrinking=shrinking, probability=False, cache_size=cache_size,
class_weight=None, max_iter=max_iter, random_state=None)
class NuSVR(BaseLibSVM, RegressorMixin):
"""Nu Support Vector Regression.
Similar to NuSVC, for regression, uses a parameter nu to control
the number of support vectors. However, unlike NuSVC, where nu
replaces C, here nu replaces the parameter epsilon of epsilon-SVR.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
nu : float, optional
An upper bound on the fraction of training errors and a lower bound of
the fraction of support vectors. Should be in the interval (0, 1]. By
default 0.5 will be taken.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import NuSVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = NuSVR(C=1.0, nu=0.1)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma='auto',
kernel='rbf', max_iter=-1, nu=0.1, shrinking=True, tol=0.001,
verbose=False)
See also
--------
NuSVC
Support Vector Machine for classification implemented with libsvm
with a parameter to control the number of support vectors.
SVR
epsilon Support Vector Machine for regression implemented with libsvm.
"""
def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
gamma='auto', coef0=0.0, shrinking=True, tol=1e-3,
cache_size=200, verbose=False, max_iter=-1):
super(NuSVR, self).__init__(
'nu_svr', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
probability=False, cache_size=cache_size, class_weight=None,
verbose=verbose, max_iter=max_iter, random_state=None)
class OneClassSVM(BaseLibSVM):
"""Unsupervised Outlier Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_outlier_detection>`.
Parameters
----------
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
nu : float, optional
An upper bound on the fraction of training
errors and a lower bound of the fraction of support
vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
tol : float, optional
Tolerance for stopping criterion.
shrinking : boolean, optional
Whether to use the shrinking heuristic.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [n_classes-1, n_SV]
Coefficients of the support vectors in the decision function.
coef_ : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
intercept_ : array, shape = [n_classes-1]
Constants in decision function.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, nu=0.5, shrinking=True, cache_size=200,
verbose=False, max_iter=-1, random_state=None):
super(OneClassSVM, self).__init__(
'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
shrinking, False, cache_size, None, verbose, max_iter,
random_state)
def fit(self, X, y=None, sample_weight=None, **params):
"""
Detects the soft boundary of the set of samples X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Set of samples, where n_samples is the number of samples and
n_features is the number of features.
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
-----
If X is not a C-ordered contiguous array it is copied.
"""
super(OneClassSVM, self).fit(X, np.ones(_num_samples(X)), sample_weight=sample_weight,
**params)
return self
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X : array-like, shape (n_samples,)
Returns the decision function of the samples.
"""
dec = self._decision_function(X)
return dec
| bsd-3-clause |
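A minimal usage sketch for the OneClassSVM estimator defined in the record above; the data, `nu` and `gamma` values are illustrative only, and the +1/-1 labelling returned by `predict` is the usual libsvm convention rather than anything stated in the snippet itself.

import numpy as np
from sklearn.svm import OneClassSVM

rng = np.random.RandomState(0)
X_train = 0.3 * rng.randn(100, 2)                    # inliers clustered near the origin
X_test = np.vstack([0.3 * rng.randn(20, 2),
                    rng.uniform(low=-4, high=4, size=(5, 2))])

clf = OneClassSVM(nu=0.1, kernel='rbf', gamma=0.1)
clf.fit(X_train)                                     # y is ignored; the support is learned from X alone
scores = clf.decision_function(X_test)               # signed distance to the learned boundary
labels = clf.predict(X_test)                         # +1 for inliers, -1 for outliers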
GuessWhoSamFoo/pandas | pandas/tests/indexes/period/test_arithmetic.py | 2 | 4539 | # -*- coding: utf-8 -*-
import numpy as np
import pytest
import pandas as pd
from pandas import PeriodIndex, period_range
import pandas.util.testing as tm
class TestPeriodIndexArithmetic(object):
# ---------------------------------------------------------------
# PeriodIndex.shift is used by __add__ and __sub__
def test_pi_shift_ndarray(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'],
freq='M', name='idx')
result = idx.shift(np.array([1, 2, 3, 4]))
expected = PeriodIndex(['2011-02', '2011-04', 'NaT', '2011-08'],
freq='M', name='idx')
tm.assert_index_equal(result, expected)
result = idx.shift(np.array([1, -2, 3, -4]))
expected = PeriodIndex(['2011-02', '2010-12', 'NaT', '2010-12'],
freq='M', name='idx')
tm.assert_index_equal(result, expected)
def test_shift(self):
pi1 = period_range(freq='A', start='1/1/2001', end='12/1/2009')
pi2 = period_range(freq='A', start='1/1/2002', end='12/1/2010')
tm.assert_index_equal(pi1.shift(0), pi1)
assert len(pi1) == len(pi2)
tm.assert_index_equal(pi1.shift(1), pi2)
pi1 = period_range(freq='A', start='1/1/2001', end='12/1/2009')
pi2 = period_range(freq='A', start='1/1/2000', end='12/1/2008')
assert len(pi1) == len(pi2)
tm.assert_index_equal(pi1.shift(-1), pi2)
pi1 = period_range(freq='M', start='1/1/2001', end='12/1/2009')
pi2 = period_range(freq='M', start='2/1/2001', end='1/1/2010')
assert len(pi1) == len(pi2)
tm.assert_index_equal(pi1.shift(1), pi2)
pi1 = period_range(freq='M', start='1/1/2001', end='12/1/2009')
pi2 = period_range(freq='M', start='12/1/2000', end='11/1/2009')
assert len(pi1) == len(pi2)
tm.assert_index_equal(pi1.shift(-1), pi2)
pi1 = period_range(freq='D', start='1/1/2001', end='12/1/2009')
pi2 = period_range(freq='D', start='1/2/2001', end='12/2/2009')
assert len(pi1) == len(pi2)
tm.assert_index_equal(pi1.shift(1), pi2)
pi1 = period_range(freq='D', start='1/1/2001', end='12/1/2009')
pi2 = period_range(freq='D', start='12/31/2000', end='11/30/2009')
assert len(pi1) == len(pi2)
tm.assert_index_equal(pi1.shift(-1), pi2)
def test_shift_corner_cases(self):
# GH#9903
idx = pd.PeriodIndex([], name='xxx', freq='H')
with pytest.raises(TypeError):
# period shift doesn't accept freq
idx.shift(1, freq='H')
tm.assert_index_equal(idx.shift(0), idx)
tm.assert_index_equal(idx.shift(3), idx)
        idx = pd.PeriodIndex(['2011-01-01 10:00', '2011-01-01 11:00',
                              '2011-01-01 12:00'], name='xxx', freq='H')
tm.assert_index_equal(idx.shift(0), idx)
        exp = pd.PeriodIndex(['2011-01-01 13:00', '2011-01-01 14:00',
                              '2011-01-01 15:00'], name='xxx', freq='H')
tm.assert_index_equal(idx.shift(3), exp)
        exp = pd.PeriodIndex(['2011-01-01 07:00', '2011-01-01 08:00',
                              '2011-01-01 09:00'], name='xxx', freq='H')
tm.assert_index_equal(idx.shift(-3), exp)
def test_shift_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'],
freq='M', name='idx')
result = idx.shift(1)
expected = PeriodIndex(['2011-02', '2011-03', 'NaT', '2011-05'],
freq='M', name='idx')
tm.assert_index_equal(result, expected)
assert result.name == expected.name
def test_shift_gh8083(self):
# test shift for PeriodIndex
# GH#8083
drange = pd.period_range('20130101', periods=5, freq='D')
result = drange.shift(1)
expected = PeriodIndex(['2013-01-02', '2013-01-03', '2013-01-04',
'2013-01-05', '2013-01-06'], freq='D')
tm.assert_index_equal(result, expected)
def test_shift_periods(self):
# GH #22458 : argument 'n' was deprecated in favor of 'periods'
idx = period_range(freq='A', start='1/1/2001', end='12/1/2009')
tm.assert_index_equal(idx.shift(periods=0), idx)
tm.assert_index_equal(idx.shift(0), idx)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=True):
tm.assert_index_equal(idx.shift(n=0), idx)
| bsd-3-clause |
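A short sketch of the `PeriodIndex.shift` behaviour exercised by the tests above: a scalar shift moves every period by the same amount, an array shift is applied element-wise, and `NaT` entries stay `NaT` (the expected outputs are taken directly from the tests).

import numpy as np
import pandas as pd

idx = pd.PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], freq='M', name='idx')
idx.shift(1)                        # ['2011-02', '2011-03', 'NaT', '2011-05']
idx.shift(np.array([1, 2, 3, 4]))   # element-wise: ['2011-02', '2011-04', 'NaT', '2011-08']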
jorisvandenbossche/geopandas | geopandas/tests/test_types.py | 3 | 2818 | from __future__ import absolute_import
import numpy as np
from shapely.geometry import Point
from pandas import Series, DataFrame
from geopandas import GeoSeries, GeoDataFrame
from geopandas.tests.util import unittest
OLD_PANDAS = issubclass(Series, np.ndarray)
class TestSeries(unittest.TestCase):
def setUp(self):
N = self.N = 10
r = 0.5
self.pts = GeoSeries([Point(x, y) for x, y in zip(range(N), range(N))])
self.polys = self.pts.buffer(r)
def test_slice(self):
assert type(self.pts[:2]) is GeoSeries
assert type(self.pts[::2]) is GeoSeries
assert type(self.polys[:2]) is GeoSeries
def test_head(self):
assert type(self.pts.head()) is GeoSeries
def test_tail(self):
assert type(self.pts.tail()) is GeoSeries
def test_sort_index(self):
assert type(self.pts.sort_index()) is GeoSeries
def test_loc(self):
assert type(self.pts.loc[5:]) is GeoSeries
def test_iloc(self):
assert type(self.pts.iloc[5:]) is GeoSeries
def test_fancy(self):
idx = (self.pts.index.to_series() % 2).astype(bool)
assert type(self.pts[idx]) is GeoSeries
def test_take(self):
assert type(self.pts.take(list(range(0, self.N, 2)))) is GeoSeries
def test_select(self):
assert type(self.pts.select(lambda x: x % 2 == 0)) is GeoSeries
@unittest.skipIf(OLD_PANDAS, 'Groupby not supported on pandas <= 0.12')
def test_groupby(self):
for f, s in self.pts.groupby(lambda x: x % 2):
assert type(s) is GeoSeries
class TestDataFrame(unittest.TestCase):
def setUp(self):
N = 10
self.df = GeoDataFrame([
{'geometry' : Point(x, y), 'value1': x + y, 'value2': x*y}
for x, y in zip(range(N), range(N))])
def test_geometry(self):
assert type(self.df.geometry) is GeoSeries
# still GeoSeries if different name
df2 = GeoDataFrame({"coords": [Point(x,y) for x, y in zip(range(5),
range(5))],
"nums": range(5)}, geometry="coords")
assert type(df2.geometry) is GeoSeries
assert type(df2['coords']) is GeoSeries
def test_nongeometry(self):
assert type(self.df['value1']) is Series
def test_geometry_multiple(self):
assert type(self.df[['geometry', 'value1']]) is GeoDataFrame
def test_nongeometry_multiple(self):
assert type(self.df[['value1', 'value2']]) is DataFrame
def test_slice(self):
assert type(self.df[:2]) is GeoDataFrame
assert type(self.df[::2]) is GeoDataFrame
def test_fancy(self):
idx = (self.df.index.to_series() % 2).astype(bool)
assert type(self.df[idx]) is GeoDataFrame
| bsd-3-clause |
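A compact sketch of the type-preservation contract checked above: slicing and indexing a GeoSeries stays a GeoSeries, while non-geometry columns of a GeoDataFrame come back as plain pandas objects.

from shapely.geometry import Point
from pandas import Series
from geopandas import GeoSeries, GeoDataFrame

pts = GeoSeries([Point(x, x) for x in range(10)])
assert type(pts[:5]) is GeoSeries                  # slices keep the GeoSeries type
df = GeoDataFrame([{'geometry': Point(x, x), 'value': x} for x in range(5)])
assert type(df.geometry) is GeoSeries
assert type(df['value']) is Series                 # non-geometry column -> plain Series
assert type(df[['geometry', 'value']]) is GeoDataFrame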
johnb30/eventscale | eventscale.py | 1 | 2471 | from __future__ import division
import pandas as pd
import os
def _get_data(path):
"""Private function to get the absolute path to the installed files."""
cwd = os.path.abspath(os.path.dirname(__file__))
return os.path.join(cwd, 'data', path)
class Scale():
def __init__(self, data, merge_var=None):
self.data = data
self.orig_vars = data.columns.tolist()
if not merge_var:
print 'Must provide a variable on which to merge!'
else:
self.merge_var = merge_var
if self.merge_var not in self.orig_vars:
            print 'The `merge_var` variable does not exist in the original data!'
self.columns = [self.merge_var, 'total_count']
self.dataset_map = {'daily': 'daily_scale.csv',
'monthly': 'monthly_scale.csv',
'yearly': 'yearly_scale.csv',
'daily_monadic': 'daily_scale_monadic.csv',
'monthly_monadic': 'monthly_scale_monadic.csv',
'yearly_monadic': 'yearly_scale_monadic.csv'
}
def _prep_data(self, in_data, col_names=None):
        data = pd.read_csv(_get_data(in_data), sep='\t', names=self.columns)
data[self.merge_var] = data[self.merge_var].map(lambda x: int(x))
return data
    def _transform_data(self, dataset, new_name=None, old_name=None):
try:
            hold_data = pd.merge(self.data, dataset, on=self.merge_var)
except KeyError:
print 'The variable names for merging do not match.'
hold_data[new_name] = (hold_data[old_name] /
hold_data['total_count'])
        output_data = hold_data[self.orig_vars + [new_name]]
return output_data
def scale(self, scale=None, new_var='scaled_count', old_var=None, outpath=None):
if not scale:
print 'Please indicate which type of scaling should be used!'
scale_data = self._prep_data(self.dataset_map[scale])
if not old_var:
print 'Please indicate which variable should be scaled.'
else:
final_data = self._transform_data(scale_data, old_name=old_var,
new_name=new_var)
if outpath:
final_data.to_csv(outpath, sep='\t', index=False)
else:
return final_data
| mit |
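A hedged usage sketch for the `Scale` class above. The input file name and its column names (`date`, `n_events`) are hypothetical; what the code itself implies is only that `merge_var` must exist in the input frame and match the id column of the packaged scale files, and that `scale` must be one of the `dataset_map` keys ('daily', 'monthly', 'yearly', or their '_monadic' variants).

import pandas as pd
from eventscale import Scale

events = pd.read_csv('my_event_counts.tsv', sep='\t')   # hypothetical input file
scaler = Scale(events, merge_var='date')
scaled = scaler.scale(scale='daily', old_var='n_events', new_var='scaled_count')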
ishanic/scikit-learn | examples/model_selection/plot_validation_curve.py | 229 | 1823 | """
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.learning_curve import validation_curve
digits = load_digits()
X, y = digits.data, digits.target
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
SVC(), X, y, param_name="gamma", param_range=param_range,
cv=10, scoring="accuracy", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with SVM")
plt.xlabel("$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
plt.semilogx(param_range, train_scores_mean, label="Training score", color="r")
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2, color="r")
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="g")
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2, color="g")
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
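One possible follow-up to the script above (it reuses the `param_range`, `test_scores_mean` and `test_scores_std` arrays computed there, so it is a continuation rather than a standalone snippet): pick the gamma with the best mean cross-validation score.

best = np.argmax(test_scores_mean)
print("best gamma: %.1e  (CV accuracy %.3f +/- %.3f)"
      % (param_range[best], test_scores_mean[best], test_scores_std[best]))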
vals/GPclust | GPclust/MOG.py | 4 | 6407 | # Copyright (c) 2012, 2013, 2014 James Hensman
# Licensed under the GPL v3 (see LICENSE.txt)
import numpy as np
try:
from .utilities import multiple_pdinv, lngammad
except ImportError:
from .np_utilities import multiple_pdinv, lngammad
from scipy.special import gammaln, digamma
from scipy import stats
from .collapsed_mixture import CollapsedMixture
class MOG(CollapsedMixture):
"""
A Mixture of Gaussians, using the fast variational framework
Arguments
=========
X - a np.array of the observed data: each row contains one datum.
K - the number of clusters (or initial number of clusters in the Dirichlet Process case)
    alpha - the a priori Dirichlet concentration parameter (default 1.)
prior_Z - either 'symmetric' or 'DP', specifies whether to use a symmetric Dirichlet prior for the clusters, or a (truncated) Dirichlet process.
name - a convenient string for printing the model (default MOG)
Optional arguments for the parameters of the Gaussian-Wishart priors on the clusters
prior_m - prior mean (defaults to mean of the data)
prior_kappa - prior connectivity (default 1e-6)
prior_S - prior Wishart covariance (defaults to 1e-3 * I)
prior_v - prior Wishart degrees of freedom (defaults to dimension of the problem +1.)
"""
def __init__(self, X, K=2, prior_Z='symmetric', alpha=1., prior_m=None, prior_kappa=1e-6, prior_S=None, prior_v=None, name='MOG'):
self.X = X
self.N, self.D = X.shape
# store the prior cluster parameters
self.m0 = self.X.mean(0) if prior_m is None else prior_m
self.k0 = prior_kappa
self.S0 = np.eye(self.D)*1e-3 if prior_S is None else prior_S
self.v0 = prior_v or self.D+1.
#precomputed stuff
self.k0m0m0T = self.k0*self.m0[:,np.newaxis]*self.m0[np.newaxis,:]
self.XXT = self.X[:,:,np.newaxis]*self.X[:,np.newaxis,:]
self.S0_halflogdet = np.sum(np.log(np.sqrt(np.diag(np.linalg.cholesky(self.S0)))))
CollapsedMixture.__init__(self, self.N, K, prior_Z, alpha, name=name)
self.do_computations()
def do_computations(self):
#computations needed for bound, gradient and predictions
self.kNs = self.phi_hat + self.k0
self.vNs = self.phi_hat + self.v0
self.Xsumk = np.tensordot(self.X,self.phi,((0),(0))) #D x K
Ck = np.tensordot(self.phi, self.XXT,((0),(0))).T# D x D x K
self.mun = (self.k0*self.m0[:,None] + self.Xsumk)/self.kNs[None,:] # D x K
self.munmunT = self.mun[:,None,:]*self.mun[None,:,:]
self.Sns = self.S0[:,:,None] + Ck + self.k0m0m0T[:,:,None] - self.kNs[None,None,:]*self.munmunT
self.Sns_inv, self.Sns_halflogdet = multiple_pdinv(self.Sns)
def bound(self):
"""Compute the lower bound on the model evidence. """
return -0.5*self.D*np.sum(np.log(self.kNs/self.k0))\
+self.K*self.v0*self.S0_halflogdet - np.sum(self.vNs*self.Sns_halflogdet)\
+np.sum(lngammad(self.vNs, self.D))- self.K*lngammad(self.v0, self.D)\
+self.mixing_prop_bound()\
+self.H\
-0.5*self.N*self.D*np.log(np.pi)
def vb_grad_natgrad(self):
"""Gradients of the bound"""
x_m = self.X[:,:,None]-self.mun[None,:,:]
dS = x_m[:,:,None,:]*x_m[:,None,:,:]
SnidS = self.Sns_inv[None,:,:,:]*dS
dlndtS_dphi = np.dot(np.ones(self.D), np.dot(np.ones(self.D), SnidS))
grad_phi = (-0.5*self.D/self.kNs + 0.5*digamma((self.vNs-np.arange(self.D)[:,None])/2.).sum(0) + self.mixing_prop_bound_grad() - self.Sns_halflogdet -1.) + (self.Hgrad-0.5*dlndtS_dphi*self.vNs)
natgrad = grad_phi - np.sum(self.phi*grad_phi, 1)[:,None] # corrects for softmax (over) parameterisation
grad = natgrad*self.phi
return grad.flatten(), natgrad.flatten()
def predict_components_ln(self, Xnew):
"""The log predictive density under each component at Xnew"""
Dist = Xnew[:,:,np.newaxis]-self.mun[np.newaxis,:,:] # Nnew x D x K
tmp = np.sum(Dist[:,:,None,:]*self.Sns_inv[None,:,:,:],1)#*(kn+1.)/(kn*(vn-self.D+1.))
mahalanobis = np.sum(tmp*Dist, 1)/(self.kNs+1.)*self.kNs*(self.vNs-self.D+1.)
halflndetSigma = self.Sns_halflogdet + 0.5*self.D*np.log((self.kNs+1.)/(self.kNs*(self.vNs-self.D+1.)))
Z = gammaln(0.5*(self.vNs[np.newaxis,:]+1.))\
-gammaln(0.5*(self.vNs[np.newaxis,:]-self.D+1.))\
-(0.5*self.D)*(np.log(self.vNs[np.newaxis,:]-self.D+1.) + np.log(np.pi))\
- halflndetSigma \
- (0.5*(self.vNs[np.newaxis,:]+1.))*np.log(1.+mahalanobis/(self.vNs[np.newaxis,:]-self.D+1.))
return Z
def predict_components(self, Xnew):
"""The predictive density under each component at Xnew"""
return np.exp(self.predict_components_ln(Xnew))
def predict(self, Xnew):
"""The predictive density of the model at Xnew"""
Z = self.predict_components(Xnew)
#calculate the weights for each component
phi_hat = self.phi.sum(0)
pi = phi_hat+self.alpha
pi /= pi.sum()
Z *= pi[np.newaxis,:]
return Z.sum(1)
def plot(self, newfig=True):
from matplotlib import pyplot as plt
if self.X.shape[1]==2:
if newfig:plt.figure()
xmin, ymin = self.X.min(0)
xmax, ymax = self.X.max(0)
xmin, xmax = xmin-0.1*(xmax-xmin), xmax+0.1*(xmax-xmin)
ymin, ymax = ymin-0.1*(ymax-ymin), ymax+0.1*(ymax-ymin)
xx, yy = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
Xgrid = np.vstack((xx.flatten(), yy.flatten())).T
zz = self.predict(Xgrid).reshape(100, 100)
zz_data = self.predict(self.X)
plt.contour(xx, yy, zz, [stats.scoreatpercentile(zz_data, 5)], colors='k', linewidths=3)
plt.scatter(self.X[:,0], self.X[:,1], 30, np.argmax(self.phi, 1), linewidth=0, cmap=plt.cm.gist_rainbow)
zz_components = self.predict_components(Xgrid)
phi_hat = self.phi.sum(0)
pi = phi_hat+self.alpha
pi /= pi.sum()
zz_components *= pi[np.newaxis,:]
[plt.contour(xx, yy, zz.reshape(100, 100), [stats.scoreatpercentile(zz_data, 5.)], colors='k', linewidths=1) for zz in zz_components.T]
else:
print("plotting only for 2D mixtures")
| gpl-3.0 |
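A standalone NumPy sketch of the per-cluster Gaussian-Wishart updates that `do_computations` performs, written for a single cluster k so the tensor algebra is easier to follow; the responsibilities `phi_k` are random placeholders rather than the output of the variational optimisation.

import numpy as np

rng = np.random.RandomState(0)
N, D = 50, 2
X = rng.randn(N, D)
phi_k = rng.dirichlet(np.ones(3), size=N)[:, 0]   # soft assignments to cluster k (placeholder)

m0, k0 = X.mean(0), 1e-6                          # prior mean / prior kappa (defaults above)
S0, v0 = np.eye(D) * 1e-3, D + 1.0                # prior Wishart scale / degrees of freedom

Nk = phi_k.sum()                                  # phi_hat for this cluster
kN, vN = k0 + Nk, v0 + Nk
x_sum_k = (phi_k[:, None] * X).sum(0)             # responsibility-weighted data sum
mN = (k0 * m0 + x_sum_k) / kN
Ck = (phi_k[:, None, None] * X[:, :, None] * X[:, None, :]).sum(0)
SN = S0 + Ck + k0 * np.outer(m0, m0) - kN * np.outer(mN, mN)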
anirudhjayaraman/scikit-learn | sklearn/metrics/cluster/tests/test_supervised.py | 206 | 7643 | import numpy as np
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.cluster import homogeneity_score
from sklearn.metrics.cluster import completeness_score
from sklearn.metrics.cluster import v_measure_score
from sklearn.metrics.cluster import homogeneity_completeness_v_measure
from sklearn.metrics.cluster import adjusted_mutual_info_score
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics.cluster import mutual_info_score
from sklearn.metrics.cluster import expected_mutual_information
from sklearn.metrics.cluster import contingency_matrix
from sklearn.metrics.cluster import entropy
from sklearn.utils.testing import assert_raise_message
from nose.tools import assert_almost_equal
from nose.tools import assert_equal
from numpy.testing import assert_array_almost_equal
score_funcs = [
adjusted_rand_score,
homogeneity_score,
completeness_score,
v_measure_score,
adjusted_mutual_info_score,
normalized_mutual_info_score,
]
def test_error_messages_on_wrong_input():
for score_func in score_funcs:
expected = ('labels_true and labels_pred must have same size,'
' got 2 and 3')
assert_raise_message(ValueError, expected, score_func,
[0, 1], [1, 1, 1])
expected = "labels_true must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[[0, 1], [1, 0]], [1, 1, 1])
expected = "labels_pred must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[0, 1, 0], [[1, 1], [0, 0]])
def test_perfect_matches():
for score_func in score_funcs:
assert_equal(score_func([], []), 1.0)
assert_equal(score_func([0], [1]), 1.0)
assert_equal(score_func([0, 0, 0], [0, 0, 0]), 1.0)
assert_equal(score_func([0, 1, 0], [42, 7, 42]), 1.0)
assert_equal(score_func([0., 1., 0.], [42., 7., 42.]), 1.0)
assert_equal(score_func([0., 1., 2.], [42., 7., 2.]), 1.0)
assert_equal(score_func([0, 1, 2], [42, 7, 2]), 1.0)
def test_homogeneous_but_not_complete_labeling():
# homogeneous but not complete clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 2, 2])
assert_almost_equal(h, 1.00, 2)
assert_almost_equal(c, 0.69, 2)
assert_almost_equal(v, 0.81, 2)
def test_complete_but_not_homogeneous_labeling():
# complete but not homogeneous clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 1, 1, 2, 2],
[0, 0, 1, 1, 1, 1])
assert_almost_equal(h, 0.58, 2)
assert_almost_equal(c, 1.00, 2)
assert_almost_equal(v, 0.73, 2)
def test_not_complete_and_not_homogeneous_labeling():
# neither complete nor homogeneous but not so bad either
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
def test_non_consecutive_labels():
# regression tests for labels with gaps
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 2, 2, 2],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 4, 0, 4, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
ari_1 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2])
ari_2 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2])
assert_almost_equal(ari_1, 0.24, 2)
assert_almost_equal(ari_2, 0.24, 2)
def uniform_labelings_scores(score_func, n_samples, k_range, n_runs=10,
seed=42):
# Compute score for random uniform cluster labelings
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(k_range), n_runs))
for i, k in enumerate(k_range):
for j in range(n_runs):
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
def test_adjustment_for_chance():
# Check that adjusted scores are almost zero on random labels
n_clusters_range = [2, 10, 50, 90]
n_samples = 100
n_runs = 10
scores = uniform_labelings_scores(
adjusted_rand_score, n_samples, n_clusters_range, n_runs)
max_abs_scores = np.abs(scores).max(axis=1)
assert_array_almost_equal(max_abs_scores, [0.02, 0.03, 0.03, 0.02], 2)
def test_adjusted_mutual_info_score():
# Compute the Adjusted Mutual Information and test against known values
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
# Mutual information
mi = mutual_info_score(labels_a, labels_b)
assert_almost_equal(mi, 0.41022, 5)
# Expected mutual information
C = contingency_matrix(labels_a, labels_b)
n_samples = np.sum(C)
emi = expected_mutual_information(C, n_samples)
assert_almost_equal(emi, 0.15042, 5)
# Adjusted mutual information
ami = adjusted_mutual_info_score(labels_a, labels_b)
assert_almost_equal(ami, 0.27502, 5)
ami = adjusted_mutual_info_score([1, 1, 2, 2], [2, 2, 3, 3])
assert_equal(ami, 1.0)
# Test with a very large array
a110 = np.array([list(labels_a) * 110]).flatten()
b110 = np.array([list(labels_b) * 110]).flatten()
ami = adjusted_mutual_info_score(a110, b110)
# This is not accurate to more than 2 places
assert_almost_equal(ami, 0.37, 2)
def test_entropy():
ent = entropy([0, 0, 42.])
assert_almost_equal(ent, 0.6365141, 5)
assert_almost_equal(entropy([]), 1)
def test_contingency_matrix():
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
C = contingency_matrix(labels_a, labels_b)
C2 = np.histogram2d(labels_a, labels_b,
bins=(np.arange(1, 5),
np.arange(1, 5)))[0]
assert_array_almost_equal(C, C2)
C = contingency_matrix(labels_a, labels_b, eps=.1)
assert_array_almost_equal(C, C2 + .1)
def test_exactly_zero_info_score():
# Check numerical stability when information is exactly zero
for i in np.logspace(1, 4, 4).astype(np.int):
labels_a, labels_b = np.ones(i, dtype=np.int),\
np.arange(i, dtype=np.int)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(v_measure_score(labels_a, labels_b), 0.0)
assert_equal(adjusted_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
def test_v_measure_and_mutual_information(seed=36):
# Check relation between v_measure, entropy and mutual information
for i in np.logspace(1, 4, 4).astype(np.int):
random_state = np.random.RandomState(seed)
labels_a, labels_b = random_state.random_integers(0, 10, i),\
random_state.random_integers(0, 10, i)
assert_almost_equal(v_measure_score(labels_a, labels_b),
2.0 * mutual_info_score(labels_a, labels_b) /
(entropy(labels_a) + entropy(labels_b)), 0)
| bsd-3-clause |
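A tiny sketch of the metrics exercised above, using the "homogeneous but not complete" labelling from the tests; the exact values printed are whatever the installed scikit-learn returns.

from sklearn.metrics.cluster import (adjusted_rand_score,
                                     adjusted_mutual_info_score,
                                     homogeneity_completeness_v_measure)

labels_true = [0, 0, 0, 1, 1, 1]
labels_pred = [0, 0, 0, 1, 2, 2]     # every cluster is pure, but class 1 is split

h, c, v = homogeneity_completeness_v_measure(labels_true, labels_pred)
ari = adjusted_rand_score(labels_true, labels_pred)
ami = adjusted_mutual_info_score(labels_true, labels_pred)
print(h, c, v, ari, ami)             # h == 1.0 and c < 1.0 for this labelling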
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/pandas/tests/indexes/test_frozen.py | 18 | 2435 | import numpy as np
from pandas.util import testing as tm
from pandas.tests.test_base import CheckImmutable, CheckStringMixin
from pandas.core.indexes.frozen import FrozenList, FrozenNDArray
from pandas.compat import u
class TestFrozenList(CheckImmutable, CheckStringMixin):
mutable_methods = ('extend', 'pop', 'remove', 'insert')
unicode_container = FrozenList([u("\u05d0"), u("\u05d1"), "c"])
def setup_method(self, method):
self.lst = [1, 2, 3, 4, 5]
self.container = FrozenList(self.lst)
self.klass = FrozenList
def test_add(self):
result = self.container + (1, 2, 3)
expected = FrozenList(self.lst + [1, 2, 3])
self.check_result(result, expected)
result = (1, 2, 3) + self.container
expected = FrozenList([1, 2, 3] + self.lst)
self.check_result(result, expected)
def test_inplace(self):
q = r = self.container
q += [5]
self.check_result(q, self.lst + [5])
# other shouldn't be mutated
self.check_result(r, self.lst)
class TestFrozenNDArray(CheckImmutable, CheckStringMixin):
mutable_methods = ('put', 'itemset', 'fill')
unicode_container = FrozenNDArray([u("\u05d0"), u("\u05d1"), "c"])
def setup_method(self, method):
self.lst = [3, 5, 7, -2]
self.container = FrozenNDArray(self.lst)
self.klass = FrozenNDArray
def test_shallow_copying(self):
original = self.container.copy()
assert isinstance(self.container.view(), FrozenNDArray)
assert not isinstance(self.container.view(np.ndarray), FrozenNDArray)
assert self.container.view() is not self.container
tm.assert_numpy_array_equal(self.container, original)
# Shallow copy should be the same too
assert isinstance(self.container._shallow_copy(), FrozenNDArray)
# setting should not be allowed
def testit(container):
container[0] = 16
self.check_mutable_error(testit, self.container)
def test_values(self):
original = self.container.view(np.ndarray).copy()
n = original[0] + 15
vals = self.container.values()
tm.assert_numpy_array_equal(original, vals)
assert original is not vals
vals[0] = n
assert isinstance(self.container, FrozenNDArray)
tm.assert_numpy_array_equal(self.container.values(), original)
assert vals[0] == n
| mit |
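A small sketch of the immutability contract tested above: arithmetic produces new containers, while the mutating methods are disabled (raising `TypeError` in the pandas versions this test targets — treat the exact exception type as an assumption).

from pandas.core.indexes.frozen import FrozenList

fl = FrozenList([1, 2, 3])
fl2 = fl + [4, 5]          # returns a new FrozenList; fl itself is unchanged
try:
    fl.pop()               # one of the disabled mutable methods
except TypeError:
    print("FrozenList refuses in-place mutation")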
schets/scikit-learn | sklearn/utils/tests/test_extmath.py | 13 | 16336 | # Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.extmath import density
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import norm, squared_norm
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import row_norms
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.utils.extmath import log_logistic
from sklearn.utils.extmath import fast_dot, _fast_dot
from sklearn.utils.extmath import svd_flip
from sklearn.utils.extmath import _batch_mean_variance_update
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.datasets.samples_generator import make_low_rank_matrix
def test_density():
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 5))
X[1, 2] = 0
X[5, 3] = 0
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_coo = sparse.coo_matrix(X)
X_lil = sparse.lil_matrix(X)
for X_ in (X_csr, X_csc, X_coo, X_lil):
assert_equal(density(X_), density(X))
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = stats.mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis)
assert_true(np.all(mode == mode2))
assert_true(np.all(score == score2))
def test_random_weights():
# set this up so that each row should have a weighted mode of 6,
# with a score that is easily reproduced
mode_result = 6
rng = np.random.RandomState(0)
x = rng.randint(mode_result, size=(100, 10))
w = rng.random_sample(x.shape)
x[:, :5] = mode_result
w[:, :5] += 1
mode, score = weighted_mode(x, w, axis=1)
assert_array_equal(mode, mode_result)
assert_array_almost_equal(score.ravel(), w[:, :5].sum(1))
def test_logsumexp():
# Try to add some smallish numbers in logspace
x = np.array([1e-40] * 1000000)
logx = np.log(x)
assert_almost_equal(np.exp(logsumexp(logx)), x.sum())
X = np.vstack([x, x])
logX = np.vstack([logx, logx])
assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
def test_randomized_svd_low_rank():
# Check that extmath.randomized_svd is consistent with linalg.svd
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X of approximate effective rank `rank` and no noise
# component (very structured signal):
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
U, s, V = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_equal(Ua.shape, (n_samples, k))
assert_equal(sa.shape, (k,))
assert_equal(Va.shape, (k, n_features))
# ensure that the singular values of both methods are equal up to the real
# rank of the matrix
assert_almost_equal(s[:k], sa)
# check the singular vectors too (while not checking the sign)
assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va))
# check the sparse matrix representation
X = sparse.csr_matrix(X)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_almost_equal(s[:rank], sa[:rank])
def test_norm_squared_norm():
X = np.random.RandomState(42).randn(50, 63)
X *= 100 # check stability
X += 200
assert_almost_equal(np.linalg.norm(X.ravel()), norm(X))
assert_almost_equal(norm(X) ** 2, squared_norm(X), decimal=6)
assert_almost_equal(np.linalg.norm(X), np.sqrt(squared_norm(X)), decimal=6)
def test_row_norms():
X = np.random.RandomState(42).randn(100, 100)
sq_norm = (X ** 2).sum(axis=1)
assert_array_almost_equal(sq_norm, row_norms(X, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(X))
Xcsr = sparse.csr_matrix(X, dtype=np.float32)
assert_array_almost_equal(sq_norm, row_norms(Xcsr, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xcsr))
def test_randomized_svd_low_rank_with_noise():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
    # generate a matrix X with structure of approximate rank `rank` and an
# important noisy component
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.05)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
# the iterated power method is helping getting rid of the noise:
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
# let us try again without 'low_rank component': just regularly but slowly
# decreasing singular values: the rank of the data matrix is infinite
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=1.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.1)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
# the iterated power method is still managing to get most of the structure
# at the requested rank
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
# Check that transposing the design matrix has limit impact
n_samples = 100
n_features = 500
rank = 4
k = 10
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False,
random_state=0)
U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True,
random_state=0)
U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto',
random_state=0)
U4, s4, V4 = linalg.svd(X, full_matrices=False)
assert_almost_equal(s1, s4[:k], decimal=3)
assert_almost_equal(s2, s4[:k], decimal=3)
assert_almost_equal(s3, s4[:k], decimal=3)
assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
# in this case 'auto' is equivalent to transpose
assert_almost_equal(s2, s3)
def test_svd_flip():
# Check that svd_flip works in both situations, and reconstructs input.
rs = np.random.RandomState(1999)
n_samples = 20
n_features = 10
X = rs.randn(n_samples, n_features)
# Check matrix reconstruction
U, S, V = linalg.svd(X, full_matrices=False)
U1, V1 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6)
# Check transposed matrix reconstruction
XT = X.T
U, S, V = linalg.svd(XT, full_matrices=False)
U2, V2 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6)
# Check that different flip methods are equivalent under reconstruction
U_flip1, V_flip1 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6)
U_flip2, V_flip2 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6)
def test_randomized_svd_sign_flip():
a = np.array([[2.0, 0.0], [0.0, 1.0]])
u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41)
for seed in range(10):
u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed)
assert_almost_equal(u1, u2)
assert_almost_equal(v1, v2)
assert_almost_equal(np.dot(u2 * s2, v2), a)
assert_almost_equal(np.dot(u2.T, u2), np.eye(2))
assert_almost_equal(np.dot(v2.T, v2), np.eye(2))
def test_cartesian():
# Check if cartesian product delivers the right results
axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))
true_out = np.array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
out = cartesian(axes)
assert_array_equal(true_out, out)
# check single axis
x = np.arange(3)
assert_array_equal(x[:, np.newaxis], cartesian((x,)))
def test_logistic_sigmoid():
# Check correctness and robustness of logistic sigmoid implementation
naive_logistic = lambda x: 1 / (1 + np.exp(-x))
naive_log_logistic = lambda x: np.log(naive_logistic(x))
x = np.linspace(-2, 2, 50)
assert_array_almost_equal(log_logistic(x), naive_log_logistic(x))
extreme_x = np.array([-100., 100.])
assert_array_almost_equal(log_logistic(extreme_x), [-100, 0])
def test_fast_dot():
# Check fast dot blas wrapper function
if fast_dot is np.dot:
return
rng = np.random.RandomState(42)
A = rng.random_sample([2, 10])
B = rng.random_sample([2, 10])
try:
linalg.get_blas_funcs(['gemm'])[0]
has_blas = True
except (AttributeError, ValueError):
has_blas = False
if has_blas:
# Test _fast_dot for invalid input.
# Maltyped data.
for dt1, dt2 in [['f8', 'f4'], ['i4', 'i4']]:
assert_raises(ValueError, _fast_dot, A.astype(dt1),
B.astype(dt2).T)
# Malformed data.
## ndim == 0
E = np.empty(0)
assert_raises(ValueError, _fast_dot, E, E)
## ndim == 1
assert_raises(ValueError, _fast_dot, A, A[0])
## ndim > 2
assert_raises(ValueError, _fast_dot, A.T, np.array([A, A]))
## min(shape) == 1
assert_raises(ValueError, _fast_dot, A, A[0, :][None, :])
# test for matrix mismatch error
assert_raises(ValueError, _fast_dot, A, A)
# Test cov-like use case + dtypes.
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
# col < row
C = np.dot(A.T, A)
C_ = fast_dot(A.T, A)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A, B.T)
C_ = fast_dot(A, B.T)
assert_almost_equal(C, C_, decimal=5)
# Test square matrix * rectangular use case.
A = rng.random_sample([2, 2])
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
C = np.dot(A, B)
C_ = fast_dot(A, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
if has_blas:
for x in [np.array([[d] * 10] * 2) for d in [np.inf, np.nan]]:
assert_raises(ValueError, _fast_dot, x, x.T)
def test_incremental_variance_update_formulas():
# Test Youngs and Cramer incremental variance formulas.
# Doggie data from http://www.mathsisfun.com/data/standard-deviation.html
A = np.array([[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300]]).T
idx = 2
X1 = A[:idx, :]
X2 = A[idx:, :]
old_means = X1.mean(axis=0)
old_variances = X1.var(axis=0)
old_sample_count = X1.shape[0]
final_means, final_variances, final_count = _batch_mean_variance_update(
X2, old_means, old_variances, old_sample_count)
assert_almost_equal(final_means, A.mean(axis=0), 6)
assert_almost_equal(final_variances, A.var(axis=0), 6)
assert_almost_equal(final_count, A.shape[0])
def test_incremental_variance_ddof():
# Test that degrees of freedom parameter for calculations are correct.
rng = np.random.RandomState(1999)
X = rng.randn(50, 10)
n_samples, n_features = X.shape
for batch_size in [11, 20, 37]:
steps = np.arange(0, X.shape[0], batch_size)
if steps[-1] != X.shape[0]:
steps = np.hstack([steps, n_samples])
for i, j in zip(steps[:-1], steps[1:]):
batch = X[i:j, :]
if i == 0:
incremental_means = batch.mean(axis=0)
incremental_variances = batch.var(axis=0)
# Assign this twice so that the test logic is consistent
incremental_count = batch.shape[0]
sample_count = batch.shape[0]
else:
result = _batch_mean_variance_update(
batch, incremental_means, incremental_variances,
sample_count)
(incremental_means, incremental_variances,
incremental_count) = result
sample_count += batch.shape[0]
calculated_means = np.mean(X[:j], axis=0)
calculated_variances = np.var(X[:j], axis=0)
assert_almost_equal(incremental_means, calculated_means, 6)
assert_almost_equal(incremental_variances,
calculated_variances, 6)
assert_equal(incremental_count, sample_count)
def test_vector_sign_flip():
# Testing that sign flip is working & largest value has positive sign
data = np.random.RandomState(36).randn(5, 5)
max_abs_rows = np.argmax(np.abs(data), axis=1)
data_flipped = _deterministic_vector_sign_flip(data)
max_rows = np.argmax(data_flipped, axis=1)
assert_array_equal(max_abs_rows, max_rows)
signs = np.sign(data[range(data.shape[0]), max_abs_rows])
assert_array_equal(data, data_flipped * signs[:, np.newaxis])
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
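A sketch of the incremental mean/variance update exercised by `test_incremental_variance_update_formulas` above; `_batch_mean_variance_update` is a private helper of this scikit-learn version, so the call is illustrative rather than a stable API.

import numpy as np
from sklearn.utils.extmath import _batch_mean_variance_update

X = np.random.RandomState(0).randn(100, 3)
first, rest = X[:40], X[40:]

mean, var, n = first.mean(axis=0), first.var(axis=0), first.shape[0]
mean, var, n = _batch_mean_variance_update(rest, mean, var, n)   # fold in the second batch

assert np.allclose(mean, X.mean(axis=0))
assert np.allclose(var, X.var(axis=0))
assert n == X.shape[0]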
davidgbe/scikit-learn | examples/linear_model/plot_bayesian_ridge.py | 248 | 2588 | """
=========================
Bayesian Ridge Regression
=========================
Computes a Bayesian Ridge Regression on a synthetic dataset.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
As the prior on the weights is a Gaussian prior, the histogram of the
estimated weights is Gaussian.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import BayesianRidge, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features) # Create Gaussian data
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the Bayesian Ridge Regression and an OLS for comparison
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot true weights, estimated weights and histogram of the weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="Bayesian Ridge estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc="best", prop=dict(size=12))
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc="lower left")
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
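A possible follow-up to the script above (it reuses the fitted `clf` and `ols` estimators, so it is a continuation) that puts a number on the shrinkage claim by comparing the coefficient norms of the two fits.

print("||w||_2  Bayesian ridge: %.3f   OLS: %.3f"
      % (np.linalg.norm(clf.coef_), np.linalg.norm(ols.coef_)))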
animesh-garg/dtw | dtw.py | 1 | 2764 | from numpy import array, zeros, argmin, inf
def dtw(x, y, dist):
"""
Computes Dynamic Time Warping (DTW) of two sequences.
:param array x: N1*M array
:param array y: N2*M array
:param func dist: distance used as cost measure
    Returns the minimum distance, the cost matrix, the accumulated cost matrix, and the warp path.
"""
assert len(x)
assert len(y)
r, c = len(x), len(y)
D0 = zeros((r + 1, c + 1))
D0[0, 1:] = inf
D0[1:, 0] = inf
D1 = D0[1:, 1:] # view
for i in range(r):
for j in range(c):
D1[i, j] = dist(x[i], y[j])
C = D1.copy()
for i in range(r):
for j in range(c):
D1[i, j] += min(D0[i, j], D0[i, j+1], D0[i+1, j])
if len(x)==1:
path = zeros(len(y)), range(len(y))
elif len(y) == 1:
path = range(len(x)), zeros(len(x))
else:
path = _traceback(D0)
return D1[-1, -1] / sum(D1.shape), C, D1, path
def _traceback(D):
i, j = array(D.shape) - 2
p, q = [i], [j]
while ((i > 0) or (j > 0)):
tb = argmin((D[i, j], D[i, j+1], D[i+1, j]))
if (tb == 0):
i -= 1
j -= 1
elif (tb == 1):
i -= 1
else: # (tb == 2):
j -= 1
p.insert(0, i)
q.insert(0, j)
return array(p), array(q)
if __name__ == '__main__':
if 0: # 1-D numeric
from sklearn.metrics.pairwise import manhattan_distances
x = [0, 0, 1, 1, 2, 4, 2, 1, 2, 0]
y = [1, 1, 1, 2, 2, 2, 2, 3, 2, 0]
dist_fun = manhattan_distances
elif 0: # 2-D numeric
from sklearn.metrics.pairwise import euclidean_distances
x = [[0, 0], [0, 1], [1, 1], [1, 2], [2, 2], [4, 3], [2, 3], [1, 1], [2, 2], [0, 1]]
y = [[1, 0], [1, 1], [1, 1], [2, 1], [4, 3], [4, 3], [2, 3], [3, 1], [1, 2], [1, 0]]
dist_fun = euclidean_distances
else: # 1-D list of strings
from nltk.metrics.distance import edit_distance
#x = ['we', 'shelled', 'clams', 'for', 'the', 'chowder']
#y = ['class', 'too']
x = ['i', 'soon', 'found', 'myself', 'muttering', 'to', 'the', 'walls']
y = ['see', 'drown', 'himself']
#x = 'we talked about the situation'.split()
#y = 'we talked about the situation'.split()
dist_fun = edit_distance
dist, cost, acc, path = dtw(x, y, dist_fun)
    # visualize
from matplotlib import pyplot as plt
plt.imshow(cost.T, origin='lower', cmap=plt.cm.Reds, interpolation='nearest')
plt.plot(path[0], path[1], '-o') # relation
plt.xticks(range(len(x)), x)
plt.yticks(range(len(y)), y)
plt.xlabel('x')
plt.ylabel('y')
plt.axis('tight')
plt.title('Minimum distance: {}'.format(dist))
plt.show()
| gpl-3.0 |
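A minimal call sketch for the `dtw` function above, using a plain absolute-difference metric instead of the scikit-learn/nltk distances imported in the demo block; the return order (normalised distance, cost matrix, accumulated cost, warp path) is taken from the function itself.

from dtw import dtw

x = [0, 0, 1, 1, 2, 4, 2, 1, 2, 0]
y = [1, 1, 1, 2, 2, 2, 2, 3, 2, 0]
dist, cost, acc, path = dtw(x, y, lambda a, b: abs(a - b))
# path is a pair of index arrays: x[path[0][k]] is aligned with y[path[1][k]]
print(dist)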
vladsaveliev/bcbio-nextgen | bcbio/structural/validate.py | 4 | 19051 | """Provide validation of structural variations against truth sets.
"""
import csv
import os
import six
import toolz as tz
import numpy as np
import pandas as pd
import pybedtools
from bcbio.log import logger
from bcbio import utils
from bcbio.bam import ref
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
from bcbio.structural import convert
from bcbio.distributed.transaction import file_transaction
from bcbio.variation import vcfutils, ploidy, validateplot
from bcbio.pipeline import config_utils
mpl = utils.LazyImport("matplotlib")
plt = utils.LazyImport("matplotlib.pyplot")
sns = utils.LazyImport("seaborn")
# -- VCF based validation
def _evaluate_vcf(calls, truth_vcf, work_dir, data):
    out_file = os.path.join(work_dir, "%s-sv-validate.csv" % dd.get_sample_name(data))
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
writer = csv.writer(out_handle)
writer.writerow(["sample", "caller", "vtype", "metric", "value"])
for call in calls:
detail_dir = utils.safe_makedir(os.path.join(work_dir, call["variantcaller"]))
if call.get("vrn_file"):
for stats in _validate_caller_vcf(call["vrn_file"], truth_vcf, dd.get_sample_callable(data),
call["variantcaller"], detail_dir, data):
writer.writerow(stats)
return out_file
def _validate_caller_vcf(call_vcf, truth_vcf, callable_bed, svcaller, work_dir, data):
"""Validate a caller VCF against truth within callable regions using SURVIVOR.
    Combines files with SURVIVOR merge and counts (https://github.com/fritzsedlazeck/SURVIVOR/)
"""
stats = _calculate_comparison_stats(truth_vcf)
call_vcf = _prep_vcf(call_vcf, callable_bed, dd.get_sample_name(data), dd.get_sample_name(data),
stats, work_dir, data)
truth_vcf = _prep_vcf(truth_vcf, callable_bed, vcfutils.get_samples(truth_vcf)[0],
"%s-truth" % dd.get_sample_name(data), stats, work_dir, data)
cmp_vcf = _survivor_merge(call_vcf, truth_vcf, stats, work_dir, data)
return _comparison_stats_from_merge(cmp_vcf, stats, svcaller, data)
def _comparison_stats_from_merge(in_file, stats, svcaller, data):
"""Extract true/false positive/negatives from a merged SURIVOR VCF.
"""
truth_stats = {"tp": [], "fn": [], "fp": []}
samples = ["truth" if x.endswith("-truth") else "eval" for x in vcfutils.get_samples(in_file)]
with open(in_file) as in_handle:
for call in (l.rstrip().split("\t") for l in in_handle if not l.startswith("#")):
supp_vec_str = [x for x in call[7].split(";") if x.startswith("SUPP_VEC=")][0]
_, supp_vec = supp_vec_str.split("=")
calls = dict(zip(samples, [int(x) for x in supp_vec]))
if calls["truth"] and calls["eval"]:
metric = "tp"
elif calls["truth"]:
metric = "fn"
else:
metric = "fp"
truth_stats[metric].append(_summarize_call(call))
return _to_csv(truth_stats, stats, dd.get_sample_name(data), svcaller)
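# Illustrative note (not part of the original bcbio code): in a two-sample
# SURVIVOR merge, SUPP_VEC is a string of per-sample 0/1 flags, e.g. "SUPP_VEC=10".
# Zipped against the sample list built above, a hypothetical vector classifies as:
#     samples = ["eval", "truth"]
#     calls = dict(zip(samples, [int(x) for x in "10"]))
#     # -> {"eval": 1, "truth": 0}, counted as a false positive ("fp") above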
def _survivor_merge(call_vcf, truth_vcf, stats, work_dir, data):
"""Perform a merge of two callsets using SURVIVOR,
"""
out_file = os.path.join(work_dir, "eval-merge.vcf")
if not utils.file_uptodate(out_file, call_vcf):
in_call_vcf = call_vcf.replace(".vcf.gz", ".vcf")
if not utils.file_exists(in_call_vcf):
with file_transaction(data, in_call_vcf) as tx_in_call_vcf:
do.run("gunzip -c {call_vcf} > {tx_in_call_vcf}".format(**locals()))
in_truth_vcf = truth_vcf.replace(".vcf.gz", ".vcf")
if not utils.file_exists(in_truth_vcf):
with file_transaction(data, in_truth_vcf) as tx_in_truth_vcf:
do.run("gunzip -c {truth_vcf} > {tx_in_truth_vcf}".format(**locals()))
in_list_file = os.path.join(work_dir, "eval-inputs.txt")
with open(in_list_file, "w") as out_handle:
out_handle.write("%s\n%s\n" % (in_call_vcf, in_truth_vcf))
with file_transaction(data, out_file) as tx_out_file:
cmd = ("SURVIVOR merge {in_list_file} {stats[merge_size]} 1 0 0 0 {stats[min_size]} {tx_out_file}")
do.run(cmd.format(**locals()), "Merge SV files for validation: %s" % dd.get_sample_name(data))
return out_file
def _to_csv(truth_stats, stats, sample, svcaller):
out = []
for metric, vals in truth_stats.items():
for svtype in sorted(list(stats["svtypes"])):
count = len([x for x in vals if x["svtype"] == svtype])
out.append([sample, svcaller, svtype, metric, count])
for start, end in stats["ranges"]:
count = len([x for x in vals if (x["svtype"] == svtype
and x["size"] >= start and x["size"] < end)])
out.append([sample, svcaller, "%s_%s-%s" % (svtype, start, end), metric, count])
return out
def _calculate_comparison_stats(truth_vcf):
"""Identify calls to validate from the input truth VCF.
"""
# Avoid very small events for average calculations
min_stat_size = 50
min_median_size = 250
sizes = []
svtypes = set([])
with utils.open_gzipsafe(truth_vcf) as in_handle:
for call in (l.rstrip().split("\t") for l in in_handle if not l.startswith("#")):
stats = _summarize_call(call)
if stats["size"] > min_stat_size:
sizes.append(stats["size"])
svtypes.add(stats["svtype"])
pct10 = int(np.percentile(sizes, 10))
pct25 = int(np.percentile(sizes, 25))
pct50 = int(np.percentile(sizes, 50))
pct75 = int(np.percentile(sizes, 75))
ranges_detailed = [(int(min(sizes)), pct10), (pct10, pct25), (pct25, pct50),
(pct50, pct75), (pct75, max(sizes))]
ranges_split = [(int(min(sizes)), pct50), (pct50, max(sizes))]
return {"min_size": int(min(sizes) * 0.95), "max_size": int(max(sizes) + 1.05),
"svtypes": svtypes, "merge_size": int(np.percentile([x for x in sizes if x > min_median_size], 50)),
"ranges": []}
def _get_start_end(parts, index=7):
"""Retrieve start and end for a VCF record, skips BNDs without END coords
"""
start = parts[1]
end = [x.split("=")[-1] for x in parts[index].split(";") if x.startswith("END=")]
if end:
end = end[0]
return start, end
return None, None
def _summarize_call(parts):
"""Provide summary metrics on size and svtype for a SV call.
"""
svtype = [x.split("=")[1] for x in parts[7].split(";") if x.startswith("SVTYPE=")]
svtype = svtype[0] if svtype else ""
start, end = _get_start_end(parts)
return {"svtype": svtype, "size": int(end) - int(start)}
def _prep_vcf(in_file, region_bed, sample, new_sample, stats, work_dir, data):
"""Prepare VCF for SV validation:
- Subset to passing variants
- Subset to genotyped variants -- removes reference and no calls
- Selects and names samples
- Subset to callable regions
- Remove larger annotations which slow down VCF processing
"""
in_file = vcfutils.bgzip_and_index(in_file, data, remove_orig=False)
out_file = os.path.join(work_dir, "%s-vprep.vcf.gz" % utils.splitext_plus(os.path.basename(in_file))[0])
if not utils.file_uptodate(out_file, in_file):
callable_bed = _prep_callable_bed(region_bed, work_dir, stats, data)
with file_transaction(data, out_file) as tx_out_file:
ann_remove = _get_anns_to_remove(in_file)
ann_str = " | bcftools annotate -x {ann_remove}" if ann_remove else ""
cmd = ("bcftools view -T {callable_bed} -f 'PASS,.' --min-ac '1:nref' -s {sample} {in_file} "
+ ann_str +
r"| sed 's|\t{sample}|\t{new_sample}|' "
"| bgzip -c > {out_file}")
do.run(cmd.format(**locals()), "Create SV validation VCF for %s" % new_sample)
return vcfutils.bgzip_and_index(out_file, data["config"])
def _prep_callable_bed(in_file, work_dir, stats, data):
"""Sort and merge callable BED regions to prevent SV double counting
"""
out_file = os.path.join(work_dir, "%s-merge.bed.gz" % utils.splitext_plus(os.path.basename(in_file))[0])
gsort = config_utils.get_program("gsort", data)
if not utils.file_uptodate(out_file, in_file):
with file_transaction(data, out_file) as tx_out_file:
fai_file = ref.fasta_idx(dd.get_ref_file(data))
cmd = ("{gsort} {in_file} {fai_file} | bedtools merge -i - -d {stats[merge_size]} | "
"bgzip -c > {tx_out_file}")
do.run(cmd.format(**locals()), "Prepare SV callable BED regions")
return vcfutils.bgzip_and_index(out_file, data["config"])
def _get_anns_to_remove(in_file):
"""Find larger annotations, if present in VCF, that slow down processing.
"""
to_remove = ["ANN", "LOF"]
to_remove_str = tuple(["##INFO=<ID=%s" % x for x in to_remove])
cur_remove = []
with utils.open_gzipsafe(in_file) as in_handle:
for line in in_handle:
if not line.startswith("#"):
break
elif line.startswith(to_remove_str):
cur_id = line.split("ID=")[-1].split(",")[0]
cur_remove.append("INFO/%s" % cur_id)
return ",".join(cur_remove)
# -- BED based evaluation
EVENT_SIZES = [(100, 450), (450, 2000), (2000, 4000), (4000, 20000), (20000, 60000),
(60000, int(1e6))]
def _stat_str(x, n):
if n > 0:
val = float(x) / float(n) * 100.0
return {"label": "%.1f%% (%s / %s)" % (val, x, n), "val": val}
else:
return {"label": "", "val": 0}
def cnv_to_event(name, data):
"""Convert a CNV to an event name.
"""
cur_ploidy = ploidy.get_ploidy([data])
if name.startswith("cnv"):
num = max([int(x) for x in name.split("_")[0].replace("cnv", "").split(";")])
if num < cur_ploidy:
return "DEL"
elif num > cur_ploidy:
return "DUP"
else:
return name
else:
return name
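# Assuming a diploid sample (ploidy 2), the mapping above behaves as:
#   cnv_to_event("cnv1_cnvkit", data)  ->  "DEL"        (copy number 1 < ploidy)
#   cnv_to_event("cnv3_cnvkit", data)  ->  "DUP"        (copy number 3 > ploidy)
#   cnv_to_event("DEL_lumpy", data)    ->  "DEL_lumpy"  (non-CNV names pass through)
# The caller suffixes here are hypothetical; only the "cnv<N>_" prefix matters.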
def _evaluate_one(caller, svtype, size_range, ensemble, truth, data):
"""Compare a ensemble results for a caller against a specific caller and SV type.
"""
def cnv_matches(name):
return cnv_to_event(name, data) == svtype
def is_breakend(name):
return name.startswith("BND")
def in_size_range(max_buffer=0):
def _work(feat):
minf, maxf = size_range
buffer = min(max_buffer, int(((maxf + minf) / 2.0) / 10.0))
size = feat.end - feat.start
return size >= max([0, minf - buffer]) and size < maxf + buffer
return _work
def is_caller_svtype(feat):
for name in feat.name.split(","):
if ((name.startswith(svtype) or cnv_matches(name) or is_breakend(name))
and (caller == "sv-ensemble" or name.endswith(caller))):
return True
return False
minf, maxf = size_range
efeats = pybedtools.BedTool(ensemble).filter(in_size_range(0)).filter(is_caller_svtype).saveas().sort().merge()
tfeats = pybedtools.BedTool(truth).filter(in_size_range(0)).sort().merge().saveas()
etotal = efeats.count()
ttotal = tfeats.count()
match = efeats.intersect(tfeats, u=True).sort().merge().saveas().count()
return {"sensitivity": _stat_str(match, ttotal),
"precision": _stat_str(match, etotal)}
def _evaluate_multi(calls, truth_svtypes, work_dir, data):
base = os.path.join(work_dir, "%s-sv-validate" % (dd.get_sample_name(data)))
out_file = base + ".csv"
df_file = base + "-df.csv"
if any((not utils.file_uptodate(out_file, x["vrn_file"])
or not utils.file_uptodate(df_file, x["vrn_file"])) for x in calls):
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
with open(df_file, "w") as df_out_handle:
writer = csv.writer(out_handle)
dfwriter = csv.writer(df_out_handle)
writer.writerow(["svtype", "size", "caller", "sensitivity", "precision"])
dfwriter.writerow(["svtype", "size", "caller", "metric", "value", "label"])
for svtype, truth in truth_svtypes.items():
for size in EVENT_SIZES:
str_size = "%s-%s" % size
for call in calls:
call_bed = convert.to_bed(call, dd.get_sample_name(data), work_dir, calls, data)
if utils.file_exists(call_bed):
evalout = _evaluate_one(call["variantcaller"], svtype, size, call_bed,
truth, data)
writer.writerow([svtype, str_size, call["variantcaller"],
evalout["sensitivity"]["label"], evalout["precision"]["label"]])
for metric in ["sensitivity", "precision"]:
dfwriter.writerow([svtype, str_size, call["variantcaller"], metric,
evalout[metric]["val"], evalout[metric]["label"]])
return out_file, df_file
def _plot_evaluation(df_csv):
if mpl is None or plt is None or sns is None:
not_found = ", ".join([x for x in ['mpl', 'plt', 'sns'] if eval(x) is None])
logger.info("No validation plot. Missing imports: %s" % not_found)
return None
mpl.use('Agg', force=True)
df = pd.read_csv(df_csv).fillna("0%")
out = {}
for event in df["svtype"].unique():
out[event] = _plot_evaluation_event(df_csv, event)
return out
def _plot_evaluation_event(df_csv, svtype):
"""Provide plot of evaluation metrics for an SV event, stratified by event size.
"""
titles = {"INV": "Inversions", "DEL": "Deletions", "DUP": "Duplications",
"INS": "Insertions"}
out_file = "%s-%s.png" % (os.path.splitext(df_csv)[0], svtype)
sns.set(style='white')
if not utils.file_uptodate(out_file, df_csv):
metrics = ["sensitivity", "precision"]
df = pd.read_csv(df_csv).fillna("0%")
df = df[(df["svtype"] == svtype)]
event_sizes = _find_events_to_include(df, EVENT_SIZES)
fig, axs = plt.subplots(len(event_sizes), len(metrics), tight_layout=True)
if len(event_sizes) == 1:
axs = [axs]
callers = sorted(df["caller"].unique())
if "sv-ensemble" in callers:
callers.remove("sv-ensemble")
callers.append("sv-ensemble")
for i, size in enumerate(event_sizes):
size_label = "%s to %sbp" % size
size = "%s-%s" % size
for j, metric in enumerate(metrics):
ax = axs[i][j]
ax.get_xaxis().set_ticks([])
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.set_xlim(0, 125.0)
if i == 0:
ax.set_title(metric, size=12, y=1.2)
vals, labels = _get_plot_val_labels(df, size, metric, callers)
ax.barh(range(1,len(vals)+1), vals)
if j == 0:
ax.tick_params(axis='y', which='major', labelsize=8)
ax.locator_params(axis="y", tight=True)
ax.set_yticks(range(1,len(callers)+1,1))
ax.set_yticklabels(callers, va="center")
ax.text(100, len(callers)+1, size_label, fontsize=10)
else:
ax.get_yaxis().set_ticks([])
for ai, (val, label) in enumerate(zip(vals, labels)):
ax.annotate(label, (val + 0.75, ai + 1), va='center', size=7)
if svtype in titles:
fig.text(0.025, 0.95, titles[svtype], size=14)
fig.set_size_inches(7, len(event_sizes) + 1)
fig.savefig(out_file)
return out_file
def _find_events_to_include(df, event_sizes):
out = []
for size in event_sizes:
str_size = "%s-%s" % size
curdf = df[(df["size"] == str_size) & (df["metric"] == "sensitivity")]
for val in list(curdf["label"]):
if val != "0%":
out.append(size)
break
return out
def _get_plot_val_labels(df, size, metric, callers):
curdf = df[(df["size"] == size) & (df["metric"] == metric)]
vals, labels = [], []
for caller in callers:
row = curdf[curdf["caller"] == caller]
val = list(row["value"])[0]
if val == 0:
val = 0.1
vals.append(val)
labels.append(list(row["label"])[0])
return vals, labels
# -- general functionality
def evaluate(data):
"""Provide evaluations for multiple callers split by structural variant type.
"""
work_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "structural",
dd.get_sample_name(data), "validate"))
truth_sets = tz.get_in(["config", "algorithm", "svvalidate"], data)
if truth_sets and data.get("sv"):
if isinstance(truth_sets, dict):
val_summary, df_csv = _evaluate_multi(data["sv"], truth_sets, work_dir, data)
summary_plots = _plot_evaluation(df_csv)
data["sv-validate"] = {"csv": val_summary, "plot": summary_plots, "df": df_csv}
else:
assert isinstance(truth_sets, six.string_types) and utils.file_exists(truth_sets), truth_sets
val_summary = _evaluate_vcf(data["sv"], truth_sets, work_dir, data)
title = "%s structural variants" % dd.get_sample_name(data)
summary_plots = validateplot.classifyplot_from_valfile(val_summary, outtype="png", title=title)
data["sv-validate"] = {"csv": val_summary, "plot": summary_plots[0] if len(summary_plots) > 0 else None}
return data
if __name__ == "__main__":
#_, df_csv = _evaluate_multi(["lumpy", "delly", "wham", "sv-ensemble"],
# {"DEL": "synthetic_challenge_set3_tumor_20pctmasked_truth_sv_DEL.bed"},
# "syn3-tumor-ensemble-filter.bed", "sv_exclude.bed")
#_, df_csv = _evaluate_multi(["lumpy", "delly", "cn_mops", "sv-ensemble"],
# {"DEL": "NA12878.50X.ldgp.molpb_val.20140508.bed"},
# "NA12878-ensemble.bed", "LCR.bed.gz")
import sys
_plot_evaluation(sys.argv[1])
| mit |
cwu2011/scikit-learn | sklearn/covariance/tests/test_robust_covariance.py | 213 | 3359 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.validation import NotFittedError
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
EllipticEnvelope
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
# Tests the FastMCD algorithm implementation
# Small data set
# test without outliers (random independent normal data)
launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
# test with a contaminated data set (medium contamination)
launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
# test with a contaminated data set (strong contamination)
launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)
# Medium data set
launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540)
# Large data set
launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870)
# 1D data set
launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
tol_support):
rand_gen = np.random.RandomState(0)
data = rand_gen.randn(n_samples, n_features)
# add some outliers
outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
data[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
pure_data = data[inliers_mask]
# compute MCD by fitting an object
mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
T = mcd_fit.location_
S = mcd_fit.covariance_
H = mcd_fit.support_
# compare with the estimates learnt from the inliers
error_location = np.mean((pure_data.mean(0) - T) ** 2)
assert(error_location < tol_loc)
error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
assert(error_cov < tol_cov)
assert(np.sum(H) >= tol_support)
assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
def test_mcd_issue1127():
# Check that the code does not break with X.shape = (3, 1)
# (i.e. n_support = n_samples)
rnd = np.random.RandomState(0)
X = rnd.normal(size=(3, 1))
mcd = MinCovDet()
mcd.fit(X)
def test_outlier_detection():
rnd = np.random.RandomState(0)
X = rnd.randn(100, 10)
clf = EllipticEnvelope(contamination=0.1)
assert_raises(NotFittedError, clf.predict, X)
assert_raises(NotFittedError, clf.decision_function, X)
clf.fit(X)
y_pred = clf.predict(X)
decision = clf.decision_function(X, raw_values=True)
decision_transformed = clf.decision_function(X, raw_values=False)
assert_array_almost_equal(
decision, clf.mahalanobis(X))
assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
assert_almost_equal(clf.score(X, np.ones(100)),
(100 - y_pred[y_pred == -1].size) / 100.)
assert(sum(y_pred == -1) == sum(decision_transformed < 0))
| bsd-3-clause |
crichardson17/starburst_atlas | Low_resolution_sims/DustFree_LowRes/Padova_cont_subsolar_.2/Optical2.py | 33 | 7437 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
for file in os.listdir('.'):
if file.endswith(".grd"):
inputfile = file
for file in os.listdir('.'):
if file.endswith(".txt"):
inputfile2 = file
# ------------------------------------------------------------------------------------------------------
#Patches data
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.), # ignored
]
path = Path(verts, codes)
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.), # ignored
]
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for others peoples' data onto our plots.
def add_patches(ax):
patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
patch = patches.PathPatch(path, facecolor='red', lw=0)
ax1.add_patch(patch3)
ax1.add_patch(patch2)
ax1.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
def add_sub_plot(sub_num):
numplots = 16
plt.subplot(numplots/4.,4,sub_num)
rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
zi = rbf(xi, yi)
contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)
plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
if sub_num == numplots / 2.:
print "half the plots are complete"
#axis limits
yt_min = 8
yt_max = 23
xt_min = 0
xt_max = 12
plt.ylim(yt_min,yt_max)
plt.xlim(xt_min,xt_max)
plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]:
plt.tick_params(labelleft = 'off')
else:
plt.tick_params(labelleft = 'on')
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]:
plt.tick_params(labelbottom = 'off')
else:
plt.tick_params(labelbottom = 'on')
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 1:
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
if sub_num == 13:
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 16 :
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid = [];
with open(inputfile, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid.append(row);
grid = asarray(grid)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines = [];
with open(inputfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines.append(row);
dataEmissionlines = asarray(dataEmissionlines)
print "import files complete"
# ---------------------------------------------------
#for grid
phi_values = grid[1:len(dataEmissionlines)+1,6]
hdens_values = grid[1:len(dataEmissionlines)+1,7]
#for lines
headers = headers[1:]
Emissionlines = dataEmissionlines[:, 1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(Emissionlines[0]),4))
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = Emissionlines[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
			concatenated_data[i,j] = 0
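# For intuition about the scaling above: each stored value is log10(4860 * F_line / F_4860),
# so a line exactly as strong as the incident 4860 flux maps to log10(4860) ~= 3.69,
# while ratios whose log is not positive fall back to zero.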
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(Emissionlines),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
line = [56, #AR 4 4740
58, #4861
59, #O III 4959
60, #O 3 5007
61, #N 1 5200
63, #O 1 5577
64, #N 2 5755
65, #HE 1 5876
66, #O 1 6300
67, #S 3 6312
68, #O 1 6363
69, #H 1 6563
70, #N 2 6584
71, #S II 6716
72, #S 2 6720
73] #S II 6731
#create z array for this plot
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("Optical Lines Continued", fontsize=14)
# ---------------------------------------------------
for i in range(16):
add_sub_plot(i)
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
print "complete"
plt.savefig('Optical_lines_cntd.pdf')
plt.clf()
| gpl-2.0 |
alexcpsec/coursera-compinvesting1-hw | HW2/hw2_event.py | 2 | 2823 | ## Computational Investing I
## HW 2
##
## Author: alexcpsec
import pandas as pd
import numpy as np
import math
import copy
import QSTK.qstkutil.qsdateutil as du
import datetime as dt
import QSTK.qstkutil.DataAccess as da
import QSTK.qstkutil.tsutil as tsu
import QSTK.qstkstudy.EventProfiler as ep
"""
Accepts a list of symbols along with start and end date
Returns the Event Matrix which is a pandas Datamatrix
Event matrix has the following structure :
|IBM |GOOG|XOM |MSFT| GS | JP |
(d1)|nan |nan | 1 |nan |nan | 1 |
(d2)|nan | 1 |nan |nan |nan |nan |
(d3)| 1 |nan | 1 |nan | 1 |nan |
(d4)|nan | 1 |nan | 1 |nan |nan |
...................................
...................................
Also, d1 = start date
nan = no information about any event.
1 = status bit(positively confirms the event occurence)
"""
def find_events(ls_symbols, d_data):
df_close = d_data['actual_close']
ts_market = df_close['SPY']
print "Finding Events"
# Creating an empty dataframe
df_events = copy.deepcopy(df_close)
df_events = df_events * np.NAN
# Time stamps for the event range
ldt_timestamps = df_close.index
for s_sym in ls_symbols:
for i in range(1, len(ldt_timestamps)):
# Calculating the returns for this timestamp
f_symprice_today = df_close[s_sym].ix[ldt_timestamps[i]]
f_symprice_yest = df_close[s_sym].ix[ldt_timestamps[i - 1]]
if f_symprice_yest >= 6.00 and f_symprice_today < 6.00:
df_events[s_sym].ix[ldt_timestamps[i]] = 1
return df_events
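# Illustrative note on the rule above (hypothetical prices): with actual closes of
# 6.50, 6.20, 5.90 on consecutive days, only the third day is flagged with 1, since
# its previous close was >= $6.00 and it closed below $6.00; all other cells stay NaN.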
def event_profiler(ldt_timestamps, symbols_list):
dataobj = da.DataAccess('Yahoo')
ls_symbols = dataobj.get_symbols_from_list(symbols_list)
ls_symbols.append('SPY')
ls_keys = ['close','actual_close']
ldf_data = dataobj.get_data(ldt_timestamps, ls_symbols, ls_keys)
d_data = dict(zip(ls_keys, ldf_data))
for s_key in ls_keys:
d_data[s_key] = d_data[s_key].fillna(method = 'ffill')
d_data[s_key] = d_data[s_key].fillna(method = 'bfill')
d_data[s_key] = d_data[s_key].fillna(1.0)
df_events = find_events(ls_symbols, d_data)
report_filename = "hw2_event_study6_" + symbols_list + ".pdf"
print "Creating Study " + symbols_list
ep.eventprofiler(df_events, d_data, i_lookback=20, i_lookforward=20,
s_filename=report_filename, b_market_neutral=True, b_errorbars=True,
s_market_sym='SPY')
if __name__ == '__main__':
dt_start = dt.datetime(2008, 1, 1)
dt_end = dt.datetime(2009, 12, 31)
ldt_timestamps = du.getNYSEdays(dt_start, dt_end, dt.timedelta(hours=16))
## Starting up with SP500 2008
event_profiler(ldt_timestamps, 'sp5002008')
## Doing the SP500 2012
event_profiler(ldt_timestamps, 'sp5002012')
| mit |
Universal-Model-Converter/UMC3.0a | data/Python/x86/Lib/site-packages/scipy/signal/spectral.py | 3 | 13369 | """Tools for spectral analysis.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy import fftpack
from . import signaltools
from .windows import get_window
from ._spectral import lombscargle
import warnings
from scipy.lib.six import string_types
__all__ = ['periodogram', 'welch', 'lombscargle']
def periodogram(x, fs=1.0, window=None, nfft=None, detrend='constant',
return_onesided=True, scaling='density', axis=-1):
"""
Estimate power spectral density using a periodogram.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series in units of Hz. Defaults
to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is an array it will be used
directly as the window. Defaults to None; equivalent to 'boxcar'.
nfft : int, optional
Length of the FFT used. If None the length of `x` will be used.
detrend : str or function, optional
Specifies how to detrend `x` prior to computing the spectrum. If
`detrend` is a string, it is passed as the ``type`` argument to
`detrend`. If it is a function, it should return a detrended array.
Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz if `x` is measured in V and computing
the power spectrum ('spectrum') where `Pxx` has units of V**2 if `x` is
measured in V. Defaults to 'density'
axis : int, optional
Axis along which the periodogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of `x`.
Notes
-----
.. versionadded:: 0.12.0
See Also
--------
welch: Estimate power spectral density using Welch's method
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.periodogram(x, fs)
>>> plt.semilogy(f, Pxx_den)
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[256:])
0.0009924865443739191
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.periodogram(x, fs, 'flattop', scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
x = np.asarray(x)
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape)
if window is None:
window = 'boxcar'
if nfft is None:
nperseg = x.shape[axis]
elif nfft == x.shape[axis]:
nperseg = nfft
elif nfft > x.shape[axis]:
nperseg = x.shape[axis]
elif nfft < x.shape[axis]:
s = [np.s_[:]]*len(x.shape)
s[axis] = np.s_[:nfft]
x = x[s]
nperseg = nfft
nfft = None
return welch(x, fs, window, nperseg, 0, nfft, detrend, return_onesided,
scaling, axis)
def welch(x, fs=1.0, window='hanning', nperseg=256, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density', axis=-1):
"""
Estimate power spectral density using Welch's method.
Welch's method [1]_ computes an estimate of the power spectral density
by dividing the data into overlapping segments, computing a modified
periodogram for each segment and averaging the periodograms.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series in units of Hz. Defaults
to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to 'hanning'.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap: int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg / 2``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where Pxx has units of V**2/Hz if x is measured in V and computing
the power spectrum ('spectrum') where Pxx has units of V**2 if x is
measured in V. Defaults to 'density'.
axis : int, optional
Axis along which the periodogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of x.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default 'hanning' window an
overlap of 50% is a reasonable trade off between accurately estimating
the signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
If `noverlap` is 0, this method is equivalent to Bartlett's method [2]_.
.. versionadded:: 0.12.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika, vol. 37, pp. 1-16, 1950.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.welch(x, fs, 1024)
>>> plt.semilogy(f, Pxx_den)
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[256:])
0.0009924865443739191
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.welch(x, fs, 'flattop', 1024, scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
x = np.asarray(x)
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape)
if axis != -1:
x = np.rollaxis(x, axis, len(x.shape))
if x.shape[-1] < nperseg:
warnings.warn('nperseg = %d, is greater than x.shape[%d] = %d, using '
'nperseg = x.shape[%d]'
% (nperseg, axis, x.shape[axis], axis))
nperseg = x.shape[-1]
if isinstance(window, string_types) or type(window) is tuple:
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if win.shape[0] > x.shape[-1]:
raise ValueError('window is longer than x.')
nperseg = win.shape[0]
if scaling == 'density':
scale = 1.0 / (fs * (win*win).sum())
elif scaling == 'spectrum':
scale = 1.0 / win.sum()**2
else:
raise ValueError('Unknown scaling: %r' % scaling)
if noverlap is None:
noverlap = nperseg // 2
elif noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
if nfft is None:
nfft = nperseg
elif nfft < nperseg:
raise ValueError('nfft must be greater than or equal to nperseg.')
if not hasattr(detrend, '__call__'):
detrend_func = lambda seg: signaltools.detrend(seg, type=detrend)
elif axis != -1:
# Wrap this function so that it receives a shape that it could
# reasonably expect to receive.
def detrend_func(seg):
seg = np.rollaxis(seg, -1, axis)
seg = detrend(seg)
return np.rollaxis(seg, axis, len(seg.shape))
else:
detrend_func = detrend
step = nperseg - noverlap
indices = np.arange(0, x.shape[-1]-nperseg+1, step)
if np.isrealobj(x) and return_onesided:
outshape = list(x.shape)
if nfft % 2 == 0: # even
outshape[-1] = nfft // 2 + 1
Pxx = np.empty(outshape, x.dtype)
for k, ind in enumerate(indices):
x_dt = detrend_func(x[..., ind:ind+nperseg])
xft = fftpack.rfft(x_dt*win, nfft)
# fftpack.rfft returns the positive frequency part of the fft
# as real values, packed r r i r i r i ...
# this indexing is to extract the matching real and imaginary
# parts, while also handling the pure real zero and nyquist
# frequencies.
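                # For example (illustrative, even nfft = 8): fftpack.rfft returns
                # [R0, r1, i1, r2, i2, r3, i3, R4], so xft[..., 0] and xft[..., -1]
                # are the purely real DC and Nyquist bins, while the interior
                # entries alternate real/imaginary parts of each positive frequency.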
if k == 0:
Pxx[..., (0,-1)] = xft[..., (0,-1)]**2
Pxx[..., 1:-1] = xft[..., 1:-1:2]**2 + xft[..., 2::2]**2
else:
Pxx *= k/(k+1.0)
Pxx[..., (0,-1)] += xft[..., (0,-1)]**2 / (k+1.0)
Pxx[..., 1:-1] += (xft[..., 1:-1:2]**2 + xft[..., 2::2]**2) \
/ (k+1.0)
else: # odd
outshape[-1] = (nfft+1) // 2
Pxx = np.empty(outshape, x.dtype)
for k, ind in enumerate(indices):
x_dt = detrend_func(x[..., ind:ind+nperseg])
xft = fftpack.rfft(x_dt*win, nfft)
if k == 0:
Pxx[..., 0] = xft[..., 0]**2
Pxx[..., 1:] = xft[..., 1::2]**2 + xft[..., 2::2]**2
else:
Pxx *= k/(k+1.0)
Pxx[..., 0] += xft[..., 0]**2 / (k+1)
Pxx[..., 1:] += (xft[..., 1::2]**2 + xft[..., 2::2]**2) \
/ (k+1.0)
Pxx[..., 1:-1] *= 2*scale
Pxx[..., (0,-1)] *= scale
f = np.arange(Pxx.shape[-1]) * (fs/nfft)
else:
for k, ind in enumerate(indices):
x_dt = detrend_func(x[..., ind:ind+nperseg])
xft = fftpack.fft(x_dt*win, nfft)
if k == 0:
Pxx = (xft * xft.conj()).real
else:
Pxx *= k/(k+1.0)
Pxx += (xft * xft.conj()).real / (k+1.0)
Pxx *= scale
f = fftpack.fftfreq(nfft, 1.0/fs)
if axis != -1:
Pxx = np.rollaxis(Pxx, -1, axis)
return f, Pxx
| mit |
hansomesong/TracesAnalyzer | 20160218Tasks/Plot_stability_change_num.py | 1 | 7889 | # -*- coding: utf-8 -*-
__author__ = 'yueli'
import matplotlib.pyplot as plt
from config.config import *
import numpy as np
import math
from collections import Counter
import matplotlib as mpl
mpl.rcParams['text.usetex'] = True
mpl.rcParams.update({'figure.autolayout': True})
def change_num_counter(target_file):
file_num_counter = 0
change_num_list = []
nb_observation = 0.0
with open(target_file) as f_handler:
next(f_handler)
for line in f_handler:
tmp_list = line.split(';')
if tmp_list[LOG_TIME_COLUMN['coherence']] != "True":
file_num_counter += 1
file_path = tmp_list[LOG_TIME_COLUMN['log_file_name']]
with open(file_path) as tmp_handler:
nb_observation = sum(1 for line in tmp_handler)-1.0
if tmp_list[15] != '0':
change_num_list.append(len(tmp_list[15].split(','))/nb_observation*100.0)
else:
if tmp_list[18] != '0':
change_num_list.append(len(tmp_list[18].split(','))/nb_observation*100.0)
return file_num_counter, change_num_list
def cdf(pdf_list):
cdf_list = []
for value in pdf_list:
if not cdf_list:
cdf_list.append(value)
else:
cdf_list.append(value + cdf_list[-1])
return cdf_list
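# Quick illustration of the running sum above (illustrative pdf values, in percent):
#   cdf([50.0, 30.0, 20.0])  ->  [50.0, 80.0, 100.0]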
# Compute change number / experiment number from the <EID, MR, VP> csv file
# input = <EID, MR, VP> csv file
# output = experiment number
def experiment_number_counter(target_file):
with open(target_file) as f_handler:
experiment_number = float(sum(1 for line in f_handler)-1.0)
return experiment_number
# This function computes, for one comparison time VP CSV file, the change percentage of the
# files whose Coherence column is False, i.e. change number / experiment number in the <EID, MR, VP> csv file
# input = comparison time VP CSV file
# output = percentage_list
def instability_occurence(target_file):
with open(target_file) as f_handler:
unstable_file_num_counter = 0.0
unstable_percentage_list = []
next(f_handler)
for line in f_handler:
lines = line.split(";")
if lines[LOG_TIME_COLUMN['coherence']] == 'False':
unstable_file_num_counter = unstable_file_num_counter + 1.0
change_number = float(len(lines[LOG_TIME_COLUMN['case1_change_time']].split(",")) + len(lines[LOG_TIME_COLUMN['case3_4_change_time']].split(",")))
file_name = os.path.join(PLANET_CSV_DIR, lines[LOG_TIME_COLUMN['vantage']], "{0}.csv".format(lines[LOG_TIME_COLUMN['log_file_name']].split("/")[-1]))
unstable_percentage_list.append(change_number / experiment_number_counter(file_name) * 100.0)
return unstable_file_num_counter, unstable_percentage_list
if __name__ == '__main__':
	# Read the environment variable 'PROJECT_LOG_DIR' (this variable is defined in .profile or .bashprofile under the working directory)
try:
PLANET_DIR = os.environ['PLANETLAB']
CSV_FILE_DESTDIR = os.environ['PROJECT_LOG_DIR']
PLANET_CSV_DIR = os.environ['PLANETLAB_CSV']
PLOT_DIR = os.environ['PROJECT_PLOT_DIR']
except KeyError:
print "Environment variable PROJECT_LOG_DIR is not properly defined or " \
"the definition about this variable is not taken into account."
print "If PROJECT_LOG_DIR is well defined, restart Pycharm to try again!"
	# # Create a list to store the percentage for each file that contains a NormalReply
# file_total_num = 0
# change_num_total_list = []
#
	# # Iterate over the 5 VPs in VP_LIST
# for vp in VP_LIST:
# # for vp in ["wiilab"]:
	# 	# Create a CSV file to store the cases where SRC_reply != Locator
# target_file = os.path.join(CSV_FILE_DESTDIR, 'comparison_time_{0}.csv'.format(vp))
# print target_file
# file_num_counter, change_num_list = change_num_counter(target_file)
# file_total_num = file_total_num + file_num_counter
# change_num_total_list.extend(change_num_list)
# print "Unstable number =", len(change_num_list)
#
# print "change_num_total_list =", change_num_total_list
	# # # Count how many stable files there are
# # print "file_total_num =", file_total_num
# # stable_file_num = file_total_num - len(change_num_total_list)
# # print "stable_file_num =", stable_file_num
	# # Assume here that every file has 802 experiment runs
# # change_num_total_counter = Counter((i/802.0*100.0) for i in change_num_total_list)
# print "change_num_total_list =", len([math.ceil(i*100) for i in change_num_total_list])
# change_num_total_counter = Counter(math.ceil(i*100) for i in change_num_total_list)
# tmp =sorted(change_num_total_counter.items(), key=lambda x: x[0])
#
# pdf_x_axis = [list(i)[0] for i in tmp]
# # pdf_x_axis.insert(0, 0.0)
# pdf_y_axis = [list(i)[1]/(float(file_total_num))*100 for i in tmp]
# # pdf_y_axis.insert(0, float(stable_file_num)/float(file_total_num)*100)
# cdf_y_axis = cdf(pdf_y_axis)
#
#
# print "change_num_total_counter =", change_num_total_counter
# print "pdf_x_axis =", pdf_x_axis
# print "pdf_y_axis =", pdf_y_axis
# print "cdf_y_axis =", cdf_y_axis
#
#
# # Plot part
# # Modify the size and dpi of picture, default size is (8,6), default dpi is 80
# plt.gcf().set_size_inches(10,9)
# # Define font
# font_label = {
# 'fontname' : 'Times New Roman',
# 'color' : 'black',
# 'fontsize' : 40
# }
# plt.grid(True)
# # Plot pdf
# plt.plot(pdf_x_axis, pdf_y_axis, c='black', linewidth=3)
# plt.scatter(pdf_x_axis, pdf_y_axis, c='black', s=80)
# plt.xlabel("instability frequency (%)", fontsize=45, fontname='Times New Roman')
# plt.ylabel("pdf (%)", fontsize=45, fontname='Times New Roman')
# plt.xlim(0, 50)
# plt.ylim(0, 8)
# plt.savefig(os.path.join(PLOT_DIR, 'Plot_newSize', 'pdf_instability_occur.eps'), dpi=300, transparent=True)
# plt.show()
# # Plot cdf
# plt.plot(pdf_x_axis, cdf_y_axis, c='black', linewidth=5)
# # plt.scatter(pdf_x_axis, cdf_y_axis, c='black', s=50)
# plt.xlabel("instability frequency (%)", font_label)
# plt.ylabel("cdf (%)", font_label)
# plt.xticks(fontsize=16)
# plt.yticks(fontsize=16)
# #plt.xlim(1, 50)
# # plt.savefig(os.path.join(PLOT_DIR, 'Plot_newSize', 'cdf_instability_occur.eps'), dpi=300, transparent=True)
# plt.show()
unstable_total_num = 0.0
unstable_total_per_list = []
for vp in VP_LIST:
target_file = os.path.join(CSV_FILE_DESTDIR, 'comparison_time_{0}.csv'.format(vp))
print target_file
unstable_file_num, unstable_percentage_list = instability_occurence(target_file)
unstable_total_num = unstable_total_num + unstable_file_num
unstable_total_per_list.extend(unstable_percentage_list)
unstable_total_per_int_list = [math.ceil(i) for i in unstable_total_per_list]
x_axis = Counter(unstable_total_per_int_list).keys()
y_pdf_axis = [(float(i) / unstable_total_num * 100) for i in Counter(unstable_total_per_int_list).values()]
y_cdf_axis = cdf(y_pdf_axis)
print unstable_total_num
print Counter(unstable_total_per_int_list)
print x_axis
print y_pdf_axis
print y_cdf_axis
plt.plot(x_axis, y_cdf_axis, c='black', linewidth=5)
# plt.scatter(pdf_x_axis, cdf_y_axis, c='black', s=50)
plt.xlabel(r"instability frequency (\%)", font)
plt.ylabel(r"cdf (\%)", font)
plt.xticks(fontsize=40, fontname="Times New Roman")
plt.yticks(fontsize=40, fontname="Times New Roman")
plt.xlim(1, len(x_axis))
plt.grid(True)
# plt.savefig(os.path.join(PLOT_DIR, 'Plot_newSize', 'cdf_instability_occur.eps'), dpi=300, transparent=True)
	plt.show()
| gpl-2.0 |
sunzhxjs/JobGIS | lib/python2.7/site-packages/pandas/tests/test_nanops.py | 9 | 40509 | # -*- coding: utf-8 -*-
from __future__ import division, print_function
from functools import partial
import warnings
import numpy as np
from pandas import Series
from pandas.core.common import isnull, is_integer_dtype
import pandas.core.nanops as nanops
import pandas.util.testing as tm
use_bn = nanops._USE_BOTTLENECK
class TestnanopsDataFrame(tm.TestCase):
def setUp(self):
np.random.seed(11235)
nanops._USE_BOTTLENECK = False
self.arr_shape = (11, 7, 5)
self.arr_float = np.random.randn(*self.arr_shape)
self.arr_float1 = np.random.randn(*self.arr_shape)
self.arr_complex = self.arr_float + self.arr_float1*1j
self.arr_int = np.random.randint(-10, 10, self.arr_shape)
self.arr_bool = np.random.randint(0, 2, self.arr_shape) == 0
self.arr_str = np.abs(self.arr_float).astype('S')
self.arr_utf = np.abs(self.arr_float).astype('U')
self.arr_date = np.random.randint(0, 20000,
self.arr_shape).astype('M8[ns]')
self.arr_tdelta = np.random.randint(0, 20000,
self.arr_shape).astype('m8[ns]')
self.arr_nan = np.tile(np.nan, self.arr_shape)
self.arr_float_nan = np.vstack([self.arr_float, self.arr_nan])
self.arr_float1_nan = np.vstack([self.arr_float1, self.arr_nan])
self.arr_nan_float1 = np.vstack([self.arr_nan, self.arr_float1])
self.arr_nan_nan = np.vstack([self.arr_nan, self.arr_nan])
self.arr_inf = self.arr_float*np.inf
self.arr_float_inf = np.vstack([self.arr_float, self.arr_inf])
self.arr_float1_inf = np.vstack([self.arr_float1, self.arr_inf])
self.arr_inf_float1 = np.vstack([self.arr_inf, self.arr_float1])
self.arr_inf_inf = np.vstack([self.arr_inf, self.arr_inf])
self.arr_nan_inf = np.vstack([self.arr_nan, self.arr_inf])
self.arr_float_nan_inf = np.vstack([self.arr_float,
self.arr_nan,
self.arr_inf])
self.arr_nan_float1_inf = np.vstack([self.arr_float,
self.arr_inf,
self.arr_nan])
self.arr_nan_nan_inf = np.vstack([self.arr_nan,
self.arr_nan,
self.arr_inf])
self.arr_obj = np.vstack([self.arr_float.astype('O'),
self.arr_int.astype('O'),
self.arr_bool.astype('O'),
self.arr_complex.astype('O'),
self.arr_str.astype('O'),
self.arr_utf.astype('O'),
self.arr_date.astype('O'),
self.arr_tdelta.astype('O')])
self.arr_nan_nanj = self.arr_nan + self.arr_nan*1j
self.arr_complex_nan = np.vstack([self.arr_complex, self.arr_nan_nanj])
self.arr_nan_infj = self.arr_inf*1j
self.arr_complex_nan_infj = np.vstack([self.arr_complex,
self.arr_nan_infj])
self.arr_float_2d = self.arr_float[:, :, 0]
self.arr_float1_2d = self.arr_float1[:, :, 0]
self.arr_complex_2d = self.arr_complex[:, :, 0]
self.arr_int_2d = self.arr_int[:, :, 0]
self.arr_bool_2d = self.arr_bool[:, :, 0]
self.arr_str_2d = self.arr_str[:, :, 0]
self.arr_utf_2d = self.arr_utf[:, :, 0]
self.arr_date_2d = self.arr_date[:, :, 0]
self.arr_tdelta_2d = self.arr_tdelta[:, :, 0]
self.arr_nan_2d = self.arr_nan[:, :, 0]
self.arr_float_nan_2d = self.arr_float_nan[:, :, 0]
self.arr_float1_nan_2d = self.arr_float1_nan[:, :, 0]
self.arr_nan_float1_2d = self.arr_nan_float1[:, :, 0]
self.arr_nan_nan_2d = self.arr_nan_nan[:, :, 0]
self.arr_nan_nanj_2d = self.arr_nan_nanj[:, :, 0]
self.arr_complex_nan_2d = self.arr_complex_nan[:, :, 0]
self.arr_inf_2d = self.arr_inf[:, :, 0]
self.arr_float_inf_2d = self.arr_float_inf[:, :, 0]
self.arr_nan_inf_2d = self.arr_nan_inf[:, :, 0]
self.arr_float_nan_inf_2d = self.arr_float_nan_inf[:, :, 0]
self.arr_nan_nan_inf_2d = self.arr_nan_nan_inf[:, :, 0]
self.arr_float_1d = self.arr_float[:, 0, 0]
self.arr_float1_1d = self.arr_float1[:, 0, 0]
self.arr_complex_1d = self.arr_complex[:, 0, 0]
self.arr_int_1d = self.arr_int[:, 0, 0]
self.arr_bool_1d = self.arr_bool[:, 0, 0]
self.arr_str_1d = self.arr_str[:, 0, 0]
self.arr_utf_1d = self.arr_utf[:, 0, 0]
self.arr_date_1d = self.arr_date[:, 0, 0]
self.arr_tdelta_1d = self.arr_tdelta[:, 0, 0]
self.arr_nan_1d = self.arr_nan[:, 0, 0]
self.arr_float_nan_1d = self.arr_float_nan[:, 0, 0]
self.arr_float1_nan_1d = self.arr_float1_nan[:, 0, 0]
self.arr_nan_float1_1d = self.arr_nan_float1[:, 0, 0]
self.arr_nan_nan_1d = self.arr_nan_nan[:, 0, 0]
self.arr_nan_nanj_1d = self.arr_nan_nanj[:, 0, 0]
self.arr_complex_nan_1d = self.arr_complex_nan[:, 0, 0]
self.arr_inf_1d = self.arr_inf.ravel()
self.arr_float_inf_1d = self.arr_float_inf[:, 0, 0]
self.arr_nan_inf_1d = self.arr_nan_inf[:, 0, 0]
self.arr_float_nan_inf_1d = self.arr_float_nan_inf[:, 0, 0]
self.arr_nan_nan_inf_1d = self.arr_nan_nan_inf[:, 0, 0]
def tearDown(self):
nanops._USE_BOTTLENECK = use_bn
def check_results(self, targ, res, axis):
res = getattr(res, 'asm8', res)
res = getattr(res, 'values', res)
# timedeltas are a beast here
def _coerce_tds(targ, res):
if targ.dtype == 'm8[ns]':
if len(targ) == 1:
targ = targ[0].item()
res = res.item()
else:
targ = targ.view('i8')
return targ, res
try:
if axis != 0 and hasattr(targ, 'shape') and targ.ndim and targ.shape != res.shape:
res = np.split(res, [targ.shape[0]], axis=0)[0]
except:
targ, res = _coerce_tds(targ, res)
try:
tm.assert_almost_equal(targ, res)
except:
if targ.dtype == 'm8[ns]':
targ, res = _coerce_tds(targ, res)
tm.assert_almost_equal(targ, res)
return
# There are sometimes rounding errors with
# complex and object dtypes.
# If it isn't one of those, re-raise the error.
if not hasattr(res, 'dtype') or res.dtype.kind not in ['c', 'O']:
raise
# convert object dtypes to something that can be split into
# real and imaginary parts
if res.dtype.kind == 'O':
if targ.dtype.kind != 'O':
res = res.astype(targ.dtype)
else:
try:
res = res.astype('c16')
except:
res = res.astype('f8')
try:
targ = targ.astype('c16')
except:
targ = targ.astype('f8')
# there should never be a case where numpy returns an object
# but nanops doesn't, so make that an exception
elif targ.dtype.kind == 'O':
raise
tm.assert_almost_equal(targ.real, res.real)
tm.assert_almost_equal(targ.imag, res.imag)
def check_fun_data(self, testfunc, targfunc,
testarval, targarval, targarnanval, **kwargs):
for axis in list(range(targarval.ndim))+[None]:
for skipna in [False, True]:
targartempval = targarval if skipna else targarnanval
try:
targ = targfunc(targartempval, axis=axis, **kwargs)
res = testfunc(testarval, axis=axis, skipna=skipna,
**kwargs)
self.check_results(targ, res, axis)
if skipna:
res = testfunc(testarval, axis=axis, **kwargs)
self.check_results(targ, res, axis)
if axis is None:
res = testfunc(testarval, skipna=skipna, **kwargs)
self.check_results(targ, res, axis)
if skipna and axis is None:
res = testfunc(testarval, **kwargs)
self.check_results(targ, res, axis)
except BaseException as exc:
exc.args += ('axis: %s of %s' % (axis, testarval.ndim-1),
'skipna: %s' % skipna,
'kwargs: %s' % kwargs)
raise
if testarval.ndim <= 1:
return
try:
testarval2 = np.take(testarval, 0, axis=-1)
targarval2 = np.take(targarval, 0, axis=-1)
targarnanval2 = np.take(targarnanval, 0, axis=-1)
except ValueError:
return
self.check_fun_data(testfunc, targfunc,
testarval2, targarval2, targarnanval2,
**kwargs)
def check_fun(self, testfunc, targfunc,
testar, targar=None, targarnan=None,
**kwargs):
if targar is None:
targar = testar
if targarnan is None:
targarnan = testar
testarval = getattr(self, testar)
targarval = getattr(self, targar)
targarnanval = getattr(self, targarnan)
try:
self.check_fun_data(testfunc, targfunc,
testarval, targarval, targarnanval, **kwargs)
except BaseException as exc:
exc.args += ('testar: %s' % testar,
'targar: %s' % targar,
'targarnan: %s' % targarnan)
raise
def check_funs(self, testfunc, targfunc,
allow_complex=True, allow_all_nan=True, allow_str=True,
allow_date=True, allow_tdelta=True, allow_obj=True,
**kwargs):
self.check_fun(testfunc, targfunc, 'arr_float', **kwargs)
self.check_fun(testfunc, targfunc, 'arr_float_nan', 'arr_float',
**kwargs)
self.check_fun(testfunc, targfunc, 'arr_int', **kwargs)
self.check_fun(testfunc, targfunc, 'arr_bool', **kwargs)
objs = [self.arr_float.astype('O'),
self.arr_int.astype('O'),
self.arr_bool.astype('O')]
if allow_all_nan:
self.check_fun(testfunc, targfunc, 'arr_nan', **kwargs)
if allow_complex:
self.check_fun(testfunc, targfunc, 'arr_complex', **kwargs)
self.check_fun(testfunc, targfunc,
'arr_complex_nan', 'arr_complex', **kwargs)
if allow_all_nan:
self.check_fun(testfunc, targfunc, 'arr_nan_nanj', **kwargs)
objs += [self.arr_complex.astype('O')]
if allow_str:
self.check_fun(testfunc, targfunc, 'arr_str', **kwargs)
self.check_fun(testfunc, targfunc, 'arr_utf', **kwargs)
objs += [self.arr_str.astype('O'),
self.arr_utf.astype('O')]
if allow_date:
try:
targfunc(self.arr_date)
except TypeError:
pass
else:
self.check_fun(testfunc, targfunc, 'arr_date', **kwargs)
objs += [self.arr_date.astype('O')]
if allow_tdelta:
try:
targfunc(self.arr_tdelta)
except TypeError:
pass
else:
self.check_fun(testfunc, targfunc, 'arr_tdelta', **kwargs)
objs += [self.arr_tdelta.astype('O')]
if allow_obj:
self.arr_obj = np.vstack(objs)
# some nanops handle object dtypes better than their numpy
# counterparts, so the numpy functions need to be given something
# else
if allow_obj == 'convert':
targfunc = partial(self._badobj_wrap,
func=targfunc, allow_complex=allow_complex)
self.check_fun(testfunc, targfunc, 'arr_obj', **kwargs)
def check_funs_ddof(self, testfunc, targfunc,
allow_complex=True, allow_all_nan=True, allow_str=True,
allow_date=False, allow_tdelta=False, allow_obj=True,):
for ddof in range(3):
try:
self.check_funs(testfunc, targfunc,
allow_complex, allow_all_nan, allow_str,
allow_date, allow_tdelta, allow_obj,
ddof=ddof)
except BaseException as exc:
exc.args += ('ddof %s' % ddof,)
raise
def _badobj_wrap(self, value, func, allow_complex=True, **kwargs):
if value.dtype.kind == 'O':
if allow_complex:
value = value.astype('c16')
else:
value = value.astype('f8')
return func(value, **kwargs)
def test_nanany(self):
self.check_funs(nanops.nanany, np.any,
allow_all_nan=False, allow_str=False, allow_date=False, allow_tdelta=False)
def test_nanall(self):
self.check_funs(nanops.nanall, np.all,
allow_all_nan=False, allow_str=False, allow_date=False, allow_tdelta=False)
def test_nansum(self):
self.check_funs(nanops.nansum, np.sum,
allow_str=False, allow_date=False, allow_tdelta=True)
def test_nanmean(self):
self.check_funs(nanops.nanmean, np.mean,
allow_complex=False, allow_obj=False,
allow_str=False, allow_date=False, allow_tdelta=True)
def test_nanmean_overflow(self):
# GH 10155
# In the previous implementation mean can overflow for int dtypes, it
# is now consistent with numpy
# numpy < 1.9.0 is not computing this correctly
from distutils.version import LooseVersion
if LooseVersion(np.__version__) >= '1.9.0':
for a in [2 ** 55, -2 ** 55, 20150515061816532]:
s = Series(a, index=range(500), dtype=np.int64)
result = s.mean()
np_result = s.values.mean()
self.assertEqual(result, a)
self.assertEqual(result, np_result)
self.assertTrue(result.dtype == np.float64)
def test_returned_dtype(self):
dtypes = [np.int16, np.int32, np.int64, np.float32, np.float64]
if hasattr(np,'float128'):
dtypes.append(np.float128)
for dtype in dtypes:
s = Series(range(10), dtype=dtype)
group_a = ['mean', 'std', 'var', 'skew', 'kurt']
group_b = ['min', 'max']
for method in group_a + group_b:
result = getattr(s, method)()
if is_integer_dtype(dtype) and method in group_a:
self.assertTrue(result.dtype == np.float64,
"return dtype expected from %s is np.float64, got %s instead" % (method, result.dtype))
else:
self.assertTrue(result.dtype == dtype,
"return dtype expected from %s is %s, got %s instead" % (method, dtype, result.dtype))
def test_nanmedian(self):
with warnings.catch_warnings(record=True):
self.check_funs(nanops.nanmedian, np.median,
allow_complex=False, allow_str=False, allow_date=False,
allow_tdelta=True,
allow_obj='convert')
def test_nanvar(self):
self.check_funs_ddof(nanops.nanvar, np.var,
allow_complex=False,
allow_str=False,
allow_date=False,
allow_tdelta=True,
allow_obj='convert')
def test_nanstd(self):
self.check_funs_ddof(nanops.nanstd, np.std,
allow_complex=False,
allow_str=False,
allow_date=False,
allow_tdelta=True,
allow_obj='convert')
def test_nansem(self):
tm.skip_if_no_package('scipy.stats')
from scipy.stats import sem
self.check_funs_ddof(nanops.nansem, sem,
allow_complex=False,
allow_str=False,
allow_date=False,
allow_tdelta=True,
allow_obj='convert')
def _minmax_wrap(self, value, axis=None, func=None):
res = func(value, axis)
if res.dtype.kind == 'm':
res = np.atleast_1d(res)
return res
def test_nanmin(self):
func = partial(self._minmax_wrap, func=np.min)
self.check_funs(nanops.nanmin, func,
allow_str=False, allow_obj=False)
def test_nanmax(self):
func = partial(self._minmax_wrap, func=np.max)
self.check_funs(nanops.nanmax, func,
allow_str=False, allow_obj=False)
def _argminmax_wrap(self, value, axis=None, func=None):
res = func(value, axis)
nans = np.min(value, axis)
nullnan = isnull(nans)
if res.ndim:
res[nullnan] = -1
elif (hasattr(nullnan, 'all') and nullnan.all() or
not hasattr(nullnan, 'all') and nullnan):
res = -1
return res
def test_nanargmax(self):
func = partial(self._argminmax_wrap, func=np.argmax)
self.check_funs(nanops.nanargmax, func,
allow_str=False, allow_obj=False,
allow_date=True,
allow_tdelta=True)
def test_nanargmin(self):
func = partial(self._argminmax_wrap, func=np.argmin)
if tm.sys.version_info[0:2] == (2, 6):
self.check_funs(nanops.nanargmin, func,
allow_date=True,
allow_tdelta=True,
allow_str=False, allow_obj=False)
else:
self.check_funs(nanops.nanargmin, func,
allow_str=False, allow_obj=False)
def _skew_kurt_wrap(self, values, axis=None, func=None):
if not isinstance(values.dtype.type, np.floating):
values = values.astype('f8')
result = func(values, axis=axis, bias=False)
# fix for handling cases where all elements in an axis are the same
if isinstance(result, np.ndarray):
result[np.max(values, axis=axis) == np.min(values, axis=axis)] = 0
return result
elif np.max(values) == np.min(values):
return 0.
return result
def test_nanskew(self):
tm.skip_if_no_package('scipy.stats')
from scipy.stats import skew
func = partial(self._skew_kurt_wrap, func=skew)
self.check_funs(nanops.nanskew, func,
allow_complex=False, allow_str=False, allow_date=False, allow_tdelta=False)
def test_nankurt(self):
tm.skip_if_no_package('scipy.stats')
from scipy.stats import kurtosis
func1 = partial(kurtosis, fisher=True)
func = partial(self._skew_kurt_wrap, func=func1)
self.check_funs(nanops.nankurt, func,
allow_complex=False, allow_str=False, allow_date=False, allow_tdelta=False)
def test_nanprod(self):
self.check_funs(nanops.nanprod, np.prod,
allow_str=False, allow_date=False, allow_tdelta=False)
def check_nancorr_nancov_2d(self, checkfun, targ0, targ1, **kwargs):
res00 = checkfun(self.arr_float_2d, self.arr_float1_2d,
**kwargs)
res01 = checkfun(self.arr_float_2d, self.arr_float1_2d,
min_periods=len(self.arr_float_2d)-1,
**kwargs)
tm.assert_almost_equal(targ0, res00)
tm.assert_almost_equal(targ0, res01)
res10 = checkfun(self.arr_float_nan_2d, self.arr_float1_nan_2d,
**kwargs)
res11 = checkfun(self.arr_float_nan_2d, self.arr_float1_nan_2d,
min_periods=len(self.arr_float_2d)-1,
**kwargs)
tm.assert_almost_equal(targ1, res10)
tm.assert_almost_equal(targ1, res11)
targ2 = np.nan
res20 = checkfun(self.arr_nan_2d, self.arr_float1_2d,
**kwargs)
res21 = checkfun(self.arr_float_2d, self.arr_nan_2d,
**kwargs)
res22 = checkfun(self.arr_nan_2d, self.arr_nan_2d,
**kwargs)
res23 = checkfun(self.arr_float_nan_2d, self.arr_nan_float1_2d,
**kwargs)
res24 = checkfun(self.arr_float_nan_2d, self.arr_nan_float1_2d,
min_periods=len(self.arr_float_2d)-1,
**kwargs)
res25 = checkfun(self.arr_float_2d, self.arr_float1_2d,
min_periods=len(self.arr_float_2d)+1,
**kwargs)
tm.assert_almost_equal(targ2, res20)
tm.assert_almost_equal(targ2, res21)
tm.assert_almost_equal(targ2, res22)
tm.assert_almost_equal(targ2, res23)
tm.assert_almost_equal(targ2, res24)
tm.assert_almost_equal(targ2, res25)
def check_nancorr_nancov_1d(self, checkfun, targ0, targ1, **kwargs):
res00 = checkfun(self.arr_float_1d, self.arr_float1_1d,
**kwargs)
res01 = checkfun(self.arr_float_1d, self.arr_float1_1d,
min_periods=len(self.arr_float_1d)-1,
**kwargs)
tm.assert_almost_equal(targ0, res00)
tm.assert_almost_equal(targ0, res01)
res10 = checkfun(self.arr_float_nan_1d,
self.arr_float1_nan_1d,
**kwargs)
res11 = checkfun(self.arr_float_nan_1d,
self.arr_float1_nan_1d,
min_periods=len(self.arr_float_1d)-1,
**kwargs)
tm.assert_almost_equal(targ1, res10)
tm.assert_almost_equal(targ1, res11)
targ2 = np.nan
res20 = checkfun(self.arr_nan_1d, self.arr_float1_1d,
**kwargs)
res21 = checkfun(self.arr_float_1d, self.arr_nan_1d,
**kwargs)
res22 = checkfun(self.arr_nan_1d, self.arr_nan_1d,
**kwargs)
res23 = checkfun(self.arr_float_nan_1d,
self.arr_nan_float1_1d,
**kwargs)
res24 = checkfun(self.arr_float_nan_1d,
self.arr_nan_float1_1d,
min_periods=len(self.arr_float_1d)-1,
**kwargs)
res25 = checkfun(self.arr_float_1d,
self.arr_float1_1d,
min_periods=len(self.arr_float_1d)+1,
**kwargs)
tm.assert_almost_equal(targ2, res20)
tm.assert_almost_equal(targ2, res21)
tm.assert_almost_equal(targ2, res22)
tm.assert_almost_equal(targ2, res23)
tm.assert_almost_equal(targ2, res24)
tm.assert_almost_equal(targ2, res25)
def test_nancorr(self):
targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1]
targ1 = np.corrcoef(self.arr_float_2d.flat,
self.arr_float1_2d.flat)[0, 1]
self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1)
targ0 = np.corrcoef(self.arr_float_1d, self.arr_float1_1d)[0, 1]
targ1 = np.corrcoef(self.arr_float_1d.flat,
self.arr_float1_1d.flat)[0, 1]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1,
method='pearson')
def test_nancorr_pearson(self):
targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1]
targ1 = np.corrcoef(self.arr_float_2d.flat,
self.arr_float1_2d.flat)[0, 1]
self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1,
method='pearson')
targ0 = np.corrcoef(self.arr_float_1d, self.arr_float1_1d)[0, 1]
targ1 = np.corrcoef(self.arr_float_1d.flat,
self.arr_float1_1d.flat)[0, 1]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1,
method='pearson')
def test_nancorr_kendall(self):
tm.skip_if_no_package('scipy.stats')
from scipy.stats import kendalltau
targ0 = kendalltau(self.arr_float_2d, self.arr_float1_2d)[0]
targ1 = kendalltau(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0]
self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1,
method='kendall')
targ0 = kendalltau(self.arr_float_1d, self.arr_float1_1d)[0]
targ1 = kendalltau(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1,
method='kendall')
def test_nancorr_spearman(self):
tm.skip_if_no_package('scipy.stats')
from scipy.stats import spearmanr
targ0 = spearmanr(self.arr_float_2d, self.arr_float1_2d)[0]
targ1 = spearmanr(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0]
self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1,
method='spearman')
targ0 = spearmanr(self.arr_float_1d, self.arr_float1_1d)[0]
targ1 = spearmanr(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1,
method='spearman')
def test_nancov(self):
targ0 = np.cov(self.arr_float_2d, self.arr_float1_2d)[0, 1]
targ1 = np.cov(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1]
self.check_nancorr_nancov_2d(nanops.nancov, targ0, targ1)
targ0 = np.cov(self.arr_float_1d, self.arr_float1_1d)[0, 1]
targ1 = np.cov(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1]
self.check_nancorr_nancov_1d(nanops.nancov, targ0, targ1)
def check_nancomp(self, checkfun, targ0):
arr_float = self.arr_float
arr_float1 = self.arr_float1
arr_nan = self.arr_nan
arr_nan_nan = self.arr_nan_nan
arr_float_nan = self.arr_float_nan
arr_float1_nan = self.arr_float1_nan
arr_nan_float1 = self.arr_nan_float1
while targ0.ndim:
try:
res0 = checkfun(arr_float, arr_float1)
tm.assert_almost_equal(targ0, res0)
if targ0.ndim > 1:
targ1 = np.vstack([targ0, arr_nan])
else:
targ1 = np.hstack([targ0, arr_nan])
res1 = checkfun(arr_float_nan, arr_float1_nan)
tm.assert_almost_equal(targ1, res1)
targ2 = arr_nan_nan
res2 = checkfun(arr_float_nan, arr_nan_float1)
tm.assert_almost_equal(targ2, res2)
except Exception as exc:
exc.args += ('ndim: %s' % arr_float.ndim,)
raise
try:
arr_float = np.take(arr_float, 0, axis=-1)
arr_float1 = np.take(arr_float1, 0, axis=-1)
arr_nan = np.take(arr_nan, 0, axis=-1)
arr_nan_nan = np.take(arr_nan_nan, 0, axis=-1)
arr_float_nan = np.take(arr_float_nan, 0, axis=-1)
arr_float1_nan = np.take(arr_float1_nan, 0, axis=-1)
arr_nan_float1 = np.take(arr_nan_float1, 0, axis=-1)
targ0 = np.take(targ0, 0, axis=-1)
except ValueError:
break
def test_nangt(self):
targ0 = self.arr_float > self.arr_float1
self.check_nancomp(nanops.nangt, targ0)
def test_nange(self):
targ0 = self.arr_float >= self.arr_float1
self.check_nancomp(nanops.nange, targ0)
def test_nanlt(self):
targ0 = self.arr_float < self.arr_float1
self.check_nancomp(nanops.nanlt, targ0)
def test_nanle(self):
targ0 = self.arr_float <= self.arr_float1
self.check_nancomp(nanops.nanle, targ0)
def test_naneq(self):
targ0 = self.arr_float == self.arr_float1
self.check_nancomp(nanops.naneq, targ0)
def test_nanne(self):
targ0 = self.arr_float != self.arr_float1
self.check_nancomp(nanops.nanne, targ0)
def check_bool(self, func, value, correct, *args, **kwargs):
while getattr(value, 'ndim', True):
try:
res0 = func(value, *args, **kwargs)
if correct:
self.assertTrue(res0)
else:
self.assertFalse(res0)
except BaseException as exc:
exc.args += ('dim: %s' % getattr(value, 'ndim', value),)
raise
if not hasattr(value, 'ndim'):
break
try:
value = np.take(value, 0, axis=-1)
except ValueError:
break
def test__has_infs(self):
pairs = [('arr_complex', False),
('arr_int', False),
('arr_bool', False),
('arr_str', False),
('arr_utf', False),
('arr_complex', False),
('arr_complex_nan', False),
('arr_nan_nanj', False),
('arr_nan_infj', True),
('arr_complex_nan_infj', True)]
pairs_float = [('arr_float', False),
('arr_nan', False),
('arr_float_nan', False),
('arr_nan_nan', False),
('arr_float_inf', True),
('arr_inf', True),
('arr_nan_inf', True),
('arr_float_nan_inf', True),
('arr_nan_nan_inf', True)]
for arr, correct in pairs:
val = getattr(self, arr)
try:
self.check_bool(nanops._has_infs, val, correct)
except BaseException as exc:
exc.args += (arr,)
raise
for arr, correct in pairs_float:
val = getattr(self, arr)
try:
self.check_bool(nanops._has_infs, val, correct)
self.check_bool(nanops._has_infs, val.astype('f4'), correct)
self.check_bool(nanops._has_infs, val.astype('f2'), correct)
except BaseException as exc:
exc.args += (arr,)
raise
def test__isfinite(self):
pairs = [('arr_complex', False),
('arr_int', False),
('arr_bool', False),
('arr_str', False),
('arr_utf', False),
('arr_complex', False),
('arr_complex_nan', True),
('arr_nan_nanj', True),
('arr_nan_infj', True),
('arr_complex_nan_infj', True)]
pairs_float = [('arr_float', False),
('arr_nan', True),
('arr_float_nan', True),
('arr_nan_nan', True),
('arr_float_inf', True),
('arr_inf', True),
('arr_nan_inf', True),
('arr_float_nan_inf', True),
('arr_nan_nan_inf', True)]
func1 = lambda x: np.any(nanops._isfinite(x).ravel())
func2 = lambda x: np.any(nanops._isfinite(x).values.ravel())
for arr, correct in pairs:
val = getattr(self, arr)
try:
self.check_bool(func1, val, correct)
except BaseException as exc:
exc.args += (arr,)
raise
for arr, correct in pairs_float:
val = getattr(self, arr)
try:
self.check_bool(func1, val, correct)
self.check_bool(func1, val.astype('f4'), correct)
self.check_bool(func1, val.astype('f2'), correct)
except BaseException as exc:
exc.args += (arr,)
raise
def test__bn_ok_dtype(self):
self.assertTrue(nanops._bn_ok_dtype(self.arr_float.dtype, 'test'))
self.assertTrue(nanops._bn_ok_dtype(self.arr_complex.dtype, 'test'))
self.assertTrue(nanops._bn_ok_dtype(self.arr_int.dtype, 'test'))
self.assertTrue(nanops._bn_ok_dtype(self.arr_bool.dtype, 'test'))
self.assertTrue(nanops._bn_ok_dtype(self.arr_str.dtype, 'test'))
self.assertTrue(nanops._bn_ok_dtype(self.arr_utf.dtype, 'test'))
self.assertFalse(nanops._bn_ok_dtype(self.arr_date.dtype, 'test'))
self.assertFalse(nanops._bn_ok_dtype(self.arr_tdelta.dtype, 'test'))
self.assertFalse(nanops._bn_ok_dtype(self.arr_obj.dtype, 'test'))
class TestEnsureNumeric(tm.TestCase):
def test_numeric_values(self):
# Test integer
self.assertEqual(nanops._ensure_numeric(1), 1, 'Failed for int')
# Test float
self.assertEqual(nanops._ensure_numeric(1.1), 1.1, 'Failed for float')
# Test complex
self.assertEqual(nanops._ensure_numeric(1 + 2j), 1 + 2j,
'Failed for complex')
def test_ndarray(self):
# Test numeric ndarray
values = np.array([1, 2, 3])
self.assertTrue(np.allclose(nanops._ensure_numeric(values), values),
'Failed for numeric ndarray')
# Test object ndarray
o_values = values.astype(object)
self.assertTrue(np.allclose(nanops._ensure_numeric(o_values), values),
'Failed for object ndarray')
# Test convertible string ndarray
s_values = np.array(['1', '2', '3'], dtype=object)
self.assertTrue(np.allclose(nanops._ensure_numeric(s_values), values),
'Failed for convertible string ndarray')
# Test non-convertible string ndarray
s_values = np.array(['foo', 'bar', 'baz'], dtype=object)
self.assertRaises(ValueError,
lambda: nanops._ensure_numeric(s_values))
def test_convertable_values(self):
self.assertTrue(np.allclose(nanops._ensure_numeric('1'), 1.0),
'Failed for convertible integer string')
self.assertTrue(np.allclose(nanops._ensure_numeric('1.1'), 1.1),
'Failed for convertible float string')
self.assertTrue(np.allclose(nanops._ensure_numeric('1+1j'), 1 + 1j),
'Failed for convertible complex string')
def test_non_convertable_values(self):
self.assertRaises(TypeError,
lambda: nanops._ensure_numeric('foo'))
self.assertRaises(TypeError,
lambda: nanops._ensure_numeric({}))
self.assertRaises(TypeError,
lambda: nanops._ensure_numeric([]))
class TestNanvarFixedValues(tm.TestCase):
# xref GH10242
def setUp(self):
# Samples from a normal distribution.
self.variance = variance = 3.0
self.samples = self.prng.normal(scale=variance ** 0.5, size=100000)
def test_nanvar_all_finite(self):
samples = self.samples
actual_variance = nanops.nanvar(samples)
np.testing.assert_almost_equal(
actual_variance, self.variance, decimal=2)
def test_nanvar_nans(self):
samples = np.nan * np.ones(2 * self.samples.shape[0])
samples[::2] = self.samples
actual_variance = nanops.nanvar(samples, skipna=True)
np.testing.assert_almost_equal(
actual_variance, self.variance, decimal=2)
actual_variance = nanops.nanvar(samples, skipna=False)
np.testing.assert_almost_equal(
actual_variance, np.nan, decimal=2)
def test_nanstd_nans(self):
samples = np.nan * np.ones(2 * self.samples.shape[0])
samples[::2] = self.samples
actual_std = nanops.nanstd(samples, skipna=True)
np.testing.assert_almost_equal(
actual_std, self.variance ** 0.5, decimal=2)
actual_std = nanops.nanvar(samples, skipna=False)
np.testing.assert_almost_equal(
actual_std, np.nan, decimal=2)
def test_nanvar_axis(self):
# Generate some sample data.
samples_norm = self.samples
samples_unif = self.prng.uniform(size=samples_norm.shape[0])
samples = np.vstack([samples_norm, samples_unif])
actual_variance = nanops.nanvar(samples, axis=1)
np.testing.assert_array_almost_equal(
actual_variance, np.array([self.variance, 1.0 / 12]), decimal=2)
def test_nanvar_ddof(self):
n = 5
samples = self.prng.uniform(size=(10000, n+1))
samples[:, -1] = np.nan # Force use of our own algorithm.
variance_0 = nanops.nanvar(samples, axis=1, skipna=True, ddof=0).mean()
variance_1 = nanops.nanvar(samples, axis=1, skipna=True, ddof=1).mean()
variance_2 = nanops.nanvar(samples, axis=1, skipna=True, ddof=2).mean()
# The unbiased estimate.
var = 1.0 / 12
np.testing.assert_almost_equal(variance_1, var, decimal=2)
# The underestimated variance.
np.testing.assert_almost_equal(
variance_0, (n - 1.0) / n * var, decimal=2)
# The overestimated variance.
np.testing.assert_almost_equal(
variance_2, (n - 1.0) / (n - 2.0) * var, decimal=2)
def test_ground_truth(self):
# Test against values that were precomputed with Numpy.
samples = np.empty((4, 4))
samples[:3, :3] = np.array([[0.97303362, 0.21869576, 0.55560287],
[0.72980153, 0.03109364, 0.99155171],
[0.09317602, 0.60078248, 0.15871292]])
samples[3] = samples[:, 3] = np.nan
# Actual variances along axis=0, 1 for ddof=0, 1, 2
variance = np.array(
[[[0.13762259, 0.05619224, 0.11568816],
[0.20643388, 0.08428837, 0.17353224],
[0.41286776, 0.16857673, 0.34706449]],
[[0.09519783, 0.16435395, 0.05082054],
[0.14279674, 0.24653093, 0.07623082],
[0.28559348, 0.49306186, 0.15246163]]]
)
# Test nanvar.
for axis in range(2):
for ddof in range(3):
var = nanops.nanvar(samples, skipna=True, axis=axis, ddof=ddof)
np.testing.assert_array_almost_equal(
var[:3], variance[axis, ddof]
)
np.testing.assert_equal(var[3], np.nan)
# Test nanstd.
for axis in range(2):
for ddof in range(3):
std = nanops.nanstd(samples, skipna=True, axis=axis, ddof=ddof)
np.testing.assert_array_almost_equal(
std[:3], variance[axis, ddof] ** 0.5
)
np.testing.assert_equal(std[3], np.nan)
def test_nanstd_roundoff(self):
# Regression test for GH 10242 (test data taken from GH 10489). Ensure
# that variance is stable.
data = Series(766897346 * np.ones(10))
for ddof in range(3):
result = data.std(ddof=ddof)
self.assertEqual(result, 0.0)
@property
def prng(self):
return np.random.RandomState(1234)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure',
'-s'], exit=False)
| mit |
FluidityStokes/fluidity | tests/wetting_and_drying_balzano3_cg_parallel/plotfs_detec.py | 2 | 5094 | #!/usr/bin/env python3
import vtktools
import sys
import math
import re
import matplotlib.pyplot as plt
import getopt
from scipy.special import erf
from numpy import poly1d
from matplotlib.pyplot import figure, show
from numpy import pi, sin, linspace
from matplotlib.mlab import stineman_interp
from numpy import exp, cos
from fluidity_tools import stat_parser
def mirror(x):
return 13800-x
def usage():
print('Usage:')
print('plotfs_detec.py --file=detector_filename --save=filename')
print('--save=... saves the plots as images instead of plotting them on the screen.')
# should be copied from the diamond extrude function. X is 2 dimensional
def bathymetry_function(X):
if X<=3600 or X>6000:
return -X/2760
elif X>3600 and X<=4800:
return X/2760-60.0/23
elif X>4800 and X<=6000:
return -X/920+100.0/23
################# Main ###########################
def main(argv=None):
filename=''
timestep_ana=0.0
dzero=0.01
save='' # If nonempty, we save the plots as images instead if showing them
wetting=False
try:
opts, args = getopt.getopt(sys.argv[1:], "", ['file=','save='])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt == '--file':
filename=arg
elif opt == '--save':
save=arg
if filename=='':
print('No filename specified. You have to give the detectors filename.')
usage()
sys.exit(2)
####################### Print time plot ###########################
print('Generating time plot')
s = stat_parser(filename)
timesteps=s["ElapsedTime"]["value"]
timestep=timesteps[1]-timesteps[0]
print("Found ", len(timesteps), " timesteps with dt=", timestep)
if timestep_ana==0.0:
timestep_ana=timestep
fs=s["water"]["FreeSurface"]
print("Found ", len(fs), " detectors. We assume they are equidistant distributed over the domain (", 0, "-", 13800, ").")
# Get and plot results
plt.ion() # swith on interactive mode
plt.rcParams['font.size'] = 22
fig2 = figure(figsize=(8, 6.2))
fig2.subplots_adjust(left=0.15, bottom=0.15)
ax2 = fig2.add_subplot(111)
plot_start=580 # in timesteps
plot_end=581 # in timesteps
plot_name=''
for t in range(0,len(timesteps)):
# ignore the first waveperiod
if t<plot_start:
continue
if t>plot_end:
continue
fsvalues=[]
xcoords=[]
for name, item in fs.iteritems():
#print name
xcoords.append(mirror(s[name]['position'][0][0]))
#print xcoord
fsvalues.append(fs[name][t])
# Plot result of one timestep
ax2.plot(xcoords,fsvalues,'b,', label='Numerical solution')
# Plot Analytical solution
fsvalues_ana=[]
offset=-bathymetry_function(0.0)+dzero
xcoords.sort()
for x in xcoords:
fsvalues_ana.append(bathymetry_function(mirror(x))-offset)
# Plot vertical line in bathmetry on right boundary
xcoords.append(xcoords[len(xcoords)-1]+0.000000001)
fsvalues_ana.append(2.1)
ax2.plot(xcoords, fsvalues_ana, 'k', label='Bathymetry', linewidth=2.5)
#plt.legend()
if t==plot_end:
# change from meters in kilometers in the x-axis
# return locs, labels where locs is an array of tick locations and
# labels is an array of tick labels.
locs, labels = plt.xticks()
for i in range(0,len(locs)):
labels[i]=str(locs[i]/1000)
plt.xticks(locs, labels)
plt.ylim(-2.2,1.4)
# plt.title(plot_name)
plt.xlabel('Position [km]')
plt.ylabel('Free surface [m]')
if save=='':
plt.draw()
raw_input("Please press Enter")
else:
plt.savefig(save+'.pdf', facecolor='white', edgecolor='black', dpi=100)
plt.cla()
t=t+1
# Make video from the images:
# mencoder "mf://*.png" -mf type=png:fps=30 -ovc lavc -o output.avi
if __name__ == "__main__":
main()
| lgpl-2.1 |
jreback/pandas | pandas/tests/scalar/timestamp/test_unary_ops.py | 1 | 15498 | from datetime import datetime
from dateutil.tz import gettz
import pytest
import pytz
from pytz import utc
from pandas._libs.tslibs import NaT, Timestamp, conversion, to_offset
from pandas._libs.tslibs.period import INVALID_FREQ_ERR_MSG
import pandas.util._test_decorators as td
import pandas._testing as tm
class TestTimestampUnaryOps:
# --------------------------------------------------------------
# Timestamp.round
@pytest.mark.parametrize(
"timestamp, freq, expected",
[
("20130101 09:10:11", "D", "20130101"),
("20130101 19:10:11", "D", "20130102"),
("20130201 12:00:00", "D", "20130202"),
("20130104 12:00:00", "D", "20130105"),
("2000-01-05 05:09:15.13", "D", "2000-01-05 00:00:00"),
("2000-01-05 05:09:15.13", "H", "2000-01-05 05:00:00"),
("2000-01-05 05:09:15.13", "S", "2000-01-05 05:09:15"),
],
)
def test_round_frequencies(self, timestamp, freq, expected):
dt = Timestamp(timestamp)
result = dt.round(freq)
expected = Timestamp(expected)
assert result == expected
def test_round_tzaware(self):
dt = Timestamp("20130101 09:10:11", tz="US/Eastern")
result = dt.round("D")
expected = Timestamp("20130101", tz="US/Eastern")
assert result == expected
dt = Timestamp("20130101 09:10:11", tz="US/Eastern")
result = dt.round("s")
assert result == dt
def test_round_30min(self):
# round
dt = Timestamp("20130104 12:32:00")
result = dt.round("30Min")
expected = Timestamp("20130104 12:30:00")
assert result == expected
def test_round_subsecond(self):
# GH#14440 & GH#15578
result = Timestamp("2016-10-17 12:00:00.0015").round("ms")
expected = Timestamp("2016-10-17 12:00:00.002000")
assert result == expected
result = Timestamp("2016-10-17 12:00:00.00149").round("ms")
expected = Timestamp("2016-10-17 12:00:00.001000")
assert result == expected
ts = Timestamp("2016-10-17 12:00:00.0015")
for freq in ["us", "ns"]:
assert ts == ts.round(freq)
result = Timestamp("2016-10-17 12:00:00.001501031").round("10ns")
expected = Timestamp("2016-10-17 12:00:00.001501030")
assert result == expected
def test_round_nonstandard_freq(self):
with tm.assert_produces_warning(False):
Timestamp("2016-10-17 12:00:00.001501031").round("1010ns")
def test_round_invalid_arg(self):
stamp = Timestamp("2000-01-05 05:09:15.13")
with pytest.raises(ValueError, match=INVALID_FREQ_ERR_MSG):
stamp.round("foo")
@pytest.mark.parametrize(
"test_input, rounder, freq, expected",
[
("2117-01-01 00:00:45", "floor", "15s", "2117-01-01 00:00:45"),
("2117-01-01 00:00:45", "ceil", "15s", "2117-01-01 00:00:45"),
(
"2117-01-01 00:00:45.000000012",
"floor",
"10ns",
"2117-01-01 00:00:45.000000010",
),
(
"1823-01-01 00:00:01.000000012",
"ceil",
"10ns",
"1823-01-01 00:00:01.000000020",
),
("1823-01-01 00:00:01", "floor", "1s", "1823-01-01 00:00:01"),
("1823-01-01 00:00:01", "ceil", "1s", "1823-01-01 00:00:01"),
("NaT", "floor", "1s", "NaT"),
("NaT", "ceil", "1s", "NaT"),
],
)
def test_ceil_floor_edge(self, test_input, rounder, freq, expected):
dt = Timestamp(test_input)
func = getattr(dt, rounder)
result = func(freq)
if dt is NaT:
assert result is NaT
else:
expected = Timestamp(expected)
assert result == expected
@pytest.mark.parametrize(
"test_input, freq, expected",
[
("2018-01-01 00:02:06", "2s", "2018-01-01 00:02:06"),
("2018-01-01 00:02:00", "2T", "2018-01-01 00:02:00"),
("2018-01-01 00:04:00", "4T", "2018-01-01 00:04:00"),
("2018-01-01 00:15:00", "15T", "2018-01-01 00:15:00"),
("2018-01-01 00:20:00", "20T", "2018-01-01 00:20:00"),
("2018-01-01 03:00:00", "3H", "2018-01-01 03:00:00"),
],
)
@pytest.mark.parametrize("rounder", ["ceil", "floor", "round"])
def test_round_minute_freq(self, test_input, freq, expected, rounder):
# Ensure timestamps that shouldn't round don't!
# GH#21262
dt = Timestamp(test_input)
expected = Timestamp(expected)
func = getattr(dt, rounder)
result = func(freq)
assert result == expected
def test_ceil(self):
dt = Timestamp("20130101 09:10:11")
result = dt.ceil("D")
expected = Timestamp("20130102")
assert result == expected
def test_floor(self):
dt = Timestamp("20130101 09:10:11")
result = dt.floor("D")
expected = Timestamp("20130101")
assert result == expected
@pytest.mark.parametrize("method", ["ceil", "round", "floor"])
def test_round_dst_border_ambiguous(self, method):
# GH 18946 round near "fall back" DST
ts = Timestamp("2017-10-29 00:00:00", tz="UTC").tz_convert("Europe/Madrid")
#
result = getattr(ts, method)("H", ambiguous=True)
assert result == ts
result = getattr(ts, method)("H", ambiguous=False)
expected = Timestamp("2017-10-29 01:00:00", tz="UTC").tz_convert(
"Europe/Madrid"
)
assert result == expected
result = getattr(ts, method)("H", ambiguous="NaT")
assert result is NaT
msg = "Cannot infer dst time"
with pytest.raises(pytz.AmbiguousTimeError, match=msg):
getattr(ts, method)("H", ambiguous="raise")
@pytest.mark.parametrize(
"method, ts_str, freq",
[
["ceil", "2018-03-11 01:59:00-0600", "5min"],
["round", "2018-03-11 01:59:00-0600", "5min"],
["floor", "2018-03-11 03:01:00-0500", "2H"],
],
)
def test_round_dst_border_nonexistent(self, method, ts_str, freq):
# GH 23324 round near "spring forward" DST
ts = Timestamp(ts_str, tz="America/Chicago")
result = getattr(ts, method)(freq, nonexistent="shift_forward")
expected = Timestamp("2018-03-11 03:00:00", tz="America/Chicago")
assert result == expected
result = getattr(ts, method)(freq, nonexistent="NaT")
assert result is NaT
msg = "2018-03-11 02:00:00"
with pytest.raises(pytz.NonExistentTimeError, match=msg):
getattr(ts, method)(freq, nonexistent="raise")
@pytest.mark.parametrize(
"timestamp",
[
"2018-01-01 0:0:0.124999360",
"2018-01-01 0:0:0.125000367",
"2018-01-01 0:0:0.125500",
"2018-01-01 0:0:0.126500",
"2018-01-01 12:00:00",
"2019-01-01 12:00:00",
],
)
@pytest.mark.parametrize(
"freq",
[
"2ns",
"3ns",
"4ns",
"5ns",
"6ns",
"7ns",
"250ns",
"500ns",
"750ns",
"1us",
"19us",
"250us",
"500us",
"750us",
"1s",
"2s",
"3s",
"1D",
],
)
def test_round_int64(self, timestamp, freq):
# check that all rounding modes are accurate to int64 precision
# see GH#22591
dt = Timestamp(timestamp)
unit = to_offset(freq).nanos
# test floor
result = dt.floor(freq)
assert result.value % unit == 0, f"floor not a {freq} multiple"
assert 0 <= dt.value - result.value < unit, "floor error"
# test ceil
result = dt.ceil(freq)
assert result.value % unit == 0, f"ceil not a {freq} multiple"
assert 0 <= result.value - dt.value < unit, "ceil error"
# test round
result = dt.round(freq)
assert result.value % unit == 0, f"round not a {freq} multiple"
assert abs(result.value - dt.value) <= unit // 2, "round error"
if unit % 2 == 0 and abs(result.value - dt.value) == unit // 2:
# round half to even
assert result.value // unit % 2 == 0, "round half to even error"
# --------------------------------------------------------------
# Timestamp.replace
def test_replace_naive(self):
# GH#14621, GH#7825
ts = Timestamp("2016-01-01 09:00:00")
result = ts.replace(hour=0)
expected = Timestamp("2016-01-01 00:00:00")
assert result == expected
def test_replace_aware(self, tz_aware_fixture):
tz = tz_aware_fixture
# GH#14621, GH#7825
# replacing datetime components with and w/o presence of a timezone
ts = Timestamp("2016-01-01 09:00:00", tz=tz)
result = ts.replace(hour=0)
expected = Timestamp("2016-01-01 00:00:00", tz=tz)
assert result == expected
def test_replace_preserves_nanos(self, tz_aware_fixture):
tz = tz_aware_fixture
# GH#14621, GH#7825
ts = Timestamp("2016-01-01 09:00:00.000000123", tz=tz)
result = ts.replace(hour=0)
expected = Timestamp("2016-01-01 00:00:00.000000123", tz=tz)
assert result == expected
def test_replace_multiple(self, tz_aware_fixture):
tz = tz_aware_fixture
# GH#14621, GH#7825
# replacing datetime components with and w/o presence of a timezone
# test all
ts = Timestamp("2016-01-01 09:00:00.000000123", tz=tz)
result = ts.replace(
year=2015,
month=2,
day=2,
hour=0,
minute=5,
second=5,
microsecond=5,
nanosecond=5,
)
expected = Timestamp("2015-02-02 00:05:05.000005005", tz=tz)
assert result == expected
def test_replace_invalid_kwarg(self, tz_aware_fixture):
tz = tz_aware_fixture
# GH#14621, GH#7825
ts = Timestamp("2016-01-01 09:00:00.000000123", tz=tz)
msg = r"replace\(\) got an unexpected keyword argument"
with pytest.raises(TypeError, match=msg):
ts.replace(foo=5)
def test_replace_integer_args(self, tz_aware_fixture):
tz = tz_aware_fixture
# GH#14621, GH#7825
ts = Timestamp("2016-01-01 09:00:00.000000123", tz=tz)
msg = "value must be an integer, received <class 'float'> for hour"
with pytest.raises(ValueError, match=msg):
ts.replace(hour=0.1)
def test_replace_tzinfo_equiv_tz_localize_none(self):
# GH#14621, GH#7825
# assert conversion to naive is the same as replacing tzinfo with None
ts = Timestamp("2013-11-03 01:59:59.999999-0400", tz="US/Eastern")
assert ts.tz_localize(None) == ts.replace(tzinfo=None)
@td.skip_if_windows
def test_replace_tzinfo(self):
# GH#15683
dt = datetime(2016, 3, 27, 1)
tzinfo = pytz.timezone("CET").localize(dt, is_dst=False).tzinfo
result_dt = dt.replace(tzinfo=tzinfo)
result_pd = Timestamp(dt).replace(tzinfo=tzinfo)
# datetime.timestamp() converts in the local timezone
with tm.set_timezone("UTC"):
assert result_dt.timestamp() == result_pd.timestamp()
assert result_dt == result_pd
assert result_dt == result_pd.to_pydatetime()
result_dt = dt.replace(tzinfo=tzinfo).replace(tzinfo=None)
result_pd = Timestamp(dt).replace(tzinfo=tzinfo).replace(tzinfo=None)
# datetime.timestamp() converts in the local timezone
with tm.set_timezone("UTC"):
assert result_dt.timestamp() == result_pd.timestamp()
assert result_dt == result_pd
assert result_dt == result_pd.to_pydatetime()
@pytest.mark.parametrize(
"tz, normalize",
[
(pytz.timezone("US/Eastern"), lambda x: x.tzinfo.normalize(x)),
(gettz("US/Eastern"), lambda x: x),
],
)
def test_replace_across_dst(self, tz, normalize):
# GH#18319 check that 1) timezone is correctly normalized and
# 2) that hour is not incorrectly changed by this normalization
ts_naive = Timestamp("2017-12-03 16:03:30")
ts_aware = conversion.localize_pydatetime(ts_naive, tz)
# Preliminary sanity-check
assert ts_aware == normalize(ts_aware)
# Replace across DST boundary
ts2 = ts_aware.replace(month=6)
# Check that `replace` preserves hour literal
assert (ts2.hour, ts2.minute) == (ts_aware.hour, ts_aware.minute)
# Check that post-replace object is appropriately normalized
ts2b = normalize(ts2)
assert ts2 == ts2b
def test_replace_dst_border(self):
# Gh 7825
t = Timestamp("2013-11-3", tz="America/Chicago")
result = t.replace(hour=3)
expected = Timestamp("2013-11-3 03:00:00", tz="America/Chicago")
assert result == expected
@pytest.mark.parametrize("fold", [0, 1])
@pytest.mark.parametrize("tz", ["dateutil/Europe/London", "Europe/London"])
def test_replace_dst_fold(self, fold, tz):
# GH 25017
d = datetime(2019, 10, 27, 2, 30)
ts = Timestamp(d, tz=tz)
result = ts.replace(hour=1, fold=fold)
expected = Timestamp(datetime(2019, 10, 27, 1, 30)).tz_localize(
tz, ambiguous=not fold
)
assert result == expected
# --------------------------------------------------------------
# Timestamp.normalize
@pytest.mark.parametrize("arg", ["2013-11-30", "2013-11-30 12:00:00"])
def test_normalize(self, tz_naive_fixture, arg):
tz = tz_naive_fixture
ts = Timestamp(arg, tz=tz)
result = ts.normalize()
expected = Timestamp("2013-11-30", tz=tz)
assert result == expected
def test_normalize_pre_epoch_dates(self):
# GH: 36294
result = Timestamp("1969-01-01 09:00:00").normalize()
expected = Timestamp("1969-01-01 00:00:00")
assert result == expected
# --------------------------------------------------------------
@td.skip_if_windows
def test_timestamp(self):
# GH#17329
# tz-naive --> treat it as if it were UTC for purposes of timestamp()
ts = Timestamp.now()
uts = ts.replace(tzinfo=utc)
assert ts.timestamp() == uts.timestamp()
tsc = Timestamp("2014-10-11 11:00:01.12345678", tz="US/Central")
utsc = tsc.tz_convert("UTC")
# utsc is a different representation of the same time
assert tsc.timestamp() == utsc.timestamp()
# datetime.timestamp() converts in the local timezone
with tm.set_timezone("UTC"):
# should agree with datetime.timestamp method
dt = ts.to_pydatetime()
assert dt.timestamp() == ts.timestamp()
@pytest.mark.parametrize("fold", [0, 1])
def test_replace_preserves_fold(fold):
# GH 37610. Check that replace preserves Timestamp fold property
tz = gettz("Europe/Moscow")
ts = Timestamp(year=2009, month=10, day=25, hour=2, minute=30, fold=fold, tzinfo=tz)
ts_replaced = ts.replace(second=1)
assert ts_replaced.fold == fold
| bsd-3-clause |
squaregoldfish/QuinCe | external_scripts/NRT/Saildrone_conversion/saildrone_module/api.py | 2 | 4650 | ###############################################################################
### FUNCTIONS WHICH SEND REQUESTS TO THE SAILDRONE API ###
###############################################################################
### Description:
# Several requests are sent to the Saildrone API:
# - request a token (function 'auth')
# - request a list of what we can access (function 'get_available')
# - request to download data (function 'write_json')
# This module contains functions executing such requests.
#------------------------------------------------------------------------------
import json
import urllib.request
from urllib.error import HTTPError
import os
import pandas as pd
from datetime import datetime
REQUEST_HEADER = {'Content-Type':'application/json', 'Accept':'application/json'}
# Function which converts html request output to a dictionary
def to_dict(url):
response = None
# urlopen throws an error if the HTTP status is not 200.
# The error is still a response object, so we can grab it -
# we need to examine it either way
try:
response = urllib.request.urlopen(url)
except HTTPError as e:
response = e
# Get the response body as a dictionary
dictionary = json.loads(response.read().decode('utf-8'))
error = False
if response.status == 400:
if dictionary['message'] != "Request out of time bound":
error = True
elif response.status >= 400:
error = True
if error:
# The response is an error, so we can simply raise it
raise response
return dictionary
# Function which returns the token needed for authentication
def auth(authentication):
# Define the authentication request url
auth_url = 'https://developer-mission.saildrone.com/v1/auth'
# Define our data
our_data = json.dumps({'key':authentication['key'],
'secret':authentication['secret']}).encode()
# Send the request
auth_request = urllib.request.Request(
url=auth_url, headers=REQUEST_HEADER,
data=our_data, method='POST')
# Convert the response to a dictionary. Extract and return the token
auth_response_dict = to_dict(auth_request)
token = auth_response_dict['token']
return token
# Function returning a list of what's available from the Saildrone API
def get_available(token):
# Define the url for requesting what's available
check_available_url = 'https://developer-mission.saildrone.com/v1/auth/'\
+ 'access?token=' + token
# Send the request
check_available_request = urllib.request.Request(
check_available_url, method='GET')
# Convert the output to a dictionary. Extract and return the access list.
available_dict = to_dict(check_available_request)
data = available_dict['data']
access_list = data['access']
return access_list
# Function which requests data download. It returns the path to the downloaded
# JSON file.
def write_json(data_dir, drone_id, dataset, start, end, token):
# Since we can only receive 1000 records per download request we need to
# keep requesting (while loop) until we do not receive any data
more_to_request = True
data_list_concat = []
offset = 0
while (more_to_request is True):
# Define the download request URL
get_data_url = 'https://developer-mission.saildrone.com/v1/timeseries/'\
+ f'{drone_id}?data_set={dataset}&interval=1&start_date={start}&end_date='\
+ f'{end}&order_by=asc&limit=1000&offset={offset}&token={token}'
#print(get_data_url)
# Send request
data_request = urllib.request.Request(
get_data_url, headers=REQUEST_HEADER, method='GET')
# Store output from request in dictionary
data_dict = to_dict(data_request)
# Continue adding new data to the concatenated data list until
# we receive fewer than 10 records. (Because the data is being updated
# constantly, we can get into odd loops where we're chasing one new record
# every time.)
#
# Otherwise, the request offset is advanced by the number of records just
# received before the next request is issued.
#print('Received ' + str(len(data_dict['data'])))
if len(data_dict['data']) < 10:
more_to_request = False
else:
data_list_concat = data_list_concat + data_dict['data']
offset = offset + len(data_dict['data'])
#print('Total length ' + str(len(data_list_concat)))
# Replace the data section of the last json file received with the
# concatenated data list
data_dict['data'] = data_list_concat
# Write the dictionary to a json file
output_file_name = str(drone_id) + '_' + dataset + '.json'
output_file_path = os.path.join(data_dir, output_file_name)
with open(output_file_path, 'w') as outfile:
json.dump(data_dict, outfile,
sort_keys=True, indent=4, separators=(',',': '))
return output_file_path
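# Minimal end-to-end sketch of the request flow described at the top of this
# module (token -> access list -> data download). The key/secret values, the
# drone id, the dataset name and the date strings below are placeholders, not
# real inputs.
if __name__ == '__main__':
    example_auth = {'key': 'YOUR_KEY', 'secret': 'YOUR_SECRET'}
    example_token = auth(example_auth)
    print(get_available(example_token))
    json_path = write_json(
        data_dir='.', drone_id=1021, dataset='oceanographic',
        start='2020-01-01T00:00:00.000Z', end='2020-01-02T00:00:00.000Z',
        token=example_token)
    print('Data written to ' + json_path)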
| gpl-3.0 |
znreza/supervised_classification | findBestCgamma.py | 1 | 2632 | import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn import svm
from sklearn.utils import shuffle
from sklearn.model_selection import KFold
from sklearn.metrics import roc_curve, auc, mean_squared_error,accuracy_score
from scipy import interp
import random
import matplotlib.pyplot as plt
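# Overview of this script: an RBF-kernel SVM is trained for each (C, gamma)
# pair on a fixed train/test split, the test MSE of each pair is recorded, and
# ROC curves are then plotted for the lowest-MSE pair plus two other (C, gamma)
# picks for comparison.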
data = pd.read_csv('twogaussians.csv',header=None)
#data = pd.read_csv('twospirals.csv',header=None)
#data = pd.read_csv('halfkernel.csv',header=None)
#data = pd.read_csv('clusterincluster.csv',header=None)
data.columns = ['a','b','class']
#random.shuffle(data)
data = shuffle(data)
X = np.array(data.drop(['class'],1))
X = preprocessing.scale(X)
Y = np.array(data['class'])
for c in range(len(Y)):
if(Y[c] == 1):
Y[c] = 0
else : Y[c] = 1
#random.shuffle(zip(X,Y))
C_range = np.logspace(-2, 10, 5)
gamma_range = np.logspace(-9, 3, 5)
degree = np.arange(2,11,1)
fpv = []
tprs = []
aucs = []
mse = []
lowest_mse = []
mean_fpr = np.linspace(0, 1, 100)
classifiers = []
acc = []
X_train = X[:-100]
Y_train = Y[:-100]
X_test = X[-100:]
Y_test = Y[-100:]
for C in C_range:
for gamma in gamma_range:
clf = svm.SVC(kernel='rbf',C=C, gamma=gamma,probability=True)
clf.fit(X_train, Y_train)
prediction = clf.predict(X_test)
mse.append((mean_squared_error(Y_test, prediction),C,gamma))
prediction = clf.predict(X_test)
acc.append(accuracy_score(Y_test, prediction))
probas_ = clf.predict_proba(X_test)
# Compute ROC curve and area under the curve
lowest_mse.append(min(mse))
lowest_mse.append(mse[3])
lowest_mse.append(mse[20])
for i in range(len(lowest_mse)):
clf = svm.SVC(kernel='rbf',C=lowest_mse[i][1],
gamma=lowest_mse[i][2],probability=True)
clf.fit(X_train, Y_train)
probas_ = clf.predict_proba(X_test)
fpr, tpr, thresholds = roc_curve(Y_test, probas_[:, 1])
#tprs.append(interp(mean_fpr, fpr, tpr))
#tprs[-1][0] = 0.0
roc_auc = auc(fpr, tpr)
aucs.append(roc_auc)
plt.plot(fpr, tpr, lw=1, alpha=0.8,
label='C = %f gamma = %f (AUC = %0.2f)' %(lowest_mse[i][1],lowest_mse[i][2],roc_auc))
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
label='Random', alpha=.8)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC curve and AUC for C and gamma')
plt.legend(loc="lower right")
plt.show()
print(min(mse))
| gpl-3.0 |
mediagit2016/workcamp-maschinelles-lernen-grundlagen | 17-12-11-workcamp-ml/mglearn/plot_knn_regression.py | 4 | 1285 | import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsRegressor
from sklearn.metrics import euclidean_distances
from .datasets import make_wave
from .plot_helpers import cm3
def plot_knn_regression(n_neighbors=1):
X, y = make_wave(n_samples=40)
X_test = np.array([[-1.5], [0.9], [1.5]])
dist = euclidean_distances(X, X_test)
closest = np.argsort(dist, axis=0)
plt.figure(figsize=(10, 6))
reg = KNeighborsRegressor(n_neighbors=n_neighbors).fit(X, y)
y_pred = reg.predict(X_test)
for x, y_, neighbors in zip(X_test, y_pred, closest.T):
for neighbor in neighbors[:n_neighbors]:
plt.arrow(x[0], y_, X[neighbor, 0] - x[0], y[neighbor] - y_,
head_width=0, fc='k', ec='k')
train, = plt.plot(X, y, 'o', c=cm3(0))
test, = plt.plot(X_test, -3 * np.ones(len(X_test)), '*', c=cm3(2),
markersize=20)
pred, = plt.plot(X_test, y_pred, '*', c=cm3(0), markersize=20)
plt.vlines(X_test, -3.1, 3.1, linestyle="--")
plt.legend([train, test, pred],
["training data/target", "test data", "test prediction"],
ncol=3, loc=(.1, 1.025))
plt.ylim(-3.1, 3.1)
plt.xlabel("Feature")
plt.ylabel("Target")
| gpl-3.0 |
marcusrehm/data-compare | datacompare/__init__.py | 1 | 1929 | __all__ = ['compare_datasets']
import pandas as pd
import numpy as np
SUFFIX_DF1 = '_df1'
SUFFIX_DF2 = '_df2'
def equals_condition(df1_columns_to_compare, df2_columns_to_compare):
condition = []
for col_x, col_y in zip(df1_columns_to_compare, df2_columns_to_compare):
condition.append(col_x + ' == ' + col_y)
return ' and '.join(condition)
def not_exists_condition(df_columns_to_compare):
condition = []
for col in df_columns_to_compare:
condition.append(col)
return ' + '.join(condition) + ' != ' + ' + '.join(condition)
def compare_datasets(df1, df2, df1_keys, df2_keys,
df1_columns_to_compare=None,
df2_columns_to_compare=None):
if not df1_columns_to_compare:
df1_columns_to_compare = list(column for column in df1.columns.difference(df1_keys))
if not df2_columns_to_compare:
df2_columns_to_compare = list(column for column in df2.columns.difference(df2_keys))
for column in df1_columns_to_compare:
if column in df2_columns_to_compare:
df1_columns_to_compare[df1_columns_to_compare.index(column)] = column + SUFFIX_DF1
df2_columns_to_compare[df2_columns_to_compare.index(column)] = column + SUFFIX_DF2
df_result = pd.merge(df1, df2, how='outer', indicator=True,
suffixes=(SUFFIX_DF1, SUFFIX_DF2),
left_on=df1_keys, right_on=df2_keys)
df_result.eval('equals = ' + equals_condition(df1_columns_to_compare,
df2_columns_to_compare),
inplace=True)
df_result['_merge'] = np.where(df_result['equals'],
'equals',
df_result['_merge'])
df_result.drop(labels='equals', axis=1, inplace=True)
df_result.rename(columns={'_merge': 'result'}, inplace=True)
return df_result
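# Minimal usage sketch (the frames and column names here are made up): two
# frames keyed on 'id' are compared on their shared 'value' column, and each
# row of the output is labelled 'equals', 'both', 'left_only' or 'right_only'.
if __name__ == '__main__':
    left = pd.DataFrame({'id': [1, 2, 3], 'value': [10, 20, 30]})
    right = pd.DataFrame({'id': [1, 2, 4], 'value': [10, 25, 40]})
    print(compare_datasets(left, right, df1_keys=['id'], df2_keys=['id']))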
| mit |
jungla/ICOM-fluidity-toolbox | Detectors/plot_FSLE_v.py | 1 | 2388 | #!~/python
import fluidity_tools
import matplotlib as mpl
mpl.use('ps')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import myfun
import numpy as np
import pyvtk
import vtktools
import copy
import os
exp = 'r_3k_B_1F0_r'
filename = './ring_checkpoint.detectors'
filename2 = '/tamay2/mensa/fluidity/'+exp+'/ring_30.pvtu'
data = vtktools.vtu(filename2)
coords = data.GetLocations()
depths = sorted(list(set(coords[:,2])))
Xlist = np.arange(-100000,100000+10000,10000)# x co-ordinates of the desired array shape
Ylist = np.arange(0,1)*0.0
Zlist = np.arange(-10,-900,-10)# y co-ordinates of the desired array shape
[X,Y,Z] = myfun.meshgrid2(Xlist,Ylist,Zlist)
Y = np.reshape(Y,(np.size(Y),))
X = np.reshape(X,(np.size(X),))
Z = np.reshape(Z,(np.size(Z),))
pts = zip(X,Y,Z)
pts = vtktools.arr(pts)
R = data.ProbeData(pts, 'Density_CG')
rho = np.reshape(R,[len(Zlist),len(Ylist),len(Xlist)])
try: os.stat('./plot/'+exp)
except OSError: os.mkdir('./plot/'+exp)
print 'reading detectors'
det = fluidity_tools.stat_parser(filename)
keys = det.keys() # particles
print 'done.'
tt = 1200
pt = 5896
step = 1
z = range(-10,-890,-10)
x = range(-100000,100000,3000)
y = 0.0
par = np.zeros((pt,3,tt))
time = range(1800,1800*(tt+1),1800)
# read particles
for d in range(pt):
temp = det['particles_'+myfun.digit(d+1,4)]['position']
par[d,:,:] = temp[:,0:tt]
#fsle param
di = 10 # base separation distance [m]. Taken as the distance between the particles in the triplet.
# read T from archive
for r in np.linspace(1,3):
#print 'plotting for dr:',r*di
fsle = np.zeros(pt)*np.nan
df = 11.0 #r*di # separation distance
#
# loop triplets in time
#
#
for t in range(tt):
for d in range(0,pt-len(x)):
# loop particles
if par[d,2,t] < 0.0 and par[d+len(x),2,t] < 0.0:
dr = np.linalg.norm(par[d,2,t]-par[d+len(x),2,t])
# if dr > 15.0: print dr,d,t
if (dr > df and np.isnan(fsle[d])):
fsle[d] = np.log(dr/di)/time[t]
min_fsle = np.percentile(fsle,0.1)
max_fsle = 0.0000005 #np.percentile(fsle,99)
fsler = np.reshape(fsle,(len(z),len(x)))
#
plt.figure()
v = np.linspace(1e-7,1e-6, 25, endpoint=True)
plt.contourf(x,z,fsler,v,extend='both',cmap='jet')
plt.colorbar(format='%.3e')
plt.contour(Xlist,Zlist,np.squeeze(rho),20,colors=[0.5,0.5,0.5])
plt.savefig('./plot/'+exp+'/fsle_'+exp+'_'+str(df)+'.eps',bbox_inches='tight')
plt.close()
| gpl-2.0 |
madscatt/zazzie | src/sassie/calculate/sascalc_pbc/lennard_gofr.py | 2 | 3859 | from __future__ import division
import numpy as np
import sys
import os
import sasmol.sasmol as sasmol
import compiledUtils.dna_overlap as dna_overlap
def deduceBoxSize(xCoor, yCoor, zCoor):
# first check if there is CRYST1 thing
xLen = xCoor.max() - xCoor.min()
yLen = yCoor.max() - yCoor.min()
zLen = zCoor.max() - zCoor.min()
# print(xLen)
# print(yLen)
# print(zLen)
# print('lens ^^^^^^^^^^^^^^^^^^^')
return max(map(round, [xLen, yLen, zLen]))
'''
for this_segname in system.segnames():
basis = 'segname[i] == "' + this_segname + '"'
error, this_mask = system.get_subset_mask(basis)
if len(error) > 0:
print("ERROR: "+str(error))
sys.exit()
print(this_mask)
print(type(this_mask))
x = system.coor()[0][:,0]
y = system.coor()[0][:,1]
z = system.coor()[0][:,2]
xCoor[int(this_segname)-1]=np.ma.masked_array(x,this_mask).compressed()
yCoor[int(this_segname)-1]=np.ma.masked_array(y,this_mask).compressed()
zCoor[int(this_segname)-1]=np.ma.masked_array(z,this_mask).compressed()
'''
class gofr_calc:
ngr = 0
g = 0
length = 0
delg = 0
coors = 0
npart = 0
def __init__(self, mol, box_length, nhis=600):
self.ngr = 0
self.g = np.zeros(nhis)
self.nhis = nhis
self.coors = mol.coor()
xCoor = self.coors[0][:, 0]
yCoor = self.coors[0][:, 1]
zCoor = self.coors[0][:, 2]
self.length = deduceBoxSize(xCoor, yCoor, zCoor)
self.delg = self.length / (2 * self.nhis)
self.npart = len(xCoor)
def g_hist(self, frame=0):
'''
takes in a frame number and accumulates the pair-distance histogram
(self.g) for that frame of the trajectory stored on this object
'''
xCoor = self.coors[frame][:, 0]
yCoor = self.coors[frame][:, 1]
zCoor = self.coors[frame][:, 2]
npart = len(xCoor)
self.ngr += 1
self.g = dna_overlap.gr(
1, self.coors[frame], self.g, self.length, self.delg, 0)
'''
dist = np.zeros((self.npart,self.npart))
for part1 in xrange(self.npart-1):
for part2 in xrange(part1+1,self.npart):
# ngr += 1
#dx = xCoor[part1] - xCoor[part2]
#dy = yCoor[part1] - yCoor[part2]
#dz = zCoor[part1] - zCoor[part2]
#dx = dx - self.length*int(dx/self.length)
#dy = dy - self.length*int(dy/self.length)
#dz = dz - self.length*int(dz/self.length)
#dr = np.sqrt(dx**2+dy**2+dz**2)
dr = dist[part1,part2]
if(dr<self.length/2): # can extend this to use the corners
ig =int(dr/self.delg)#int(dr/delg)
self.g[ig] += 2
'''
def g_of_r(self, sigma=3.405):
for i in range(self.nhis):
r = self.delg * (i + .5)
vb = ((i + 1)**3 - i**3) * self.delg**3
rho = self.npart / self.length**3
nid = (4 / 3) * np.pi * vb * rho
self.g[i] = self.g[i] / (self.npart * nid * self.ngr)
x = np.linspace(0, self.length / 2, len(self.g)) * sigma
return (x, self.g)
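# The normalization in g_of_r above follows the standard radial distribution
# function estimate (a sketch of the formula being implemented):
#
#     g(r_i) = h(r_i) / (N * n_id(r_i) * n_frames)
#     n_id(r_i) = (4/3) * pi * rho * [(i + 1)**3 - i**3] * delg**3
#
# where h is the accumulated pair histogram, N the particle count, rho the
# number density N / L**3, and n_frames the number of histogrammed frames
# (self.ngr).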
if __name__ == '__main__':
path = '/home/schowell/ellipsoids_simulation/simulations/LJ_sphere_monomer'
pdb_file = os.path.join(path, 'run_0.pdb')
dcd_file = os.path.join(path, 'run_1.dcd')
box_length_file = os.path.join(path, 'box_length.txt')
mol = sasmol.SasMol(0)
mol.read_pdb(pdb_file)
mol.read_dcd(dcd_file)
box_length = np.loadtxt(box_length_file)[:, 1]
gc = gofr_calc(mol, box_length, nhis=200)
gc.g_hist(200)
r, g_of_r = gc.g_of_r()
if True:
import matplotlib.pyplot as plt
plt.figure()
plt.plot(r, g_of_r)
plt.savefig('g_of_r.png', dpi=400)
print('\m/ >.< \m/')
| gpl-3.0 |
gfyoung/pandas | pandas/tests/api/test_api.py | 3 | 7706 | import subprocess
import sys
from typing import List
import pytest
import pandas as pd
from pandas import api
import pandas._testing as tm
class Base:
def check(self, namespace, expected, ignored=None):
# see which names are in the namespace, minus optional
# ignored ones
# compare vs the expected
result = sorted(f for f in dir(namespace) if not f.startswith("__"))
if ignored is not None:
result = sorted(set(result) - set(ignored))
expected = sorted(expected)
tm.assert_almost_equal(result, expected)
class TestPDApi(Base):
# these are optionally imported based on testing
# & need to be ignored
ignored = ["tests", "locale", "conftest"]
# top-level sub-packages
lib = [
"api",
"arrays",
"compat",
"core",
"errors",
"pandas",
"plotting",
"test",
"testing",
"tseries",
"util",
"options",
"io",
]
# these are already deprecated; awaiting removal
deprecated_modules: List[str] = ["np", "datetime"]
# misc
misc = ["IndexSlice", "NaT", "NA"]
# top-level classes
classes = [
"Categorical",
"CategoricalIndex",
"DataFrame",
"DateOffset",
"DatetimeIndex",
"ExcelFile",
"ExcelWriter",
"Float64Index",
"Flags",
"Grouper",
"HDFStore",
"Index",
"Int64Index",
"MultiIndex",
"Period",
"PeriodIndex",
"RangeIndex",
"UInt64Index",
"Series",
"SparseDtype",
"StringDtype",
"Timedelta",
"TimedeltaIndex",
"Timestamp",
"Interval",
"IntervalIndex",
"CategoricalDtype",
"PeriodDtype",
"IntervalDtype",
"DatetimeTZDtype",
"BooleanDtype",
"Int8Dtype",
"Int16Dtype",
"Int32Dtype",
"Int64Dtype",
"UInt8Dtype",
"UInt16Dtype",
"UInt32Dtype",
"UInt64Dtype",
"Float32Dtype",
"Float64Dtype",
"NamedAgg",
]
# these are already deprecated; awaiting removal
deprecated_classes: List[str] = []
# these should be deprecated in the future
deprecated_classes_in_future: List[str] = ["SparseArray"]
# external modules exposed in pandas namespace
modules: List[str] = []
# top-level functions
funcs = [
"array",
"bdate_range",
"concat",
"crosstab",
"cut",
"date_range",
"interval_range",
"eval",
"factorize",
"get_dummies",
"infer_freq",
"isna",
"isnull",
"lreshape",
"melt",
"notna",
"notnull",
"offsets",
"merge",
"merge_ordered",
"merge_asof",
"period_range",
"pivot",
"pivot_table",
"qcut",
"show_versions",
"timedelta_range",
"unique",
"value_counts",
"wide_to_long",
]
# top-level option funcs
funcs_option = [
"reset_option",
"describe_option",
"get_option",
"option_context",
"set_option",
"set_eng_float_format",
]
# top-level read_* funcs
funcs_read = [
"read_clipboard",
"read_csv",
"read_excel",
"read_fwf",
"read_gbq",
"read_hdf",
"read_html",
"read_json",
"read_pickle",
"read_sas",
"read_sql",
"read_sql_query",
"read_sql_table",
"read_stata",
"read_table",
"read_feather",
"read_parquet",
"read_orc",
"read_spss",
]
# top-level json funcs
funcs_json = ["json_normalize"]
# top-level to_* funcs
funcs_to = ["to_datetime", "to_numeric", "to_pickle", "to_timedelta"]
# top-level to deprecate in the future
deprecated_funcs_in_future: List[str] = []
# these are already deprecated; awaiting removal
deprecated_funcs: List[str] = []
# private modules in pandas namespace
private_modules = [
"_config",
"_hashtable",
"_lib",
"_libs",
"_np_version_under1p17",
"_np_version_under1p18",
"_is_numpy_dev",
"_testing",
"_tslib",
"_typing",
"_version",
]
def test_api(self):
checkthese = (
self.lib
+ self.misc
+ self.modules
+ self.classes
+ self.funcs
+ self.funcs_option
+ self.funcs_read
+ self.funcs_json
+ self.funcs_to
+ self.private_modules
)
self.check(pd, checkthese, self.ignored)
def test_depr(self):
deprecated_list = (
self.deprecated_modules
+ self.deprecated_classes
+ self.deprecated_classes_in_future
+ self.deprecated_funcs
+ self.deprecated_funcs_in_future
)
for depr in deprecated_list:
with tm.assert_produces_warning(FutureWarning):
_ = getattr(pd, depr)
def test_datetime():
from datetime import datetime
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore", FutureWarning)
assert datetime(2015, 1, 2, 0, 0) == pd.datetime(2015, 1, 2, 0, 0)
assert isinstance(pd.datetime(2015, 1, 2, 0, 0), pd.datetime)
def test_sparsearray():
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore", FutureWarning)
assert isinstance(pd.array([1, 2, 3], dtype="Sparse"), pd.SparseArray)
def test_np():
import warnings
import numpy as np
with warnings.catch_warnings():
warnings.simplefilter("ignore", FutureWarning)
assert (pd.np.arange(0, 10) == np.arange(0, 10)).all()
class TestApi(Base):
allowed = ["types", "extensions", "indexers"]
def test_api(self):
self.check(api, self.allowed)
class TestTesting(Base):
funcs = [
"assert_frame_equal",
"assert_series_equal",
"assert_index_equal",
"assert_extension_array_equal",
]
def test_testing(self):
from pandas import testing
self.check(testing, self.funcs)
def test_util_testing_deprecated(self):
# avoid cache state affecting the test
sys.modules.pop("pandas.util.testing", None)
with tm.assert_produces_warning(FutureWarning) as m:
import pandas.util.testing # noqa: F401
assert "pandas.util.testing is deprecated" in str(m[0].message)
assert "pandas.testing instead" in str(m[0].message)
def test_util_testing_deprecated_direct(self):
# avoid cache state affecting the test
sys.modules.pop("pandas.util.testing", None)
with tm.assert_produces_warning(FutureWarning) as m:
from pandas.util.testing import assert_series_equal # noqa: F401
assert "pandas.util.testing is deprecated" in str(m[0].message)
assert "pandas.testing instead" in str(m[0].message)
def test_util_in_top_level(self):
# in a subprocess to avoid import caching issues
out = subprocess.check_output(
[
sys.executable,
"-c",
"import pandas; pandas.util.testing.assert_series_equal",
],
stderr=subprocess.STDOUT,
).decode()
assert "pandas.util.testing is deprecated" in out
with pytest.raises(AttributeError, match="foo"):
pd.util.foo
| bsd-3-clause |
hgrif/incubator-airflow | airflow/contrib/hooks/bigquery_hook.py | 4 | 44979 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains a BigQuery Hook, as well as a very basic PEP 249
implementation for BigQuery.
"""
import time
from apiclient.discovery import build, HttpError
from googleapiclient import errors
from builtins import range
from pandas_gbq.gbq import GbqConnector, \
_parse_data as gbq_parse_data, \
_check_google_client_version as gbq_check_google_client_version, \
_test_google_api_imports as gbq_test_google_api_imports
from pandas.tools.merge import concat
from past.builtins import basestring
from airflow.contrib.hooks.gcp_api_base_hook import GoogleCloudBaseHook
from airflow.hooks.dbapi_hook import DbApiHook
from airflow.utils.log.logging_mixin import LoggingMixin
class BigQueryHook(GoogleCloudBaseHook, DbApiHook, LoggingMixin):
"""
Interact with BigQuery. This hook uses the Google Cloud Platform
connection.
"""
conn_name_attr = 'bigquery_conn_id'
def __init__(self,
bigquery_conn_id='bigquery_default',
delegate_to=None):
super(BigQueryHook, self).__init__(
conn_id=bigquery_conn_id,
delegate_to=delegate_to)
def get_conn(self):
"""
Returns a BigQuery PEP 249 connection object.
"""
service = self.get_service()
project = self._get_field('project')
return BigQueryConnection(service=service, project_id=project)
def get_service(self):
"""
Returns a BigQuery service object.
"""
http_authorized = self._authorize()
return build('bigquery', 'v2', http=http_authorized)
def insert_rows(self, table, rows, target_fields=None, commit_every=1000):
"""
Insertion is currently unsupported. Theoretically, you could use
BigQuery's streaming API to insert rows into a table, but this hasn't
been implemented.
"""
raise NotImplementedError()
def get_pandas_df(self, bql, parameters=None, dialect='legacy'):
"""
Returns a Pandas DataFrame for the results produced by a BigQuery
query. The DbApiHook method must be overridden because Pandas
doesn't support PEP 249 connections, except for SQLite. See:
https://github.com/pydata/pandas/blob/master/pandas/io/sql.py#L447
https://github.com/pydata/pandas/issues/6900
:param bql: The BigQuery SQL to execute.
:type bql: string
:param parameters: The parameters to render the SQL query with (not used, leave to override superclass method)
:type parameters: mapping or iterable
:param dialect: Dialect of BigQuery SQL – legacy SQL or standard SQL
:type dialect: string in {'legacy', 'standard'}, default 'legacy'
"""
service = self.get_service()
project = self._get_field('project')
connector = BigQueryPandasConnector(project, service, dialect=dialect)
schema, pages = connector.run_query(bql)
dataframe_list = []
while len(pages) > 0:
page = pages.pop()
dataframe_list.append(gbq_parse_data(schema, page))
if len(dataframe_list) > 0:
return concat(dataframe_list, ignore_index=True)
else:
return gbq_parse_data(schema, [])
def table_exists(self, project_id, dataset_id, table_id):
"""
Checks for the existence of a table in Google BigQuery.
:param project_id: The Google cloud project in which to look for the table. The connection supplied to the hook
must provide access to the specified project.
:type project_id: string
:param dataset_id: The name of the dataset in which to look for the table.
:type dataset_id: string
:param table_id: The name of the table to check the existence of.
:type table_id: string
"""
service = self.get_service()
try:
service.tables().get(
projectId=project_id,
datasetId=dataset_id,
tableId=table_id
).execute()
return True
except errors.HttpError as e:
if e.resp['status'] == '404':
return False
raise
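# Rough usage sketch for the hook above (assumes an Airflow connection named
# 'bigquery_default' whose extras define the 'project' field; the project,
# dataset and table names below are placeholders):
#
#     hook = BigQueryHook(bigquery_conn_id='bigquery_default')
#     df = hook.get_pandas_df('SELECT 1 AS x', dialect='legacy')
#     if hook.table_exists('my-project', 'my_dataset', 'my_table'):
#         cursor = hook.get_conn().cursor()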
class BigQueryPandasConnector(GbqConnector):
"""
This connector behaves identically to GbqConnector (from Pandas), except
that it allows the service to be injected, and disables a call to
self.get_credentials(). This allows Airflow to use BigQuery with Pandas
without forcing a three legged OAuth connection. Instead, we can inject
service account credentials into the binding.
"""
def __init__(self, project_id, service, reauth=False, verbose=False, dialect='legacy'):
gbq_check_google_client_version()
gbq_test_google_api_imports()
self.project_id = project_id
self.reauth = reauth
self.service = service
self.verbose = verbose
self.dialect = dialect
class BigQueryConnection(object):
"""
BigQuery does not have a notion of a persistent connection. Thus, these
objects are small stateless factories for cursors, which do all the real
work.
"""
def __init__(self, *args, **kwargs):
self._args = args
self._kwargs = kwargs
def close(self):
""" BigQueryConnection does not have anything to close. """
pass
def commit(self):
""" BigQueryConnection does not support transactions. """
pass
def cursor(self):
""" Return a new :py:class:`Cursor` object using the connection. """
return BigQueryCursor(*self._args, **self._kwargs)
def rollback(self):
raise NotImplementedError(
"BigQueryConnection does not have transactions")
class BigQueryBaseCursor(LoggingMixin):
"""
The BigQuery base cursor contains helper methods to execute queries against
BigQuery. The methods can be used directly by operators, in cases where a
PEP 249 cursor isn't needed.
"""
def __init__(self, service, project_id):
self.service = service
self.project_id = project_id
self.running_job_id = None
def run_query(
self, bql, destination_dataset_table = False,
write_disposition = 'WRITE_EMPTY',
allow_large_results=False,
udf_config = False,
use_legacy_sql=True,
maximum_billing_tier=None,
create_disposition='CREATE_IF_NEEDED',
query_params=None):
"""
Executes a BigQuery SQL query. Optionally persists results in a BigQuery
table. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param bql: The BigQuery SQL to execute.
:type bql: string
:param destination_dataset_table: The dotted <dataset>.<table>
BigQuery table to save the query results.
:param write_disposition: What to do if the table already exists in
BigQuery.
:type write_disposition: string
:param create_disposition: Specifies whether the job is allowed to create new tables.
:type create_disposition: string
:param allow_large_results: Whether to allow large results.
:type allow_large_results: boolean
:param udf_config: The User Defined Function configuration for the query.
See https://cloud.google.com/bigquery/user-defined-functions for details.
:type udf_config: list
:param use_legacy_sql: Whether to use legacy SQL (true) or standard SQL (false).
:type use_legacy_sql: boolean
:param maximum_billing_tier: Positive integer that serves as a multiplier of the basic price.
:type maximum_billing_tier: integer
"""
configuration = {
'query': {
'query': bql,
'useLegacySql': use_legacy_sql,
'maximumBillingTier': maximum_billing_tier
}
}
if destination_dataset_table:
assert '.' in destination_dataset_table, (
'Expected destination_dataset_table in the format of '
'<dataset>.<table>. Got: {}').format(destination_dataset_table)
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_dataset_table,
default_project_id=self.project_id)
configuration['query'].update({
'allowLargeResults': allow_large_results,
'writeDisposition': write_disposition,
'createDisposition': create_disposition,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table,
}
})
if udf_config:
assert isinstance(udf_config, list)
configuration['query'].update({
'userDefinedFunctionResources': udf_config
})
if query_params:
configuration['query']['queryParameters'] = query_params
return self.run_with_configuration(configuration)
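# Sketch of the query_params shape run_query forwards to the jobs API
# (named query parameters require use_legacy_sql=False; the parameter name
# and value below are placeholders):
#
#     query_params = [{
#         'name': 'ds',
#         'parameterType': {'type': 'STRING'},
#         'parameterValue': {'value': '2017-01-01'},
#     }]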
def run_extract( # noqa
self, source_project_dataset_table, destination_cloud_storage_uris,
compression='NONE', export_format='CSV', field_delimiter=',',
print_header=True):
"""
Executes a BigQuery extract command to copy data from BigQuery to
Google Cloud Storage. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param source_project_dataset_table: The dotted <dataset>.<table>
BigQuery table to use as the source data.
:type source_project_dataset_table: string
:param destination_cloud_storage_uris: The destination Google Cloud
Storage URI (e.g. gs://some-bucket/some-file.txt). Follows
convention defined here:
https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
:type destination_cloud_storage_uris: list
:param compression: Type of compression to use.
:type compression: string
:param export_format: File format to export.
:type export_format: string
:param field_delimiter: The delimiter to use when extracting to a CSV.
:type field_delimiter: string
:param print_header: Whether to print a header for a CSV file extract.
:type print_header: boolean
"""
source_project, source_dataset, source_table = \
_split_tablename(table_input=source_project_dataset_table,
default_project_id=self.project_id,
var_name='source_project_dataset_table')
configuration = {
'extract': {
'sourceTable': {
'projectId': source_project,
'datasetId': source_dataset,
'tableId': source_table,
},
'compression': compression,
'destinationUris': destination_cloud_storage_uris,
'destinationFormat': export_format,
}
}
if export_format == 'CSV':
# Only set fieldDelimiter and printHeader fields if using CSV.
# Google does not like it if you set these fields for other export
# formats.
configuration['extract']['fieldDelimiter'] = field_delimiter
configuration['extract']['printHeader'] = print_header
return self.run_with_configuration(configuration)
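# Illustrative sketch of an extract job (hypothetical bucket and table names):
#
#   cursor.run_extract(
#       source_project_dataset_table='my_dataset.my_table',
#       destination_cloud_storage_uris=['gs://my-bucket/export-*.csv'],
#       export_format='CSV',
#       field_delimiter=',',
#       print_header=True)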
def run_copy(self,
source_project_dataset_tables,
destination_project_dataset_table,
write_disposition='WRITE_EMPTY',
create_disposition='CREATE_IF_NEEDED'):
"""
Executes a BigQuery copy command to copy data from one BigQuery table
to another. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy
For more details about these parameters.
:param source_project_dataset_tables: One or more dotted
(project:|project.)<dataset>.<table>
BigQuery tables to use as the source data. Use a list if there are
multiple source tables.
If <project> is not included, project will be the project defined
in the connection json.
:type source_project_dataset_tables: list|string
:param destination_project_dataset_table: The destination BigQuery
table. Format is: (project:|project.)<dataset>.<table>
:type destination_project_dataset_table: string
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: string
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: string
"""
source_project_dataset_tables = (
[source_project_dataset_tables]
if not isinstance(source_project_dataset_tables, list)
else source_project_dataset_tables)
source_project_dataset_tables_fixup = []
for source_project_dataset_table in source_project_dataset_tables:
source_project, source_dataset, source_table = \
_split_tablename(table_input=source_project_dataset_table,
default_project_id=self.project_id,
var_name='source_project_dataset_table')
source_project_dataset_tables_fixup.append({
'projectId': source_project,
'datasetId': source_dataset,
'tableId': source_table
})
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_project_dataset_table,
default_project_id=self.project_id)
configuration = {
'copy': {
'createDisposition': create_disposition,
'writeDisposition': write_disposition,
'sourceTables': source_project_dataset_tables_fixup,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table
}
}
}
return self.run_with_configuration(configuration)
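# Illustrative sketch of a copy job that appends two source tables into one
# destination table (all names are hypothetical):
#
#   cursor.run_copy(
#       source_project_dataset_tables=['my_dataset.table_a',
#                                      'my_dataset.table_b'],
#       destination_project_dataset_table='other-project:my_dataset.table_all',
#       write_disposition='WRITE_APPEND')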
def run_load(self,
destination_project_dataset_table,
schema_fields, source_uris,
source_format='CSV',
create_disposition='CREATE_IF_NEEDED',
skip_leading_rows=0,
write_disposition='WRITE_EMPTY',
field_delimiter=',',
max_bad_records=0,
quote_character=None,
allow_quoted_newlines=False,
allow_jagged_rows=False,
schema_update_options=(),
src_fmt_configs={}):
"""
Executes a BigQuery load command to load data from Google Cloud Storage
to BigQuery. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param destination_project_dataset_table:
The dotted (<project>.|<project>:)<dataset>.<table> BigQuery table to load
data into. If <project> is not included, project will be the project defined
in the connection json.
:type destination_project_dataset_table: string
:param schema_fields: The schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load
:type schema_fields: list
:param source_uris: The source Google Cloud
Storage URI (e.g. gs://some-bucket/some-file.txt). A single wildcard
per object name can be used.
:type source_uris: list
:param source_format: File format to export.
:type source_format: string
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: string
:param skip_leading_rows: Number of rows to skip when loading from a CSV.
:type skip_leading_rows: int
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: string
:param field_delimiter: The delimiter to use when loading from a CSV.
:type field_delimiter: string
:param max_bad_records: The maximum number of bad records that BigQuery can
ignore when running the job.
:type max_bad_records: int
:param quote_character: The value that is used to quote data sections in a CSV file.
:type quote_character: string
:param allow_quoted_newlines: Whether to allow quoted newlines (true) or not (false).
:type allow_quoted_newlines: boolean
:param allow_jagged_rows: Accept rows that are missing trailing optional columns.
The missing values are treated as nulls. If false, records with missing trailing columns
are treated as bad records, and if there are too many bad records, an invalid error is
returned in the job result. Only applicable when source_format is CSV.
:type allow_jagged_rows: bool
:param schema_update_options: Allows the schema of the destination
table to be updated as a side effect of the load job.
:type schema_update_options: list
:param src_fmt_configs: configure optional fields specific to the source format
:type src_fmt_configs: dict
"""
# bigquery only allows certain source formats
# we check to make sure the passed source format is valid
# if it's not, we raise a ValueError
# Refer to this link for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions.(key).sourceFormat
source_format = source_format.upper()
allowed_formats = ["CSV", "NEWLINE_DELIMITED_JSON", "AVRO", "GOOGLE_SHEETS", "DATASTORE_BACKUP"]
if source_format not in allowed_formats:
raise ValueError("{0} is not a valid source format. "
"Please use one of the following types: {1}"
.format(source_format, allowed_formats))
# bigquery also allows you to define how you want a table's schema to change
# as a side effect of a load
# for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schemaUpdateOptions
allowed_schema_update_options = [
'ALLOW_FIELD_ADDITION',
"ALLOW_FIELD_RELAXATION"
]
if not set(allowed_schema_update_options).issuperset(set(schema_update_options)):
raise ValueError(
"{0} contains invalid schema update options. "
"Please only use one or more of the following options: {1}"
.format(schema_update_options, allowed_schema_update_options)
)
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_project_dataset_table,
default_project_id=self.project_id,
var_name='destination_project_dataset_table')
configuration = {
'load': {
'createDisposition': create_disposition,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table,
},
'sourceFormat': source_format,
'sourceUris': source_uris,
'writeDisposition': write_disposition,
}
}
if schema_fields:
configuration['load']['schema'] = {
'fields': schema_fields
}
if schema_update_options:
if write_disposition not in ["WRITE_APPEND", "WRITE_TRUNCATE"]:
raise ValueError(
"schema_update_options is only "
"allowed if write_disposition is "
"'WRITE_APPEND' or 'WRITE_TRUNCATE'."
)
else:
self.log.info(
"Adding experimental "
"'schemaUpdateOptions': {0}".format(schema_update_options)
)
configuration['load']['schemaUpdateOptions'] = schema_update_options
if max_bad_records:
configuration['load']['maxBadRecords'] = max_bad_records
# if following fields are not specified in src_fmt_configs,
# honor the top-level params for backward-compatibility
if 'skipLeadingRows' not in src_fmt_configs:
src_fmt_configs['skipLeadingRows'] = skip_leading_rows
if 'fieldDelimiter' not in src_fmt_configs:
src_fmt_configs['fieldDelimiter'] = field_delimiter
if quote_character:
src_fmt_configs['quote'] = quote_character
if allow_quoted_newlines:
src_fmt_configs['allowQuotedNewlines'] = allow_quoted_newlines
src_fmt_to_configs_mapping = {
'CSV': ['allowJaggedRows', 'allowQuotedNewlines', 'autodetect',
'fieldDelimiter', 'skipLeadingRows', 'ignoreUnknownValues',
'nullMarker', 'quote'],
'DATASTORE_BACKUP': ['projectionFields'],
'NEWLINE_DELIMITED_JSON': ['autodetect', 'ignoreUnknownValues'],
'AVRO': [],
}
valid_configs = src_fmt_to_configs_mapping[source_format]
src_fmt_configs = {k: v for k, v in src_fmt_configs.items()
if k in valid_configs}
configuration['load'].update(src_fmt_configs)
if allow_jagged_rows:
configuration['load']['allowJaggedRows'] = allow_jagged_rows
return self.run_with_configuration(configuration)
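# Illustrative sketch of a CSV load job (hypothetical schema, bucket and
# table names):
#
#   cursor.run_load(
#       destination_project_dataset_table='my_dataset.my_table',
#       schema_fields=[{'name': 'id', 'type': 'INTEGER', 'mode': 'REQUIRED'},
#                      {'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'}],
#       source_uris=['gs://my-bucket/data.csv'],
#       source_format='CSV',
#       skip_leading_rows=1,
#       write_disposition='WRITE_TRUNCATE')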
def run_with_configuration(self, configuration):
"""
Executes a BigQuery SQL query. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about the configuration parameter.
:param configuration: The configuration parameter maps directly to
BigQuery's configuration field in the job object. See
https://cloud.google.com/bigquery/docs/reference/v2/jobs for
details.
"""
jobs = self.service.jobs()
job_data = {
'configuration': configuration
}
# Send query and wait for reply.
query_reply = jobs \
.insert(projectId=self.project_id, body=job_data) \
.execute()
self.running_job_id = query_reply['jobReference']['jobId']
# Wait for query to finish.
keep_polling_job = True
while (keep_polling_job):
try:
job = jobs.get(projectId=self.project_id, jobId=self.running_job_id).execute()
if (job['status']['state'] == 'DONE'):
keep_polling_job = False
# Check if job had errors.
if 'errorResult' in job['status']:
raise Exception(
'BigQuery job failed. Final error was: {}. The job was: {}'.format(
job['status']['errorResult'], job
)
)
else:
self.log.info('Waiting for job to complete : %s, %s', self.project_id, self.running_job_id)
time.sleep(5)
except HttpError as err:
if err.resp.status in [500, 503]:
self.log.info('%s: Retryable error, waiting for job to complete: %s', err.resp.status, self.running_job_id)
time.sleep(5)
else:
raise Exception(
'BigQuery job status check failed. Final error was: {}'.format(
err.resp.status))
return self.running_job_id
def poll_job_complete(self, job_id):
jobs = self.service.jobs()
try:
job = jobs.get(projectId=self.project_id, jobId=job_id).execute()
if (job['status']['state'] == 'DONE'):
return True
except HttpError as err:
if err.resp.status in [500, 503]:
self.log.info('%s: Retryable error while polling job with id %s', err.resp.status, job_id)
else:
raise Exception(
'BigQuery job status check failed. Final error was: {}'.format(
err.resp.status))
return False
def cancel_query(self):
"""
Cancel all started queries that have not yet completed
"""
jobs = self.service.jobs()
if (self.running_job_id and not self.poll_job_complete(self.running_job_id)):
self.log.info('Attempting to cancel job : %s, %s', self.project_id, self.running_job_id)
jobs.cancel(projectId=self.project_id, jobId=self.running_job_id).execute()
else:
self.log.info('No running BigQuery jobs to cancel.')
return
# Wait for all the calls to cancel to finish
max_polling_attempts = 12
polling_attempts = 0
job_complete = False
while (polling_attempts < max_polling_attempts and not job_complete):
polling_attempts = polling_attempts+1
job_complete = self.poll_job_complete(self.running_job_id)
if (job_complete):
self.log.info('Job successfully canceled: %s, %s', self.project_id, self.running_job_id)
elif(polling_attempts == max_polling_attempts):
self.log.info('Stopping polling due to timeout. Job with id %s has not completed cancel and may or may not finish.', self.running_job_id)
else:
self.log.info('Waiting for canceled job with id %s to finish.', self.running_job_id)
time.sleep(5)
def get_schema(self, dataset_id, table_id):
"""
Get the schema for a given dataset.table.
see https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
:param dataset_id: the dataset ID of the requested table
:param table_id: the table ID of the requested table
:return: a table schema
"""
tables_resource = self.service.tables() \
.get(projectId=self.project_id, datasetId=dataset_id, tableId=table_id) \
.execute()
return tables_resource['schema']
def get_tabledata(self, dataset_id, table_id,
max_results=None, page_token=None, start_index=None):
"""
Get the data of a given dataset.table.
see https://cloud.google.com/bigquery/docs/reference/v2/tabledata/list
:param dataset_id: the dataset ID of the requested table.
:param table_id: the table ID of the requested table.
:param max_results: the maximum results to return.
:param page_token: page token, returned from a previous call,
identifying the result set.
:param start_index: zero based index of the starting row to read.
:return: map containing the requested rows.
"""
optional_params = {}
if max_results:
optional_params['maxResults'] = max_results
if page_token:
optional_params['pageToken'] = page_token
if start_index:
optional_params['startIndex'] = start_index
return (
self.service.tabledata()
.list(
projectId=self.project_id, datasetId=dataset_id,
tableId=table_id, **optional_params)
.execute()
)
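# Illustrative pagination sketch (hypothetical dataset/table names): the
# pageToken returned by one call is fed back in as page_token for the next.
#
#   page = cursor.get_tabledata('my_dataset', 'my_table', max_results=1000)
#   rows = page.get('rows', [])
#   next_token = page.get('pageToken')
#   if next_token:
#       page = cursor.get_tabledata('my_dataset', 'my_table',
#                                   max_results=1000, page_token=next_token)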
def run_table_delete(self, deletion_dataset_table, ignore_if_missing=False):
"""
Delete an existing table from the dataset;
If the table does not exist, return an error unless ignore_if_missing
is set to True.
:param deletion_dataset_table: A dotted
(<project>.|<project>:)<dataset>.<table> that indicates which table
will be deleted.
:type deletion_dataset_table: str
:param ignore_if_missing: if True, then return success even if the
requested table does not exist.
:type ignore_if_missing: boolean
:return:
"""
assert '.' in deletion_dataset_table, (
'Expected deletion_dataset_table in the format of '
'<dataset>.<table>. Got: {}').format(deletion_dataset_table)
deletion_project, deletion_dataset, deletion_table = \
_split_tablename(table_input=deletion_dataset_table,
default_project_id=self.project_id)
try:
tables_resource = self.service.tables() \
.delete(projectId=deletion_project,
datasetId=deletion_dataset,
tableId=deletion_table) \
.execute()
self.log.info('Deleted table %s:%s.%s.',
deletion_project, deletion_dataset, deletion_table)
except HttpError:
if not ignore_if_missing:
raise Exception(
'Table deletion failed. Table does not exist.')
else:
self.log.info('Table does not exist. Skipping.')
def run_table_upsert(self, dataset_id, table_resource, project_id=None):
"""
Creates a new, empty table in the dataset;
If the table already exists, update the existing table.
Since BigQuery does not natively allow table upserts, this is not an
atomic operation.
:param dataset_id: the dataset to upsert the table into.
:type dataset_id: str
:param table_resource: a table resource. see
https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
:type table_resource: dict
:param project_id: the project to upsert the table into. If None,
project will be self.project_id.
:return:
"""
# check to see if the table exists
table_id = table_resource['tableReference']['tableId']
project_id = project_id if project_id is not None else self.project_id
tables_list_resp = self.service.tables().list(projectId=project_id,
datasetId=dataset_id).execute()
while True:
for table in tables_list_resp.get('tables', []):
if table['tableReference']['tableId'] == table_id:
# found the table, do update
self.log.info(
'Table %s:%s.%s exists, updating.',
project_id, dataset_id, table_id
)
return self.service.tables().update(projectId=project_id,
datasetId=dataset_id,
tableId=table_id,
body=table_resource).execute()
# If there is a next page, we need to check the next page.
if 'nextPageToken' in tables_list_resp:
tables_list_resp = self.service.tables()\
.list(projectId=project_id,
datasetId=dataset_id,
pageToken=tables_list_resp['nextPageToken'])\
.execute()
# If there is no next page, then the table doesn't exist.
else:
# do insert
self.log.info(
'Table %s:%s.%s does not exist. creating.',
project_id, dataset_id, table_id
)
return self.service.tables().insert(projectId=project_id,
datasetId=dataset_id,
body=table_resource).execute()
def run_grant_dataset_view_access(self,
source_dataset,
view_dataset,
view_table,
source_project = None,
view_project = None):
"""
Grant authorized view access of a dataset to a view table.
If this view has already been granted access to the dataset, do nothing.
This method is not atomic. Running it may clobber a simultaneous update.
:param source_dataset: the source dataset
:type source_dataset: str
:param view_dataset: the dataset that the view is in
:type view_dataset: str
:param view_table: the table of the view
:type view_table: str
:param source_project: the project of the source dataset. If None,
self.project_id will be used.
:type source_project: str
:param view_project: the project that the view is in. If None,
self.project_id will be used.
:type view_project: str
:return: the datasets resource of the source dataset.
"""
# Apply default values to projects
source_project = source_project if source_project else self.project_id
view_project = view_project if view_project else self.project_id
# we don't want to clobber any existing accesses, so we have to get
# info on the dataset before we can add view access
source_dataset_resource = self.service.datasets().get(projectId=source_project,
datasetId=source_dataset).execute()
access = source_dataset_resource['access'] if 'access' in source_dataset_resource else []
view_access = {'view': {'projectId': view_project,
'datasetId': view_dataset,
'tableId': view_table}}
# check to see if the view we want to add already exists.
if view_access not in access:
self.log.info(
'Granting table %s:%s.%s authorized view access to %s:%s dataset.',
view_project, view_dataset, view_table, source_project, source_dataset
)
access.append(view_access)
return self.service.datasets().patch(projectId=source_project,
datasetId=source_dataset,
body={'access': access}).execute()
else:
# if view is already in access, do nothing.
self.log.info(
'Table %s:%s.%s already has authorized view access to %s:%s dataset.',
view_project, view_dataset, view_table, source_project, source_dataset
)
return source_dataset_resource
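# Illustrative sketch: grant a reporting view read access to a raw dataset
# (dataset and view names are hypothetical):
#
#   cursor.run_grant_dataset_view_access(
#       source_dataset='raw_data',
#       view_dataset='reporting',
#       view_table='daily_summary_view')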
class BigQueryCursor(BigQueryBaseCursor):
"""
A very basic BigQuery PEP 249 cursor implementation. The PyHive PEP 249
implementation was used as a reference:
https://github.com/dropbox/PyHive/blob/master/pyhive/presto.py
https://github.com/dropbox/PyHive/blob/master/pyhive/common.py
"""
def __init__(self, service, project_id):
super(BigQueryCursor, self).__init__(service=service, project_id=project_id)
self.buffersize = None
self.page_token = None
self.job_id = None
self.buffer = []
self.all_pages_loaded = False
@property
def description(self):
""" The schema description method is not currently implemented. """
raise NotImplementedError
def close(self):
""" By default, do nothing """
pass
@property
def rowcount(self):
""" By default, return -1 to indicate that this is not supported. """
return -1
def execute(self, operation, parameters=None):
"""
Executes a BigQuery query, and returns the job ID.
:param operation: The query to execute.
:type operation: string
:param parameters: Parameters to substitute into the query.
:type parameters: dict
"""
bql = _bind_parameters(operation, parameters) if parameters else operation
self.job_id = self.run_query(bql)
def executemany(self, operation, seq_of_parameters):
"""
Execute a BigQuery query multiple times with different parameters.
:param operation: The query to execute.
:type operation: string
:param parameters: List of dictionary parameters to substitute into the
query.
:type parameters: list
"""
for parameters in seq_of_parameters:
self.execute(operation, parameters)
def fetchone(self):
""" Fetch the next row of a query result set. """
return self.next()
def next(self):
"""
Helper method for fetchone, which returns the next row from a buffer.
If the buffer is empty, attempts to paginate through the result set for
the next page, and load it into the buffer.
"""
if not self.job_id:
return None
if len(self.buffer) == 0:
if self.all_pages_loaded:
return None
query_results = (
self.service.jobs()
.getQueryResults(
projectId=self.project_id,
jobId=self.job_id,
pageToken=self.page_token)
.execute()
)
if 'rows' in query_results and query_results['rows']:
self.page_token = query_results.get('pageToken')
fields = query_results['schema']['fields']
col_types = [field['type'] for field in fields]
rows = query_results['rows']
for dict_row in rows:
typed_row = ([
_bq_cast(vs['v'], col_types[idx])
for idx, vs in enumerate(dict_row['f'])
])
self.buffer.append(typed_row)
if not self.page_token:
self.all_pages_loaded = True
else:
# Reset all state since we've exhausted the results.
self.page_token = None
self.job_id = None
return None
return self.buffer.pop(0)
def fetchmany(self, size=None):
"""
Fetch the next set of rows of a query result, returning a sequence of sequences (e.g. a
list of tuples). An empty sequence is returned when no more rows are available.
The number of rows to fetch per call is specified by the parameter. If it is not given, the
cursor's arraysize determines the number of rows to be fetched. The method should try to
fetch as many rows as indicated by the size parameter. If this is not possible due to the
specified number of rows not being available, fewer rows may be returned.
An :py:class:`~pyhive.exc.Error` (or subclass) exception is raised if the previous call to
:py:meth:`execute` did not produce any result set or no call was issued yet.
"""
if size is None:
size = self.arraysize
result = []
for _ in range(size):
one = self.fetchone()
if one is None:
break
else:
result.append(one)
return result
def fetchall(self):
"""
Fetch all (remaining) rows of a query result, returning them as a sequence of sequences
(e.g. a list of tuples).
"""
result = []
while True:
one = self.fetchone()
if one is None:
break
else:
result.append(one)
return result
def get_arraysize(self):
""" Specifies the number of rows to fetch at a time with .fetchmany() """
return self.buffersize if self.buffersize else 1
def set_arraysize(self, arraysize):
""" Specifies the number of rows to fetch at a time with .fetchmany() """
self.buffersize = arraysize
arraysize = property(get_arraysize, set_arraysize)
def setinputsizes(self, sizes):
""" Does nothing by default """
pass
def setoutputsize(self, size, column=None):
""" Does nothing by default """
pass
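# Illustrative PEP 249 style usage sketch: `service` is assumed to be an
# authorized BigQuery API client, and the query/table names are hypothetical.
#
#   cursor = BigQueryCursor(service=service, project_id='my-project')
#   cursor.execute('SELECT name, age FROM [my_dataset.users] '
#                  'WHERE age > %(min_age)s', {'min_age': 21})
#   rows = cursor.fetchall()   # list of typed rows, e.g. [('alice', 34), ...]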
def _bind_parameters(operation, parameters):
""" Helper method that binds parameters to a SQL query. """
# inspired by MySQL Python Connector (conversion.py)
string_parameters = {}
for (name, value) in parameters.iteritems():
if value is None:
string_parameters[name] = 'NULL'
elif isinstance(value, basestring):
string_parameters[name] = "'" + _escape(value) + "'"
else:
string_parameters[name] = str(value)
return operation % string_parameters
def _escape(s):
""" Helper method that escapes parameters to a SQL query. """
e = s
e = e.replace('\\', '\\\\')
e = e.replace('\n', '\\n')
e = e.replace('\r', '\\r')
e = e.replace("'", "\\'")
e = e.replace('"', '\\"')
return e
def _bq_cast(string_field, bq_type):
"""
Helper method that casts a BigQuery row to the appropriate data types.
This is useful because BigQuery returns all fields as strings.
"""
if string_field is None:
return None
elif bq_type == 'INTEGER' or bq_type == 'TIMESTAMP':
return int(string_field)
elif bq_type == 'FLOAT':
return float(string_field)
elif bq_type == 'BOOLEAN':
assert string_field in set(['true', 'false'])
return string_field == 'true'
else:
return string_field
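# A few illustrative conversions performed by _bq_cast (BigQuery returns every
# field as a string, so the cursor re-types values using the schema):
#
#   _bq_cast('42', 'INTEGER')    -> 42
#   _bq_cast('3.14', 'FLOAT')    -> 3.14
#   _bq_cast('true', 'BOOLEAN')  -> True
#   _bq_cast(None, 'STRING')     -> None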
def _split_tablename(table_input, default_project_id, var_name=None):
assert default_project_id is not None, "INTERNAL: No default project is specified"
def var_print(var_name):
if var_name is None:
return ""
else:
return "Format exception for {var}: ".format(var=var_name)
if table_input.count('.') + table_input.count(':') > 3:
raise Exception((
'{var}Use either : or . to specify project, '
'got {input}'
).format(var=var_print(var_name), input=table_input))
cmpt = table_input.rsplit(':', 1)
project_id = None
rest = table_input
if len(cmpt) == 1:
project_id = None
rest = cmpt[0]
elif len(cmpt) == 2 and cmpt[0].count(':') <= 1:
if cmpt[-1].count('.') != 2:
project_id = cmpt[0]
rest = cmpt[1]
else:
raise Exception((
'{var}Expect format of (<project:)<dataset>.<table>, '
'got {input}'
).format(var=var_print(var_name), input=table_input))
cmpt = rest.split('.')
if len(cmpt) == 3:
assert project_id is None, (
"{var}Use either : or . to specify project"
).format(var=var_print(var_name))
project_id = cmpt[0]
dataset_id = cmpt[1]
table_id = cmpt[2]
elif len(cmpt) == 2:
dataset_id = cmpt[0]
table_id = cmpt[1]
else:
raise Exception((
'{var}Expect format of (<project.|<project:)<dataset>.<table>, '
'got {input}'
).format(var=var_print(var_name), input=table_input))
if project_id is None:
if var_name is not None:
log = LoggingMixin().log
log.info(
'Project not included in {var}: {input}; using project "{project}"'.format(
var=var_name, input=table_input, project=default_project_id
)
)
project_id = default_project_id
return project_id, dataset_id, table_id
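# Illustrative inputs accepted by _split_tablename (project, dataset and table
# names are hypothetical):
#
#   _split_tablename('my_dataset.my_table', 'default-project')
#       -> ('default-project', 'my_dataset', 'my_table')
#   _split_tablename('other-project:my_dataset.my_table', 'default-project')
#       -> ('other-project', 'my_dataset', 'my_table')
#   _split_tablename('other-project.my_dataset.my_table', 'default-project')
#       -> ('other-project', 'my_dataset', 'my_table')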
| apache-2.0 |
ahoyosid/scikit-learn | examples/linear_model/plot_bayesian_ridge.py | 248 | 2588 | """
=========================
Bayesian Ridge Regression
=========================
Computes a Bayesian Ridge Regression on a synthetic dataset.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
As the prior on the weights is a Gaussian prior, the histogram of the
estimated weights is Gaussian.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import BayesianRidge, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features) # Create Gaussian data
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the Bayesian Ridge Regression and an OLS for comparison
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot true weights, estimated weights and histogram of the weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="Bayesian Ridge estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc="best", prop=dict(size=12))
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc="lower left")
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
aayushidwivedi01/spark-tk | regression-tests/sparktkregtests/testcases/graph/graph_triangle_count_test.py | 10 | 2503 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests triangle count for ATK against the networkx implementation"""
import unittest
import networkx as nx
from sparktkregtests.lib import sparktk_test
class TriangleCount(sparktk_test.SparkTKTestCase):
def test_triangle_counts(self):
"""Build frames and graphs to exercise"""
super(TriangleCount, self).setUp()
graph_data = self.get_file("clique_10.csv")
schema = [('src', str),
('dst', str)]
# set up the vertex frame, which is the union of the src and
# the dst columns of the edges
self.frame = self.context.frame.import_csv(graph_data, schema=schema)
self.vertices = self.frame.copy()
self.vertices2 = self.frame.copy()
self.vertices.rename_columns({"src": "id"})
self.vertices.drop_columns(["dst"])
self.vertices2.rename_columns({"dst": "id"})
self.vertices2.drop_columns(["src"])
self.vertices.append(self.vertices2)
self.vertices.drop_duplicates()
self.graph = self.context.graph.create(self.vertices, self.frame)
result = self.graph.triangle_count()
triangles = result.to_pandas(result.count())
# Create a dictionary of triangle count per vertex:
dictionary_of_triangle_count = {vertex['id']: (vertex['count'])
for (index, vertex) in triangles.iterrows()}
edge_list = self.frame.take(
n=self.frame.count(), columns=['src', 'dst'])
# build the network x result
g = nx.Graph()
g.add_edges_from(edge_list)
triangle_counts_from_networkx = nx.triangles(g)
self.assertEqual(
dictionary_of_triangle_count, triangle_counts_from_networkx)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
itoijala/pyfeyner | setup.py | 1 | 2375 | #!/usr/bin/env python2
#
# pyfeyner - a simple Python interface for making Feynman diagrams.
# Copyright (C) 2005-2010 Andy Buckley, Georg von Hippel
# Copyright (C) 2013 Ismo Toijala
#
# pyfeyner is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# pyfeyner is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with pyfeyner; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from setuptools import setup
longdesc = """pyfeyner is a package which makes drawing Feynman diagrams simple and programmatic.
Feynman diagrams are important constructs in perturbative field theory, so being able to draw them
in a programmatic fashion is important if attempting to enumerate a large number of diagram
configurations is important. The output quality of pyfeyner diagrams (into PDF or EPS formats)
is very high, and special effects can be obtained by using constructs from PyX, which pyfeyner
is based around."""
setup(name = 'pyfeyner',
version = '0.1',
author = 'Andy Buckley, Georg von Hippel, Ismo Toijala',
author_email = '[email protected]',
url = 'https://github.com/itoijala/pyfeyner',
description = 'An easy-to-use Python library to help physicists draw Feynman diagrams.',
long_description = longdesc,
keywords = 'feynman hep physics particle diagram',
license = 'GPLv2+',
packages = ['pyfeyner'],
install_requires = ['PyX', 'matplotlib'],
zip_safe = False,
classifiers = ['Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Topic :: Scientific/Engineering :: Physics',
],
)
| gpl-2.0 |
McIntyre-Lab/papers | nanni_maize_2021/scripts/prep_rnaseq_4_tappas_02avn.py | 1 | 4816 | #!/usr/bin/env python
import pandas as pd
import numpy as np
import argparse
import sys
def getOptions():
# Parse command line arguments
parser = argparse.ArgumentParser(description="Prepare files for tappAS (genotype expression matrix of expected counts and design files) for maize ozone RSEM")
# Input data
parser.add_argument("-t", "--input-TPM", dest="inTPM", required=True, help="Input TSV of combined rsem expression matrix with TPM on/off flags (used to determine detected transcripts)")
parser.add_argument("-c", "--input-count", dest="inCount", required=True, help="Input TSV of combined rsem expression matrix with expected counts (used to output expected counts for detected transcripts for tappas)")
parser.add_argument("-e", "--exclude", dest="exclude", required=False, action='append', help="Samples to exclude from expression matrices, multiple values can be listed with each '-e'")
parser.add_argument("-v", "--value-type", dest="inVal", required=False, choices=['TPM','expected_count'], default='TPM', help="Value type to include in tappas expression matrix (TPM or expected_count, default: TPM)")
# Output data
parser.add_argument("-o", "--output-directory", dest="outDir", required=True, help="Output directory")
args = parser.parse_args()
return args
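# Example invocation (a sketch; the file names below are hypothetical stand-ins
# for the combined RSEM TPM-flag and expected-count matrices produced upstream):
#
#   python prep_rnaseq_4_tappas_02avn.py \
#       -t combined_rsem_flag_TPM.tsv \
#       -c combined_rsem_expected_count.tsv \
#       -e sample_to_exclude \
#       -v expected_count \
#       -o tappas_input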
def main():
# Get combined expression matrix with on/off flags
tpmDF = pd.read_csv(args.inTPM, sep="\t")
countDF = pd.read_csv(args.inCount, sep="\t")
# Remove excluded columns if provided
if args.exclude is not None:
for s in args.exclude:
tpmDF = tpmDF.drop(columns=[c for c in tpmDF.columns if c==s])
countDF = countDF.drop(columns=[c for c in countDF.columns if c==s])
print("Removed {} columns from matrix...".format(s))
# Count and drop transcripts that are not detected in any samples
# and transcripts only detected in one samples
print("{} transcripts detected in 0 samples\n{} transcripts detected in 1 sample\n...Dropped from tappas files".format(
len(tpmDF[tpmDF['sum_flag']==0]),len(tpmDF[tpmDF['sum_flag']==1])))
detectDF = tpmDF[tpmDF['sum_flag']>1].copy()
# Merge detected transcripts with expected count values
# Select only those in both (no left_only so it is everything in detected DF)
if args.inVal == "expected_count":
mergeDF = pd.merge(detectDF['transcript_id'],countDF,how='outer',on='transcript_id',indicator='merge_check')
mergeDF = mergeDF[mergeDF['merge_check']=="both"]
else:
mergeDF = detectDF.copy()
# Get Amb vs. Oz expression matrices and design files
for genotype in ["B73","C123","Hp301","Mo17","NC338"]:
cols = [c for c in mergeDF.columns if (genotype in c) and ('flag' not in c) and ('mean' not in c)]
mergeDF[['transcript_id']+cols].rename(columns={'transcript_id':""}).to_csv(
"{}/sbys_{}_4_tappas_{}.tsv".format(args.outDir,genotype,args.inVal),sep="\t",index=False)
designDF = pd.DataFrame({'Sample':cols})
designDF['Condition'] = np.where(designDF['Sample'].str.contains('Amb'),"Ambient",
np.where(designDF['Sample'].str.contains('Ele'),"Ozone","oops"))
if len(designDF[designDF['Condition']=="oops"])>0:
print("ERROR: Cannot assign Condition in {}...".format(genotype))
sys.exit()
else:
designDF.sort_values('Condition').to_csv("{}/df_{}_4_tappas.tsv".format(args.outDir,genotype),sep="\t",index=False)
# Get B73 Amb vs. all other genotype Amb expression matrices and design files
B73ambCols = [c for c in mergeDF.columns if ('B73' in c) and ('Amb' in c) and ('flag' not in c) and ('mean' not in c)]
for genotype in ["C123","Hp301","Mo17","NC338"]:
AmbCols = [c for c in mergeDF.columns if (genotype in c) and ('Amb' in c) and ('flag' not in c) and ('mean' not in c)]
mergeDF[['transcript_id']+B73ambCols+AmbCols].rename(columns={'transcript_id':""}).to_csv(
"{}/sbys_B73_vs_{}_Amb_4_tappas_{}.tsv".format(args.outDir,genotype,args.inVal),sep="\t",index=False)
designDF = pd.DataFrame({'Sample':B73ambCols+AmbCols})
designDF['Condition'] = np.where(designDF['Sample'].str.contains('B73'),"B73",
np.where(designDF['Sample'].str.contains(genotype),genotype,"oops"))
if len(designDF[designDF['Condition']=="oops"])>0:
print("ERROR: Cannot assign Condition in {}...".format(genotype))
sys.exit()
else:
designDF.sort_values('Condition').to_csv("{}/df_B73_vs_{}_Amb_4_tappas.tsv".format(args.outDir,genotype),sep="\t",index=False)
if __name__ == '__main__':
# Parse command line arguments
global args
args = getOptions()
main()
| lgpl-3.0 |
stuarteberg/vigra | vigranumpy/examples/non_local_mean_2d_color.py | 10 | 1407 | import vigra
from vigra import numpy
from matplotlib import pylab
from time import time
import multiprocessing
path = "69015.jpg"
#path = "12074.jpg"
path = "100075.jpg"
path = "12003.jpg"
data = vigra.impex.readImage(path).astype(numpy.float32)
cpus = multiprocessing.cpu_count()
print "nCpus",cpus
t0 =time()
#for c in range(3):
# cimg=data[:,:,c]
# cimg-=cimg.min()
# cimg/=cimg.max()
iters = 10
#policy = vigra.filters.RatioPolicy(sigma=10.0, meanRatio=0.95, varRatio=0.5)
policy = vigra.filters.NormPolicy(sigma=50.0, meanDist=50, varRatio=0.5)
#data-=100.0
res = vigra.filters.nonLocalMean2d(data,policy=policy,searchRadius=5,patchRadius=1,nThreads=cpus+1,stepSize=2,verbose=True,sigmaMean=10.0)
for i in range(iters-1):
res = vigra.filters.nonLocalMean2d(res,policy=policy,searchRadius=5,patchRadius=2,nThreads=cpus+1,stepSize=2,verbose=True,sigmaMean=10.0)
t1 = time()
res = vigra.taggedView(res,'xyc')
gma = vigra.filters.gaussianGradientMagnitude(res,4.0)
gmb = vigra.filters.gaussianGradientMagnitude(data,4.0)
#data+=100.0
print t1-t0
imgs = [data,res,gma,gmb]
for img in imgs:
for c in range(img.shape[2]):
cimg=img[:,:,c]
cimg-=cimg.min()
cimg/=cimg.max()
f = pylab.figure()
for n, arr in enumerate(imgs):
arr = arr.squeeze()
f.add_subplot(1, len(imgs), n + 1)  # subplot indices are 1-based
pylab.imshow(arr.swapaxes(0,1))
pylab.title('denoised')
pylab.show()
| mit |
kjung/scikit-learn | sklearn/utils/graph.py | 289 | 6239 | """
Graph utilities and algorithms
Graphs are represented with their adjacency matrices, preferably using
sparse matrices.
"""
# Authors: Aric Hagberg <[email protected]>
# Gael Varoquaux <[email protected]>
# Jake Vanderplas <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from .validation import check_array
from .graph_shortest_path import graph_shortest_path
###############################################################################
# Path and connected component analysis.
# Code adapted from networkx
def single_source_shortest_path_length(graph, source, cutoff=None):
"""Return the shortest path length from source to all reachable nodes.
Returns a dictionary of shortest path lengths keyed by target.
Parameters
----------
graph: sparse matrix or 2D array (preferably LIL matrix)
Adjacency matrix of the graph
source : node label
Starting node for path
cutoff : integer, optional
Depth to stop the search - only
paths of length <= cutoff are returned.
Examples
--------
>>> from sklearn.utils.graph import single_source_shortest_path_length
>>> import numpy as np
>>> graph = np.array([[ 0, 1, 0, 0],
... [ 1, 0, 1, 0],
... [ 0, 1, 0, 1],
... [ 0, 0, 1, 0]])
>>> single_source_shortest_path_length(graph, 0)
{0: 0, 1: 1, 2: 2, 3: 3}
>>> single_source_shortest_path_length(np.ones((6, 6)), 2)
{0: 1, 1: 1, 2: 0, 3: 1, 4: 1, 5: 1}
"""
if sparse.isspmatrix(graph):
graph = graph.tolil()
else:
graph = sparse.lil_matrix(graph)
seen = {} # level (number of hops) when seen in BFS
level = 0 # the current level
next_level = [source] # nodes to check at next level
while next_level:
this_level = next_level # advance to next level
next_level = set() # and start a new list (fringe)
for v in this_level:
if v not in seen:
seen[v] = level # set the level of vertex v
next_level.update(graph.rows[v])
if cutoff is not None and cutoff <= level:
break
level += 1
return seen # return all path lengths as dictionary
if hasattr(sparse, 'connected_components'):
connected_components = sparse.connected_components
else:
from .sparsetools import connected_components
###############################################################################
# Graph laplacian
def graph_laplacian(csgraph, normed=False, return_diag=False):
""" Return the Laplacian matrix of a directed graph.
For non-symmetric graphs the out-degree is used in the computation.
Parameters
----------
csgraph : array_like or sparse matrix, 2 dimensions
compressed-sparse graph, with shape (N, N).
normed : bool, optional
If True, then compute normalized Laplacian.
return_diag : bool, optional
If True, then return diagonal as well as laplacian.
Returns
-------
lap : ndarray
The N x N laplacian matrix of graph.
diag : ndarray
The length-N diagonal of the laplacian matrix.
diag is returned only if return_diag is True.
Notes
-----
The Laplacian matrix of a graph is sometimes referred to as the
"Kirchoff matrix" or the "admittance matrix", and is useful in many
parts of spectral graph theory. In particular, the eigen-decomposition
of the laplacian matrix can give insight into many properties of the graph.
For non-symmetric directed graphs, the laplacian is computed using the
out-degree of each node.
"""
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
raise ValueError('csgraph must be a square matrix or array')
if normed and (np.issubdtype(csgraph.dtype, np.int)
or np.issubdtype(csgraph.dtype, np.uint)):
csgraph = check_array(csgraph, dtype=np.float64, accept_sparse=True)
if sparse.isspmatrix(csgraph):
return _laplacian_sparse(csgraph, normed=normed,
return_diag=return_diag)
else:
return _laplacian_dense(csgraph, normed=normed,
return_diag=return_diag)
def _laplacian_sparse(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
if not graph.format == 'coo':
lap = (-graph).tocoo()
else:
lap = -graph.copy()
diag_mask = (lap.row == lap.col)
if not diag_mask.sum() == n_nodes:
# The sparsity pattern of the matrix has holes on the diagonal,
# we need to fix that
diag_idx = lap.row[diag_mask]
diagonal_holes = list(set(range(n_nodes)).difference(diag_idx))
new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])
new_row = np.concatenate([lap.row, diagonal_holes])
new_col = np.concatenate([lap.col, diagonal_holes])
lap = sparse.coo_matrix((new_data, (new_row, new_col)),
shape=lap.shape)
diag_mask = (lap.row == lap.col)
lap.data[diag_mask] = 0
w = -np.asarray(lap.sum(axis=1)).squeeze()
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap.data /= w[lap.row]
lap.data /= w[lap.col]
lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(
lap.data.dtype)
else:
lap.data[diag_mask] = w[lap.row[diag_mask]]
if return_diag:
return lap, w
return lap
def _laplacian_dense(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
lap = -np.asarray(graph) # minus sign leads to a copy
# set diagonal to zero
lap.flat[::n_nodes + 1] = 0
w = -lap.sum(axis=0)
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap /= w
lap /= w[:, np.newaxis]
lap.flat[::n_nodes + 1] = (1 - w_zeros).astype(lap.dtype)
else:
lap.flat[::n_nodes + 1] = w.astype(lap.dtype)
if return_diag:
return lap, w
return lap
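# Small illustrative example (not part of the library): for a 3-node path
# graph the unnormalized Laplacian is the degree matrix minus the adjacency
# matrix.
#
#   A = np.array([[0, 1, 0],
#                 [1, 0, 1],
#                 [0, 1, 0]])
#   graph_laplacian(A)      # -> [[ 1, -1,  0],
#                           #     [-1,  2, -1],
#                           #     [ 0, -1,  1]]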
| bsd-3-clause |
parantapa/seaborn | seaborn/tests/test_matrix.py | 3 | 32144 | import itertools
import tempfile
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
from scipy.spatial import distance
from scipy.cluster import hierarchy
import nose.tools as nt
import numpy.testing as npt
import pandas.util.testing as pdt
from numpy.testing.decorators import skipif
from .. import matrix as mat
from .. import color_palette
from ..external.six.moves import range
try:
import fastcluster
assert fastcluster
_no_fastcluster = False
except ImportError:
_no_fastcluster = True
class TestHeatmap(object):
rs = np.random.RandomState(sum(map(ord, "heatmap")))
x_norm = rs.randn(4, 8)
letters = pd.Series(["A", "B", "C", "D"], name="letters")
df_norm = pd.DataFrame(x_norm, index=letters)
x_unif = rs.rand(20, 13)
df_unif = pd.DataFrame(x_unif)
default_kws = dict(vmin=None, vmax=None, cmap=None, center=None,
robust=False, annot=False, fmt=".2f", annot_kws=None,
cbar=True, cbar_kws=None, mask=None)
def test_ndarray_input(self):
p = mat._HeatMapper(self.x_norm, **self.default_kws)
npt.assert_array_equal(p.plot_data, self.x_norm[::-1])
pdt.assert_frame_equal(p.data, pd.DataFrame(self.x_norm).ix[::-1])
npt.assert_array_equal(p.xticklabels, np.arange(8))
npt.assert_array_equal(p.yticklabels, np.arange(4)[::-1])
nt.assert_equal(p.xlabel, "")
nt.assert_equal(p.ylabel, "")
def test_df_input(self):
p = mat._HeatMapper(self.df_norm, **self.default_kws)
npt.assert_array_equal(p.plot_data, self.x_norm[::-1])
pdt.assert_frame_equal(p.data, self.df_norm.ix[::-1])
npt.assert_array_equal(p.xticklabels, np.arange(8))
npt.assert_array_equal(p.yticklabels, ["D", "C", "B", "A"])
nt.assert_equal(p.xlabel, "")
nt.assert_equal(p.ylabel, "letters")
def test_df_multindex_input(self):
df = self.df_norm.copy()
index = pd.MultiIndex.from_tuples([("A", 1), ("B", 2),
("C", 3), ("D", 4)],
names=["letter", "number"])
index.name = "letter-number"
df.index = index
p = mat._HeatMapper(df, **self.default_kws)
npt.assert_array_equal(p.yticklabels, ["D-4", "C-3", "B-2", "A-1"])
nt.assert_equal(p.ylabel, "letter-number")
p = mat._HeatMapper(df.T, **self.default_kws)
npt.assert_array_equal(p.xticklabels, ["A-1", "B-2", "C-3", "D-4"])
nt.assert_equal(p.xlabel, "letter-number")
def test_mask_input(self):
kws = self.default_kws.copy()
mask = self.x_norm > 0
kws['mask'] = mask
p = mat._HeatMapper(self.x_norm, **kws)
plot_data = np.ma.masked_where(mask, self.x_norm)
npt.assert_array_equal(p.plot_data, plot_data[::-1])
def test_default_sequential_vlims(self):
p = mat._HeatMapper(self.df_unif, **self.default_kws)
nt.assert_equal(p.vmin, self.x_unif.min())
nt.assert_equal(p.vmax, self.x_unif.max())
nt.assert_true(not p.divergent)
def test_default_diverging_vlims(self):
p = mat._HeatMapper(self.df_norm, **self.default_kws)
vlim = max(abs(self.x_norm.min()), abs(self.x_norm.max()))
nt.assert_equal(p.vmin, -vlim)
nt.assert_equal(p.vmax, vlim)
nt.assert_true(p.divergent)
def test_robust_sequential_vlims(self):
kws = self.default_kws.copy()
kws["robust"] = True
p = mat._HeatMapper(self.df_unif, **kws)
nt.assert_equal(p.vmin, np.percentile(self.x_unif, 2))
nt.assert_equal(p.vmax, np.percentile(self.x_unif, 98))
def test_custom_sequential_vlims(self):
kws = self.default_kws.copy()
kws["vmin"] = 0
kws["vmax"] = 1
p = mat._HeatMapper(self.df_unif, **kws)
nt.assert_equal(p.vmin, 0)
nt.assert_equal(p.vmax, 1)
def test_custom_diverging_vlims(self):
kws = self.default_kws.copy()
kws["vmin"] = -4
kws["vmax"] = 5
p = mat._HeatMapper(self.df_norm, **kws)
nt.assert_equal(p.vmin, -5)
nt.assert_equal(p.vmax, 5)
def test_array_with_nans(self):
x1 = self.rs.rand(10, 10)
nulls = np.zeros(10) * np.nan
x2 = np.c_[x1, nulls]
m1 = mat._HeatMapper(x1, **self.default_kws)
m2 = mat._HeatMapper(x2, **self.default_kws)
nt.assert_equal(m1.vmin, m2.vmin)
nt.assert_equal(m1.vmax, m2.vmax)
def test_mask(self):
df = pd.DataFrame(data={'a': [1, 1, 1],
'b': [2, np.nan, 2],
'c': [3, 3, np.nan]})
kws = self.default_kws.copy()
kws["mask"] = np.isnan(df.values)
m = mat._HeatMapper(df, **kws)
npt.assert_array_equal(np.isnan(m.plot_data.data),
m.plot_data.mask)
def test_custom_cmap(self):
kws = self.default_kws.copy()
kws["cmap"] = "BuGn"
p = mat._HeatMapper(self.df_unif, **kws)
nt.assert_equal(p.cmap, "BuGn")
def test_centered_vlims(self):
kws = self.default_kws.copy()
kws["center"] = .5
p = mat._HeatMapper(self.df_unif, **kws)
nt.assert_true(p.divergent)
nt.assert_equal(p.vmax - .5, .5 - p.vmin)
def test_tickabels_off(self):
kws = self.default_kws.copy()
kws['xticklabels'] = False
kws['yticklabels'] = False
p = mat._HeatMapper(self.df_norm, **kws)
nt.assert_equal(p.xticklabels, ['' for _ in range(
self.df_norm.shape[1])])
nt.assert_equal(p.yticklabels, ['' for _ in range(
self.df_norm.shape[0])])
def test_custom_ticklabels(self):
kws = self.default_kws.copy()
xticklabels = list('iheartheatmaps'[:self.df_norm.shape[1]])
yticklabels = list('heatmapsarecool'[:self.df_norm.shape[0]])
kws['xticklabels'] = xticklabels
kws['yticklabels'] = yticklabels
p = mat._HeatMapper(self.df_norm, **kws)
nt.assert_equal(p.xticklabels, xticklabels)
nt.assert_equal(p.yticklabels, yticklabels[::-1])
def test_custom_ticklabel_interval(self):
kws = self.default_kws.copy()
kws['xticklabels'] = 2
kws['yticklabels'] = 3
p = mat._HeatMapper(self.df_norm, **kws)
nx, ny = self.df_norm.T.shape
ystart = (ny - 1) % 3
npt.assert_array_equal(p.xticks, np.arange(0, nx, 2) + .5)
npt.assert_array_equal(p.yticks, np.arange(ystart, ny, 3) + .5)
npt.assert_array_equal(p.xticklabels,
self.df_norm.columns[::2])
npt.assert_array_equal(p.yticklabels,
self.df_norm.index[::-1][ystart:ny:3])
def test_heatmap_annotation(self):
ax = mat.heatmap(self.df_norm, annot=True, fmt=".1f",
annot_kws={"fontsize": 14})
for val, text in zip(self.x_norm[::-1].flat, ax.texts):
nt.assert_equal(text.get_text(), "{:.1f}".format(val))
nt.assert_equal(text.get_fontsize(), 14)
plt.close("all")
def test_heatmap_annotation_with_mask(self):
df = pd.DataFrame(data={'a': [1, 1, 1],
'b': [2, np.nan, 2],
'c': [3, 3, np.nan]})
mask = np.isnan(df.values)
df_masked = np.ma.masked_where(mask, df)
ax = mat.heatmap(df, annot=True, fmt='.1f', mask=mask)
nt.assert_equal(len(df_masked[::-1].compressed()), len(ax.texts))
for val, text in zip(df_masked[::-1].compressed(), ax.texts):
nt.assert_equal("{:.1f}".format(val), text.get_text())
plt.close("all")
def test_heatmap_cbar(self):
f = plt.figure()
mat.heatmap(self.df_norm)
nt.assert_equal(len(f.axes), 2)
plt.close(f)
f = plt.figure()
mat.heatmap(self.df_norm, cbar=False)
nt.assert_equal(len(f.axes), 1)
plt.close(f)
f, (ax1, ax2) = plt.subplots(2)
mat.heatmap(self.df_norm, ax=ax1, cbar_ax=ax2)
nt.assert_equal(len(f.axes), 2)
plt.close(f)
def test_heatmap_axes(self):
ax = mat.heatmap(self.df_norm)
xtl = [int(l.get_text()) for l in ax.get_xticklabels()]
nt.assert_equal(xtl, list(self.df_norm.columns))
ytl = [l.get_text() for l in ax.get_yticklabels()]
nt.assert_equal(ytl, list(self.df_norm.index[::-1]))
nt.assert_equal(ax.get_xlabel(), "")
nt.assert_equal(ax.get_ylabel(), "letters")
nt.assert_equal(ax.get_xlim(), (0, 8))
nt.assert_equal(ax.get_ylim(), (0, 4))
plt.close("all")
def test_heatmap_ticklabel_rotation(self):
f, ax = plt.subplots(figsize=(2, 2))
mat.heatmap(self.df_norm, ax=ax)
for t in ax.get_xticklabels():
nt.assert_equal(t.get_rotation(), 0)
for t in ax.get_yticklabels():
nt.assert_equal(t.get_rotation(), 90)
plt.close(f)
df = self.df_norm.copy()
df.columns = [str(c) * 10 for c in df.columns]
df.index = [i * 10 for i in df.index]
f, ax = plt.subplots(figsize=(2, 2))
mat.heatmap(df, ax=ax)
for t in ax.get_xticklabels():
nt.assert_equal(t.get_rotation(), 90)
for t in ax.get_yticklabels():
nt.assert_equal(t.get_rotation(), 0)
plt.close(f)
def test_heatmap_inner_lines(self):
c = (0, 0, 1, 1)
ax = mat.heatmap(self.df_norm, linewidths=2, linecolor=c)
mesh = ax.collections[0]
nt.assert_equal(mesh.get_linewidths()[0], 2)
nt.assert_equal(tuple(mesh.get_edgecolor()[0]), c)
plt.close("all")
def test_square_aspect(self):
ax = mat.heatmap(self.df_norm, square=True)
nt.assert_equal(ax.get_aspect(), "equal")
plt.close("all")
def test_mask_validation(self):
mask = mat._matrix_mask(self.df_norm, None)
nt.assert_equal(mask.shape, self.df_norm.shape)
nt.assert_equal(mask.values.sum(), 0)
with nt.assert_raises(ValueError):
bad_array_mask = self.rs.randn(3, 6) > 0
mat._matrix_mask(self.df_norm, bad_array_mask)
with nt.assert_raises(ValueError):
bad_df_mask = pd.DataFrame(self.rs.randn(4, 8) > 0)
mat._matrix_mask(self.df_norm, bad_df_mask)
def test_missing_data_mask(self):
data = pd.DataFrame(np.arange(4, dtype=np.float).reshape(2, 2))
data.loc[0, 0] = np.nan
mask = mat._matrix_mask(data, None)
npt.assert_array_equal(mask, [[True, False], [False, False]])
mask_in = np.array([[False, True], [False, False]])
mask_out = mat._matrix_mask(data, mask_in)
npt.assert_array_equal(mask_out, [[True, True], [False, False]])
class TestDendrogram(object):
rs = np.random.RandomState(sum(map(ord, "dendrogram")))
x_norm = rs.randn(4, 8) + np.arange(8)
x_norm = (x_norm.T + np.arange(4)).T
letters = pd.Series(["A", "B", "C", "D", "E", "F", "G", "H"],
name="letters")
df_norm = pd.DataFrame(x_norm, columns=letters)
try:
import fastcluster
x_norm_linkage = fastcluster.linkage_vector(x_norm.T,
metric='euclidean',
method='single')
except ImportError:
x_norm_distances = distance.squareform(
distance.pdist(x_norm.T, metric='euclidean'))
x_norm_linkage = hierarchy.linkage(x_norm_distances, method='single')
x_norm_dendrogram = hierarchy.dendrogram(x_norm_linkage, no_plot=True,
color_list=['k'],
color_threshold=-np.inf)
x_norm_leaves = x_norm_dendrogram['leaves']
df_norm_leaves = np.asarray(df_norm.columns[x_norm_leaves])
default_kws = dict(linkage=None, metric='euclidean', method='single',
axis=1, label=True, rotate=False)
def test_ndarray_input(self):
p = mat._DendrogramPlotter(self.x_norm, **self.default_kws)
npt.assert_array_equal(p.array.T, self.x_norm)
pdt.assert_frame_equal(p.data.T, pd.DataFrame(self.x_norm))
npt.assert_array_equal(p.linkage, self.x_norm_linkage)
nt.assert_dict_equal(p.dendrogram, self.x_norm_dendrogram)
npt.assert_array_equal(p.reordered_ind, self.x_norm_leaves)
npt.assert_array_equal(p.xticklabels, self.x_norm_leaves)
npt.assert_array_equal(p.yticklabels, [])
nt.assert_equal(p.xlabel, None)
nt.assert_equal(p.ylabel, '')
def test_df_input(self):
p = mat._DendrogramPlotter(self.df_norm, **self.default_kws)
npt.assert_array_equal(p.array.T, np.asarray(self.df_norm))
pdt.assert_frame_equal(p.data.T, self.df_norm)
npt.assert_array_equal(p.linkage, self.x_norm_linkage)
nt.assert_dict_equal(p.dendrogram, self.x_norm_dendrogram)
npt.assert_array_equal(p.xticklabels,
np.asarray(self.df_norm.columns)[
self.x_norm_leaves])
npt.assert_array_equal(p.yticklabels, [])
nt.assert_equal(p.xlabel, 'letters')
nt.assert_equal(p.ylabel, '')
def test_df_multindex_input(self):
df = self.df_norm.copy()
index = pd.MultiIndex.from_tuples([("A", 1), ("B", 2),
("C", 3), ("D", 4)],
names=["letter", "number"])
index.name = "letter-number"
df.index = index
kws = self.default_kws.copy()
kws['label'] = True
p = mat._DendrogramPlotter(df.T, **kws)
xticklabels = ["A-1", "B-2", "C-3", "D-4"]
xticklabels = [xticklabels[i] for i in p.reordered_ind]
npt.assert_array_equal(p.xticklabels, xticklabels)
npt.assert_array_equal(p.yticklabels, [])
nt.assert_equal(p.xlabel, "letter-number")
def test_axis0_input(self):
kws = self.default_kws.copy()
kws['axis'] = 0
p = mat._DendrogramPlotter(self.df_norm.T, **kws)
npt.assert_array_equal(p.array, np.asarray(self.df_norm.T))
pdt.assert_frame_equal(p.data, self.df_norm.T)
npt.assert_array_equal(p.linkage, self.x_norm_linkage)
nt.assert_dict_equal(p.dendrogram, self.x_norm_dendrogram)
npt.assert_array_equal(p.xticklabels, self.df_norm_leaves)
npt.assert_array_equal(p.yticklabels, [])
nt.assert_equal(p.xlabel, 'letters')
nt.assert_equal(p.ylabel, '')
def test_rotate_input(self):
kws = self.default_kws.copy()
kws['rotate'] = True
p = mat._DendrogramPlotter(self.df_norm, **kws)
npt.assert_array_equal(p.array.T, np.asarray(self.df_norm))
pdt.assert_frame_equal(p.data.T, self.df_norm)
npt.assert_array_equal(p.xticklabels, [])
npt.assert_array_equal(p.yticklabels, self.df_norm_leaves)
nt.assert_equal(p.xlabel, '')
nt.assert_equal(p.ylabel, 'letters')
def test_rotate_axis0_input(self):
kws = self.default_kws.copy()
kws['rotate'] = True
kws['axis'] = 0
p = mat._DendrogramPlotter(self.df_norm.T, **kws)
npt.assert_array_equal(p.reordered_ind, self.x_norm_leaves)
def test_custom_linkage(self):
kws = self.default_kws.copy()
try:
import fastcluster
linkage = fastcluster.linkage_vector(self.x_norm, method='single',
metric='euclidean')
except ImportError:
d = distance.squareform(distance.pdist(self.x_norm,
metric='euclidean'))
linkage = hierarchy.linkage(d, method='single')
dendrogram = hierarchy.dendrogram(linkage, no_plot=True,
color_list=['k'],
color_threshold=-np.inf)
kws['linkage'] = linkage
p = mat._DendrogramPlotter(self.df_norm, **kws)
npt.assert_array_equal(p.linkage, linkage)
nt.assert_dict_equal(p.dendrogram, dendrogram)
def test_label_false(self):
kws = self.default_kws.copy()
kws['label'] = False
p = mat._DendrogramPlotter(self.df_norm, **kws)
nt.assert_equal(p.xticks, [])
nt.assert_equal(p.yticks, [])
nt.assert_equal(p.xticklabels, [])
nt.assert_equal(p.yticklabels, [])
nt.assert_equal(p.xlabel, "")
nt.assert_equal(p.ylabel, "")
def test_linkage_scipy(self):
p = mat._DendrogramPlotter(self.x_norm, **self.default_kws)
scipy_linkage = p._calculate_linkage_scipy()
from scipy.spatial import distance
from scipy.cluster import hierarchy
dists = distance.squareform(distance.pdist(self.x_norm.T,
metric=self.default_kws[
'metric']))
linkage = hierarchy.linkage(dists, method=self.default_kws['method'])
npt.assert_array_equal(scipy_linkage, linkage)
@skipif(_no_fastcluster)
def test_fastcluster_other_method(self):
import fastcluster
kws = self.default_kws.copy()
kws['method'] = 'average'
linkage = fastcluster.linkage(self.x_norm.T, method='average',
metric='euclidean')
p = mat._DendrogramPlotter(self.x_norm, **kws)
npt.assert_array_equal(p.linkage, linkage)
@skipif(_no_fastcluster)
def test_fastcluster_non_euclidean(self):
import fastcluster
kws = self.default_kws.copy()
kws['metric'] = 'cosine'
kws['method'] = 'average'
linkage = fastcluster.linkage(self.x_norm.T, method=kws['method'],
metric=kws['metric'])
p = mat._DendrogramPlotter(self.x_norm, **kws)
npt.assert_array_equal(p.linkage, linkage)
def test_dendrogram_plot(self):
d = mat.dendrogram(self.x_norm, **self.default_kws)
ax = plt.gca()
d.xmin, d.xmax = ax.get_xlim()
xmax = min(map(min, d.X)) + max(map(max, d.X))
nt.assert_equal(d.xmin, 0)
nt.assert_equal(d.xmax, xmax)
nt.assert_equal(len(ax.get_lines()), len(d.X))
nt.assert_equal(len(ax.get_lines()), len(d.Y))
plt.close('all')
def test_dendrogram_rotate(self):
kws = self.default_kws.copy()
kws['rotate'] = True
d = mat.dendrogram(self.x_norm, **kws)
ax = plt.gca()
d.ymin, d.ymax = ax.get_ylim()
ymax = min(map(min, d.Y)) + max(map(max, d.Y))
nt.assert_equal(d.ymin, 0)
nt.assert_equal(d.ymax, ymax)
plt.close('all')
def test_dendrogram_ticklabel_rotation(self):
f, ax = plt.subplots(figsize=(2, 2))
mat.dendrogram(self.df_norm, ax=ax)
for t in ax.get_xticklabels():
nt.assert_equal(t.get_rotation(), 0)
plt.close(f)
df = self.df_norm.copy()
df.columns = [str(c) * 10 for c in df.columns]
df.index = [i * 10 for i in df.index]
f, ax = plt.subplots(figsize=(2, 2))
mat.dendrogram(df, ax=ax)
for t in ax.get_xticklabels():
nt.assert_equal(t.get_rotation(), 90)
plt.close(f)
f, ax = plt.subplots(figsize=(2, 2))
mat.dendrogram(df.T, axis=0, rotate=True)
for t in ax.get_yticklabels():
nt.assert_equal(t.get_rotation(), 0)
plt.close(f)
class TestClustermap(object):
rs = np.random.RandomState(sum(map(ord, "clustermap")))
x_norm = rs.randn(4, 8) + np.arange(8)
x_norm = (x_norm.T + np.arange(4)).T
letters = pd.Series(["A", "B", "C", "D", "E", "F", "G", "H"],
name="letters")
df_norm = pd.DataFrame(x_norm, columns=letters)
try:
import fastcluster
x_norm_linkage = fastcluster.linkage_vector(x_norm.T,
metric='euclidean',
method='single')
except ImportError:
x_norm_distances = distance.squareform(
distance.pdist(x_norm.T, metric='euclidean'))
x_norm_linkage = hierarchy.linkage(x_norm_distances, method='single')
x_norm_dendrogram = hierarchy.dendrogram(x_norm_linkage, no_plot=True,
color_list=['k'],
color_threshold=-np.inf)
x_norm_leaves = x_norm_dendrogram['leaves']
df_norm_leaves = np.asarray(df_norm.columns[x_norm_leaves])
default_kws = dict(pivot_kws=None, z_score=None, standard_scale=None,
figsize=None, row_colors=None, col_colors=None)
default_plot_kws = dict(metric='euclidean', method='average',
colorbar_kws=None,
row_cluster=True, col_cluster=True,
row_linkage=None, col_linkage=None)
row_colors = color_palette('Set2', df_norm.shape[0])
col_colors = color_palette('Dark2', df_norm.shape[1])
def test_ndarray_input(self):
cm = mat.ClusterGrid(self.x_norm, **self.default_kws)
pdt.assert_frame_equal(cm.data, pd.DataFrame(self.x_norm))
nt.assert_equal(len(cm.fig.axes), 4)
nt.assert_equal(cm.ax_row_colors, None)
nt.assert_equal(cm.ax_col_colors, None)
plt.close('all')
def test_df_input(self):
cm = mat.ClusterGrid(self.df_norm, **self.default_kws)
pdt.assert_frame_equal(cm.data, self.df_norm)
plt.close('all')
def test_corr_df_input(self):
df = self.df_norm.corr()
cg = mat.ClusterGrid(df, **self.default_kws)
cg.plot(**self.default_plot_kws)
diag = cg.data2d.values[np.diag_indices_from(cg.data2d)]
npt.assert_array_equal(diag, np.ones(cg.data2d.shape[0]))
plt.close('all')
def test_pivot_input(self):
df_norm = self.df_norm.copy()
df_norm.index.name = 'numbers'
df_long = pd.melt(df_norm.reset_index(), var_name='letters',
id_vars='numbers')
kws = self.default_kws.copy()
kws['pivot_kws'] = dict(index='numbers', columns='letters',
values='value')
cm = mat.ClusterGrid(df_long, **kws)
pdt.assert_frame_equal(cm.data2d, df_norm)
plt.close('all')
def test_colors_input(self):
kws = self.default_kws.copy()
kws['row_colors'] = self.row_colors
kws['col_colors'] = self.col_colors
cm = mat.ClusterGrid(self.df_norm, **kws)
npt.assert_array_equal(cm.row_colors, self.row_colors)
npt.assert_array_equal(cm.col_colors, self.col_colors)
nt.assert_equal(len(cm.fig.axes), 6)
plt.close('all')
def test_nested_colors_input(self):
kws = self.default_kws.copy()
row_colors = [self.row_colors, self.row_colors]
col_colors = [self.col_colors, self.col_colors]
kws['row_colors'] = row_colors
kws['col_colors'] = col_colors
cm = mat.ClusterGrid(self.df_norm, **kws)
npt.assert_array_equal(cm.row_colors, row_colors)
npt.assert_array_equal(cm.col_colors, col_colors)
nt.assert_equal(len(cm.fig.axes), 6)
plt.close('all')
def test_colors_input_custom_cmap(self):
kws = self.default_kws.copy()
kws['cmap'] = mpl.cm.PRGn
kws['row_colors'] = self.row_colors
kws['col_colors'] = self.col_colors
cm = mat.clustermap(self.df_norm, **kws)
npt.assert_array_equal(cm.row_colors, self.row_colors)
npt.assert_array_equal(cm.col_colors, self.col_colors)
nt.assert_equal(len(cm.fig.axes), 6)
plt.close('all')
def test_z_score(self):
df = self.df_norm.copy()
df = (df - df.mean()) / df.var()
kws = self.default_kws.copy()
kws['z_score'] = 1
cm = mat.ClusterGrid(self.df_norm, **kws)
pdt.assert_frame_equal(cm.data2d, df)
plt.close('all')
def test_z_score_axis0(self):
df = self.df_norm.copy()
df = df.T
df = (df - df.mean()) / df.var()
df = df.T
kws = self.default_kws.copy()
kws['z_score'] = 0
cm = mat.ClusterGrid(self.df_norm, **kws)
pdt.assert_frame_equal(cm.data2d, df)
plt.close('all')
def test_standard_scale(self):
df = self.df_norm.copy()
df = (df - df.min()) / (df.max() - df.min())
kws = self.default_kws.copy()
kws['standard_scale'] = 1
cm = mat.ClusterGrid(self.df_norm, **kws)
pdt.assert_frame_equal(cm.data2d, df)
plt.close('all')
def test_standard_scale_axis0(self):
df = self.df_norm.copy()
df = df.T
df = (df - df.min()) / (df.max() - df.min())
df = df.T
kws = self.default_kws.copy()
kws['standard_scale'] = 0
cm = mat.ClusterGrid(self.df_norm, **kws)
pdt.assert_frame_equal(cm.data2d, df)
plt.close('all')
def test_z_score_standard_scale(self):
kws = self.default_kws.copy()
kws['z_score'] = True
kws['standard_scale'] = True
with nt.assert_raises(ValueError):
cm = mat.ClusterGrid(self.df_norm, **kws)
plt.close('all')
def test_color_list_to_matrix_and_cmap(self):
matrix, cmap = mat.ClusterGrid.color_list_to_matrix_and_cmap(
self.col_colors, self.x_norm_leaves)
colors_set = set(self.col_colors)
col_to_value = dict((col, i) for i, col in enumerate(colors_set))
matrix_test = np.array([col_to_value[col] for col in
self.col_colors])[self.x_norm_leaves]
shape = len(self.col_colors), 1
matrix_test = matrix_test.reshape(shape)
cmap_test = mpl.colors.ListedColormap(colors_set)
npt.assert_array_equal(matrix, matrix_test)
npt.assert_array_equal(cmap.colors, cmap_test.colors)
plt.close('all')
def test_nested_color_list_to_matrix_and_cmap(self):
colors = [self.col_colors, self.col_colors]
matrix, cmap = mat.ClusterGrid.color_list_to_matrix_and_cmap(
colors, self.x_norm_leaves)
all_colors = set(itertools.chain(*colors))
color_to_value = dict((col, i) for i, col in enumerate(all_colors))
matrix_test = np.array(
[color_to_value[c] for color in colors for c in color])
shape = len(colors), len(colors[0])
matrix_test = matrix_test.reshape(shape)
matrix_test = matrix_test[:, self.x_norm_leaves]
matrix_test = matrix_test.T
cmap_test = mpl.colors.ListedColormap(all_colors)
npt.assert_array_equal(matrix, matrix_test)
npt.assert_array_equal(cmap.colors, cmap_test.colors)
plt.close('all')
def test_color_list_to_matrix_and_cmap_axis1(self):
matrix, cmap = mat.ClusterGrid.color_list_to_matrix_and_cmap(
self.col_colors, self.x_norm_leaves, axis=1)
colors_set = set(self.col_colors)
col_to_value = dict((col, i) for i, col in enumerate(colors_set))
matrix_test = np.array([col_to_value[col] for col in
self.col_colors])[self.x_norm_leaves]
shape = 1, len(self.col_colors)
matrix_test = matrix_test.reshape(shape)
cmap_test = mpl.colors.ListedColormap(colors_set)
npt.assert_array_equal(matrix, matrix_test)
npt.assert_array_equal(cmap.colors, cmap_test.colors)
plt.close('all')
def test_savefig(self):
# Not sure if this is the right way to test....
cm = mat.ClusterGrid(self.df_norm, **self.default_kws)
cm.plot(**self.default_plot_kws)
cm.savefig(tempfile.NamedTemporaryFile(), format='png')
plt.close('all')
def test_plot_dendrograms(self):
cm = mat.clustermap(self.df_norm, **self.default_kws)
nt.assert_equal(len(cm.ax_row_dendrogram.get_lines()),
len(cm.dendrogram_row.X))
nt.assert_equal(len(cm.ax_col_dendrogram.get_lines()),
len(cm.dendrogram_col.X))
data2d = self.df_norm.iloc[cm.dendrogram_row.reordered_ind,
cm.dendrogram_col.reordered_ind]
pdt.assert_frame_equal(cm.data2d, data2d)
plt.close('all')
def test_cluster_false(self):
kws = self.default_kws.copy()
kws['row_cluster'] = False
kws['col_cluster'] = False
cm = mat.clustermap(self.df_norm, **kws)
nt.assert_equal(len(cm.ax_row_dendrogram.lines), 0)
nt.assert_equal(len(cm.ax_col_dendrogram.lines), 0)
nt.assert_equal(len(cm.ax_row_dendrogram.get_xticks()), 0)
nt.assert_equal(len(cm.ax_row_dendrogram.get_yticks()), 0)
nt.assert_equal(len(cm.ax_col_dendrogram.get_xticks()), 0)
nt.assert_equal(len(cm.ax_col_dendrogram.get_yticks()), 0)
pdt.assert_frame_equal(cm.data2d, self.df_norm)
plt.close('all')
def test_row_col_colors(self):
kws = self.default_kws.copy()
kws['row_colors'] = self.row_colors
kws['col_colors'] = self.col_colors
cm = mat.clustermap(self.df_norm, **kws)
nt.assert_equal(len(cm.ax_row_colors.collections), 1)
nt.assert_equal(len(cm.ax_col_colors.collections), 1)
plt.close('all')
def test_cluster_false_row_col_colors(self):
kws = self.default_kws.copy()
kws['row_cluster'] = False
kws['col_cluster'] = False
kws['row_colors'] = self.row_colors
kws['col_colors'] = self.col_colors
cm = mat.clustermap(self.df_norm, **kws)
nt.assert_equal(len(cm.ax_row_dendrogram.lines), 0)
nt.assert_equal(len(cm.ax_col_dendrogram.lines), 0)
nt.assert_equal(len(cm.ax_row_dendrogram.get_xticks()), 0)
nt.assert_equal(len(cm.ax_row_dendrogram.get_yticks()), 0)
nt.assert_equal(len(cm.ax_col_dendrogram.get_xticks()), 0)
nt.assert_equal(len(cm.ax_col_dendrogram.get_yticks()), 0)
nt.assert_equal(len(cm.ax_row_colors.collections), 1)
nt.assert_equal(len(cm.ax_col_colors.collections), 1)
pdt.assert_frame_equal(cm.data2d, self.df_norm)
plt.close('all')
def test_mask_reorganization(self):
kws = self.default_kws.copy()
kws["mask"] = self.df_norm > 0
g = mat.clustermap(self.df_norm, **kws)
npt.assert_array_equal(g.data2d.index, g.mask.index)
npt.assert_array_equal(g.data2d.columns, g.mask.columns)
npt.assert_array_equal(g.mask.index,
self.df_norm.index[
g.dendrogram_row.reordered_ind])
npt.assert_array_equal(g.mask.columns,
self.df_norm.columns[
g.dendrogram_col.reordered_ind])
plt.close("all")
def test_ticklabel_reorganization(self):
kws = self.default_kws.copy()
xtl = np.arange(self.df_norm.shape[1])
kws["xticklabels"] = list(xtl)
ytl = self.letters.ix[:self.df_norm.shape[0]]
kws["yticklabels"] = ytl
g = mat.clustermap(self.df_norm, **kws)
xtl_actual = [t.get_text() for t in g.ax_heatmap.get_xticklabels()]
ytl_actual = [t.get_text() for t in g.ax_heatmap.get_yticklabels()]
xtl_want = xtl[g.dendrogram_col.reordered_ind].astype("<U1")
ytl_want = ytl[g.dendrogram_row.reordered_ind].astype("<U1")[::-1]
npt.assert_array_equal(xtl_actual, xtl_want)
npt.assert_array_equal(ytl_actual, ytl_want)
plt.close("all")
| bsd-3-clause |
ephes/scikit-learn | sklearn/utils/tests/test_utils.py | 215 | 8100 | import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import pinv2
from itertools import chain
from sklearn.utils.testing import (assert_equal, assert_raises, assert_true,
assert_almost_equal, assert_array_equal,
SkipTest, assert_raises_regex)
from sklearn.utils import check_random_state
from sklearn.utils import deprecated
from sklearn.utils import resample
from sklearn.utils import safe_mask
from sklearn.utils import column_or_1d
from sklearn.utils import safe_indexing
from sklearn.utils import shuffle
from sklearn.utils import gen_even_slices
from sklearn.utils.extmath import pinvh
from sklearn.utils.mocking import MockDataFrame
def test_make_rng():
# Check the check_random_state utility function behavior
assert_true(check_random_state(None) is np.random.mtrand._rand)
assert_true(check_random_state(np.random) is np.random.mtrand._rand)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(42).randint(100) == rng_42.randint(100))
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(rng_42) is rng_42)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(43).randint(100) != rng_42.randint(100))
assert_raises(ValueError, check_random_state, "some invalid seed")
def test_resample_noarg():
# Border case not worth mentioning in doctests
assert_true(resample() is None)
def test_deprecated():
# Test whether the deprecated decorator issues appropriate warnings
# Copied almost verbatim from http://docs.python.org/library/warnings.html
# First a function...
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated()
def ham():
return "spam"
spam = ham()
assert_equal(spam, "spam") # function must remain usable
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
# ... then a class.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated("don't use this")
class Ham(object):
SPAM = 1
ham = Ham()
assert_true(hasattr(ham, "SPAM"))
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
def test_resample_value_errors():
# Check that invalid arguments yield ValueError
assert_raises(ValueError, resample, [0], [0, 1])
assert_raises(ValueError, resample, [0, 1], [0, 1], n_samples=3)
assert_raises(ValueError, resample, [0, 1], [0, 1], meaning_of_life=42)
def test_safe_mask():
random_state = check_random_state(0)
X = random_state.rand(5, 4)
X_csr = sp.csr_matrix(X)
mask = [False, False, True, True, True]
mask = safe_mask(X, mask)
assert_equal(X[mask].shape[0], 3)
mask = safe_mask(X_csr, mask)
assert_equal(X_csr[mask].shape[0], 3)
def test_pinvh_simple_real():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=np.float64)
a = np.dot(a, a.T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_pinvh_nonpositive():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)
a = np.dot(a, a.T)
u, s, vt = np.linalg.svd(a)
s[0] *= -1
a = np.dot(u * s, vt) # a is now symmetric non-positive and singular
a_pinv = pinv2(a)
a_pinvh = pinvh(a)
assert_almost_equal(a_pinv, a_pinvh)
def test_pinvh_simple_complex():
a = (np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
+ 1j * np.array([[10, 8, 7], [6, 5, 4], [3, 2, 1]]))
a = np.dot(a, a.conj().T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_column_or_1d():
EXAMPLES = [
("binary", ["spam", "egg", "spam"]),
("binary", [0, 1, 0, 1]),
("continuous", np.arange(10) / 20.),
("multiclass", [1, 2, 3]),
("multiclass", [0, 1, 2, 2, 0]),
("multiclass", [[1], [2], [3]]),
("multilabel-indicator", [[0, 1, 0], [0, 0, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("multiclass-multioutput", [[1, 1], [2, 2], [3, 1]]),
("multiclass-multioutput", [[5, 1], [4, 2], [3, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("continuous-multioutput", np.arange(30).reshape((-1, 3))),
]
for y_type, y in EXAMPLES:
if y_type in ["binary", 'multiclass', "continuous"]:
assert_array_equal(column_or_1d(y), np.ravel(y))
else:
assert_raises(ValueError, column_or_1d, y)
def test_safe_indexing():
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
inds = np.array([1, 2])
X_inds = safe_indexing(X, inds)
X_arrays = safe_indexing(np.array(X), inds)
assert_array_equal(np.array(X_inds), X_arrays)
assert_array_equal(np.array(X_inds), np.array(X)[inds])
def test_safe_indexing_pandas():
try:
import pandas as pd
except ImportError:
raise SkipTest("Pandas not found")
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = pd.DataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
# fun with read-only data in dataframes
# this happens in joblib memmapping
X.setflags(write=False)
X_df_readonly = pd.DataFrame(X)
with warnings.catch_warnings(record=True):
X_df_ro_indexed = safe_indexing(X_df_readonly, inds)
assert_array_equal(np.array(X_df_ro_indexed), X_indexed)
def test_safe_indexing_mock_pandas():
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = MockDataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
def test_shuffle_on_ndim_equals_three():
def to_tuple(A): # to make the inner arrays hashable
return tuple(tuple(tuple(C) for C in B) for B in A)
A = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # A.shape = (2,2,2)
S = set(to_tuple(A))
shuffle(A) # shouldn't raise a ValueError for dim = 3
assert_equal(set(to_tuple(A)), S)
def test_shuffle_dont_convert_to_array():
    # Check that shuffle does not try to convert to numpy arrays with float
    # dtypes and lets any indexable datastructure pass through.
a = ['a', 'b', 'c']
b = np.array(['a', 'b', 'c'], dtype=object)
c = [1, 2, 3]
d = MockDataFrame(np.array([['a', 0],
['b', 1],
['c', 2]],
dtype=object))
e = sp.csc_matrix(np.arange(6).reshape(3, 2))
a_s, b_s, c_s, d_s, e_s = shuffle(a, b, c, d, e, random_state=0)
assert_equal(a_s, ['c', 'b', 'a'])
assert_equal(type(a_s), list)
assert_array_equal(b_s, ['c', 'b', 'a'])
assert_equal(b_s.dtype, object)
assert_equal(c_s, [3, 2, 1])
assert_equal(type(c_s), list)
assert_array_equal(d_s, np.array([['c', 2],
['b', 1],
['a', 0]],
dtype=object))
assert_equal(type(d_s), MockDataFrame)
assert_array_equal(e_s.toarray(), np.array([[4, 5],
[2, 3],
[0, 1]]))
def test_gen_even_slices():
# check that gen_even_slices contains all samples
some_range = range(10)
joined_range = list(chain(*[some_range[slice] for slice in gen_even_slices(10, 3)]))
assert_array_equal(some_range, joined_range)
# check that passing negative n_chunks raises an error
slices = gen_even_slices(10, -1)
assert_raises_regex(ValueError, "gen_even_slices got n_packs=-1, must be"
" >=1", next, slices)
| bsd-3-clause |
mjsauvinen/P4UL | pyRaster/processDomain.py | 1 | 3976 | #!/usr/bin/env python3
import sys
import argparse
import numpy as np
from mapTools import *
from utilities import filesFromList, writeLog
from plotTools import addImagePlot, addScatterPlot
import matplotlib.pyplot as plt
'''
Description:
Author: Mikko Auvinen
[email protected]
Finnish Meteorological Institute
'''
#==========================================================#
parser = argparse.ArgumentParser(prog='processDomain.py')
parser.add_argument("-f", "--filename",type=str, help="Name of the comp domain data file.")
parser.add_argument("-fo", "--fileout",type=str, help="Name of output Palm topography file.")
parser.add_argument("-i0","--iZero", help="Pixel ids [N,E] for the zero level.",\
type=int,nargs=2,default=[None,None])
parser.add_argument("-na", "--nansAbove", type=float, default=None,\
help="Replace values above given threshold by <nans> (i.e. fill values). Default=None")
parser.add_argument("-nb", "--nansBelow", type=float, default=None,\
help="Replace values below given threshold by <nans> (i.e. fill values). Default=None")
parser.add_argument("-mw","--mrgnW", help="Zero or non-zero margin widths as ratios (0-1): [L,R,B,T]",\
type=float,nargs=4,default=[None,None,None,None])
parser.add_argument("-mr","--mrgnR", help="Margin ramp widths as ratios (0-1): [L,R,B,T]",\
type=float,nargs=4,default=[None,None,None,None])
parser.add_argument("-mh","--mrgnH", help="Margins heights: [L,R,B,T]. Default=0",\
type=float,nargs=4,default=[0.,0.,0.,0.])
helpFlt = ''' Filter type and its associated number. Available filters:
median, percentile, rank, gaussian, local. Entering \"user, num\" allows the user
to specify <num> different filters consecutively.
Example entry: median 5'''
parser.add_argument("-ft","--filter",type=str,nargs=2,default=[None,None], help=helpFlt)
parser.add_argument("-rx","--rmax", type=float, default=None,\
help="Recover peaks (after filtering) above given value.")
parser.add_argument("-hx","--hmax", type=float, default=None,\
help="Maximum allowable height.")
parser.add_argument("-p", "--printOn", action="store_true", default=False,\
help="Print the resulting raster data.")
parser.add_argument("-pp", "--printOnly", help="Only print the resulting data. Don't save.",\
action="store_true", default=False)
args = parser.parse_args()
writeLog( parser, args, args.printOnly )
#==========================================================#
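# Illustrative invocation (a sketch only -- the file names and numeric values below are
# assumptions, not taken from this repository's documentation):
#   ./processDomain.py -f topo.npz -fo topo_palm.npz -i0 120 340 \
#       -mw 0.05 0.05 0.1 0.1 -mr 0.02 0.02 0.05 0.05 -mh 0. 0. 0. 0. \
#       -ft median 5 -hx 25.0 -p
# The flags follow the argparse definitions above; -ft takes the filter name and its number.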
filename= args.filename
fileout = args.fileout
na = args.nansAbove
nb = args.nansBelow
i0 = args.iZero # Rename
mw = args.mrgnW
mr = args.mrgnR
mh = args.mrgnH
flt = args.filter
hmax = args.hmax
rmax = args.rmax
printOn = args.printOn
printOnly = args.printOnly
if( flt[0] == None): fltStr = ' '
else: fltStr = flt[0]+'-filtered: '
# Read the raster tile to be processed.
Rdict = readNumpyZTile(filename)
R = Rdict['R']
Rdims = np.array(np.shape(R))
ROrig = Rdict['GlobOrig']
print(' Rdims = {} '.format(Rdims))
print(' ROrig = {} '.format(ROrig))
# Set the zero level according to the given pixel value.
if(i0.count(None) == 0):
print(' Zero Level: {} '.format(R[i0[0],i0[1]]))
R0 = R[i0[0],i0[1]]
R -= R0
R[R<0.] = 0.
R = applyMargins( R , mw, mr, mh )
# Apply desired filters.
Rf = np.zeros( np.shape(R) , float)
Rf = filterAndScale(Rf, R, flt )
# Apply nans where fill values are desired.
Rf = replaceByNans( Rf, na, nb)
if( rmax is not None ):
idv = (Rf > rmax)
Rf[idv] = np.maximum( Rf[idv], R[idv] )
if( hmax ):
Rf = np.minimum( hmax , Rf )
Rdict['R'] = Rf; Rdict['GlobOrig'] = ROrig
if( not args.printOnly ):
saveTileAsNumpyZ( fileout, Rdict )
if( args.printOn or args.printOnly ):
figDims = 13.*(Rdims[::-1].astype(float)/np.max(Rdims))
#print('Sum = {}'.format(np.sum(Rf)))
fig = plt.figure(num=1, figsize=figDims)
fig = addImagePlot( fig, Rf, fltStr+fileout )
plt.show()
R = Rf = None
| mit |
simvisage/aramis_cdt | aramis_cdt/aramis_cdt.py | 1 | 17920 | #-------------------------------------------------------------------------
#
# Copyright (c) 2013
# IMB, RWTH Aachen University,
# ISM, Brno University of Technology
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in the AramisCDT top directory "license.txt" and may be
# redistributed only under the conditions described in the aforementioned
# license.
#
# Thanks for using Simvisage open source!
#
#-------------------------------------------------------------------------
from traits.api import \
HasTraits, Float, Property, cached_property, Int, Array, Bool, \
Instance, DelegatesTo, Button, List, Event, on_trait_change
from traitsui.api import View, Item, Group, UItem
import numpy as np
import os
import platform
import time
if platform.system() == 'Linux':
sysclock = time.time
elif platform.system() == 'Windows':
sysclock = time.clock
from aramis_info import AramisInfo
from aramis_data import AramisFieldData
class AramisCDT(HasTraits):
    '''Crack Detection Tool for the detection of cracks and related quantities from Aramis data.
'''
aramis_info = Instance(AramisInfo, params_changed=True)
aramis_data = Instance(AramisFieldData, params_changed=True)
number_of_steps = DelegatesTo('aramis_info')
crack_detection_step = Int(params_changed=True)
'''Index of the step used to determine the crack pattern
'''
#=========================================================================
# Thresholds
#=========================================================================
d_ux_threshold = Float(0.0, params_changed=True)
'''The first derivative of displacement in x-direction threshold
'''
dd_ux_threshold = Float(0.0, params_changed=True)
'''The second derivative of displacement in x-direction threshold
'''
ddd_ux_threshold = Float(-1e-4, params_changed=True)
'''The third derivative of displacement in x-direction threshold
'''
d_ux_avg_threshold = Float(0.0, params_changed=True)
    '''Average of the first derivative of displacement in x-direction threshold
'''
dd_ux_avg_threshold = Float(0.0, params_changed=True)
    '''Average of the second derivative of displacement in x-direction threshold
'''
ddd_ux_avg_threshold = Float(-1e-4, params_changed=True)
'''Average of the third derivative of displacement in x-direction threshold
'''
#=========================================================================
# Crack detection
#=========================================================================
crack_filter = Property(
Array, depends_on='aramis_data.+params_changed, +params_changed')
@cached_property
def _get_crack_filter(self):
dd_ux = self.aramis_data.dd_ux
ddd_ux = self.aramis_data.ddd_ux
crack_filter = ((dd_ux[:, 1:] * dd_ux[:, :-1] < self.dd_ux_threshold) *
((ddd_ux[:, 1:] + ddd_ux[:, :-1]) / 2.0 < self.ddd_ux_threshold))
# print "number of cracks determined by 'crack_filter': ", np.sum(crack_filter, axis=1)
return crack_filter
number_of_subcracks = Property(
Int, depends_on='aramis_data.+params_changed, +params_changed')
'''Number of sub-cracks
'''
def _get_number_of_subcracks(self):
return np.sum(self.crack_filter)
crack_filter_avg = Property(
Array, depends_on='aramis_data.+params_changed, +params_changed')
@cached_property
def _get_crack_filter_avg(self):
dd_ux_avg = self.aramis_data.dd_ux_avg
ddd_ux_avg = self.aramis_data.ddd_ux_avg
crack_filter_avg = ((dd_ux_avg[1:] * dd_ux_avg[:-1] < self.dd_ux_avg_threshold) *
((ddd_ux_avg[1:] + ddd_ux_avg[:-1]) / 2.0 < self.ddd_ux_avg_threshold))
print 'crack detection step', self.crack_detection_step
print "number of cracks determined by 'crack_filter_avg': ", np.sum(crack_filter_avg)
return crack_filter_avg
number_of_cracks_avg = Property(
Int, depends_on='aramis_data.+params_changed, +params_changed')
'''Number of cracks using averaging
'''
def _get_number_of_cracks_avg(self):
return np.sum(self.crack_filter_avg)
crack_spacing_avg = Property(
Array, depends_on='aramis_data.+params_changed, +params_changed')
@cached_property
def _get_crack_spacing_avg(self):
n_cr_avg = np.sum(self.crack_filter_avg)
if n_cr_avg > 0:
s_cr_avg = self.aramis_data.lx_0 / n_cr_avg
# print "average crack spacing [mm]: %.1f" % (s_cr_avg)
return s_cr_avg
crack_arr = Property(
Array, depends_on='aramis_data.+params_changed, +params_changed')
@cached_property
def _get_crack_arr(self):
# TODO: why
#from aramis_data import get_d2
#print '#'*50, np.nanmax(get_d2(self.aramis_data.ux_arr, self.aramis_data.integ_radius))
#return (get_d2(self.aramis_data.ux_arr, self.aramis_data.integ_radius))[np.where(self.crack_filter)]
#return self.aramis_data.d_ux[np.where(self.crack_filter)]
crack_arr = self.aramis_data.delta_ux_arr[np.where(self.crack_filter)]
return crack_arr
crack_arr_mean = Property(
Float, depends_on='aramis_data.+params_changed, +params_changed')
def _get_crack_arr_mean(self):
return self.crack_arr.mean()
crack_arr_std = Property(
Float, depends_on='aramis_data.+params_changed, +params_changed')
def _get_crack_arr_std(self):
return self.crack_arr.std()
crack_avg_arr = Property(
Array, depends_on='aramis_data.+params_changed, +params_changed')
@cached_property
def _get_crack_avg_arr(self):
return self.aramis_data.d_ux_avg[np.where(self.crack_filter_avg)]
crack_field_arr = Property(
Array, depends_on='aramis_data.+params_changed, +params_changed')
@cached_property
def _get_crack_field_arr(self):
cf_w = np.zeros_like(self.aramis_data.d_ux)
cf_w[np.where(self.crack_filter)] = self.crack_arr
# look at neighboring entries of crack_filter indices for higher displacement jumps
# (=local maximum of 'delta_ux_arr' around indices detected by 'crack_filter')
#
# delta_ux_arr_max = self.aramis_data.delta_ux_arr[np.where(self.crack_filter)]
# y_idx, x_idx = np.where(self.crack_filter)
# for i in range(self.aramis_data.integ_radius_crack + 1):
# x_idx_ = np.minimum(x_idx + i, self.aramis_data.right_i - self.aramis_data.left_i - 1)
# delta_ux_arr_max_ = np.maximum(delta_ux_arr_max, self.aramis_data.delta_ux_arr[(y_idx, x_idx_)])
# for i in range(self.aramis_data.integ_radius_crack + 1):
# x_idx_ = np.maximum(x_idx - i, self.aramis_data.left_i)
# delta_ux_arr_max_ = np.maximum(delta_ux_arr_max, self.aramis_data.delta_ux_arr[(y_idx, x_idx_)])
# cf_w[np.where(self.crack_filter)] = delta_ux_arr_max_
return cf_w
#=========================================================================
# Crack detection in time
#=========================================================================
number_of_cracks_t = Array
'''Number of cracks in time
'''
number_of_missing_facets_t = List
'''Number of missing facets in time (missing, unidentified, not satisfying
conditions, destroyed by crack)
'''
control_strain_t = Array()
'''Control strain measured in the middle of y
'''
init_step_avg_lst = List()
'''List of steps (list length is equal to number of cracks at the
crack_detection_step) when the crack initiate
'''
init_step_lst = List()
crack_width_avg_t = Array
crack_stress_t = Array
number_of_cracks_t_analyse = Bool(True)
def __number_of_cracks_t(self):
self.number_of_cracks_t = np.append(self.number_of_cracks_t,
self.number_of_cracks_avg)
crack_detect_mask_avg = Property(Array, depends_on='crack_detection_step')
'''Mask of cracks identified in crack_detection_step and used for backward
identification of the crack initialization.
'''
@cached_property
def _get_crack_detect_mask_avg(self):
self.aramis_data.current_step = self.crack_detection_step
return self.crack_filter_avg
crack_detect_mask = Property(Array, depends_on='crack_detection_step')
'''Mask of cracks identified in crack_detection_step and used for backward
identification of the crack initialization.
'''
@cached_property
def _get_crack_detect_mask(self):
self.aramis_data.current_step = self.crack_detection_step
return self.crack_filter
run_t = Button('Run in time')
'''Run analysis of all steps in time
'''
def _run_t_fired(self):
import matplotlib.pyplot as plt
# x = self.aramis_data.step_times # self.aramis_cdt.control_strain_t
force = self.aramis_data.ad_channels_arr[:, 1]
x = []
for step_idx in self.aramis_info.step_list:
self.aramis_data.current_step = step_idx
x.append((self.aramis_data.ux_arr[:, -5] - self.aramis_data.ux_arr[:, 5]) /
(self.aramis_data.x_arr_0[:, -5] - self.aramis_data.x_arr_0[:, 5]))
x = np.array(x)
# plt.plot(x, force, color='grey')
x = []
for step_idx in self.aramis_info.step_list:
self.aramis_data.current_step = step_idx
x.append(np.mean((self.aramis_data.ux_arr[:, -5] - self.aramis_data.ux_arr[:, 5])) /
np.mean((self.aramis_data.x_arr_0[:, -5] - self.aramis_data.x_arr_0[:, 5])))
x = np.array(x)
# plt.plot(x, force, 'k-', linewidth=3)
self.control_strain_t = x
self.force = force
# x = []
# for step_idx in self.aramis_info.step_list:
# self.aramis_data.current_step = step_idx
# x.append((self.aramis_data.ux_arr[0, -5] - self.aramis_data.ux_arr[0, 5]) /
# (self.aramis_data.x_arr_0[0, -5] - self.aramis_data.x_arr_0[1, 5]))
# x = np.array(x)
# plt.plot(x, force, color='red')
#
# x = []
# for step_idx in self.aramis_info.step_list:
# self.aramis_data.current_step = step_idx
# x.append((self.aramis_data.ux_arr[-1, -5] - self.aramis_data.ux_arr[-1, 5]) /
# (self.aramis_data.x_arr_0[-1, -5] - self.aramis_data.x_arr_0[-1, 5]))
# x = np.array(x)
# plt.plot(x, force, color='red')
# plt.plot(x[self.init_step_avg_lst], force[
# self.init_step_avg_lst], 'go', ms=6)
# plt.show()
# data_to_save = np.vstack((self.aramis_data.step_times, x, force)).T
# np.savetxt('%s_LD.txt' % self.aramis_info.specimen_name, data_to_save, delimiter=';',
# header='time; strain; force')
'''
start_step_idx = self.aramis_data.current_step
self.number_of_cracks_t = np.array([])
self.number_of_missing_facets_t = []
self.control_strain_t = np.array([])
for step_idx in self.aramis_info.step_list:
self.aramis_data.current_step = step_idx
if self.number_of_cracks_t_analyse:
self.__number_of_cracks_t()
self.control_strain_t = np.append(self.control_strain_t,
(self.aramis_data.ux_arr[20, -10] - self.aramis_data.ux_arr[20, 10]) /
(self.aramis_data.x_arr_0[20, -10] - self.aramis_data.x_arr_0[20, 10]))
self.number_of_missing_facets_t.append(np.sum(np.isnan(self.aramis_data.data_array).astype(int)))
self.aramis_data.current_step = start_step_idx
'''
run_back = Button('Run back in time')
'''Run analysis of all steps in time
'''
def _run_back_fired(self):
step = self.crack_detection_step
crack_step_avg = np.zeros_like(self.crack_detect_mask_avg, dtype=int)
crack_step_avg[self.crack_detect_mask_avg] = step
m = self.crack_detect_mask_avg.copy()
step -= 1
while step:
if step == 0:
break
print step
self.aramis_data.current_step = step
d_ux_avg_mask = (self.aramis_data.d_ux_avg[1:] +
self.aramis_data.d_ux_avg[:-1]) * 0.5 > self.d_ux_avg_threshold
mask = d_ux_avg_mask * m
crack_step_avg[mask] = step
if np.sum(mask) == 0:
break
step -= 1
init_steps_avg = crack_step_avg[crack_step_avg > 0]
self.init_step_avg_lst = init_steps_avg.tolist()
print self.init_step_avg_lst
position_index = np.argwhere(self.crack_detect_mask_avg).flatten()
print 'position index', position_index
position = self.aramis_data.x_arr_0[0, :][self.crack_detect_mask_avg]
print 'position', position
time_of_init = self.aramis_data.step_times[self.init_step_avg_lst]
print 'time of initiation', time_of_init
force = self.aramis_data.ad_channels_arr[:, 1][self.init_step_avg_lst]
print 'force', force
# data_to_save = np.vstack(
# (position_index, position, time_of_init, force)).T
# np.savetxt('%s.txt' % self.aramis_info.specimen_name, data_to_save, delimiter=';',
# header='position_index; position; time_of_init; force')
'''
# todo: better
# x = self.aramis_data.x_arr_0[20, :]
# import matplotlib.pyplot as plt
# plt.rc('font', size=25)
step = self.crack_detection_step
crack_step_avg = np.zeros_like(self.crack_detect_mask_avg, dtype=int)
crack_step_avg[self.crack_detect_mask_avg] = step
crack_step = np.zeros_like(self.crack_detect_mask, dtype=int)
crack_step[self.crack_detect_mask] = step
step -= 1
self.crack_width_avg_t = np.zeros((np.sum(self.crack_detect_mask_avg),
self.number_of_steps))
self.crack_width_t = np.zeros((np.sum(self.crack_detect_mask),
self.number_of_steps))
self.crack_stress_t = np.zeros((np.sum(self.crack_detect_mask_avg),
self.number_of_steps))
# plt.figure()
m = self.crack_detect_mask_avg.copy()
# idx1 = np.argwhere(m == True) - 1
# idx1 = np.delete(idx1, np.argwhere(idx1 < 0))
# m[idx1] = True
# idx2 = np.argwhere(m == True) + 1
# idx2 = np.delete(idx1, np.argwhere(idx2 >= m.shape[0]))
# m[idx2] = True
while step:
print 'step', step
self.aramis_data.current_step = step
mask = self.crack_filter_avg # * m
crack_step_avg[mask] = step
mask = self.crack_filter * self.crack_detect_mask
crack_step[mask] = step
# if number of cracks = 0 break
# plt.plot(x, self.aramis_data.d_ux_avg, color='grey')
# if step == 178:
# # print '[asdfasfas[', self.aramis_data.d_ux_avg
# plt.plot(x, self.aramis_data.d_ux_avg, 'k-', linewidth=3, zorder=10000)
# if np.any(mask) == False:
# print mask
# break
if step == 0:
break
# self.crack_width_avg_t[:, step] = self.aramis_data.d_ux_avg[crack_step_avg > 0]
# self.crack_width_t[:, step] = self.aramis_data.d_ux[crack_step > 0]
# y = self.ad_channels_arr[:, :, 2] - self.ad_channels_arr[:, :, 1]
# self.crack_stress_t[:, step] = y[:, 0][step] * 1e5 / (140 * 60)
step -= 1
# y_max_lim = plt.gca().get_ylim()[-1]
# plt.vlines(x[:-1], [0], self.crack_detect_mask_avg * y_max_lim,
# color='magenta', linewidth=1, zorder=10)
# plt.xlim(0, 527)
# plt.ylim(0, 0.25)
# print 'initializationinit_steps time/step of the crack' , crack_step_avg[crack_step_avg > 0]
#
print 'finished'
# for i in idx1:
# if crack_step_avg[i] < crack_step_avg[i + 1]:
# crack_step_avg[i] = crack_step_avg[i + 1]
# crack_step_avg[i] = 0
# for i in idx2:
# if crack_step_avg[i] < crack_step_avg[i - 1]:
# crack_step_avg[i] = crack_step_avg[i - 1]
# crack_step_avg[i] = 0
init_steps_avg = crack_step_avg[crack_step_avg > 0]
init_steps = crack_step[crack_step > 0]
self.init_step_avg_lst = init_steps_avg.tolist()
self.init_step_lst = init_steps.tolist()
print self.init_step_avg_lst
# init_steps_avg.sort()
# x = np.hstack((0, np.repeat(init_steps_avg, 2), self.aramis_info.last_step))
# y = np.repeat(np.arange(init_steps_avg.size + 1), 2)
# print x, y
#
# plt.figure()
# plt.title('number of cracks vs. step')
# plt.plot(x, y)
#
# plt.figure()
# x = np.hstack((0, np.repeat(init_steps_avg, 2), self.aramis_info.last_step))
# x = self.control_strain_t[x]
# y = np.repeat(np.arange(init_steps_avg.size + 1), 2)
# plt.title('number of cracks vs. control strain')
# plt.plot(x, y)
# plt.show()
'''
view = View(
Item('crack_detection_step'),
Item('d_ux_threshold'),
Item('dd_ux_threshold'),
Item('ddd_ux_threshold'),
Item('d_ux_avg_threshold'),
Item('dd_ux_avg_threshold'),
Item('ddd_ux_avg_threshold'),
Group(
'number_of_cracks_t_analyse',
UItem('run_t'),
UItem('run_back'),
),
id='aramisCDT.cdt',
)
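# Hedged usage sketch (not part of the original module): AramisCDT is a HasTraits class, so
# it is typically instantiated with the trait names defined above as keyword arguments.  The
# construction of AramisInfo/AramisFieldData is backend specific and only indicated here.
#
#     info = AramisInfo(...)                     # arguments depend on the data layout (assumption)
#     field_data = AramisFieldData(...)          # likewise an assumption
#     cdt = AramisCDT(aramis_info=info, aramis_data=field_data, crack_detection_step=100)
#     print cdt.number_of_cracks_avg, cdt.crack_spacing_avg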
| bsd-3-clause |
dsquareindia/scikit-learn | sklearn/metrics/cluster/tests/test_bicluster.py | 394 | 1770 | """Testing for bicluster metrics module"""
import numpy as np
from sklearn.utils.testing import assert_equal, assert_almost_equal
from sklearn.metrics.cluster.bicluster import _jaccard
from sklearn.metrics import consensus_score
def test_jaccard():
a1 = np.array([True, True, False, False])
a2 = np.array([True, True, True, True])
a3 = np.array([False, True, True, False])
a4 = np.array([False, False, True, True])
assert_equal(_jaccard(a1, a1, a1, a1), 1)
assert_equal(_jaccard(a1, a1, a2, a2), 0.25)
assert_equal(_jaccard(a1, a1, a3, a3), 1.0 / 7)
assert_equal(_jaccard(a1, a1, a4, a4), 0)
def test_consensus_score():
a = [[True, True, False, False],
[False, False, True, True]]
b = a[::-1]
assert_equal(consensus_score((a, a), (a, a)), 1)
assert_equal(consensus_score((a, a), (b, b)), 1)
assert_equal(consensus_score((a, b), (a, b)), 1)
assert_equal(consensus_score((a, b), (b, a)), 1)
assert_equal(consensus_score((a, a), (b, a)), 0)
assert_equal(consensus_score((a, a), (a, b)), 0)
assert_equal(consensus_score((b, b), (a, b)), 0)
assert_equal(consensus_score((b, b), (b, a)), 0)
def test_consensus_score_issue2445():
''' Different number of biclusters in A and B'''
a_rows = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
a_cols = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
idx = [0, 2]
s = consensus_score((a_rows, a_cols), (a_rows[idx], a_cols[idx]))
# B contains 2 of the 3 biclusters in A, so score should be 2/3
assert_almost_equal(s, 2.0/3.0)
| bsd-3-clause |
chaluemwut/fbserver | venv/lib/python2.7/site-packages/sklearn/tests/test_learning_curve.py | 19 | 10790 | # Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
# training score becomes worse (2 -> 1), test error better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator, self).__init__(
n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n=30, n_folds=3, indices=False)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
| apache-2.0 |
faridborbar/02Tarea | codigo.py | 1 | 5656 | import numpy as np
import matplotlib.pyplot as plt
from scipy import optimize as opti
'''
PART 1
We define a choques function that reassigns a velocity v_n1 to each v_n, and analogously a position y_inter to each y_n.
The quantities it returns are evaluated at "cero", which corresponds to the intersection of the positions of the surface and the ball.
With this, starting from each velocity of the particle, we can obtain the velocity with which it rebounds after the impact, together with the
position at which the impact occurs.
def choques(y_n,v_n):
A=1
t_ini=0
g= -1
t_0=0
n=0.15
omega=1.66
    y_sup= lambda x: A*(np.sin(omega*x)) # position of the surface
    y_pel= lambda x: y_n + v_n*x + ((x**2)*g)*(1/2.) # position of the ball (equation of motion)
    v_sup= lambda x: omega*A*(np.cos(omega*x)) # corresponding velocities
v_pel= lambda x: v_n + g*x
inter= lambda x: y_pel(x) - y_sup(x)
    # Now we define the bracketing limits for the bisection
a= (-1*v_n) * g
b= ((-v_n - ((v_n)**2 - 2*(y_n+A)**(0.5)*g)))/g
    # find the intersection (impact time)
cero=opti.bisect(inter,a,b)
    v_n1= (n+1)*v_sup(cero) - n*v_pel(cero) # recompute the velocity from the given equation for v_p'(t)
y_inter=y_pel(cero)
return [y_inter,v_n1]
Iteraciones=input('ingrese numero de iteraciones')
# store the velocities of the ball, right after each impact, in a list
#v_0=input('ingrese velocidad inicial')
v_0=2
vel=[]
vel=np.append(vel,v_0)
# We store the "y" positions where the ball hits the surface in a list
y_0=0
y_inter=[]
y_inter=np.append(y_inter,y_0)
# store the attempt (bounce) indices in a list.
i=0
intento=[]
intento=np.append(intento,i)
# iterate to obtain arrays of the positions and velocities at which the ball hits the surface.
while i <=Iteraciones:
y_n = y_inter[i]
v_n = vel[i]
    choque = choques(y_n,v_n) # gives us the position and velocity of the ball right after impact, Yn+1 and Vn+1
v_choque = choque[1]
y_choque = choque[0]
y_inter = np.append(y_inter,y_choque)
vel = np.append(vel,v_choque)
i+=1
intento=np.append(intento,i)
plt.plot(intento,vel)
plt.ylabel('Velocidad luego del impacto: Vn+1')
plt.xlabel('Numero de intento')
#plt.axis([0,(Iteraciones + 5),0,3])
plt.draw()
plt.show()
'''
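# Illustrative sketch (not part of the original script): a single impact of the ball against
# the vibrating surface, found with the same bisection idea used in choques above.  The
# parameter values and the bisection bracket are assumptions chosen only for this example.
'''
import numpy as np
from scipy import optimize as opti
A, omega, g = 1.0, 1.66, -1.0
y_n, v_n = 0.0, 2.0                           # state right after the previous impact
y_sup = lambda t: A*np.sin(omega*t)           # surface position
y_pel = lambda t: y_n + v_n*t + 0.5*g*t**2    # ball position in free flight
gap = lambda t: y_pel(t) - y_sup(t)           # vanishes at the impact time
t_imp = opti.bisect(gap, 1e-6, 2.0*abs(v_n/g) + 2.0)
print 'impact time:', t_imp, ' impact height:', y_pel(t_imp)
'''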
'''
PART 2
For this part it suffices to repeat the iteration for each initial velocity, so we implement another (outer) iteration.
Iteraciones=input('ingrese el numero de iteraciones')
vel_iniciales= [2,3,4,5,6]
for k in vel_iniciales:
v_0=k
vel=[]
vel=np.append(vel,v_0)
    # We store the "y" positions where the ball hits the surface in a list
y_0=0
y_inter=[]
y_inter=np.append(y_inter,y_0)
    # store the attempt (bounce) indices in a list.
i=0
intento=[]
intento=np.append(intento,i)
    # iterate to obtain arrays of the positions and velocities at which the ball hits the ground.
while i <=Iteraciones:
y_n = y_inter[i]
v_n = vel[i]
choque = choques(y_n,v_n)
v_choque = choque[1]
y_choque = choque[0]
y_inter = np.append(y_inter,y_choque)
vel = np.append(vel,v_choque)
i+=1
intento=np.append(intento,i)
plt.plot(intento,vel)
plt.ylabel('Velocidad luego del impacto: Vn+1')
plt.xlabel('Numero de intento')
plt.axis([0,(Iteraciones + 5),0,3])
plt.draw()
plt.show()
I did not figure out how to plot everything in one figure, sorry. Here one would plot a single graph for all the initial
velocities, to see when they start to relax; in any case it can be seen in each separate graph.
'''
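# Possible fix for the note above (a sketch, not the original author's code): keep one figure,
# draw one labelled curve per initial velocity inside the loop, and call plt.show() only once
# after the loop.
'''
plt.figure()
for k in vel_iniciales:
    # ... run the bounce iteration for v_0 = k exactly as above, filling intento and vel ...
    plt.plot(intento, vel, label='v_0 = %s' % k)
plt.xlabel('Numero de intento')
plt.ylabel('Velocidad luego del impacto: Vn+1')
plt.legend()
plt.show()
'''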
'''
PART 4
Analogously to part 2, we iterate the function over several values of the frequency, but this time the choques function also
depends on the frequency.
'''
def choques(y_n,v_n,omega):
A=1
t_ini=0
g= -1
t_0=0
n=0.15
    y_sup= lambda x: A*(np.sin(omega*x)) # position of the surface
    y_pel= lambda x: y_n + v_n*x + ((x**2)*g)*(1/2.) # position of the ball (equation of motion)
v_sup= lambda x: omega*A*(np.cos(omega*x))
v_pel= lambda x: v_n + g*x
inter= lambda x: y_pel(x) - y_sup(x)
    # Now we define the bracketing limits for the bisection
    a= (-1*v_n) * g
b= ((-v_n - ((v_n)**2 - 2*(y_n+A)**(0.5)*g)))/g
    # find the intersection (impact time)
cero=opti.bisect(inter,a,b)
v_n1= (n+1)*v_sup(cero) - n*v_pel(cero)
y_inter=y_pel(cero)
return [y_inter,v_n1]
Iteraciones=input('ingrese el numero de iteraciones')
vel_iniciales= [2]
Q=input('ingrese el numero de frecuencias a considerar, se elijiran en un rango uniforme')
Salto=(1.79-1.66)/Q
Omegas= np.arange(1.66, 1.79, Salto)
for k in Omegas:
v_0=2
vel=[]
vel=np.append(vel,v_0)
    # We store the "y" positions where the ball hits the surface in a list
y_0=0
y_inter=[]
y_inter=np.append(y_inter,y_0)
    # store the attempt (bounce) indices in a list.
i=0
intento=[]
intento=np.append(intento,i)
    # iterate to obtain arrays of the positions and velocities at which the ball hits the ground.
while i <=Iteraciones:
y_n = y_inter[i]
v_n = vel[i]
choque = choques(y_n,v_n,k)
v_choque = choque[1]
y_choque = choque[0]
y_inter = np.append(y_inter,y_choque)
vel = np.append(vel,v_choque)
i+=1
intento=np.append(intento,i)
plt.plot(intento,vel)
plt.ylabel('Velocidad luego del impacto: Vn+1')
plt.xlabel('Numero de intento')
#plt.axis([0,(Iteraciones + 5),0,3])
plt.draw()
plt.show()
| mit |
LiaoPan/blaze | blaze/compute/core.py | 5 | 14061 | from __future__ import absolute_import, division, print_function
import numbers
from datetime import date, datetime
import toolz
from toolz import first, concat, memoize, unique, assoc
import itertools
from collections import Iterator
from ..compatibility import basestring
from ..expr import Expr, Field, Symbol, symbol, eval_str
from ..dispatch import dispatch
__all__ = ['compute', 'compute_up']
base = (numbers.Number, basestring, date, datetime)
@dispatch(Expr, object)
def pre_compute(leaf, data, scope=None, **kwargs):
""" Transform data prior to calling ``compute`` """
return data
@dispatch(Expr, object)
def post_compute(expr, result, scope=None):
""" Effects after the computation is complete """
return result
@dispatch(Expr, object)
def optimize(expr, data):
""" Optimize expression to be computed on data """
return expr
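# A minimal sketch (not part of this module) of how a backend hooks into these staging
# functions: register more specific signatures with ``dispatch`` and they take precedence
# over the generic (Expr, object) fallbacks above.  The pandas-based example is only an
# illustration of the pattern.
#
#     import pandas as pd
#
#     @dispatch(Field, pd.DataFrame)
#     def compute_up(expr, df, **kwargs):
#         return df[expr._name]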
@dispatch(object, object)
def compute_up(a, b, **kwargs):
raise NotImplementedError("Blaze does not know how to compute "
"expression of type `%s` on data of type `%s`"
% (type(a).__name__, type(b).__name__))
@dispatch(base)
def compute_up(a, **kwargs):
return a
@dispatch((list, tuple))
def compute_up(seq, scope=None, **kwargs):
return type(seq)(compute(item, scope or {}, **kwargs) for item in seq)
@dispatch(Expr, object)
def compute(expr, o, **kwargs):
""" Compute against single input
Assumes that only one Symbol exists in expression
>>> t = symbol('t', 'var * {name: string, balance: int}')
>>> deadbeats = t[t['balance'] < 0]['name']
>>> data = [['Alice', 100], ['Bob', -50], ['Charlie', -20]]
>>> # list(compute(deadbeats, {t: data}))
>>> list(compute(deadbeats, data))
['Bob', 'Charlie']
"""
ts = set([x for x in expr._subterms() if isinstance(x, Symbol)])
if len(ts) == 1:
return compute(expr, {first(ts): o}, **kwargs)
else:
raise ValueError("Give compute dictionary input, got %s" % str(o))
@dispatch(object)
def compute_down(expr, **kwargs):
""" Compute the expression on the entire inputs
inputs match up to leaves of the expression
"""
return expr
def issubtype(a, b):
""" A custom issubclass """
if issubclass(a, b):
return True
if issubclass(a, (tuple, list, set)) and issubclass(b, Iterator):
return True
if issubclass(b, (tuple, list, set)) and issubclass(a, Iterator):
return True
return False
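# Illustrative behaviour of the relaxed check above (not an official doctest):
#     issubtype(list, list)      -> True   (plain issubclass)
#     issubtype(list, Iterator)  -> True   (sequences may stand in for iterators)
#     issubtype(Iterator, list)  -> True   (and vice versa)
#     issubtype(int, list)       -> False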
def type_change(old, new):
""" Was there a significant type change between old and new data?
>>> type_change([1, 2], [3, 4])
False
>>> type_change([1, 2], [3, [1,2,3]])
True
Some special cases exist, like no type change from list to Iterator
>>> type_change([[1, 2]], [iter([1, 2])])
False
"""
if all(isinstance(x, base) for x in old + new):
return False
if len(old) != len(new):
return True
new_types = list(map(type, new))
old_types = list(map(type, old))
return not all(map(issubtype, new_types, old_types))
def top_then_bottom_then_top_again_etc(expr, scope, **kwargs):
""" Compute expression against scope
    Uses the following interpreter strategy:
1. Try compute_down on the entire expression
2. Otherwise compute_up from the leaves until we experience a type change
(e.g. data changes from dict -> pandas DataFrame)
3. Re-optimize expression and re-pre-compute data
4. Go to step 1
Examples
--------
>>> import numpy as np
>>> s = symbol('s', 'var * {name: string, amount: int}')
>>> data = np.array([('Alice', 100), ('Bob', 200), ('Charlie', 300)],
... dtype=[('name', 'S7'), ('amount', 'i4')])
>>> e = s.amount.sum() + 1
>>> top_then_bottom_then_top_again_etc(e, {s: data})
601
See Also
--------
bottom_up_until_type_break -- uses this for bottom-up traversal
top_to_bottom -- older version
bottom_up -- older version still
"""
# 0. Base case: expression is in dict, return associated data
if expr in scope:
return scope[expr]
if not hasattr(expr, '_leaves'):
return expr
leaf_exprs = list(expr._leaves())
leaf_data = [scope.get(leaf) for leaf in leaf_exprs]
# 1. See if we have a direct computation path with compute_down
try:
return compute_down(expr, *leaf_data, **kwargs)
except NotImplementedError:
pass
# 2. Compute from the bottom until there is a data type change
expr2, scope2 = bottom_up_until_type_break(expr, scope, **kwargs)
# 3. Re-optimize data and expressions
optimize_ = kwargs.get('optimize', optimize)
pre_compute_ = kwargs.get('pre_compute', pre_compute)
if pre_compute_:
scope3 = dict((e, pre_compute_(e, datum,
**assoc(kwargs, 'scope', scope2)))
for e, datum in scope2.items())
else:
scope3 = scope2
if optimize_:
try:
expr3 = optimize_(expr2, *[scope3[leaf] for leaf in expr2._leaves()])
_d = dict(zip(expr2._leaves(), expr3._leaves()))
scope4 = dict((e._subs(_d), d) for e, d in scope3.items())
except NotImplementedError:
expr3 = expr2
scope4 = scope3
else:
expr3 = expr2
scope4 = scope3
# 4. Repeat
if expr.isidentical(expr3):
raise NotImplementedError("Don't know how to compute:\n"
"expr: %s\n"
"data: %s" % (expr3, scope4))
else:
return top_then_bottom_then_top_again_etc(expr3, scope4, **kwargs)
def top_to_bottom(d, expr, **kwargs):
""" Processes an expression top-down then bottom-up """
# Base case: expression is in dict, return associated data
if expr in d:
return d[expr]
if not hasattr(expr, '_leaves'):
return expr
leaves = list(expr._leaves())
data = [d.get(leaf) for leaf in leaves]
# See if we have a direct computation path with compute_down
try:
return compute_down(expr, *data, **kwargs)
except NotImplementedError:
pass
optimize_ = kwargs.get('optimize', optimize)
pre_compute_ = kwargs.get('pre_compute', pre_compute)
# Otherwise...
# Compute children of this expression
if hasattr(expr, '_inputs'):
children = [top_to_bottom(d, child, **kwargs)
for child in expr._inputs]
else:
children = []
# Did we experience a data type change?
if type_change(data, children):
# If so call pre_compute again
if pre_compute_:
children = [pre_compute_(expr, child, **kwargs) for child in children]
# If so call optimize again
if optimize_:
try:
expr = optimize_(expr, *children)
except NotImplementedError:
pass
# Compute this expression given the children
return compute_up(expr, *children, scope=d, **kwargs)
_names = ('leaf_%d' % i for i in itertools.count(1))
_leaf_cache = dict()
_used_tokens = set()
def _reset_leaves():
_leaf_cache.clear()
_used_tokens.clear()
def makeleaf(expr):
""" Name of a new leaf replacement for this expression
>>> _reset_leaves()
>>> t = symbol('t', '{x: int, y: int, z: int}')
>>> makeleaf(t)
t
>>> makeleaf(t.x)
x
>>> makeleaf(t.x + 1)
x
>>> makeleaf(t.x + 1)
x
>>> makeleaf(t.x).isidentical(makeleaf(t.x + 1))
False
>>> from blaze import sin, cos
>>> x = symbol('x', 'real')
>>> makeleaf(cos(x)**2).isidentical(sin(x)**2)
False
>>> makeleaf(t) is t # makeleaf passes on Symbols
True
"""
name = expr._name or '_'
token = None
if expr in _leaf_cache:
return _leaf_cache[expr]
if isinstance(expr, Symbol): # Idempotent on symbols
return expr
if (name, token) in _used_tokens:
for token in itertools.count():
if (name, token) not in _used_tokens:
break
result = symbol(name, expr.dshape, token)
_used_tokens.add((name, token))
_leaf_cache[expr] = result
return result
def data_leaves(expr, scope):
return [scope[leaf] for leaf in expr._leaves()]
def bottom_up_until_type_break(expr, scope, **kwargs):
""" Traverse bottom up until data changes significantly
Parameters
----------
expr: Expression
Expression to compute
scope: dict
namespace matching leaves of expression to data
Returns
-------
expr: Expression
New expression with lower subtrees replaced with leaves
scope: dict
New scope with entries for those leaves
Examples
--------
>>> import numpy as np
>>> s = symbol('s', 'var * {name: string, amount: int}')
>>> data = np.array([('Alice', 100), ('Bob', 200), ('Charlie', 300)],
... dtype=[('name', 'S7'), ('amount', 'i8')])
This computation completes without changing type. We get back a leaf
symbol and a computational result
>>> e = (s.amount + 1).distinct()
>>> bottom_up_until_type_break(e, {s: data}) # doctest: +SKIP
(amount, {amount: array([101, 201, 301])})
This computation has a type change midstream (``list`` to ``int``), so we
stop and get the unfinished computation.
>>> e = s.amount.sum() + 1
>>> bottom_up_until_type_break(e, {s: data})
(amount_sum + 1, {amount_sum: 600})
"""
# 0. Base case. Return if expression is in scope
if expr in scope:
leaf = makeleaf(expr)
return leaf, {leaf: scope[expr]}
inputs = list(unique(expr._inputs))
# 1. Recurse down the tree, calling this function on children
# (this is the bottom part of bottom up)
exprs, new_scopes = zip(*[bottom_up_until_type_break(i, scope, **kwargs)
for i in inputs])
# 2. Form new (much shallower) expression and new (more computed) scope
new_scope = toolz.merge(new_scopes)
new_expr = expr._subs(dict((i, e) for i, e in zip(inputs, exprs)
if not i.isidentical(e)))
old_expr_leaves = expr._leaves()
old_data_leaves = [scope.get(leaf) for leaf in old_expr_leaves]
# 3. If the leaves have changed substantially then stop
key = lambda x: str(type(x))
if type_change(sorted(new_scope.values(), key=key),
sorted(old_data_leaves, key=key)):
return new_expr, new_scope
# 4. Otherwise try to do some actual work
try:
leaf = makeleaf(expr)
_data = [new_scope[i] for i in new_expr._inputs]
except KeyError:
return new_expr, new_scope
try:
return leaf, {leaf: compute_up(new_expr, *_data, scope=new_scope,
**kwargs)}
except NotImplementedError:
return new_expr, new_scope
def bottom_up(d, expr):
"""
Process an expression from the leaves upwards
Parameters
----------
d : dict mapping {Symbol: data}
Maps expressions to data elements, likely at the leaves of the tree
expr : Expr
Expression to compute
Helper function for ``compute``
"""
# Base case: expression is in dict, return associated data
if expr in d:
return d[expr]
# Compute children of this expression
children = ([bottom_up(d, child) for child in expr._inputs]
if hasattr(expr, '_inputs') else [])
# Compute this expression given the children
result = compute_up(expr, *children, scope=d)
return result
def swap_resources_into_scope(expr, scope):
""" Translate interactive expressions into normal abstract expressions
Interactive Blaze expressions link to data on their leaves. From the
expr/compute perspective, this is a hack. We push the resources onto the
scope and return simple unadorned expressions instead.
Examples
--------
>>> from blaze import Data
>>> t = Data([1, 2, 3], dshape='3 * int', name='t')
>>> swap_resources_into_scope(t.head(2), {})
(t.head(2), {t: [1, 2, 3]})
>>> expr, scope = _
>>> list(scope.keys())[0]._resources()
{}
"""
resources = expr._resources()
symbol_dict = dict((t, symbol(t._name, t.dshape)) for t in resources)
resources = dict((symbol_dict[k], v) for k, v in resources.items())
other_scope = dict((k, v) for k, v in scope.items()
if k not in symbol_dict)
new_scope = toolz.merge(resources, other_scope)
expr = expr._subs(symbol_dict)
return expr, new_scope
@dispatch(Expr, dict)
def compute(expr, d, **kwargs):
""" Compute expression against data sources
>>> t = symbol('t', 'var * {name: string, balance: int}')
>>> deadbeats = t[t['balance'] < 0]['name']
>>> data = [['Alice', 100], ['Bob', -50], ['Charlie', -20]]
>>> list(compute(deadbeats, {t: data}))
['Bob', 'Charlie']
"""
_reset_leaves()
optimize_ = kwargs.get('optimize', optimize)
pre_compute_ = kwargs.get('pre_compute', pre_compute)
post_compute_ = kwargs.get('post_compute', post_compute)
expr2, d2 = swap_resources_into_scope(expr, d)
if pre_compute_:
d3 = dict(
(e, pre_compute_(e, dat, **kwargs))
for e, dat in d2.items()
if e in expr2
)
else:
d3 = d2
if optimize_:
try:
expr3 = optimize_(expr2, *[v for e, v in d3.items() if e in expr2])
_d = dict(zip(expr2._leaves(), expr3._leaves()))
d4 = dict((e._subs(_d), d) for e, d in d3.items())
except NotImplementedError:
expr3 = expr2
d4 = d3
else:
expr3 = expr2
d4 = d3
result = top_then_bottom_then_top_again_etc(expr3, d4, **kwargs)
if post_compute_:
result = post_compute_(expr3, result, scope=d4)
return result
@dispatch(Field, dict)
def compute_up(expr, data, **kwargs):
return data[expr._name]
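# Illustrative sketch (an assumption, not part of the original module): with the
# full blaze package installed, compute() resolves a dict scope that maps each
# leaf symbol to a concrete data source and walks the expression using the
# interpreter strategy implemented above.
def _compute_usage_sketch():
    # Assumes the blaze package (with its python backend) is importable; this
    # module only provides the dispatch-driven interpreter itself.
    from blaze import symbol, compute
    accounts = symbol('accounts', 'var * {name: string, amount: int}')
    data = [['Alice', 100], ['Bob', -200], ['Charlie', 300]]
    # The scope dict maps each leaf symbol of the expression to its data.
    total = compute(accounts.amount.sum(), {accounts: data})
    names = list(compute(accounts[accounts.amount > 0]['name'], {accounts: data}))
    return total, names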
| bsd-3-clause |
mblondel/scikit-learn | sklearn/utils/graph.py | 50 | 6169 | """
Graph utilities and algorithms
Graphs are represented with their adjacency matrices, preferably using
sparse matrices.
"""
# Authors: Aric Hagberg <[email protected]>
# Gael Varoquaux <[email protected]>
# Jake Vanderplas <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from .graph_shortest_path import graph_shortest_path
###############################################################################
# Path and connected component analysis.
# Code adapted from networkx
def single_source_shortest_path_length(graph, source, cutoff=None):
"""Return the shortest path length from source to all reachable nodes.
Returns a dictionary of shortest path lengths keyed by target.
Parameters
----------
graph: sparse matrix or 2D array (preferably LIL matrix)
Adjacency matrix of the graph
source : node label
Starting node for path
cutoff : integer, optional
Depth to stop the search - only
paths of length <= cutoff are returned.
Examples
--------
>>> from sklearn.utils.graph import single_source_shortest_path_length
>>> import numpy as np
>>> graph = np.array([[ 0, 1, 0, 0],
... [ 1, 0, 1, 0],
... [ 0, 1, 0, 1],
... [ 0, 0, 1, 0]])
>>> single_source_shortest_path_length(graph, 0)
{0: 0, 1: 1, 2: 2, 3: 3}
>>> single_source_shortest_path_length(np.ones((6, 6)), 2)
{0: 1, 1: 1, 2: 0, 3: 1, 4: 1, 5: 1}
"""
if sparse.isspmatrix(graph):
graph = graph.tolil()
else:
graph = sparse.lil_matrix(graph)
seen = {} # level (number of hops) when seen in BFS
level = 0 # the current level
next_level = [source] # dict of nodes to check at next level
while next_level:
this_level = next_level # advance to next level
next_level = set() # and start a new list (fringe)
for v in this_level:
if v not in seen:
seen[v] = level # set the level of vertex v
next_level.update(graph.rows[v])
if cutoff is not None and cutoff <= level:
break
level += 1
return seen # return all path lengths as dictionary
if hasattr(sparse, 'connected_components'):
connected_components = sparse.connected_components
else:
from .sparsetools import connected_components
###############################################################################
# Graph laplacian
def graph_laplacian(csgraph, normed=False, return_diag=False):
""" Return the Laplacian matrix of a directed graph.
For non-symmetric graphs the out-degree is used in the computation.
Parameters
----------
csgraph : array_like or sparse matrix, 2 dimensions
compressed-sparse graph, with shape (N, N).
normed : bool, optional
If True, then compute normalized Laplacian.
return_diag : bool, optional
If True, then return diagonal as well as laplacian.
Returns
-------
lap : ndarray
The N x N laplacian matrix of graph.
diag : ndarray
The length-N diagonal of the laplacian matrix.
diag is returned only if return_diag is True.
Notes
-----
The Laplacian matrix of a graph is sometimes referred to as the
"Kirchoff matrix" or the "admittance matrix", and is useful in many
parts of spectral graph theory. In particular, the eigen-decomposition
of the laplacian matrix can give insight into many properties of the graph.
For non-symmetric directed graphs, the laplacian is computed using the
out-degree of each node.
"""
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
raise ValueError('csgraph must be a square matrix or array')
if normed and (np.issubdtype(csgraph.dtype, np.int)
or np.issubdtype(csgraph.dtype, np.uint)):
csgraph = csgraph.astype(np.float)
if sparse.isspmatrix(csgraph):
return _laplacian_sparse(csgraph, normed=normed,
return_diag=return_diag)
else:
return _laplacian_dense(csgraph, normed=normed,
return_diag=return_diag)
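# Illustrative sketch (not part of the original module): the Laplacian of a
# small undirected path graph, L = D - A, where D is the degree matrix and A
# the adjacency matrix.
def _graph_laplacian_example():
    adjacency = np.array([[0, 1, 0],
                          [1, 0, 1],
                          [0, 1, 0]], dtype=np.float64)
    lap = graph_laplacian(adjacency)
    # Rows sum to zero and the diagonal holds the node degrees:
    # [[ 1., -1., 0.], [-1., 2., -1.], [ 0., -1., 1.]]
    return lap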
def _laplacian_sparse(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
if not graph.format == 'coo':
lap = (-graph).tocoo()
else:
lap = -graph.copy()
diag_mask = (lap.row == lap.col)
if not diag_mask.sum() == n_nodes:
# The sparsity pattern of the matrix has holes on the diagonal,
# we need to fix that
diag_idx = lap.row[diag_mask]
diagonal_holes = list(set(range(n_nodes)).difference(diag_idx))
new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])
new_row = np.concatenate([lap.row, diagonal_holes])
new_col = np.concatenate([lap.col, diagonal_holes])
lap = sparse.coo_matrix((new_data, (new_row, new_col)),
shape=lap.shape)
diag_mask = (lap.row == lap.col)
lap.data[diag_mask] = 0
w = -np.asarray(lap.sum(axis=1)).squeeze()
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap.data /= w[lap.row]
lap.data /= w[lap.col]
lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(
lap.data.dtype)
else:
lap.data[diag_mask] = w[lap.row[diag_mask]]
if return_diag:
return lap, w
return lap
def _laplacian_dense(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
lap = -np.asarray(graph) # minus sign leads to a copy
# set diagonal to zero
lap.flat[::n_nodes + 1] = 0
w = -lap.sum(axis=0)
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap /= w
lap /= w[:, np.newaxis]
lap.flat[::n_nodes + 1] = (1 - w_zeros).astype(lap.dtype)
else:
lap.flat[::n_nodes + 1] = w.astype(lap.dtype)
if return_diag:
return lap, w
return lap
| bsd-3-clause |
unicef/rhizome | rhizome/tests/test_refresh_master.py | 1 | 14788 | import json
from django.test import TestCase
from django.contrib.auth.models import User
from pandas import read_csv, notnull, to_datetime
from rhizome.models.campaign_models import Campaign, CampaignType
from rhizome.models.location_models import Location
from rhizome.models.indicator_models import Indicator, IndicatorTag,\
CalculatedIndicatorComponent
from rhizome.models.document_models import Document, DocDetailType, \
DocumentDetail, SourceSubmission, SourceObjectMap
from rhizome.models.datapoint_models import DocDataPoint, DataPoint
# ./manage.py test rhizome.tests.test_refresh_master.RefreshMasterTestCase.test_refresh_master_init --settings=rhizome.settings.test
class RefreshMasterTestCase(TestCase):
def __init__(self, *args, **kwargs):
self.location_code_input_column = 'geocode'
self.campaign_code_input_column = 'campaign'
self.data_date_input_column = 'submission_date'
self.uq_code_input_column = 'uq_id'
super(RefreshMasterTestCase, self).__init__(*args, **kwargs)
def set_up(self):
'''
        Refresh master needs a few pieces of metadata to be able to do its job:
        Location, Campaign, User, and the other main models that you can see
initialized in the first migrations in the datapoints application.
The set up method also runs the CampaignDocTransform method which simulates
the upload of a csv or processing of an ODK submission. Ideally this
test will run independently of this module, but for now this is how
we initialize data in the system via the .csv below.
'''
self.test_file_location = 'ebola_data.csv'
self.location_list = Location.objects.all().values_list('name', flat=True)
self.create_metadata()
self.user = User.objects.get(username='test')
self.document = Document.objects.get(doc_title='test')
self.document.docfile = self.test_file_location
self.document.save()
self.document.transform_upload()
def test_refresh_master_init(self):
self.set_up()
self.document.refresh_master()
self.assertTrue(True) # FIXME ..
def test_submission_detail_refresh(self,):
self.set_up()
source_submissions_data = SourceSubmission.objects\
.filter(document_id=self.document.id)\
.values_list('id', flat=True)
self.document.refresh_submission_details()
submission_details = SourceSubmission.objects\
.filter(document_id=self.document.id)
self.assertEqual(len(source_submissions_data), len(submission_details))
def test_latest_data_gets_synced(self):
'''
I upload a spreadsheet on tuesday, but i realized that the data was wrong, so i upload another sheet with the same locations, dates
and indicators. The new spreasheet should override, and not aggregate any duplicative data.
'''
self.set_up()
test_ind_id = Indicator.objects.all()[0].id
test_loc_id = Location.objects.all()[0].id
test_campaign_id = Campaign.objects.all()[0].id
bad_val, good_val = 10, 20
data_date = '2015-12-31'
ss_old = SourceSubmission.objects\
.filter(document_id=self.document.id)[0]
doc_to_override = Document.objects.create(
doc_title='override',
created_by_id=self.user.id,
guid='override'
)
ss_new = SourceSubmission.objects.create(
document_id=doc_to_override.id,
instance_guid='override',
row_number=1,
data_date='2016-01-01',
location_code='OVERRIDE',
location_display='OVERRIDE',
submission_json='',
process_status=1
)
base_doc_dp_dict = {
'document_id': self.document.id,
'indicator_id': test_ind_id,
'location_id': test_loc_id,
'campaign_id': test_campaign_id,
'data_date': data_date,
'agg_on_location': True,
}
bad_doc_dp_dict = {
'value': bad_val,
'data_date': data_date,
'campaign_id': test_campaign_id,
'source_submission_id': ss_old.id,
}
bad_doc_dp_dict.update(base_doc_dp_dict)
good_doc_dp_dict = {
'value': good_val,
'data_date': data_date,
'campaign_id': test_campaign_id,
'source_submission_id': ss_new.id,
}
good_doc_dp_dict.update(base_doc_dp_dict)
DocDataPoint.objects.create(**good_doc_dp_dict)
DocDataPoint.objects.create(**bad_doc_dp_dict)
self.document.sync_datapoint()
dp_result = DataPoint.objects.filter(
location_id=test_loc_id,
indicator_id=test_ind_id,
data_date=data_date
)
self.assertEqual(1, len(dp_result))
self.assertEqual(good_val, dp_result[0].value)
def test_submission_to_datapoint(self):
'''
This simulates the following use case:
As a user journey we can describe this test case as:
- user uploads file ( see how set_up method calls CampaignDocTransform )
- user maps metadata
- user clicks " refresh master "
-> user checks to see if data is correct
- user realizes that the data is wrong, due to an invalid mapping
- user re-mapps the data and clicks " refresh master"
-> data from old mapping should be deleted and associated to
the newly mapped value
TEST CASES:
        1. When the submission detail is refreshed, the location/campaign ids
that we mapped should exist in that row.
2. DocDataPoint records are created if the necessary mapping exists
3. There are no zero or null values allowed in doc_datapoint
        4. The doc_datapoint from #3 is merged into datapoint.
5. I create mappings, sync data, realize the mapping was incorrect,
re-map the metadata and the old data should be deleted, the new
data created.
-> was the old data deleted?
-> was the new data created?
'''
self.set_up()
submission_qs = SourceSubmission.objects\
.filter(document_id=self.document.id)\
.values_list('id', 'submission_json')[0]
ss_id, first_submission = submission_qs[
0], json.loads(submission_qs[1])
location_code = first_submission[self.location_code_input_column]
campaign_code = first_submission[self.campaign_code_input_column]
first_submission[self.data_date_input_column]
raw_indicator_list = [k for k, v in first_submission.iteritems()]
indicator_code = raw_indicator_list[-1]
## SIMULATED USER MAPPING ##
# see: source-data/Nigeria/2015/06/mapping/2
## choose meta data values for the source_map update ##
map_location_id = Location.objects.all()[0].id
first_indicator_id = Indicator.objects.all()[0].id
first_campaign = Campaign.objects.all()[0].id
## map location ##
som_id_l = SourceObjectMap.objects.get(
content_type='location',
source_object_code=location_code,
)
som_id_l.master_object_id = map_location_id
som_id_l.save()
## map indicator ##
som_id_i = SourceObjectMap.objects.get(
content_type='indicator',
source_object_code=indicator_code,
)
som_id_i.master_object_id = first_indicator_id
som_id_i.save()
## map campaign ##
som_id_c = SourceObjectMap.objects.get(
content_type='campaign',
source_object_code=campaign_code,
)
som_id_c.master_object_id = first_campaign
som_id_c.save()
self.document.refresh_submission_details()
first_submission_detail = SourceSubmission.objects\
.get(id=ss_id)
## Test Case 2 ##
self.assertEqual(
first_submission_detail.get_location_id(), map_location_id)
        ## now that we have created the mapping, "refresh_master" ##
## should create the relevant datapoints ##
self.document.submissions_to_doc_datapoints()
doc_dp_ids = DocDataPoint.objects.filter(
document_id=self.document.id, indicator_id=first_indicator_id).values()
# Test Case #3
self.assertEqual(1, len(doc_dp_ids))
self.document.sync_datapoint()
dps = DataPoint.objects.all()
# Test Case #4
self.assertEqual(1, len(dps))
# Test Case #5
## update the mapping with a new indicator value ##
new_indicator_id = Indicator.objects.all()[1].id
som_id_i.master_object_id = new_indicator_id
som_id_i.save()
self.document.refresh_master()
dp_with_new_indicator = DataPoint.objects.filter(
indicator_id=new_indicator_id)
dp_with_old_indicator = DataPoint.objects.filter(
indicator_id=first_indicator_id)
## did new indicator flow through the system ?##
self.assertEqual(1, len(dp_with_new_indicator))
# did the old indicator data get deleted?
self.assertEqual(0, len(dp_with_old_indicator))
def create_metadata(self):
'''
        Create the Indicator, Location, Campaign, and other metadata needed for the
        system to aggregate / calculate.
'''
top_lvl_tag = IndicatorTag.objects.create(id=1, tag_name='Polio')
campaign_df = read_csv('rhizome/tests/_data/campaigns.csv')
location_df = read_csv('rhizome/tests/_data/locations.csv')
indicator_df = read_csv('rhizome/tests/_data/indicators.csv')
calc_indicator_df = read_csv\
('rhizome/tests/_data/calculated_indicator_component.csv')
user_id = User.objects.create_user('test', '[email protected]', 'test').id
document_id = Document.objects.create(
doc_title='test',
created_by_id=user_id,
guid='test').id
for ddt in ['uq_id_column', 'username_column', 'image_col',
'date_column', 'location_column', 'location_display_name']:
DocDetailType.objects.create(name=ddt)
for rt in ["country", "settlement", "province", "district", "sub-district"]:
DocDetailType.objects.create(name=rt)
campaign_type = CampaignType.objects.create(id=1, name="test")
self.model_df_to_data(location_df, Location)
campaign_df['start_date'] = to_datetime(campaign_df['start_date'])
campaign_df['end_date'] = to_datetime(campaign_df['end_date'])
self.model_df_to_data(campaign_df, Campaign)
self.model_df_to_data(indicator_df, Indicator)
calc_indicator_ids = self.model_df_to_data(calc_indicator_df,
CalculatedIndicatorComponent)
rg_conif = DocumentDetail.objects.create(
document_id=document_id,
doc_detail_type_id=DocDetailType
.objects.get(name='location_column').id,
doc_detail_value=self.location_code_input_column
)
cp_conif = DocumentDetail.objects.create(
document_id=document_id,
doc_detail_type_id=DocDetailType
.objects.get(name='date_column').id,
doc_detail_value=self.data_date_input_column
)
uq_id_config = DocumentDetail.objects.create(
document_id=document_id,
doc_detail_type_id=DocDetailType
.objects.get(name='uq_id_column').id,
doc_detail_value=self.uq_code_input_column
)
def test_campaign_data_ingest(self):
# ./manage.py test rhizome.tests.test_refresh_master.RefreshMasterTestCase.test_campaign_data_ingest --settings=rhizome.settings.test
self.set_up()
test_file_location = 'allAccessData.csv'
test_df = read_csv('rhizome/tests/_data/' + test_file_location)
document = Document.objects.create(doc_title='allAccessData')
document.docfile = test_file_location
document.save()
        ## create location_meta ##
distinct_location_codes = test_df['geocode'].unique()
for l in distinct_location_codes:
l_id = Location.objects.create(
name=l,
location_code=l,
location_type_id=1
).id
l_som = SourceObjectMap.objects.create(
master_object_id=l_id,
content_type='location',
source_object_code=str(l)
)
## create campaign meta ##
distinct_campaign_codes = test_df['campaign'].unique()
for i, (c) in enumerate(distinct_campaign_codes):
c_id = Campaign.objects.create(
name=c,
campaign_type_id=1,
start_date='2010-01-0' + str(i + 1),
end_date='2010-01-0' + str(i + 1)
).id
c_som = SourceObjectMap.objects.create(
master_object_id=c_id,
content_type='campaign',
source_object_code=str(c)
)
## create indicator_meta ##
access_indicator_id = Indicator.objects.create(
name='access', short_name='access'
).id
som_obj = SourceObjectMap.objects.create(
master_object_id=access_indicator_id,
content_type='indicator',
source_object_code='# Missed children due to inaccessibility (NEPI)'
)
document.transform_upload()
self.document.refresh_master()
ss_id_list = SourceSubmission.objects\
.filter(document_id=document.id)\
.values_list('id', flat=True)
doc_dp_id_list = DocDataPoint.objects\
.filter(source_submission_id__in=ss_id_list)\
.values_list('id', flat=True)
dp_id_list = DataPoint.objects\
.filter(source_submission_id__in=ss_id_list)\
.values_list('id', flat=True)
self.assertEqual(len(ss_id_list), len(test_df))
self.assertEqual(len(doc_dp_id_list), len(dp_id_list))
def model_df_to_data(self, model_df, model):
meta_ids = []
non_null_df = model_df.where((notnull(model_df)), None)
list_of_dicts = non_null_df.transpose().to_dict()
for row_ix, row_dict in list_of_dicts.iteritems():
row_id = model.objects.create(**row_dict)
meta_ids.append(row_id)
return meta_ids
| agpl-3.0 |
michaelhuang/QuantSoftwareToolkit | QSTK/qstkstudy/Events.py | 5 | 1878 | # (c) 2011, 2012 Georgia Tech Research Corporation
# This source code is released under the New BSD license. Please see
# http://wiki.quantsoftware.org/index.php?title=QSTK_License
# for license details.
#Created on October <day>, 2011
#
#@author: Vishal Shekhar
#@contact: [email protected]
#@summary: Example Event Datamatrix acceptable to EventProfiler App
#
import pandas
from QSTK.qstkutil import DataAccess as da
import numpy as np
import math
import QSTK.qstkutil.qsdateutil as du
import datetime as dt
import QSTK.qstkutil.DataAccess as da
"""
Accepts a list of symbols along with start and end date
Returns the Event Matrix which is a pandas Datamatrix
Event matrix has the following structure :
|IBM |GOOG|XOM |MSFT| GS | JP |
(d1)|nan |nan | 1 |nan |nan | 1 |
(d2)|nan | 1 |nan |nan |nan |nan |
(d3)| 1 |nan | 1 |nan | 1 |nan |
(d4)|nan | 1 |nan | 1 |nan |nan |
...................................
...................................
Also, d1 = start date
nan = no information about any event.
1 = status bit (positively confirms the event occurrence)
"""
def find_events(symbols, d_data, verbose=False):
# Get the data from the data store
storename = "Yahoo" # get data from our daily prices source
# Available field names: open, close, high, low, close, actual_close, volume
closefield = "close"
volumefield = "volume"
window = 10
if verbose:
print __name__ + " reading data"
close = d_data[closefield]
if verbose:
print __name__ + " finding events"
for symbol in symbols:
close[symbol][close[symbol]>= 1.0] = np.NAN
for i in range(1,len(close[symbol])):
if np.isnan(close[symbol][i-1]) and close[symbol][i] < 1.0 :#(i-1)th was > $1, and (i)th is <$1
close[symbol][i] = 1.0 #overwriting the price by the bit
close[symbol][close[symbol]< 1.0] = np.NAN
return close
| bsd-3-clause |
lazywei/scikit-learn | examples/model_selection/plot_roc.py | 146 | 3697 | """
=======================================
Receiver Operating Characteristic (ROC)
=======================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
ROC curves are typically used in binary classification to study the output of
a classifier. In order to extend ROC curve and ROC area to multi-class
or multi-label classification, it is necessary to binarize the output. One ROC
curve can be drawn per label, but one can also draw a ROC curve by considering
each element of the label indicator matrix as a binary prediction
(micro-averaging).
.. note::
See also :func:`sklearn.metrics.roc_auc_score`,
:ref:`example_model_selection_plot_roc_crossval.py`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
# Plot of a ROC curve for a specific class
plt.figure()
plt.plot(fpr[2], tpr[2], label='ROC curve (area = %0.2f)' % roc_auc[2])
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
# Plot ROC curve
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]))
for i in range(n_classes):
plt.plot(fpr[i], tpr[i], label='ROC curve of class {0} (area = {1:0.2f})'
''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
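# Illustrative extension (not part of the original example): a macro-average
# ROC curve gives equal weight to each class by interpolating every per-class
# curve onto a common grid of false positive rates and averaging the true
# positive rates.
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
    mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
mean_tpr /= n_classes
fpr["macro"], tpr["macro"] = all_fpr, mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
print('macro-average ROC AUC: {0:0.2f}'.format(roc_auc["macro"]))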
| bsd-3-clause |
vshtanko/scikit-learn | sklearn/svm/classes.py | 22 | 39977 | import warnings
import numpy as np
from .base import _fit_liblinear, BaseSVC, BaseLibSVM
from ..base import BaseEstimator, RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin, \
LinearModel
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_X_y
class LinearSVC(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Linear Support Vector Classification.
Similar to SVC with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input and the multiclass support
is handled according to a one-vs-the-rest scheme.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
loss : string, 'hinge' or 'squared_hinge' (default='squared_hinge')
Specifies the loss function. 'hinge' is the standard SVM loss
(used e.g. by the SVC class) while 'squared_hinge' is the
square of the hinge loss.
penalty : string, 'l1' or 'l2' (default='l2')
Specifies the norm used in the penalization. The 'l2'
penalty is the standard used in SVC. The 'l1' leads to `coef_`
vectors that are sparse.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
    multi_class : string, 'ovr' or 'crammer_singer' (default='ovr')
Determines the multi-class strategy if `y` contains more than
two classes.
`ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
optimizes a joint objective over all classes.
While `crammer_singer` is interesting from a theoretical perspective
as it is consistent, it is seldom used in practice as it rarely leads
to better accuracy and is more expensive to compute.
If `crammer_singer` is chosen, the options loss, penalty and dual will
be ignored.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2 \
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that \
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller ``tol`` parameter.
The underlying implementation (liblinear) uses a sparse internal
representation for the data that will incur a memory copy.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
**References:**
`LIBLINEAR: A Library for Large Linear Classification
<http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
See also
--------
SVC
Implementation of Support Vector Machine classifier using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
Furthermore SVC multi-class mode is implemented using one
vs one scheme while LinearSVC uses one vs the rest. It is
possible to implement one vs the rest with SVC by using the
:class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
Finally SVC can fit dense data without memory copy if the input
is C-contiguous. Sparse data will still incur memory copy though.
sklearn.linear_model.SGDClassifier
SGDClassifier can optimize the same cost function as LinearSVC
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, penalty='l2', loss='squared_hinge', dual=True, tol=1e-4,
C=1.0, multi_class='ovr', fit_intercept=True,
intercept_scaling=1, class_weight=None, verbose=0,
random_state=None, max_iter=1000):
self.dual = dual
self.tol = tol
self.C = C
self.multi_class = multi_class
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.penalty = penalty
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'hinge', 'l2': 'squared_hinge'}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
self.classes_ = np.unique(y)
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, self.multi_class,
self.loss)
if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
if self.fit_intercept:
intercept = self.intercept_[1] - self.intercept_[0]
self.intercept_ = np.array([intercept])
return self
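# Illustrative sketch (not part of the original module): a minimal LinearSVC
# usage example on a toy two-class problem.
def _linear_svc_usage_sketch():
    X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]], dtype=np.float64)
    y = np.array([0, 0, 1, 1])
    clf = LinearSVC(C=1.0, random_state=0)
    clf.fit(X, y)
    # coef_ has shape (1, n_features) for a binary problem.
    return clf.predict(np.array([[-0.8, -1.0]]))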
class LinearSVR(LinearModel, RegressorMixin):
"""Linear Support Vector Regression.
Similar to SVR with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term. The penalty is a squared
l2 penalty. The bigger this parameter, the less regularization is used.
loss : string, 'epsilon_insensitive' or 'squared_epsilon_insensitive' \
(default='epsilon_insensitive')
Specifies the loss function. 'l1' is the epsilon-insensitive loss
(standard SVR) while 'l2' is the squared epsilon-insensitive loss.
epsilon : float, optional (default=0.1)
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set epsilon=0.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2 \
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that \
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
LinearSVC
Implementation of Support Vector Machine classifier using the
same library as this class (liblinear).
SVR
Implementation of Support Vector Machine regression using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
sklearn.linear_model.SGDRegressor
SGDRegressor can optimize the same cost function as LinearSVR
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, epsilon=0.0, tol=1e-4, C=1.0,
loss='epsilon_insensitive', fit_intercept=True,
intercept_scaling=1., dual=True, verbose=0,
random_state=None, max_iter=1000):
self.tol = tol
self.C = C
self.epsilon = epsilon
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.dual = dual
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'epsilon_insensitive',
'l2': 'squared_epsilon_insensitive'
}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
penalty = 'l2' # SVR only accepts l2 penalty
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
None, penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, loss=self.loss,
epsilon=self.epsilon)
self.coef_ = self.coef_.ravel()
return self
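# Illustrative sketch (not part of the original module): fitting LinearSVR on
# a small synthetic regression problem.
def _linear_svr_usage_sketch():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    y = X[:, 0] - 2.0 * X[:, 1] + 0.1 * rng.randn(20)
    reg = LinearSVR(C=1.0, epsilon=0.0, random_state=0)
    reg.fit(X, y)
    # coef_ is a flat array with one weight per feature.
    return reg.coef_, reg.intercept_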
class SVC(BaseSVC):
"""C-Support Vector Classification.
The implementation is based on libsvm. The fit time complexity
is more than quadratic with the number of samples which makes it hard
to scale to dataset with more than a couple of 10000 samples.
The multiclass support is handled according to a one-vs-one scheme.
For details on the precise mathematical formulation of the provided
kernel functions and how `gamma`, `coef0` and `degree` affect each
other, see the corresponding section in the narrative documentation:
:ref:`svm_kernels`.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
        Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
        compatibility and raise a deprecation warning, but will change to 'ovr'
in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function. \
For multiclass, coefficient for all 1-vs-1 classifiers. \
The layout of the coefficients in the multiclass case is somewhat \
non-trivial. See the section about multi-class classification in the \
SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import SVC
>>> clf = SVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, probability=False, random_state=None, shrinking=True,
tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVR
Support Vector Machine for Regression implemented using libsvm.
LinearSVC
Scalable Linear Support Vector Machine for classification
implemented using liblinear. Check the See also section of
LinearSVC for more comparison element.
"""
def __init__(self, C=1.0, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None,
verbose=False, max_iter=-1, decision_function_shape=None,
random_state=None):
super(SVC, self).__init__(
impl='c_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class NuSVC(BaseSVC):
"""Nu-Support Vector Classification.
Similar to SVC but uses a parameter to control the number of support
vectors.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
nu : float, optional (default=0.5)
An upper bound on the fraction of training errors and a lower
bound of the fraction of support vectors. Should be in the
interval (0, 1].
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
        Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
        compatibility and raise a deprecation warning, but will change to 'ovr'
in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function. \
For multiclass, coefficient for all 1-vs-1 classifiers. \
The layout of the coefficients in the multiclass case is somewhat \
non-trivial. See the section about multi-class classification in \
the SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import NuSVC
>>> clf = NuSVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVC(cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, nu=0.5, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVC
Support Vector Machine for classification using libsvm.
LinearSVC
Scalable linear Support Vector Machine for classification using
liblinear.
"""
def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None, verbose=False,
max_iter=-1, decision_function_shape=None, random_state=None):
super(NuSVC, self).__init__(
impl='nu_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=0., nu=nu, shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class SVR(BaseLibSVM, RegressorMixin):
"""Epsilon-Support Vector Regression.
The free parameters in the model are C and epsilon.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
epsilon : float, optional (default=0.1)
Epsilon in the epsilon-SVR model. It specifies the epsilon-tube
within which no penalty is associated in the training loss function
with points predicted within a distance epsilon from the actual
value.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import SVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = SVR(C=1.0, epsilon=0.2)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma='auto',
kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVR
Support Vector Machine for regression implemented using libsvm
using a parameter to control the number of support vectors.
LinearSVR
Scalable Linear Support Vector Machine for regression
implemented using liblinear.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, C=1.0, epsilon=0.1, shrinking=True,
cache_size=200, verbose=False, max_iter=-1):
super(SVR, self).__init__(
'epsilon_svr', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., epsilon=epsilon, verbose=verbose,
shrinking=shrinking, probability=False, cache_size=cache_size,
class_weight=None, max_iter=max_iter, random_state=None)
class NuSVR(BaseLibSVM, RegressorMixin):
"""Nu Support Vector Regression.
Similar to NuSVC, for regression, uses a parameter nu to control
the number of support vectors. However, unlike NuSVC, where nu
replaces C, here nu replaces the parameter epsilon of epsilon-SVR.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
nu : float, optional
An upper bound on the fraction of training errors and a lower bound of
the fraction of support vectors. Should be in the interval (0, 1]. By
default 0.5 will be taken.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import NuSVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = NuSVR(C=1.0, nu=0.1)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma='auto',
kernel='rbf', max_iter=-1, nu=0.1, shrinking=True, tol=0.001,
verbose=False)
See also
--------
NuSVC
Support Vector Machine for classification implemented with libsvm
with a parameter to control the number of support vectors.
SVR
epsilon Support Vector Machine for regression implemented with libsvm.
"""
def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
gamma='auto', coef0=0.0, shrinking=True, tol=1e-3,
cache_size=200, verbose=False, max_iter=-1):
super(NuSVR, self).__init__(
'nu_svr', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
probability=False, cache_size=cache_size, class_weight=None,
verbose=verbose, max_iter=max_iter, random_state=None)
class OneClassSVM(BaseLibSVM):
"""Unsupervised Outlier Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_outlier_detection>`.
Parameters
----------
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
nu : float, optional
An upper bound on the fraction of training
errors and a lower bound of the fraction of support
vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
tol : float, optional
Tolerance for stopping criterion.
shrinking : boolean, optional
Whether to use the shrinking heuristic.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [n_classes-1, n_SV]
Coefficients of the support vectors in the decision function.
coef_ : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
intercept_ : array, shape = [n_classes-1]
Constants in decision function.
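    Examples
    --------
    A minimal usage sketch with made-up data (values are illustrative only):
    >>> import numpy as np
    >>> from sklearn.svm import OneClassSVM
    >>> X = np.array([[0.0], [0.4], [0.5], [0.6], [10.0]])
    >>> clf = OneClassSVM(nu=0.2, gamma='auto').fit(X)
    >>> clf.decision_function(X)  # doctest: +SKIP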
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, nu=0.5, shrinking=True, cache_size=200,
verbose=False, max_iter=-1, random_state=None):
super(OneClassSVM, self).__init__(
'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
shrinking, False, cache_size, None, verbose, max_iter,
random_state)
def fit(self, X, y=None, sample_weight=None, **params):
"""
Detects the soft boundary of the set of samples X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Set of samples, where n_samples is the number of samples and
n_features is the number of features.
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
-----
If X is not a C-ordered contiguous array it is copied.
"""
super(OneClassSVM, self).fit(X, [], sample_weight=sample_weight,
**params)
return self
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X : array-like, shape (n_samples,)
Returns the decision function of the samples.
"""
dec = self._decision_function(X)
return dec
| bsd-3-clause |
michigraber/scikit-learn | sklearn/linear_model/tests/test_omp.py | 272 | 7752 | # Author: Vlad Niculae
# Licence: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
LinearRegression)
from sklearn.utils import check_random_state
from sklearn.datasets import make_sparse_coded_signal
n_samples, n_features, n_nonzero_coefs, n_targets = 20, 30, 5, 3
y, X, gamma = make_sparse_coded_signal(n_targets, n_features, n_samples,
n_nonzero_coefs, random_state=0)
G, Xy = np.dot(X.T, X), np.dot(X.T, y)
# this makes X (n_samples, n_features)
# and y (n_samples, 3)
def test_correct_shapes():
assert_equal(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp(X, y, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_correct_shapes_gram():
assert_equal(orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_n_nonzero_coefs():
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0],
n_nonzero_coefs=5)) <= 5)
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5,
precompute=True)) <= 5)
def test_tol():
tol = 0.5
gamma = orthogonal_mp(X, y[:, 0], tol=tol)
gamma_gram = orthogonal_mp(X, y[:, 0], tol=tol, precompute=True)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol)
def test_with_without_gram():
assert_array_almost_equal(
orthogonal_mp(X, y, n_nonzero_coefs=5),
orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True))
def test_with_without_gram_tol():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=1.),
orthogonal_mp(X, y, tol=1., precompute=True))
def test_unreachable_accuracy():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=0),
orthogonal_mp(X, y, n_nonzero_coefs=n_features))
assert_array_almost_equal(
assert_warns(RuntimeWarning, orthogonal_mp, X, y, tol=0,
precompute=True),
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_features))
def test_bad_input():
assert_raises(ValueError, orthogonal_mp, X, y, tol=-1)
assert_raises(ValueError, orthogonal_mp, X, y, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp, X, y,
n_nonzero_coefs=n_features + 1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, tol=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy,
n_nonzero_coefs=n_features + 1)
def test_perfect_signal_recovery():
idx, = gamma[:, 0].nonzero()
gamma_rec = orthogonal_mp(X, y[:, 0], 5)
gamma_gram = orthogonal_mp_gram(G, Xy[:, 0], 5)
assert_array_equal(idx, np.flatnonzero(gamma_rec))
assert_array_equal(idx, np.flatnonzero(gamma_gram))
assert_array_almost_equal(gamma[:, 0], gamma_rec, decimal=2)
assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2)
def test_estimator():
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_.shape, ())
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_.shape, (n_targets,))
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
omp.set_params(fit_intercept=False, normalize=False)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
def test_identical_regressors():
newX = X.copy()
newX[:, 1] = newX[:, 0]
gamma = np.zeros(n_features)
gamma[0] = gamma[1] = 1.
newy = np.dot(newX, gamma)
assert_warns(RuntimeWarning, orthogonal_mp, newX, newy, 2)
def test_swapped_regressors():
gamma = np.zeros(n_features)
# X[:, 21] should be selected first, then X[:, 0] selected second,
# which will take X[:, 21]'s place in case the algorithm does
# column swapping for optimization (which is the case at the moment)
gamma[21] = 1.0
gamma[0] = 0.5
new_y = np.dot(X, gamma)
new_Xy = np.dot(X.T, new_y)
gamma_hat = orthogonal_mp(X, new_y, 2)
gamma_hat_gram = orthogonal_mp_gram(G, new_Xy, 2)
assert_array_equal(np.flatnonzero(gamma_hat), [0, 21])
assert_array_equal(np.flatnonzero(gamma_hat_gram), [0, 21])
def test_no_atoms():
y_empty = np.zeros_like(y)
Xy_empty = np.dot(X.T, y_empty)
gamma_empty = ignore_warnings(orthogonal_mp)(X, y_empty, 1)
gamma_empty_gram = ignore_warnings(orthogonal_mp)(G, Xy_empty, 1)
assert_equal(np.all(gamma_empty == 0), True)
assert_equal(np.all(gamma_empty_gram == 0), True)
def test_omp_path():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
path = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_return_path_prop_with_gram():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True,
precompute=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False,
precompute=True)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_cv():
y_ = y[:, 0]
gamma_ = gamma[:, 0]
ompcv = OrthogonalMatchingPursuitCV(normalize=True, fit_intercept=False,
max_iter=10, cv=5)
ompcv.fit(X, y_)
assert_equal(ompcv.n_nonzero_coefs_, n_nonzero_coefs)
assert_array_almost_equal(ompcv.coef_, gamma_)
omp = OrthogonalMatchingPursuit(normalize=True, fit_intercept=False,
n_nonzero_coefs=ompcv.n_nonzero_coefs_)
omp.fit(X, y_)
assert_array_almost_equal(ompcv.coef_, omp.coef_)
def test_omp_reaches_least_squares():
# Use small simple data; it's a sanity check but OMP can stop early
rng = check_random_state(0)
n_samples, n_features = (10, 8)
n_targets = 3
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_targets)
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_features)
lstsq = LinearRegression()
omp.fit(X, Y)
lstsq.fit(X, Y)
assert_array_almost_equal(omp.coef_, lstsq.coef_)
| bsd-3-clause |
olologin/scikit-learn | examples/neural_networks/plot_mnist_filters.py | 57 | 2195 | """
=====================================
Visualization of MLP weights on MNIST
=====================================
Sometimes looking at the learned coefficients of a neural network can provide
insight into the learning behavior. For example if weights look unstructured,
maybe some were not used at all, or if very large coefficients exist, maybe
regularization was too low or the learning rate too high.
This example shows how to plot some of the first layer weights in a
MLPClassifier trained on the MNIST dataset.
The input data consists of 28x28 pixel handwritten digits, leading to 784
features in the dataset. Therefore the first layer weight matrix has the shape
(784, hidden_layer_sizes[0]). We can therefore visualize a single column of
the weight matrix as a 28x28 pixel image.
To make the example run faster, we use very few hidden units, and train only
for a very short time. Training longer would result in weights with a much
smoother spatial appearance.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_mldata
from sklearn.neural_network import MLPClassifier
mnist = fetch_mldata("MNIST original")
# rescale the data, use the traditional train/test split
X, y = mnist.data / 255., mnist.target
X_train, X_test = X[:60000], X[60000:]
y_train, y_test = y[:60000], y[60000:]
# mlp = MLPClassifier(hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
# algorithm='sgd', verbose=10, tol=1e-4, random_state=1)
mlp = MLPClassifier(hidden_layer_sizes=(50,), max_iter=10, alpha=1e-4,
algorithm='sgd', verbose=10, tol=1e-4, random_state=1,
learning_rate_init=.1)
mlp.fit(X_train, y_train)
print("Training set score: %f" % mlp.score(X_train, y_train))
print("Test set score: %f" % mlp.score(X_test, y_test))
fig, axes = plt.subplots(4, 4)
# use global min / max to ensure all weights are shown on the same scale
vmin, vmax = mlp.coefs_[0].min(), mlp.coefs_[0].max()
for coef, ax in zip(mlp.coefs_[0].T, axes.ravel()):
ax.matshow(coef.reshape(28, 28), cmap=plt.cm.gray, vmin=.5 * vmin,
vmax=.5 * vmax)
ax.set_xticks(())
ax.set_yticks(())
plt.show()
| bsd-3-clause |
ifarup/ciefunctions | tc1_97/plot.py | 2 | 33194 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
plot: Generate matplotlib plots for the tc1_97 package.
Copyright (C) 2012-2020 Ivar Farup and Jan Henrik Wold
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
import matplotlib
import threading
from matplotlib.ticker import MaxNLocator
lock = threading.Lock()
def LMS(axes, plots, options):
"""
    Plot the CIE 2006 LMS cone fundamentals (6 sign. figs.) onto the given axes.
Parameters
----------
axes : Axes
Matplotlib axes on which to plot.
plots : dict
Data for plotting as returned by tc1_97.
options : dict
Plotting options (see code for use).
"""
lock.acquire()
axes.clear()
axes.grid(options['grid'])
axes.tick_params(labelsize=10)
if options['log10']:
axes.plot(plots['logLMS'][:, 0], plots['logLMS'][:, 1], 'r')
axes.plot(plots['logLMS'][:, 0], plots['logLMS'][:, 2], 'g')
axes.plot(plots['logLMS'][:, 0], plots['logLMS'][:, 3], 'b')
axes.axis('auto')
axes.set_xlim((350, 850))
axes.set_ylim((-7.4, 0.4))
axes.yaxis.set_major_locator(MaxNLocator(nbins = 7, min_n_ticks = 6))
else:
axes.plot(plots['LMS'][:, 0], plots['LMS'][:, 1], 'r')
axes.plot(plots['LMS'][:, 0], plots['LMS'][:, 2], 'g')
axes.plot(plots['LMS'][:, 0], plots['LMS'][:, 3], 'b')
axes.axis('auto')
axes.axis([350, 850, -.05, 1.05])
if options['axis_labels']:
axes.set_xlabel('Wavelength (nm)', fontsize=10.5)
if options['log10']:
            axes.set_ylabel('$\mathrm{Log}\,_{10}\,\mathrm{(relative\,\,energy\,\,sensitivity)}$',
fontsize=10.5)
else:
axes.set_ylabel('Relative energy sensitivities', fontsize=10.5)
if options['full_title']:
title = (u'CIE 2006 LMS cone fundamentals\u000a' +
'Field size: %s''' % plots['field_size'] +
u'\N{DEGREE SIGN}, Age: ' + str(plots['age']) +
u' yr, Domain: %s nm \u2013 %s nm' %
(plots['λ_min'], plots['λ_max']) +
', Step: %s nm' % plots['λ_step'])
if options['log10']:
title += ', Logarithmic values'
axes.set_title(title, fontsize=options['title_fontsize'])
else:
axes.set_title('CIE 2006 LMS cone fundamentals',
fontsize=options['title_fontsize'])
lock.release()
def LMS_base(axes, plots, options):
"""
Plot the CIE 2006 LMS cone fundamentals (9 sign. figs) onto the given axes.
Parameters
----------
axes : Axes
Matplotlib axes on which to plot.
plots : dict
Data for plotting as returned by tc1_97.
options : dict
Plotting options (see code for use).
"""
lock.acquire()
axes.clear()
axes.grid(options['grid'])
axes.tick_params(labelsize=10)
if options['log10']:
axes.plot(plots['logLMS_base'][:, 0], plots['logLMS_base'][:, 1], 'r')
axes.plot(plots['logLMS_base'][:, 0], plots['logLMS_base'][:, 2], 'g')
axes.plot(plots['logLMS_base'][:, 0], plots['logLMS_base'][:, 3], 'b')
axes.axis('auto')
axes.set_xlim((350, 850))
axes.set_ylim((-7.4, 0.4))
axes.yaxis.set_major_locator(MaxNLocator(nbins = 7, min_n_ticks = 6))
else:
axes.plot(plots['LMS_base'][:, 0], plots['LMS_base'][:, 1], 'r')
axes.plot(plots['LMS_base'][:, 0], plots['LMS_base'][:, 2], 'g')
axes.plot(plots['LMS_base'][:, 0], plots['LMS_base'][:, 3], 'b')
axes.axis('auto')
axes.axis([350, 850, -.05, 1.05])
if options['axis_labels']:
axes.set_xlabel('Wavelength (nm)', fontsize=10.5)
if options['log10']:
            axes.set_ylabel('$\mathrm{Log}\,_{10}\,\mathrm{(relative\,\,energy\,\,sensitivity)}$',
fontsize=10.5)
else:
axes.set_ylabel('Relative energy sensitivities', fontsize=10.5)
if options['full_title']:
title = ('CIE 2006 LMS cone fundamentals ' +
'(9 sign. figs. data)\nField size: %s''' %
plots['field_size'] + u'\N{DEGREE SIGN}, Age: ' +
str(plots['age']) +
u' yr, Domain: %s nm \u2013 %s nm' %
( plots['λ_min'], plots['λ_max']) +
', Step: %s nm' % plots['λ_step'])
if options['log10']:
title += ', Logarithmic values'
axes.set_title(title, fontsize=options['title_fontsize'])
else:
axes.set_title('CIE 2006 LMS cone fundamentals (9 sign. figs. data)',
fontsize=options['title_fontsize'])
lock.release()
def ls_mb(axes, plots, options):
"""
Plot the MacLeod-Boynton ls diagram onto the given axes
Parameters
----------
axes : Axes
Matplotlib axes on which to plot.
plots : dict
Data for plotting as returned by tc1_97.
options : dict
Plotting options (see code for use).
"""
lock.acquire()
axes.clear()
axes.grid(options['grid'])
axes.tick_params(labelsize=10)
axes.plot(plots['lms_mb'][:, 1], plots['lms_mb'][:, 3], 'k')
axes.plot(plots['lms_mb_tg_purple'][:, 1],
plots['lms_mb_tg_purple'][:, 2], 'k')
λ_values = np.concatenate(
([plots['lms_mb'][0, 0]], np.arange(410, 490, 10),
[500, 550, 575, 600, 700], [plots['lms_mb'][-1, 0]]))
for λ in λ_values: # add wavelength parameters
ind = np.nonzero(plots['lms_mb'][:, 0] == λ)[0]
axes.plot(plots['lms_mb'][ind, 1], plots['lms_mb'][ind, 3],
'o', markeredgecolor='k', markerfacecolor='w')
if λ > 490:
align = 'bottom'
elif λ == 830:
align = 'top'
else:
align = 'center'
if options['labels'] and np.shape(ind)[0] > 0:
axes.text(plots['lms_mb'][ind, 1], plots['lms_mb'][ind, 3],
' ' + '%.0f' %
λ, fontsize=options['label_fontsize'],
verticalalignment=align)
axes.plot(plots['lms_mb_white'][0], plots['lms_mb_white'][2], 'kx')
if options['labels']:
axes.text(plots['lms_mb_white'][0], plots['lms_mb_white'][2], ' E',
fontsize=options['label_fontsize'],
verticalalignment=align)
axes.axis('scaled')
axes.set_xlim((-.05, 1.05))
axes.set_ylim((-.05, 1.05))
if options['axis_labels']:
if (float(plots['λ_min']) == 390 and
float(plots['λ_max']) == 830 and
float(plots['λ_step']) == 1):
axes.set_xlabel('$l_\mathrm{\,MB,\,' +
str(plots['field_size']) + ',\,' +
str(plots['age']) + '}$',
fontsize=11)
axes.set_ylabel('$s_\mathrm{\,MB,\,' +
str(plots['field_size']) + ',\,' +
str(plots['age']) + '}$',
fontsize=11)
else:
axes.set_xlabel('$l_\mathrm{\,MB,\,' +
str(plots['field_size']) + ',\,' +
str(plots['age']) + '\,(%s-%s,\,%s)}$' %
(plots['λ_min'],
plots['λ_max'],
plots['λ_step']),
fontsize=11)
axes.set_ylabel('$s_\mathrm{\,MB,\,' +
str(plots['field_size']) + ',\,' +
str(plots['age']) + '\,(%s-%s,\,%s)}$' %
(plots['λ_min'],
plots['λ_max'],
plots['λ_step']),
fontsize=11)
if options['full_title']:
axes.set_title((u'MacLeod\u2013Boynton ls chromaticity ' +
u'diagram\nField size: %s''' % plots['field_size'] +
u'\N{DEGREE SIGN}, Age: ' + str(plots['age']) +
u' yr, Domain: %s nm \u2013 %s nm' %
(plots['λ_min'], plots['λ_max']) +
', Step: %s nm' % plots['λ_step']),
fontsize=options['title_fontsize'])
else:
axes.set_title(u'MacLeod\u2013Boynton ls chromaticity diagram',
fontsize=options['title_fontsize'])
lock.release()
def lm_mw(axes, plots, options):
"""
Plot the Maxwellian lm diagram onto the given axes.
Parameters
----------
axes : Axes
Matplotlib axes on which to plot.
plots : dict
Data for plotting as returned by tc1_97.
options : dict
Plotting options (see code for use).
"""
lock.acquire()
axes.clear()
axes.grid(options['grid'])
axes.tick_params(labelsize=10)
axes.plot(plots['lms_mw'][:, 1], plots['lms_mw'][:, 2], 'k')
axes.plot(plots['lms_mw_tg_purple'][:, 1],
plots['lms_mw_tg_purple'][:, 2], 'k')
λ_values = np.concatenate(
(np.arange(450, 631, 10), [700], [plots['lms_mw'][0, 0]],
[plots['lms_mw'][-1, 0]]))
for λ in λ_values: # add wavelength parameters
ind = np.nonzero(plots['lms_mw'][:, 0] == λ)[0]
axes.plot(plots['lms_mw'][ind, 1], plots['lms_mw'][ind, 2],
'o', markeredgecolor='k', markerfacecolor='w')
if λ == 390:
align = 'top'
else:
align = 'center'
if options['labels'] and np.shape(ind)[0] > 0:
axes.text(plots['lms_mw'][ind, 1], plots['lms_mw'][ind, 2],
' ' + '%.0f' % λ,
fontsize=options['label_fontsize'],
verticalalignment=align)
axes.plot(plots['lms_mw_white'][0], plots['lms_mw_white'][1], 'kx')
if options['labels']:
axes.text(plots['lms_mw_white'][0], plots['lms_mw_white'][1],
' E', fontsize=options['label_fontsize'],
verticalalignment=align)
axes.axis('scaled')
axes.set_xlim((-.05, 1.05))
axes.xaxis.set_major_locator( MaxNLocator(nbins = 6, min_n_ticks = 3) )
axes.set_ylim((-.05, .65))
axes.yaxis.set_major_locator( MaxNLocator(nbins = 4, min_n_ticks = 3) )
if options['axis_labels']:
if (float(plots['λ_min']) == 390 and
float(plots['λ_max']) == 830 and
float(plots['λ_step']) == 1):
axes.set_xlabel('$l_\mathrm{\,' +
str(plots['field_size']) + ',\,' +
str(plots['age']) + '}$',
fontsize=11)
axes.set_ylabel('$m_\mathrm{\,' +
str(plots['field_size']) + ',\,' +
str(plots['age']) + '}$',
fontsize=11)
else:
axes.set_xlabel('$l_\mathrm{\,' +
str(plots['field_size']) + ',\,' +
str(plots['age']) + '\,(%s-%s,\,%s)}$' %
(plots['λ_min'],
plots['λ_max'],
plots['λ_step']),
fontsize=11)
axes.set_ylabel('$m_\mathrm{\,' +
str(plots['field_size']) + ',\,' +
str(plots['age']) + '\,(%s-%s,\,%s)}$' %
(plots['λ_min'],
plots['λ_max'],
plots['λ_step']),
fontsize=11)
if options['full_title']:
axes.set_title(
('Maxwellian lm chromaticity diagram\nField size: %s' %
plots['field_size'] +
u'\N{DEGREE SIGN}, Age: ' + str(plots['age']) +
u' yr, Domain: %s nm \u2013 %s nm' % (plots['λ_min'],
plots['λ_max']) +
', Step: %s nm' % plots['λ_step'] + ', Renormalized values'),
fontsize=options['title_fontsize'])
else:
axes.set_title('Maxwellian lm chromaticity diagram',
fontsize=options['title_fontsize'])
lock.release()
def XYZ(axes, plots, options):
"""
Plot the CIE XYZ cone-fundamental-based tristimulus functions onto the
given axes.
Parameters
----------
axes : Axes
Matplotlib axes on which to plot.
plots : dict
Data for plotting as returned by tc1_97.
options : dict
Plotting options (see code for use).
"""
lock.acquire()
if options['norm']:
XYZ = plots['XYZ_N']
else:
XYZ = plots['XYZ']
axes.clear()
axes.grid(options['grid'])
axes.tick_params(labelsize=10)
axes.plot(XYZ[:, 0], XYZ[:, 1], 'r')
axes.plot(XYZ[:, 0], XYZ[:, 2], 'g')
axes.plot(XYZ[:, 0], XYZ[:, 3], 'b')
if options['cie31']:
axes.plot(plots['XYZ31'][:, 0], plots['XYZ31'][:, 1], 'r--')
axes.plot(plots['XYZ31'][:, 0], plots['XYZ31'][:, 2], 'g--')
axes.plot(plots['XYZ31'][:, 0], plots['XYZ31'][:, 3], 'b--')
if options['cie64']:
axes.plot(plots['XYZ64'][:, 0], plots['XYZ64'][:, 1], 'r:')
axes.plot(plots['XYZ64'][:, 0], plots['XYZ64'][:, 2], 'g:')
axes.plot(plots['XYZ64'][:, 0], plots['XYZ64'][:, 3], 'b:')
axes.axis('auto')
axes.axis([350, 850, -.2, 2.3])
if options['axis_labels']:
axes.set_xlabel('Wavelength (nm)', fontsize=10.5)
axes.set_ylabel('Cone-fundamental-based tristimulus values',
fontsize=10.5)
if options['full_title']:
title = 'CIE XYZ cone-fundamental-based tristimulus functions\n' + \
'Field size: %s''' % plots['field_size'] + \
u'\N{DEGREE SIGN}, Age: ' + str(plots['age']) + \
u' yr, Domain: %s nm \u2013 %s nm' % \
(plots['λ_min'], plots['λ_max']) + \
', Step: %s nm' % plots['λ_step']
if options['norm']:
title += ', Renormalized values'
axes.set_title(title, fontsize=options['title_fontsize'])
else:
axes.set_title('CIE XYZ cone-fundamental-based ' +
'tristimulus functions',
fontsize=options['title_fontsize'])
lock.release()
def xy(axes, plots, options):
"""
Plot the CIE xy cone-fundamental-based chromaticity diagram onto the given
axes.
Parameters
----------
axes : Axes
Matplotlib axes on which to plot.
plots : dict
Data for plotting as returned by tc1_97.
options : dict
Plotting options (see code for use).
"""
lock.acquire()
if options['norm']:
xyz = plots['xyz_N']
xyz_white = plots['xyz_white_N']
xyz_tg_purple = plots['xyz_tg_purple_N']
else:
xyz = plots['xyz']
xyz_white = plots['xyz_white']
xyz_tg_purple = plots['xyz_tg_purple']
axes.clear()
axes.grid(options['grid'])
axes.tick_params(labelsize=10)
λ_values = np.concatenate(
([xyz[0, 0]], np.arange(470, 611, 10), [700], [xyz[-1, 0]]))
if options['cie31']:
axes.plot(plots['xyz31'][:, 1], plots['xyz31'][:, 2], 'k--')
axes.plot(plots['xyz31_tg_purple'][:, 1],
plots['xyz31_tg_purple'][:, 2], 'k--')
for l in λ_values: # add wavelength parameters
ind = np.nonzero(plots['xyz31'][:, 0] == l)[0]
axes.plot(plots['xyz31'][ind, 1], plots['xyz31'][ind, 2], 'ko')
if options['cie64']:
axes.plot(plots['xyz64'][:, 1], plots['xyz64'][:, 2], 'k:')
axes.plot(plots['xyz64_tg_purple'][:, 1],
plots['xyz64_tg_purple'][:, 2], 'k:')
for l in λ_values: # add wavelength parameters
ind = np.nonzero(plots['xyz64'][:, 0] == l)[0]
axes.plot(plots['xyz64'][ind, 1], plots['xyz64'][ind, 2], 'ks')
axes.plot(xyz[:, 1], xyz[:, 2], 'k')
axes.plot(xyz_tg_purple[:, 1], xyz_tg_purple[:, 2], 'k')
for l in λ_values: # add wavelength parameters
ind = np.nonzero(xyz[:, 0] == l)[0]
axes.plot(xyz[ind, 1], xyz[ind, 2],
'o', markeredgecolor='k', markerfacecolor='w')
if l == 700 or l == 390:
align = 'top'
elif l == 830:
align = 'bottom'
else:
align = 'center'
if options['labels']:
if np.shape(ind)[0] > 0:
axes.text(xyz[ind, 1], xyz[ind, 2], ' ' + '%.0f' %
l, fontsize=options['label_fontsize'],
verticalalignment=align)
axes.plot(xyz_white[0], xyz_white[1], 'kx')
if options['labels']:
axes.text(xyz_white[0], xyz_white[1], ' E',
fontsize=options['label_fontsize'], verticalalignment=align)
axes.axis('scaled')
axes.set_xlim((-.05, 1.05))
axes.set_ylim((-.05, 1.05))
if options['axis_labels']:
if (float(plots['λ_min']) == 390 and
float(plots['λ_max']) == 830 and
float(plots['λ_step']) == 1):
axes.set_xlabel('$x_\mathrm{\,F,\,' +
str(plots['field_size']) + ',\,' +
str(plots['age']) + '}$',
fontsize=11)
axes.set_ylabel('$y_\mathrm{\,F,\,' +
str(plots['field_size']) + ',\,' +
str(plots['age']) + '}$',
fontsize=11)
else:
axes.set_xlabel('$x_\mathrm{\,F,\,' +
str(plots['field_size']) + ',\,' +
str(plots['age']) +
'\,(%s-%s,\,%s)}$' % (plots['λ_min'],
plots['λ_max'],
plots['λ_step']),
fontsize=11)
axes.set_ylabel('$y_\mathrm{\,F,\,' +
str(plots['field_size']) + ',\,' +
str(plots['age']) +
'\,(%s-%s,\,%s)}$' % (plots['λ_min'],
plots['λ_max'],
plots['λ_step']),
fontsize=11)
if options['full_title']:
title = 'CIE xy cone-fundamental-based chromaticity diagram\n' +\
'Field size: %s' % plots['field_size'] + \
u'\N{DEGREE SIGN}, Age: ' + str(plots['age']) + \
u' yr, Domain: %s nm \u2013 %s nm' % (plots['λ_min'],
plots['λ_max']) + \
', Step: %s nm' % plots['λ_step']
if options['norm']:
title += ', Renormalized values'
axes.set_title(title, fontsize=options['title_fontsize'])
else:
axes.set_title('CIE xy cone-fundamental-based chromaticity diagram',
fontsize=options['title_fontsize'])
lock.release()
def XYZ_purples(axes, plots, options):
"""
Plot the XYZ cone-fundamental-based tristimulus functions for purple-
line stimuli, as parameterized by complementary wavelength, onto the
given axes.
Parameters
----------
axes : Axes
Matplotlib axes on which to plot.
plots : dict
Data for plotting as returned by tc1_97.
options : dict
Plotting options (see code for use).
"""
lock.acquire()
if options['norm']:
XYZ_p = plots['XYZ_purples_N']
else:
XYZ_p = plots['XYZ_purples']
axes.clear()
axes.grid(options['grid'])
axes.tick_params(labelsize=10)
axes.plot(XYZ_p[:, 0], XYZ_p[:, 1], 'r')
axes.plot(XYZ_p[:, 0], XYZ_p[:, 2], 'g')
axes.plot(XYZ_p[:, 0], XYZ_p[:, 3], 'b')
axes.axis('auto')
axes.axis([480, 580, -.04, .54])
if options['axis_labels']:
axes.set_xlabel('Complementary wavelength (nm)', fontsize=10.5)
axes.set_ylabel('Cone-fundamental-based tristimulus values',
fontsize=10.5)
if options['full_title']:
title = 'XYZ cone-fundamental-based tristimulus functions for ' + \
'purple-line stimuli\n' + \
'Field size: %s''' % plots['field_size'] + \
u'\N{DEGREE SIGN}, Age: ' + str(plots['age']) + \
u' yr, Domain: %s nm \u2013 %s nm' % \
(plots['λ_min'], plots['λ_max']) + \
', Step: %s nm' % plots['λ_step']
if options['norm']:
title += ', Renormalized values'
axes.set_title(title, fontsize=options['title_fontsize'])
else:
axes.set_title('CIE XYZ cone-fundamental-based ' +
'tristimulus functions',
fontsize=options['title_fontsize'])
lock.release()
def xy_purples(axes, plots, options):
"""
Plot the CIE xy chromaticity diagram, with marking of purple-line stimuli,
onto the given axes.
Parameters
----------
axes : Axes
Matplotlib axes on which to plot.
plots : dict
Data for plotting as returned by tc1_97.
options : dict
Plotting options (see code for use).
"""
lock.acquire()
if options['norm']:
xyz = plots['xyz_N']
xyz_purples = plots['xyz_purples_N']
xyz_white = plots['xyz_white_N']
xyz_tg_purple = plots['xyz_tg_purple_N']
else:
xyz = plots['xyz']
xyz_purples = plots['xyz_purples']
xyz_white = plots['xyz_white']
xyz_tg_purple = plots['xyz_tg_purple']
axes.clear()
axes.grid(options['grid'])
axes.tick_params(labelsize=10)
axes.plot(xyz_purples[:, 1], xyz_purples[:, 2], 'k')
λ_values = np.arange(400, 700, 10)
axes.plot(xyz[:, 1], xyz[:, 2], 'k')
axes.plot(xyz_tg_purple[:, 1], xyz_tg_purple[:, 2], 'k')
axes.plot(xyz_white[0], xyz_white[1], 'kx')
axes.plot(xyz_tg_purple[0, 1], xyz_tg_purple[0, 2],
'o', markeredgecolor='k', markerfacecolor='w')
axes.plot(xyz_tg_purple[1, 1], xyz_tg_purple[1, 2],
'o', markeredgecolor='k', markerfacecolor='w')
if options['labels']:
axes.text(xyz_tg_purple[0, 1], xyz_tg_purple[0, 2], ' ' + '%.1f' %
xyz_tg_purple[0, 0], fontsize=options['label_fontsize'],
verticalalignment='center')
axes.text(xyz_tg_purple[1, 1], xyz_tg_purple[1, 2], ' ' + '%.1f' %
xyz_tg_purple[1, 0], fontsize=options['label_fontsize'],
verticalalignment='center')
for l in λ_values: # add complementary-wavelength parameters
ind = np.nonzero(xyz_purples[:, 0] == l)[0]
axes.plot(xyz_purples[ind, 1], xyz_purples[ind, 2],
'o', markeredgecolor='k', markerfacecolor='w')
if options['labels']:
if np.shape(ind)[0] > 0:
axes.text(xyz_purples[ind, 1], xyz_purples[ind, 2],
' ' + '%.0fc' %
l, fontsize=options['label_fontsize'],
verticalalignment='center')
if options['labels']:
axes.text(xyz_white[0], xyz_white[1], ' E',
fontsize=options['label_fontsize'],
verticalalignment='center')
axes.axis('scaled')
axes.set_xlim((-.05, 1.05))
axes.set_ylim((-.05, 1.05))
if options['axis_labels']:
if (float(plots['λ_min']) == 390 and
float(plots['λ_max']) == 830 and
float(plots['λ_step']) == 1):
axes.set_xlabel('$x_\mathrm{\,F,\,' +
str(plots['field_size']) + ',\,' +
str(plots['age']) + '}$',
fontsize=11)
axes.set_ylabel('$y_\mathrm{\,F,\,' +
str(plots['field_size']) + ',\,' +
str(plots['age']) + '}$',
fontsize=11)
else:
axes.set_xlabel('$x_\mathrm{\,F,\,' +
str(plots['field_size']) + ',\,' +
str(plots['age']) +
'\,(%s-%s,\,%s)}$' % (plots['λ_min'],
plots['λ_max'],
plots['λ_step']),
fontsize=11)
axes.set_ylabel('$y_\mathrm{\,F,\,' +
str(plots['field_size']) + ',\,' +
str(plots['age']) +
'\,(%s-%s,\,%s)}$' % (plots['λ_min'],
plots['λ_max'],
plots['λ_step']),
fontsize=11)
if options['full_title']:
title = 'xy cone-fundamental-based chromaticity diagram (purple-line stimuli)\n' + \
'Field size: %s' % plots['field_size'] + \
u'\N{DEGREE SIGN}, Age: ' + str(plots['age']) + \
u' yr, Domain: %s nm \u2013 %s nm' % (plots['λ_min'], \
plots['λ_max']) + \
', Step: %s nm' % plots['λ_step']
if options['norm']:
title += ', Renormalized values'
axes.set_title(title, fontsize=options['title_fontsize'])
else:
axes.set_title('CIE xy cone-fundamental-based chromaticity diagram',
fontsize=options['title_fontsize'])
lock.release()
def XYZ31(axes, plots, options):
"""
Plot the CIE 1931 XYZ CMFs onto the given axes.
Parameters
----------
axes : Axes
Matplotlib axes on which to plot.
plots : dict
Data for plotting as returned by tc1_97.
options : dict
Plotting options (see code for use).
"""
lock.acquire()
axes.clear()
axes.grid(options['grid'])
axes.tick_params(labelsize=10)
axes.plot(plots['XYZ31'][:, 0], plots['XYZ31'][:, 1], 'r')
axes.plot(plots['XYZ31'][:, 0], plots['XYZ31'][:, 2], 'g')
axes.plot(plots['XYZ31'][:, 0], plots['XYZ31'][:, 3], 'b')
if options['cie64']:
axes.plot(plots['XYZ64'][:, 0], plots['XYZ64'][:, 1], 'r:')
axes.plot(plots['XYZ64'][:, 0], plots['XYZ64'][:, 2], 'g:')
axes.plot(plots['XYZ64'][:, 0], plots['XYZ64'][:, 3], 'b:')
axes.axis('auto')
axes.axis([350, 850, -.2, 2.3])
if options['axis_labels']:
axes.set_xlabel('Wavelength (nm)', fontsize=10.5)
axes.set_ylabel('Tristimulus values', fontsize=10.5)
axes.set_title(
u'CIE 1931 XYZ standard 2\N{DEGREE SIGN} colour-matching functions',
fontsize=options['title_fontsize'])
lock.release()
def XYZ64(axes, plots, options):
"""
Plot the CIE 1964 XYZ CMFs onto the given axes.
Parameters
----------
axes : Axes
Matplotlib axes on which to plot.
plots : dict
Data for plotting as returned by tc1_97.
options : dict
Plotting options (see code for use).
"""
lock.acquire()
axes.clear()
axes.grid(options['grid'])
axes.tick_params(labelsize=10)
axes.plot(plots['XYZ64'][:, 0], plots['XYZ64'][:, 1], 'r')
axes.plot(plots['XYZ64'][:, 0], plots['XYZ64'][:, 2], 'g')
    axes.plot(plots['XYZ64'][:, 0], plots['XYZ64'][:, 3], 'b')
if options['cie31']:
axes.plot(plots['XYZ31'][:, 0], plots['XYZ31'][:, 1], 'r--')
axes.plot(plots['XYZ31'][:, 0], plots['XYZ31'][:, 2], 'g--')
axes.plot(plots['XYZ31'][:, 0], plots['XYZ31'][:, 3], 'b--')
axes.axis('auto')
axes.axis([350, 850, -.2, 2.3])
if options['axis_labels']:
axes.set_xlabel('Wavelength (nm)', fontsize=10.5)
axes.set_ylabel('Tristimulus values', fontsize=10.5)
axes.set_title(
u'CIE 1964 XYZ standard 10\N{DEGREE SIGN} colour-matching functions',
fontsize=options['title_fontsize'])
lock.release()
def xy31(axes, plots, options):
"""
Plot the CIE 1931 xy chromaticity diagram onto the given axes.
Parameters
----------
axes : Axes
Matplotlib axes on which to plot.
plots : dict
Data for plotting as returned by tc1_97.
options : dict
Plotting options (see code for use).
"""
lock.acquire()
axes.clear()
axes.grid(options['grid'])
axes.tick_params(labelsize=10)
λ_values = np.concatenate(([390], np.arange(470, 611, 10), [700, 830]))
if options['cie64']:
axes.plot(plots['xyz64'][:, 1], plots['xyz64'][:, 2], 'k:')
axes.plot(plots['xyz64_tg_purple'][:, 1],
plots['xyz64_tg_purple'][:, 2], 'k:')
for l in λ_values: # add wavelength parameters
ind = np.nonzero(plots['xyz64'][:, 0] == l)[0]
axes.plot(plots['xyz64'][ind, 1], plots['xyz64'][ind, 2], 'ks')
axes.plot(plots['xyz31'][:, 1], plots['xyz31'][:, 2], 'k')
axes.plot(plots['xyz31_tg_purple'][:, 1],
plots['xyz31_tg_purple'][:, 2], 'k')
for l in λ_values: # add wavelength parameters
ind = np.nonzero(plots['xyz31'][:, 0] == l)[0]
axes.plot(plots['xyz31'][ind, 1], plots['xyz31'][ind, 2], 'ko')
if l == 700 or l == 390:
align = 'top'
elif l == 830:
align = 'bottom'
else:
align = 'center'
if options['labels']:
axes.text(plots['xyz31'][ind, 1],
plots['xyz31'][ind, 2], ' ' + '%.0f' %
l, fontsize=options['label_fontsize'],
verticalalignment=align)
    axes.plot(0.33331, 0.33329, 'kx')
if options['labels']:
axes.text(1./3, 1./3, ' E',
fontsize=options['label_fontsize'], verticalalignment=align)
axes.axis('scaled')
axes.set_xlim((-.05, 1.05))
axes.set_ylim((-.05, 1.05))
if options['axis_labels']:
axes.set_xlabel('$x$', fontsize=11)
axes.set_ylabel('$y$', fontsize=11)
axes.set_title(
u'CIE 1931 xy standard 2\N{DEGREE SIGN} chromaticity diagram',
fontsize=options['title_fontsize'])
lock.release()
def xy64(axes, plots, options):
"""
Plot the CIE 1964 xy chromaticity diagram onto the given axes.
Parameters
----------
axes : Axes
Matplotlib axes on which to plot.
plots : dict
Data for plotting as returned by tc1_97.
options : dict
Plotting options (see code for use).
"""
lock.acquire()
axes.clear()
axes.grid(options['grid'])
axes.tick_params(labelsize=10)
λ_values = np.concatenate(([390], np.arange(470, 611, 10), [700, 830]))
if options['cie31']:
axes.plot(plots['xyz31'][:, 1], plots['xyz31'][:, 2], 'k--')
axes.plot(plots['xyz31_tg_purple'][:, 1],
plots['xyz31_tg_purple'][:, 2], 'k--')
for l in λ_values: # add wavelength parameters
ind = np.nonzero(plots['xyz31'][:, 0] == l)[0]
axes.plot(plots['xyz31'][ind, 1], plots['xyz31'][ind, 2], 'ko')
axes.plot(plots['xyz64'][:, 1], plots['xyz64'][:, 2], 'k')
axes.plot(plots['xyz64_tg_purple'][:, 1],
plots['xyz64_tg_purple'][:, 2], 'k')
for l in λ_values: # add wavelength parameters
ind = np.nonzero(plots['xyz64'][:, 0] == l)[0]
axes.plot(plots['xyz64'][ind, 1], plots['xyz64'][ind, 2], 'ks')
if l == 700 or l == 390:
align = 'top'
elif l == 830:
align = 'bottom'
else:
align = 'center'
if options['labels']:
axes.text(plots['xyz64'][ind, 1],
plots['xyz64'][ind, 2], ' ' + '%.0f' %
l, fontsize=options['label_fontsize'],
verticalalignment=align)
axes.plot(0.33330, 0.33333, 'kx')
if options['labels']:
axes.text(1./3, 1./3, ' E',
fontsize=options['label_fontsize'], verticalalignment=align)
axes.axis('scaled')
axes.set_xlim((-.05, 1.05))
axes.set_ylim((-.05, 1.05))
if options['axis_labels']:
axes.set_xlabel('$x_{10}$', fontsize=11)
axes.set_ylabel('$y_{10}$', fontsize=11)
axes.set_title(
u'CIE 1964 xy standard 10\N{DEGREE SIGN} chromaticity diagram',
fontsize=options['title_fontsize'])
lock.release()
| gpl-3.0 |
bjackman/trappy | trappy/plotter/ILinePlotGen.py | 1 | 9380 | # Copyright 2015-2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""This is helper module for :mod:`trappy.plotter.ILinePlot`
for adding HTML and javascript necessary for interactive
plotting. The Linear to 2-D co-ordination transformations
are done by using the functionality in
:mod:`trappy.plotter.PlotLayout`
"""
from trappy.plotter import AttrConf
import uuid
from collections import OrderedDict
import json
import os
from trappy.plotter import IPythonConf
from trappy.plotter.ColorMap import to_dygraph_colors
if not IPythonConf.check_ipython():
raise ImportError("No Ipython Environment found")
from IPython.display import display, HTML
def df_to_dygraph(data_frame):
"""Helper function to convert a :mod:`pandas.DataFrame` to
dygraph data
:param data_frame: The DataFrame to be converted
:type data_frame: :mod:`pandas.DataFrame`
"""
values = data_frame.values.tolist()
data = [[x] for x in data_frame.index.tolist()]
for idx, (_, val) in enumerate(zip(data, values)):
data[idx] += val
return {
"data": data,
"labels": ["index"] + data_frame.columns.tolist(),
}
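# Illustrative sketch of df_to_dygraph with a hypothetical frame: a DataFrame
# with index [0.0, 0.1] and a single column "load" holding [12, 15] becomes
# {"data": [[0.0, 12], [0.1, 15]], "labels": ["index", "load"]}.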
class ILinePlotGen(object):
"""
:param num_plots: The total number of plots
:type num_plots: int
The linear co-ordinate system :math:`[0, N_{plots}]` is
mapped to a 2-D coordinate system with :math:`N_{rows}`
and :math:`N_{cols}` such that:
.. math::
        N_{rows} = \\left\\lceil\\frac{N_{plots}}{N_{cols}}\\right\\rceil
"""
def _add_graph_cell(self, fig_name, color_map):
"""Add a HTML table cell to hold the plot"""
colors_opt_arg = ", " + to_dygraph_colors(color_map) if color_map else ""
graph_js = ''
lib_urls = [IPythonConf.DYGRAPH_COMBINED_URL, IPythonConf.DYGRAPH_SYNC_URL,
IPythonConf.UNDERSCORE_URL]
for url in lib_urls:
graph_js += '<!-- TRAPPY_PUBLISH_SOURCE_LIB = "{}" -->\n'.format(url)
graph_js += """
<script>
/* TRAPPY_PUBLISH_IMPORT = "plotter/js/ILinePlot.js" */
/* TRAPPY_PUBLISH_REMOVE_START */
var ilp_req = require.config( {
paths: {
"dygraph-sync": '""" + IPythonConf.add_web_base("plotter_scripts/ILinePlot/synchronizer") + """',
"dygraph": '""" + IPythonConf.add_web_base("plotter_scripts/ILinePlot/dygraph-combined") + """',
"ILinePlot": '""" + IPythonConf.add_web_base("plotter_scripts/ILinePlot/ILinePlot") + """',
"underscore": '""" + IPythonConf.add_web_base("plotter_scripts/ILinePlot/underscore-min") + """',
},
shim: {
"dygraph-sync": ["dygraph"],
"ILinePlot": {
"deps": ["dygraph-sync", "dygraph", "underscore"],
"exports": "ILinePlot"
}
}
});
/* TRAPPY_PUBLISH_REMOVE_STOP */
ilp_req(["require", "ILinePlot"], function() { /* TRAPPY_PUBLISH_REMOVE_LINE */
ILinePlot.generate(""" + fig_name + "_data" + colors_opt_arg + """);
}); /* TRAPPY_PUBLISH_REMOVE_LINE */
</script>
"""
cell = '<td style="border-style: hidden;"><div class="ilineplot" id="{}"></div></td>'.format(fig_name)
self._html.append(cell)
self._js.append(graph_js)
def _add_legend_cell(self, fig_name):
"""Add HTML table cell for the legend"""
legend_div_name = fig_name + "_legend"
cell = '<td style="border-style: hidden;"><div style="text-align:center" id="{}"></div></td>'.format(legend_div_name)
self._html.append(cell)
def _begin_row(self):
"""Add the opening tag for HTML row"""
self._html.append("<tr>")
def _end_row(self):
"""Add the closing tag for the HTML row"""
self._html.append("</tr>")
def _end_table(self):
"""Add the closing tag for the HTML table"""
self._html.append("</table>")
def _generate_fig_name(self):
"""Generate a unique figure name"""
fig_name = "fig_" + uuid.uuid4().hex
self._fig_map[self._fig_index] = fig_name
self._fig_index += 1
return fig_name
def _init_html(self, color_map):
"""Initialize HTML code for the plots"""
table = '<table style="border-style: hidden;">'
self._html.append(table)
if self._attr["title"]:
cell = '<caption style="text-align:center; font: 24px sans-serif bold; color: black">{}</caption>'.format(self._attr["title"])
self._html.append(cell)
for _ in range(self._rows):
self._begin_row()
legend_figs = []
for _ in range(self._attr["per_line"]):
fig_name = self._generate_fig_name()
legend_figs.append(fig_name)
self._add_graph_cell(fig_name, color_map)
self._end_row()
self._begin_row()
for l_fig in legend_figs:
self._add_legend_cell(l_fig)
self._end_row()
self._end_table()
def __init__(self, num_plots, **kwargs):
self._attr = kwargs
self._html = []
self._js = []
self._js_plot_data = []
self.num_plots = num_plots
self._fig_map = {}
self._fig_index = 0
self._single_plot = False
if self.num_plots == 0:
raise RuntimeError("No plots for the given constraints")
if self.num_plots < self._attr["per_line"]:
self._attr["per_line"] = self.num_plots
self._rows = (self.num_plots / self._attr["per_line"])
if self.num_plots % self._attr["per_line"] != 0:
self._rows += 1
self._attr["height"] = AttrConf.HTML_HEIGHT
self._init_html(kwargs.pop("colors", None))
def _check_add_scatter(self, fig_params):
"""Check if a scatter plot is needed
and augment the fig_params accordingly"""
if self._attr["scatter"]:
fig_params["drawPoints"] = True
fig_params["strokeWidth"] = 0.0
else:
fig_params["drawPoints"] = False
fig_params["strokeWidth"] = AttrConf.LINE_WIDTH
fig_params["pointSize"] = self._attr["point_size"]
def add_plot(self, plot_num, data_frame, title="", test=False):
"""Add a plot for the corresponding index
:param plot_num: The linear index of the plot
:type plot_num: int
:param data_frame: The data for the plot
:type data_frame: :mod:`pandas.DataFrame`
:param title: The title for the plot
:type title: str
"""
datapoints = sum(len(v) for _, v in data_frame.iteritems())
if datapoints > self._attr["max_datapoints"]:
msg = "This plot is too big and will probably make your browser unresponsive. If you are happy to wait, pass max_datapoints={} to view()".\
format(datapoints + 1)
raise ValueError(msg)
fig_name = self._fig_map[plot_num]
fig_params = {}
fig_params["data"] = df_to_dygraph(data_frame)
fig_params["name"] = fig_name
fig_params["rangesel"] = False
fig_params["logscale"] = False
fig_params["title"] = title
fig_params["step_plot"] = self._attr["step_plot"]
fig_params["fill_graph"] = self._attr["fill"]
if "fill_alpha" in self._attr:
fig_params["fill_alpha"] = self._attr["fill_alpha"]
fig_params["fill_graph"] = True
fig_params["per_line"] = self._attr["per_line"]
fig_params["height"] = self._attr["height"]
self._check_add_scatter(fig_params)
# Use a hash of this object as a default for the sync group, so that if
# 'sync_zoom=True' then by default (i.e. if 'group' is not specified),
# all the plots in a figure are synced.
fig_params["syncGroup"] = self._attr.get("group", str(hash(self)))
fig_params["syncZoom"] = self._attr.get("sync_zoom",
AttrConf.DEFAULT_SYNC_ZOOM)
if "ylim" in self._attr:
fig_params["valueRange"] = self._attr["ylim"]
if "xlim" in self._attr:
fig_params["dateWindow"] = self._attr["xlim"]
fig_data = "var {}_data = {};".format(fig_name, json.dumps(fig_params))
self._js_plot_data.append("<script>")
self._js_plot_data.append(fig_data)
self._js_plot_data.append("</script>")
def finish(self):
"""Called when the Plotting is finished"""
display(HTML(self.html()))
def html(self):
"""Return the raw HTML text"""
return "\n".join(self._html + self._js_plot_data + self._js)
| apache-2.0 |
elijah513/scikit-learn | examples/applications/plot_prediction_latency.py | 234 | 11277 | """
==================
Prediction Latency
==================
This is an example showing the prediction latency of various scikit-learn
estimators.
The goal is to measure the latency one can expect when doing predictions
either in bulk or atomic (i.e. one by one) mode.
The plots represent the distribution of the prediction latency as a boxplot.
"""
# Authors: Eustache Diemert <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import time
import gc
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import scoreatpercentile
from sklearn.datasets.samples_generator import make_regression
from sklearn.ensemble.forest import RandomForestRegressor
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.svm.classes import SVR
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
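# Note: "atomic" latency below times predict() on one sample at a time, while
# "bulk" latency times predict() on the whole test matrix and divides the
# elapsed wall-clock time by the number of samples (an amortised per-sample
# figure).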
def atomic_benchmark_estimator(estimator, X_test, verbose=False):
"""Measure runtime prediction of each instance."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_instances, dtype=np.float)
for i in range(n_instances):
instance = X_test[i, :]
start = time.time()
estimator.predict(instance)
runtimes[i] = time.time() - start
if verbose:
print("atomic_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose):
"""Measure runtime prediction of the whole input."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_bulk_repeats, dtype=np.float)
for i in range(n_bulk_repeats):
start = time.time()
estimator.predict(X_test)
runtimes[i] = time.time() - start
runtimes = np.array(list(map(lambda x: x / float(n_instances), runtimes)))
if verbose:
print("bulk_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def benchmark_estimator(estimator, X_test, n_bulk_repeats=30, verbose=False):
"""
Measure runtimes of prediction in both atomic and bulk mode.
Parameters
----------
estimator : already trained estimator supporting `predict()`
X_test : test input
n_bulk_repeats : how many times to repeat when evaluating bulk mode
Returns
-------
atomic_runtimes, bulk_runtimes : a pair of `np.array` which contain the
runtimes in seconds.
"""
atomic_runtimes = atomic_benchmark_estimator(estimator, X_test, verbose)
bulk_runtimes = bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats,
verbose)
return atomic_runtimes, bulk_runtimes
def generate_dataset(n_train, n_test, n_features, noise=0.1, verbose=False):
"""Generate a regression dataset with the given parameters."""
if verbose:
print("generating dataset...")
X, y, coef = make_regression(n_samples=n_train + n_test,
n_features=n_features, noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
if verbose:
print("ok")
return X_train, y_train, X_test, y_test
def boxplot_runtimes(runtimes, pred_type, configuration):
"""
Plot a new `Figure` with boxplots of prediction runtimes.
Parameters
----------
runtimes : list of `np.array` of latencies in micro-seconds
    pred_type : 'bulk' or 'atomic'
    configuration : benchmark configuration dict (used to label the boxes
        with estimator names and complexities)
"""
fig, ax1 = plt.subplots(figsize=(10, 6))
bp = plt.boxplot(runtimes, )
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
plt.setp(ax1, xticklabels=cls_infos)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Prediction Time per Instance - %s, %d feats.' % (
pred_type.capitalize(),
configuration['n_features']))
ax1.set_ylabel('Prediction Time (us)')
plt.show()
def benchmark(configuration):
"""Run the whole benchmark."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
stats = {}
for estimator_conf in configuration['estimators']:
print("Benchmarking", estimator_conf['instance'])
estimator_conf['instance'].fit(X_train, y_train)
gc.collect()
a, b = benchmark_estimator(estimator_conf['instance'], X_test)
stats[estimator_conf['name']] = {'atomic': a, 'bulk': b}
cls_names = [estimator_conf['name'] for estimator_conf in configuration[
'estimators']]
runtimes = [1e6 * stats[clf_name]['atomic'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'atomic', configuration)
runtimes = [1e6 * stats[clf_name]['bulk'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'bulk (%d)' % configuration['n_test'],
configuration)
def n_feature_influence(estimators, n_train, n_test, n_features, percentile):
"""
Estimate influence of the number of features on prediction time.
Parameters
----------
estimators : dict of (name (str), estimator) to benchmark
    n_train : number of training instances (int)
    n_test : number of testing instances (int)
n_features : list of feature-space dimensionality to test (int)
percentile : percentile at which to measure the speed (int [0-100])
    Returns
    -------
percentiles : dict(estimator_name,
dict(n_features, percentile_perf_in_us))
"""
percentiles = defaultdict(defaultdict)
for n in n_features:
print("benchmarking with %d features" % n)
X_train, y_train, X_test, y_test = generate_dataset(n_train, n_test, n)
for cls_name, estimator in estimators.items():
estimator.fit(X_train, y_train)
gc.collect()
runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False)
percentiles[cls_name][n] = 1e6 * scoreatpercentile(runtimes,
percentile)
return percentiles
def plot_n_features_influence(percentiles, percentile):
fig, ax1 = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
for i, cls_name in enumerate(percentiles.keys()):
x = np.array(sorted([n for n in percentiles[cls_name].keys()]))
y = np.array([percentiles[cls_name][n] for n in x])
plt.plot(x, y, color=colors[i], )
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Evolution of Prediction Time with #Features')
ax1.set_xlabel('#Features')
ax1.set_ylabel('Prediction Time at %d%%-ile (us)' % percentile)
plt.show()
def benchmark_throughputs(configuration, duration_secs=0.1):
"""benchmark throughput for different estimators."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
throughputs = dict()
for estimator_config in configuration['estimators']:
estimator_config['instance'].fit(X_train, y_train)
start_time = time.time()
n_predictions = 0
while (time.time() - start_time) < duration_secs:
estimator_config['instance'].predict(X_test[0])
n_predictions += 1
throughputs[estimator_config['name']] = n_predictions / duration_secs
return throughputs
def plot_benchmark_throughput(throughputs, configuration):
fig, ax = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
cls_values = [throughputs[estimator_conf['name']] for estimator_conf in
configuration['estimators']]
plt.bar(range(len(throughputs)), cls_values, width=0.5, color=colors)
ax.set_xticks(np.linspace(0.25, len(throughputs) - 0.75, len(throughputs)))
ax.set_xticklabels(cls_infos, fontsize=10)
ymax = max(cls_values) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('Throughput (predictions/sec)')
ax.set_title('Prediction Throughput for different estimators (%d '
'features)' % configuration['n_features'])
plt.show()
###############################################################################
# main code
start_time = time.time()
# benchmark bulk/atomic prediction speed for various regressors
configuration = {
'n_train': int(1e3),
'n_test': int(1e2),
'n_features': int(1e2),
'estimators': [
{'name': 'Linear Model',
'instance': SGDRegressor(penalty='elasticnet', alpha=0.01,
l1_ratio=0.25, fit_intercept=True),
'complexity_label': 'non-zero coefficients',
'complexity_computer': lambda clf: np.count_nonzero(clf.coef_)},
{'name': 'RandomForest',
'instance': RandomForestRegressor(),
'complexity_label': 'estimators',
'complexity_computer': lambda clf: clf.n_estimators},
{'name': 'SVR',
'instance': SVR(kernel='rbf'),
'complexity_label': 'support vectors',
'complexity_computer': lambda clf: len(clf.support_vectors_)},
]
}
benchmark(configuration)
# benchmark n_features influence on prediction speed
percentile = 90
percentiles = n_feature_influence({'ridge': Ridge()},
configuration['n_train'],
configuration['n_test'],
[100, 250, 500], percentile)
plot_n_features_influence(percentiles, percentile)
# benchmark throughput
throughputs = benchmark_throughputs(configuration)
plot_benchmark_throughput(throughputs, configuration)
stop_time = time.time()
print("example run in %.2fs" % (stop_time - start_time))
| bsd-3-clause |