repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
meco-group/omg-tools | omgtools/gui/gcode_reader.py | 1 | 10086 | try:
import tkinter as tk
import tkinter.filedialog as tkfiledialog
import tkinter.messagebox as tkmessagebox
except ImportError:
import Tkinter as tk
import tkFileDialog as tkfiledialog
import tkMessageBox as tkmessagebox
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from .gcode_block import generate_gcodeblock
try:
input = raw_input
except NameError:
pass
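# Illustrative sketch only: shift_scale_line is a hypothetical helper, not part
# of omg-tools and not used by GCodeReader. It shows, in isolation, the
# word-level arithmetic that convert() below applies to every GCode line:
# X/Y words are shifted by the offset and then scaled, while I/J arc-center
# words (which are relative) are only scaled. For example,
# shift_scale_line('G01 X10.0 Y4.0', (2.0, 1.0), 2.0) gives 'G01 X16.0 Y6.0'.
def shift_scale_line(line, offset, scaling):
    new_words = []
    for word in line.split():
        letter, value = word[0], word[1:]
        if letter in ('X', 'Y'):
            shift = offset[0] if letter == 'X' else offset[1]
            new_words.append(letter + str((float(value) - shift) * scaling))
        elif letter in ('I', 'J'):
            new_words.append(letter + str(float(value) * scaling))
        else:
            new_words.append(word)
    return ' '.join(new_words)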
class GCodeReader(object):
def __init__(self):
self.root = tk.Tk()
self.root.withdraw()
self.GCodes = ['G00', 'G01', 'G02', 'G03'] # list of supported GCode commands
self.commands = []
self.blocks = []
def reset(self):
self.commands = []
self.blocks = []
def load_file(self, file=None):
if file is None:
file = tkfiledialog.askopenfilename(filetypes=[('GCode files', '.nc'), ('all files','.*')])
self.filepath = file.rsplit('/',1)[0] # save file path without the file name
if file:
try:
data = open(file, 'rb')
except Exception as details:
tkmessagebox.showerror('Error', details)
return
else:
try:
data = open(file, 'rb')
except Exception as details:
print(details)
return
self.file = data
def convert(self):
# shift the midpoint and/or scale the loaded GCode file
# ask the user whether they want to change the GCode file
answer = ''
while (not answer in ['yes', 'no']):
answer = input('Do you want to shift or scale the loaded GCode? (yes/no): ')
if answer == 'yes':
# shift and scale the GCode file
lines = self.file.read().splitlines()
# get user input for offset and scaling factor
offset = []
scaling = []
while (not ((type(offset) == list) and (len(offset) == 2) and (all(isinstance(o, float) for o in offset)))
or not (isinstance(scaling, float))):
offset = input('What is the offset? E.g. a,b to move midpoint from x,y to x-a, y-b: ').split(',')
offset = [float(offset[0]), float(offset[1])]
scaling = float(input('What is the scaling factor? E.g. 2.1: '))
# apply offset
offset_lines = []
for line in lines:
if any(type in line for type in ['G00', 'G01', 'G02', 'G03']):
# only look at lines containing a GCode command
split_line = line.split()
for idx, s in enumerate(split_line):
if 'X' in s:
x_val = float(s[1:])
x_val_n = x_val-offset[0]
split_line[idx] = 'X' + str(x_val_n)
if 'Y' in s:
y_val = float(s[1:])
y_val_n = y_val-offset[1]
split_line[idx] = 'Y' + str(y_val_n)
offset_lines.append(' '.join(split_line))
# apply scaling
new_lines = []
for line in offset_lines:
if any(type in line for type in ['G00', 'G01', 'G02', 'G03']):
# only look at lines containing a GCode command
split_line = line.split()
for idx, s in enumerate(split_line):
if 'X' in s:
x_val = float(s[1:])
x_val_n = x_val*scaling
split_line[idx] = 'X' + str(x_val_n)
if 'Y' in s:
y_val = float(s[1:])
y_val_n = y_val*scaling
split_line[idx] = 'Y' + str(y_val_n)
if 'I' in s:
i_val = float(s[1:])
i_val_n = i_val*scaling
split_line[idx] = 'I' + str(i_val_n)
if 'J' in s:
j_val = float(s[1:])
j_val_n = j_val*scaling
split_line[idx] = 'J' + str(j_val_n)
new_lines.append(' '.join(split_line))
# get file name and remove extension '.nc'
old_name = self.file.name.split('/')[-1][:-3]
file = open(old_name + '_shift_scale.nc', 'w')
for line in new_lines:
file.write(line+'\n')
file.close()
file = open(old_name+'_shift_scale.nc', 'rb')
self.file = file
# else: # do nothing
def read(self, file=None):
self.subfiles = []
if file is None:
file_str = self.file.readlines()
else:
file_str = file.readlines()
for line in file_str:
# extract commands
if line[0] == '(':
# this is a commented line
pass
elif line[0] == 'G' or (line[0] == 'N' and (any([code in line for code in self.GCodes]))):
self.commands.append(line)
# extract subfiles: sometimes the loaded .nc file contains references to extra .nc files
elif (line[0] == 'N' and ' L ' in line):
# this is a line specifying a subfile
info = line.split()
for i in info:
if '.nc' in i:
filename = i
self.subfiles.append(filename)
# else: skip line, this is not a G-code block
# loop over all subfiles
for f in self.subfiles:
f = self.filepath +'/subfiles/'+ f
file_data = open(f, 'rb')
file_str = file_data.readlines()
for line in file_str:
# extract commands
if line[0] == 'G' or (line[0] == 'N' and 'G' in line):
self.commands.append(line)
def create_blocks(self):
# represent all GCode commands as corresponding GCode block objects
self.cnt = 0 # reset counter
if not self.blocks:
# first block, so add empty block
prev_block = None
else:
prev_block = self.blocks[-1]
for command in self.commands:
self.cnt += 1
new_block = generate_gcodeblock(command, self.cnt, prev_block)
if (new_block is not None and not
(new_block.type in ['G00', 'G01'] and
new_block.X0 == new_block.X1 and
new_block.Y0 == new_block.Y1 and
new_block.Z0 == new_block.Z1)):
# there was a new block and there was a movement compared
# to the last block
# new previous block = last one added
self.blocks.append(new_block)
prev_block = new_block
elif new_block is not None:
prev_block = new_block
# else: # new block was not valid, don't add it
def get_gcode(self):
return self.blocks
def get_block_division(self, GCode):
# divide the provided GCode in different collections of blocks:
# Z-axis movements and XY-plane movements
# this function can handle two possible situations:
# 1) machining several layers, with a z-movement at the end of each layer
# 2) retracting the tool at some points to e.g. machine a sequence of circles
# for both cases the complete GCode is split in different parts
# for 1)
# 1. machining
# 2. move z-axis deeper
# for 2)
# 1. machining
# 2. z-axis retraction (may be done fast)
# 3. movement without machining (may be done fast)
# 4. z-axis approach (must be done slowly)
GCode_blocks = [] # contains the different GCode parts in a list of lists
blocks = [] # local variable to save the different GCode parts temporarily
for block in GCode:
if block.type not in ['G00', 'G01'] or (block.Z0 == block.Z1):
# arc segment ('G02', 'G03'), or no movement in z-direction
blocks.append(block)
else:
# this is a z-retract/engage block
# save the previous blocks
if blocks:
GCode_blocks.append(blocks)
# clear blocks variable
blocks = []
# add z-block separately
GCode_blocks.append([block])
GCode_blocks.append(blocks)
return GCode_blocks
def get_connections(self):
# returns a list of points in which the different GCode trajectories are connected
connections = []
for block in self.blocks:
connections.append([block.X0,block.Y0, block.Z0])
connections.append([self.blocks[-1].X0, self.blocks[-1].Y0, self.blocks[-1].Z0]) # add last point
connections.append([self.blocks[0].X0, self.blocks[0].Y0, self.blocks[0].Z0]) # close contour
self.connections = np.array(connections)
def plot_gcode(self):
# plot the workpiece that is represented by the GCode
self.get_connections()
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.scatter(self.connections[:,0], self.connections[:,1], self.connections[:,2], color='red', marker='x', label='GCode')
coordinates = []
for block in self.blocks:
coords = block.get_coordinates()
for c in coords:
coordinates.append(c)
self.coords = np.array(coordinates)
ax.plot(self.coords[:,0], self.coords[:,1], self.coords[:,2], color='blue', label='GCode')
plt.show(block=False)
def run(self):
self.load_file()
self.convert()
self.read()
self.create_blocks()
self.plot_gcode()
GCode = self.get_gcode()
return GCode | lgpl-3.0 |
jcornford/pyecog | pyecog/ndf/utils.py | 1 | 5661 | import pandas as pd
import os
def get_time_from_filename_with_mcode( filepath, return_string = True, split_on_underscore = False):
# convert m name
filename = os.path.split(filepath)[1]
if filename.endswith('.ndf'):
tstamp = float(filename.split('.')[0][-10:])
elif filename.endswith('.h5'):
tstamp = float(filename.split('_')[0][-10:])
elif split_on_underscore:
tstamp = float(filename.split('_')[0][-10:])
else:
print('file format for splitting unknown')
return 0
if return_string:
ndf_time = str(pd.Timestamp.fromtimestamp(tstamp)).replace(':', '-')
ndf_time = ndf_time.replace(' ', '-')
return ndf_time
else:
ndf_time = pd.Timestamp.fromtimestamp(tstamp)
return ndf_time
def add_seconds_to_pandas_timestamp(seconds, timestamp):
new_stamp = timestamp + pd.Timedelta(seconds=float(seconds))
return new_stamp
def get_time_from_seconds_and_filepath(filepath, seconds,split_on_underscore = False):
'''
Args:
filepath:
seconds:
split_on_underscore:
Returns:
a pandas timestamp
'''
f_stamp = get_time_from_filename_with_mcode(filepath, return_string=False, split_on_underscore=split_on_underscore)
time_stamp_combined = add_seconds_to_pandas_timestamp(seconds, f_stamp)
return time_stamp_combined
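# Minimal usage sketch (hypothetical file name, not shipped with pyecog),
# assuming the last 10 digits before the extension encode a unix timestamp,
# as the helpers above expect:
if __name__ == '__main__':
    example_path = '/data/M1493567890.ndf'
    start = get_time_from_filename_with_mcode(example_path, return_string=False)
    event_time = get_time_from_seconds_and_filepath(example_path, 3600)
    print(start, event_time)  # event_time falls one hour after the file start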
def filterArray(array,window_size = 51,order=3):
'''
Simple for-loop based indexing for savitzky_golay filter
Inputs:
array:
array.shape[1] are datapoints - each row a trace, columns the datapoints
array should be <= 3d
window_size : int
the length of the window. Must be an odd integer number.
order : int
the order of the polynomial used in the filtering.
Must be less then `window_size` - 1.
'''
import copy
fData = copy.copy(array)
ndimensions = len(fData.shape) # number of dimension
if ndimensions == 1:
fData = savitzky_golay(array,window_size,order)
elif ndimensions == 2:
for trace_i in range(array.shape[0]):
fData[trace_i,:] = savitzky_golay(array[trace_i,:],window_size,order)
elif ndimensions == 3:
for index in range(array.shape[2]):
for index2 in range(array.shape[1]):
fData[:,index2,index] = savitzky_golay(array[:,index2,index],window_size,order)
# else: only arrays with up to 3 dimensions are supported
return fData
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
r"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.
The Savitzky-Golay filter removes high frequency noise from data.
It has the advantage of preserving the original shape and
features of the signal better than other types of filtering
approaches, such as moving averages techniques.
Parameters
----------
y : array_like, shape (N,)
the values of the time history of the signal.
window_size : int
the length of the window. Must be an odd integer number.
order : int
the order of the polynomial used in the filtering.
Must be less then `window_size` - 1.
deriv: int
the order of the derivative to compute (default = 0 means only smoothing)
Returns
-------
ys : ndarray, shape (N)
the smoothed signal (or it's n-th derivative).
Notes
-----
The Savitzky-Golay is a type of low-pass filter, particularly
suited for smoothing noisy data. The main idea behind this
approach is to make for each point a least-square fit with a
polynomial of high order over a odd-sized window centered at
the point.
Examples
--------
t = np.linspace(-4, 4, 500)
y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)
ysg = savitzky_golay(y, window_size=31, order=4)
import matplotlib.pyplot as plt
plt.plot(t, y, label='Noisy signal')
plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')
plt.plot(t, ysg, 'r', label='Filtered signal')
plt.legend()
plt.show()
References
----------
.. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
Data by Simplified Least Squares Procedures. Analytical
Chemistry, 1964, 36 (8), pp 1627-1639.
.. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. Flannery
Cambridge University Press ISBN-13: 9780521880688
http://wiki.scipy.org/Cookbook/SavitzkyGolay
"""
import numpy as np
from math import factorial
try:
window_size = abs(int(window_size))
order = abs(int(order))
except ValueError:
raise ValueError("window_size and order have to be of type int")
if window_size % 2 != 1 or window_size < 1:
raise TypeError("window_size size must be a positive odd number")
if window_size < order + 2:
raise TypeError("window_size is too small for the polynomials order")
order_range = range(order+1)
half_window = (window_size -1) // 2
# precompute coefficients
b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])
m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)
# pad the signal at the extremes with
# values taken from the signal itself
firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )
lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.convolve( m[::-1], y, mode='valid') | mit |
akrherz/iem | htdocs/plotting/auto/scripts/p3.py | 1 | 14640 | """Plot monthly data over all years"""
import calendar
import datetime
from collections import OrderedDict
import numpy as np
from pandas.io.sql import read_sql
from pyiem import network
from pyiem.plot.use_agg import plt
from pyiem.util import get_autoplot_context, get_dbconn
from pyiem.exceptions import NoDataFound
PDICT = OrderedDict(
[
("max-high", "Maximum High"),
("avg-high", "Average High"),
("std-high", "Standard Deviation High"),
("delta-high", "Average Day to Day High Change"),
("min-high", "Minimum High"),
("max-low", "Maximum Low"),
("avg-low", "Average Low"),
("std-low", "Standard Deviation Low"),
("delta-low", "Average Day to Day Low Change"),
("min-low", "Minimum Low"),
("avg-temp", "Average Temp"),
("std-temp", "Standard Deviation of Average Temp"),
("delta-temp", "Average Day to Day Avg Temp Change"),
("range-avghi-avglo", "Range between Average High + Average Low"),
("max-precip", "Maximum Daily Precip"),
("sum-precip", "Total Precipitation"),
(
"days-high-above",
"Days with High Temp Greater Than or Equal To (threshold)",
),
("days-high-below", "Days with High Temp Below (threshold)"),
(
"days-high-above-avg",
"Days with High Temp Greater Than or Equal To Average",
),
(
"days-lows-above",
"Days with Low Temp Greater Than or Equal To (threshold)",
),
("days-lows-below", "Days with Low Temp Below (threshold)"),
("days-lows-below-avg", "Days with Low Temp Below Average"),
]
)
PDICT2 = {"no": "Plot Yearly Values", "yes": "Plot Decadal Values"}
MDICT = OrderedDict(
[
("spring", "Spring (MAM)"),
("fall", "Fall (SON)"),
("winter", "Winter (DJF)"),
("summer", "Summer (JJA)"),
("1", "January"),
("2", "February"),
("3", "March"),
("4", "April"),
("5", "May"),
("6", "June"),
("7", "July"),
("8", "August"),
("9", "September"),
("10", "October"),
("11", "November"),
("12", "December"),
]
)
def get_description():
"""Return a dict describing how to call this plotter"""
desc = dict()
desc["data"] = True
desc[
"description"
] = """This plot displays a single month's worth of data
over all of the years in the period of record. In most cases, you can
access the raw data for these plots
<a href="/climodat/" class="link link-info">here.</a> For the variables
comparing the daily temperatures against average, the average is taken
from the NCEI current 1981-2010 climatology.
<p>This page presents a number of statistical measures. In general, these
can be summarized as:
<ul>
<li><strong>Average:</strong> simple arithmetic mean</li>
<li><strong>Maximum:</strong> the largest single day value</li>
<li><strong>Standard Deviation:</strong> measure indicating the spread
within the population of daily values for each grouped period. Lower
values indicate less variability within the month or period.</li>
<li><strong>Average Day to Day:</strong> this is computed by sequentially
ordering the daily observations with time, computing the absolute value
between the current day and previous day and then averaging those values.
This is another measure of variability during the month.</li>
</ul></p>
<p>You can optionally summarize by decades. For this plot and for example,
the decade of the 90s represents the inclusive years 1990 thru 1999.
Please use care to specify start and end years that make sense for this
presentation. For example, if the year is only 2020, the 2020 decade
values would only have one year included!</p>
"""
today = datetime.date.today()
desc["arguments"] = [
dict(
type="station",
name="station",
default="IA0000",
label="Select Station",
network="IACLIMATE",
),
dict(
type="select",
name="month",
default=today.month,
label="Month/Season",
options=MDICT,
),
dict(
type="select",
name="type",
default="max-high",
label="Which metric to plot?",
options=PDICT,
),
dict(
type="float",
name="threshold",
default="-99",
label="Threshold (optional, specify when appropriate):",
),
dict(
type="year",
default=1890,
label="Potential Minimum Year (inclusive) to use in plot:",
name="syear",
min=1850,
),
dict(
type="year",
default=today.year,
label="Potential Maximum Year (inclusive) to use in plot:",
name="eyear",
min=1850,
),
dict(
type="select",
options=PDICT2,
name="decadal",
default="no",
label="Aggregate plot by decades:",
),
]
return desc
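# Worked illustration (made-up numbers, independent of the SQL below) of the
# "Average Day to Day" metric described in get_description above: order the
# daily values in time, take absolute day-over-day differences, then average.
def _example_day_to_day_change():
    highs = [50, 54, 47, 47, 60]  # hypothetical consecutive daily highs
    deltas = [abs(b - a) for a, b in zip(highs, highs[1:])]  # [4, 7, 0, 13]
    return sum(deltas) / len(deltas)  # 6.0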
def highcharts(fdict):
"""Go high charts"""
ctx = get_context(fdict)
ptinterval = "10" if ctx["decadal"] else "1"
return (
"""
$("#ap_container").highcharts({
title: {text: '"""
+ ctx["title"]
+ """'},
subtitle: {text: '"""
+ ctx["subtitle"]
+ """'},
chart: {zoomType: 'x'},
tooltip: {shared: true},
xAxis: {title: {text: '"""
+ ctx["xlabel"]
+ """'}},
yAxis: {title: {text: '"""
+ ctx["ylabel"].replace(r"$^\circ$", "")
+ """'}},
series: [{
name: '"""
+ ctx["ptype"]
+ """',
type: 'column',
width: 0.8,
pointStart: """
+ str(ctx["df"].index.min())
+ """,
pointInterval: """
+ ptinterval
+ """,
tooltip: {
valueDecimals: 2
},
data: """
+ str(ctx["data"].tolist())
+ """
}, {
tooltip: {
valueDecimals: 2
},
name: '30 Year Trailing Avg',
pointStart: """
+ str(ctx["df"].index.min() + (3 if ctx["decadal"] else 30))
+ """,
pointInterval: """
+ ptinterval
+ """,
width: 2,
data: """
+ str(ctx["tavg"][(3 if ctx["decadal"] else 30) :])
+ """
},{
tooltip: {
valueDecimals: 2
},
name: 'Average',
width: 2,
pointPadding: 0.1,
pointStart: """
+ str(ctx["df"].index.min())
+ """,
pointInterval: """
+ ptinterval
+ """,
data: """
+ str([ctx["avgv"]] * len(ctx["df"].index))
+ """
}]
});
"""
)
def get_context(fdict):
"""Get the context"""
pgconn = get_dbconn("coop")
ctx = get_autoplot_context(fdict, get_description())
ctx["decadal"] = ctx.get("decadal") == "yes"
# Lower the start year if decadal
if ctx["decadal"]:
ctx["syear"] -= ctx["syear"] % 10
station = ctx["station"]
month = ctx["month"]
ptype = ctx["type"]
threshold = ctx["threshold"]
table = "alldata_%s" % (station[:2],)
nt = network.Table("%sCLIMATE" % (station[:2],))
lag = "0 days"
if month == "fall":
months = [9, 10, 11]
label = "Fall (SON)"
elif month == "winter":
months = [12, 1, 2]
lag = "31 days"
label = "Winter (DJF)"
elif month == "spring":
months = [3, 4, 5]
label = "Spring (MAM)"
elif month == "summer":
months = [6, 7, 8]
label = "Summer (JJA)"
else:
months = [int(month)]
label = calendar.month_name[int(month)]
decagg = 10 if ctx["decadal"] else 1
df = read_sql(
f"""
WITH climo as (
SELECT to_char(valid, 'mmdd') as sday,
high, low from ncei_climate91 WHERE station = %s),
day2day as (
SELECT
extract(year from day + '{lag}'::interval)::int / {decagg} as myyear,
month,
abs(high - lag(high) OVER (ORDER by day ASC)) as dhigh,
abs(low - lag(low) OVER (ORDER by day ASC)) as dlow,
abs((high+low)/2. - lag((high+low)/2.)
OVER (ORDER by day ASC)) as dtemp
from {table} WHERE station = %s),
agg as (
SELECT myyear, avg(dhigh) as dhigh, avg(dlow) as dlow,
avg(dtemp) as dtemp from day2day WHERE month in %s GROUP by myyear),
agg2 as (
SELECT
extract(year from day + '{lag}'::interval)::int / {decagg} as myyear,
max(o.year) - min(o.year) + 1 as years,
max(o.high) as "max-high",
min(o.high) as "min-high",
avg(o.high) as "avg-high",
stddev(o.high) as "std-high",
max(o.low) as "max-low",
min(o.low) as "min-low",
avg(o.low) as "avg-low",
stddev(o.low) as "std-low",
avg((o.high + o.low)/2.) as "avg-temp",
stddev((o.high + o.low)/2.) as "std-temp",
max(o.precip) as "max-precip",
sum(o.precip) as "sum-precip",
avg(o.high) - avg(o.low) as "range-avghi-avglo",
sum(case when o.high >= %s then 1 else 0 end) as "days-high-above",
sum(case when o.high < %s then 1 else 0 end) as "days-high-below",
sum(case when o.high >= c.high then 1 else 0 end) as "days-high-above-avg",
sum(case when o.low >= %s then 1 else 0 end) as "days-lows-above",
sum(case when o.low < c.low then 1 else 0 end) as "days-lows-below-avg",
sum(case when o.low < %s then 1 else 0 end) as "days-lows-below"
from {table} o JOIN climo c on (o.sday = c.sday)
where station = %s and month in %s GROUP by myyear)
SELECT b.*, a.dhigh as "delta-high", a.dlow as "delta-low",
a.dtemp as "delta-temp" from agg a JOIN agg2 b
on (a.myyear = b.myyear) WHERE b.myyear * {decagg} >= %s
and b.myyear * {decagg} <= %s
ORDER by b.myyear ASC
""",
pgconn,
params=(
nt.sts[station]["ncei91"],
station,
tuple(months),
threshold,
threshold,
threshold,
threshold,
station,
tuple(months),
ctx["syear"],
ctx["eyear"],
),
index_col="myyear",
)
if df.empty:
raise NoDataFound("No data was found for query")
if ctx["decadal"]:
df.index = df.index * 10
# Need to fix the sum() operations above
for colname in [
"sum-precip",
"days-high-above",
"days-high-below",
"days-high-above-avg",
"days-lows-above",
"days-lows-below-avg",
"days-lows-below",
]:
df[colname] = df[colname] / df["years"]
# Figure out the max min values to add to the row
df2 = df[df[ptype] == df[ptype].max()]
if df2.empty:
raise NoDataFound("No data was found for query")
df = df.dropna()
xlabel = "Year, Max: %.2f %s%s" % (
df[ptype].max(),
df2.index.values[0],
"+" if len(df2.index) > 1 else "",
)
df2 = df[df[ptype] == df[ptype].min()]
xlabel += ", Min: %.2f %s%s" % (
df[ptype].min(),
df2.index.values[0],
"+" if len(df2.index) > 1 else "",
)
ctx["xlabel"] = xlabel
data = df[ptype].values
ctx["data"] = data
ctx["avgv"] = df[ptype].mean()
ctx["df"] = df
# Compute 30 year trailing average
tavg = [None] * 30
for i in range(30, len(data)):
tavg.append(np.average(data[i - 30 : i]))
if ctx["decadal"]:
tavg = [None] * 3
for i in range(3, len(data)):
tavg.append(np.average(data[i - 3 : i]))
ctx["tavg"] = tavg
# End interval is inclusive
ctx["a1981_2010"] = df.loc[1981:2010, ptype].mean()
ctx["ptype"] = ptype
ctx["station"] = station
ctx["threshold"] = threshold
ctx["month"] = month
ctx["title"] = ("[%s] %s %s-%s") % (
station,
nt.sts[station]["name"],
df.index.min(),
df.index.max(),
)
ctx["subtitle"] = ("%s %s") % (label, PDICT[ptype])
if ptype.find("days") == 0 and ptype.find("avg") == -1:
ctx["subtitle"] += " (%s)" % (threshold,)
units = r"$^\circ$F"
if ctx["ptype"].find("precip") > 0:
units = "inches"
elif ctx["ptype"].find("days") > 0:
units = "days"
ctx["ylabel"] = "%s [%s]" % (PDICT[ctx["ptype"]], units)
return ctx
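# Minimal sketch (made-up numbers) of the decadal aggregation that get_context
# wires into the SQL above: years are floored to their decade (1997 -> 1990),
# grouped, and summed columns are divided by the number of contributing years,
# which is why a partial decade such as 2020-only can look unrepresentative.
def _example_decadal_means(yearly_values):
    buckets = {}
    for year, value in yearly_values.items():
        decade = (year // 10) * 10
        buckets.setdefault(decade, []).append(value)
    return {dec: sum(vals) / len(vals) for dec, vals in buckets.items()}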
def plotter(fdict):
"""Go"""
ctx = get_context(fdict)
(fig, ax) = plt.subplots(1, 1, figsize=(8, 6))
colorabove = "tomato"
colorbelow = "dodgerblue"
precision = "%.1f"
if ctx["ptype"] in ["max-precip", "sum-precip"]:
colorabove = "dodgerblue"
colorbelow = "tomato"
precision = "%.2f"
bars = ax.bar(
ctx["df"].index.values,
ctx["data"],
color=colorabove,
align="edge",
width=9 if ctx["decadal"] else 1,
)
for i, mybar in enumerate(bars):
if ctx["data"][i] < ctx["avgv"]:
mybar.set_color(colorbelow)
lbl = "Avg: " + precision % (ctx["avgv"],)
ax.axhline(ctx["avgv"], lw=2, color="k", zorder=2, label=lbl)
lbl = "1981-2010: " + precision % (ctx["a1981_2010"],)
ax.axhline(ctx["a1981_2010"], lw=2, color="brown", zorder=2, label=lbl)
ax.plot(
ctx["df"].index.values,
ctx["tavg"],
lw=1.5,
color="g",
zorder=4,
label="Trailing 30yr",
)
ax.plot(
ctx["df"].index.values, ctx["tavg"], lw=3, color="yellow", zorder=3
)
ax.set_xlim(
ctx["df"].index.min() - 1,
ctx["df"].index.max() + (11 if ctx["decadal"] else 1),
)
if ctx["ptype"].find("precip") == -1 and ctx["ptype"].find("days") == -1:
ax.set_ylim(min(ctx["data"]) - 5, max(ctx["data"]) + 5)
ax.set_xlabel(ctx["xlabel"])
ax.set_ylabel(ctx["ylabel"])
ax.grid(True)
ax.legend(ncol=3, loc="best", fontsize=10)
ax.set_title("%s\n%s" % (ctx["title"], ctx["subtitle"]))
return fig, ctx["df"]
if __name__ == "__main__":
plotter(
dict(
station="IATDSM",
network="IACLIMATE",
type="max-high",
month="11",
threshold=-99,
decadal="yes",
)
)
| mit |
ivlukin/math_stat_python | problem4.py | 1 | 9154 | import numpy
import numpy.linalg as linal
from scipy.stats import f
from scipy.stats import t
from lib import pearson, read_column_from_csv, ess, rss
import matplotlib.pyplot as plt
variation = int(input("Enter your variant number: "))
k = 4
n = 40
file_path = 'data/4problem.csv'
def ls(var_to_calc, ridge=0, restricted=False):
"""
:param ridge: ridge coefficient
:param var_to_calc: variant from 1 to 10
:param restricted: model with restriction or not
:return: array of LS(least squares) coefficients
"""
x = get_x_matrix(var_to_calc, restricted)
# (X^T * X)
# numpy.dot is a matrix multiplication
# add the ridge penalty (ridge * I) when ridge is non-zero
if not restricted:
ar = numpy.zeros((4, 4), float)
numpy.fill_diagonal(ar, float(ridge))
x_step1 = numpy.dot(x.T, x) + ar
else:
x_step1 = numpy.dot(x.T, x)
# (X^T * X)^-1
x_step2 = linal.inv(x_step1)
# (X^T * X)^-1 * X^T
x_step3 = numpy.dot(x_step2, x.T)
# (X^T * X)^-1 * X^T * Y
y_1 = read_column_from_csv(column_number=3 + (var_to_calc - 1) * 4, file=file_path)
y_1 = numpy.array([y_1]).T
coefficient_vector = numpy.dot(x_step3, y_1)
return coefficient_vector.T[0]
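# Self-contained sketch (synthetic data, independent of data/4problem.csv) of
# the closed-form estimate used in ls() above: b = (X^T X + ridge*I)^-1 X^T y,
# which reduces to ordinary least squares when ridge = 0. _ridge_demo is a
# hypothetical helper and is not called anywhere else in this script.
def _ridge_demo(ridge=0.5, seed=0):
    rng = numpy.random.RandomState(seed)
    x_demo = numpy.hstack([numpy.ones((40, 1)), rng.randn(40, 3)])
    y_demo = x_demo.dot(numpy.array([1.0, 2.0, -1.0, 0.5])) + 0.1 * rng.randn(40)
    penalty = ridge * numpy.eye(x_demo.shape[1])
    return linal.inv(x_demo.T.dot(x_demo) + penalty).dot(x_demo.T).dot(y_demo)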
def get_x_matrix(var_to_calc, restricted=False):
var_to_calc -= 1
x_2 = read_column_from_csv(column_number=0 + var_to_calc * 4, file=file_path)
x_3 = read_column_from_csv(column_number=1 + var_to_calc * 4, file=file_path)
x_4 = read_column_from_csv(column_number=2 + var_to_calc * 4, file=file_path)
y_1 = read_column_from_csv(column_number=3 + var_to_calc * 4, file=file_path)
len_of_data = len(y_1)
# vector of LS coefficients is (X^T * X)^-1 * X^T * Y
# 1. make column vectors
x = numpy.ones((len_of_data, 1), dtype=float)
x_2 = numpy.array([x_2]).T
x_3 = numpy.array([x_3]).T
x_4 = numpy.array([x_4]).T
y_1 = numpy.array([y_1]).T
x = numpy.concatenate((x, x_2), axis=1)
if not restricted:
x = numpy.concatenate((x, x_3), axis=1)
x = numpy.concatenate((x, x_4), axis=1)
return x
def coefficient_variance(number_of_coefficient, y_arr, y_explained, x_matrix):
"""
:param number_of_coefficient: number from range(n)
:param y_arr: input y
:param y_explained: y explained array
:param x_matrix: x matrix from docs
:return: variance of coefficient
"""
variance_e_2 = rss(y_arr, y_explained) / (n - k)
# (X^T * X)^-1
mmatrix = linal.inv(numpy.dot(x_matrix.T, x_matrix))
v_matrix = numpy.dot(variance_e_2, mmatrix)
return v_matrix[number_of_coefficient][number_of_coefficient]
##############################################################
######### 1. Estimate the linear dependence of y on x2, x3 ###
####### and x4 by the method of least squares ################
##############################################################
print("""1. Estimate the linear dependence of y on x2, x3 and x4
by the method of least squares""")
print()
# LS(least squares)
# b1 b2 b3 b4
coefficient_vector = ls(variation)
print("Коэфициенты b1 b2 b3 b4:")
print(coefficient_vector)
print()
##############################################################
######## 2. Test the overall significance of the regression ##
##############################################################
print("2. Test the overall significance of the regression")
print()
x_2 = read_column_from_csv(column_number=0 + (variation - 1) * 4, file=file_path)
x_3 = read_column_from_csv(column_number=1 + (variation - 1) * 4, file=file_path)
x_4 = read_column_from_csv(column_number=2 + (variation - 1) * 4, file=file_path)
y_1 = read_column_from_csv(column_number=3 + (variation - 1) * 4, file=file_path)
y_estimation = [coefficient_vector[0]
+ coefficient_vector[1] * x_2[i]
+ coefficient_vector[2] * x_3[i]
+ coefficient_vector[3] * x_4[i]
for i in range(40)]
ess_ur = ess(y_1, y_estimation)
rss_ur = rss(y_1, y_estimation)
# Fisher dist
f_crit = f.ppf(0.95, k - 1, n - k)
f_real = ess_ur / (k - 1) / (rss_ur / (n - k))
print('F (95%, k-1, n-4) is {}'.format(f_crit))
print('ess / (k - 1) / (rss / (n - k)) is {}'.format(f_real))
if f_crit < f_real:
print('Reject H0 (b1=b2=b3=b4=0): the regression is significant as a whole')
else:
print('Accept H0 (b1=b2=b3=b4=0): the regression is not significant as a whole')
print()
##############################################################
######## 3. Test the significance of the coefficients of #####
####### the explanatory variables individually ###############
##############################################################
print('3. Test the significance of the coefficients of the explanatory variables individually')
print()
t_critical = t.ppf(0.95, n - k)
print('Critical value t(n-k) ~ {}'.format(t_critical))
print()
for i in range(4):
t_val = (coefficient_vector[i] - 0) / (coefficient_variance(i, y_1, y_estimation, get_x_matrix(variation))) ** (1 / 2)
print("Гипотеза H0: коэффициент b{}=0".format(i + 1))
print('Критерий значимости для коэффициента b{}:\t{}'.format(i + 1, t_val))
if t_critical >= t_val >= -t_critical:
print('\tПринимаем гипотезу о том, что b{}=0'.format(i + 1))
else:
print('\tОтвергаем гипотезу о том, что b{}=0'.format(i + 1))
print()
##############################################################
######## 4. Test the hypothesis of joint significance of #####
####### the coefficients of variables x3 and x4 ##############
##############################################################
print('4. Test the hypothesis of joint significance of the coefficients of variables x3 and x4')
print()
# this is the same model, but assuming the coefficients of x3 and x4 equal 0
# that is what we call the restricted model (restricted by the condition b3 = b4 = 0)
coefficient_vector_r = ls(variation, restricted=True)
y_estimation_r = [coefficient_vector_r[0]
+ coefficient_vector_r[1] * x_2[i]
for i in range(40)]
q = 2 # number of "=" constraints in the restriction
# the restriction is:
# b3 = 0
# b4 = 0
# RSS Restricted
rss_r = rss(y_1, y_estimation_r)
f_r_critical = f.ppf(0.95, q, n - k)
f_r_real = ((rss_r - rss_ur) / q) / (rss_ur / (n - k))
print('F (95%, q, n-k) is {}'.format(f_r_critical))
print('(rss_r - rss_ur) / q / (rss_ur / (n - k)) is {}'.format(f_r_real))
if f_r_critical < f_r_real:
print('Reject H0 (b3=b4=0): the coefficients of x3 and x4 are jointly significant')
else:
print('Accept H0 (b3=b4=0): the coefficients of x3 and x4 are not jointly significant')
print()
##############################################################
######## 5. Compute the correlation matrix ###################
######## for the explanatory variables #######################
##############################################################
print('5. Compute the correlation matrix for the explanatory variables')
print()
corr_matrix = [y_1, x_2, x_3, x_4]
names = ["Y", "X2", "X3", "X4"]
tab = "\t"
print(tab + names[0] + tab + tab + names[1] + tab + tab + names[2] + tab + tab + names[3])
for i in range(4):
print(names[i], end=tab)
for j in range(4):
print("%.4f" % pearson(corr_matrix[i], corr_matrix[j]), end=tab)
print()
print()
##############################################################
######## Part 2. Ridge estimates #############################
##############################################################
print('Plot the dependence of the regression coefficient estimates on λ')
print()
def draw_plot(sample_1, sample_2, numb):
plt.figure(num="B{}".format(numb))
plt.title("Dependence of b{} from lambda".format(numb))
plt.xlabel("lambda")
plt.ylabel("b{}".format(numb))
plt.plot(sample_1, sample_2)
plt.show()
lmd = []
b1 = []
b2 = []
b3 = []
b4 = []
for i in range(21):
res = ls(variation, ridge=(i / 10))
lmd.append(i / 10)
b1.append(res[0])
b2.append(res[1])
b3.append(res[2])
b4.append(res[3])
print("𝜆 = " + str(i / 10) + ";\t" + str(res))
print()
draw_plot(lmd, b1, 1)
draw_plot(lmd, b2, 2)
draw_plot(lmd, b3, 3)
draw_plot(lmd, b4, 4)
| apache-2.0 |
mxlei01/healthcareai-py | healthcareai/common/cardinality_checks.py | 4 | 6064 | """Cardinality Checks."""
from tabulate import tabulate
import pandas as pd
from healthcareai.common.healthcareai_error import HealthcareAIError
def calculate_cardinality(dataframe):
"""
Find cardinality of columns in a dataframe.
This function counts the number of rows in the dataframe, counts the unique
values in each column and sorts by the ratio of unique values relative to
the number of rows.
This is useful for profiling training data.
Args:
dataframe (pandas.core.frame.DataFrame):
Returns:
pandas.core.frame.DataFrame: dataframe sorted by cardinality (unique
count ratio)
"""
record_count = len(dataframe)
result_list = []
for column in dataframe:
count = len(dataframe[column].unique())
ordinal_ratio = count / record_count
result_list.append([column, count, ordinal_ratio])
results = pd.DataFrame(result_list)
results.columns = ['Feature Name', 'unique_value_count', 'unique_ratio']
results.sort_values('unique_ratio', ascending=False, inplace=True)
results.reset_index(inplace=True)
return results
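# Minimal usage sketch (toy dataframe, pandas only) of the cardinality profile
# produced by calculate_cardinality above; the printed frame is sorted by
# unique_ratio, so 'patient_id' (ratio 1.0) comes first:
if __name__ == '__main__':
    _toy = pd.DataFrame({
        'patient_id': [1, 2, 3, 4],      # fully unique -> ratio 1.0
        'gender': ['F', 'M', 'F', 'M'],  # 2 unique values -> ratio 0.5
        'constant': [7, 7, 7, 7],        # 1 unique value -> ratio 0.25
    })
    print(calculate_cardinality(_toy))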
def cardinality_threshold_filter(dataframe, ratio_name, warning_threshold=0.3):
"""
Filter a cardinality dataframe to rows that exceed a warning threshold.
Useful for warning on highly cardinal features.
Args:
dataframe (pandas.core.frame.DataFrame): The cardinality dataframe.
ratio_name (str): The name of the cardinality ratio column
warning_threshold (float): The ratio threshold above which to include.
Returns:
pandas.core.frame.DataFrame: A dataframe containing rows that meet or
exceed the threshold
"""
if warning_threshold > 1.0:
raise HealthcareAIError('The warning_threshold maximum is 1.0 and you '
'set it to {}'.format(warning_threshold))
warnings = dataframe[dataframe[ratio_name] >= warning_threshold]
results = warnings.drop(labels=['index'], axis=1)
return results
def check_high_cardinality(dataframe, exclusions, warning_threshold=0.3):
"""
Alert user if highly cardinal features are found.
This function calculates cardinality, and prints a warning to the console
to warn and educate user. This includes the features found, the unique
value counts, and the unique ratio.
It is important to note that we do not want to prevent training on highly
cardinal data, we just want to inform the user, therefore no errors are
raised.
Useful for profiling training data.
Args:
dataframe (pandas.core.frame.DataFrame): The raw dataframe before
data preparation
exclusions (list): A list of columns to ignore (like the grain)
warning_threshold (float): The warning threshold above which to alert
the user.
"""
row_count = len(dataframe)
if exclusions:
dataframe = dataframe.drop(exclusions, axis=1)
cardinality = calculate_cardinality(dataframe)
warnings = cardinality_threshold_filter(
cardinality, 'unique_ratio',
warning_threshold)
if len(warnings) > 0:
print(
'\n***************** High Cardinality Warning! ****************\n'
'- Your data contains features/columns with many unique values.\n'
'- This is referred to as high cardinality.\n'
'- Consider dropping these columns to help your model be more\n'
'generalizable to unseen data, and speed up training.\n'
'- Data contains {} rows:'.format(row_count))
name_and_counts = warnings[['Feature Name', 'unique_value_count']]
table = tabulate(
name_and_counts,
tablefmt='fancy_grid',
headers=warnings.columns,
showindex=False)
print(table)
print('\n')
def cardinality_low_filter(dataframe):
"""
Filter a cardinality dataframe to rows that have one cardinality.
Args:
dataframe (pandas.core.frame.DataFrame): The cardinality dataframe.
Returns:
pandas.core.frame.DataFrame: A dataframe containing one cardinal
features.
"""
try:
warnings = dataframe[dataframe.unique_value_count == 1]
results = warnings.drop(labels=['index'], axis=1)
return results
except AttributeError:
raise HealthcareAIError(
'Expected a dataframe with a `unique_value_count` key and found '
'none. Please verify the dataframe passed to this function.')
def check_one_cardinality(dataframe):
"""
Alert user if features with one cardinality are found.
This function calculates cardinality, and prints a warning to the console
to warn and educate user. This includes the features found, the unique
value counts, and the unique ratio.
It is important to note that we do not want to prevent training on one
cardinal data, we just want to inform the user, therefore no errors are
raised.
Useful for profiling training data.
Args:
dataframe (pandas.core.frame.DataFrame): The raw input dataframe.
"""
row_count = len(dataframe)
cardinality = calculate_cardinality(dataframe)
warnings = cardinality_low_filter(cardinality)
if len(warnings) > 0:
print(
'\n***************** Low Cardinality Warning! *****************\n'
'- Your data contains features/columns with no unique values.\n'
'- Your model can learn nothing from these features because they\n'
'are all identical.\n'
'- Consider dropping these features/columns to simplify the\n'
'model and speed up training.\n'
'- Data contains {} rows:'.format(row_count))
name_and_counts = warnings[['Feature Name', 'unique_value_count']]
table = tabulate(
name_and_counts,
tablefmt='fancy_grid',
headers=warnings.columns,
showindex=False)
print(table)
print('\n')
| mit |
Jimmy-Morzaria/scikit-learn | sklearn/datasets/tests/test_base.py | 39 | 5607 | import os
import shutil
import tempfile
import warnings
import nose
import numpy
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_boston
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
# clear_data_home will delete both the content and the folder it-self
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
assert_true(res.target.size, 442)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
| bsd-3-clause |
jefffohl/nupic | examples/opf/clients/hotgym/anomaly/one_gym/run.py | 34 | 4938 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Groups together code used for creating a NuPIC model and dealing with IO.
(This is a component of the One Hot Gym Anomaly Tutorial.)
"""
import importlib
import sys
import csv
import datetime
from nupic.data.inference_shifter import InferenceShifter
from nupic.frameworks.opf.modelfactory import ModelFactory
import nupic_anomaly_output
DESCRIPTION = (
"Starts a NuPIC model from the model params returned by the swarm\n"
"and pushes each line of input from the gym into the model. Results\n"
"are written to an output file (default) or plotted dynamically if\n"
"the --plot option is specified.\n"
)
GYM_NAME = "rec-center-hourly"
DATA_DIR = "."
MODEL_PARAMS_DIR = "./model_params"
# '7/2/10 0:00'
DATE_FORMAT = "%m/%d/%y %H:%M"
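# Illustrative only (hypothetical constant, not used elsewhere): DATE_FORMAT
# parses timestamps like the sample above, e.g.
# datetime.datetime.strptime("7/2/10 0:00", DATE_FORMAT) == datetime.datetime(2010, 7, 2, 0, 0)
EXAMPLE_TIMESTAMP = datetime.datetime.strptime("7/2/10 0:00", DATE_FORMAT)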
def createModel(modelParams):
"""
Given a model params dictionary, create a CLA Model. Automatically enables
inference for kw_energy_consumption.
:param modelParams: Model params dict
:return: OPF Model object
"""
model = ModelFactory.create(modelParams)
model.enableInference({"predictedField": "kw_energy_consumption"})
return model
def getModelParamsFromName(gymName):
"""
Given a gym name, assumes a matching model params python module exists within
the model_params directory and attempts to import it.
:param gymName: Gym name, used to guess the model params module name.
:return: OPF Model params dictionary
"""
importName = "model_params.%s_model_params" % (
gymName.replace(" ", "_").replace("-", "_")
)
print "Importing model params from %s" % importName
try:
importedModelParams = importlib.import_module(importName).MODEL_PARAMS
except ImportError:
raise Exception("No model params exist for '%s'. Run swarm first!"
% gymName)
return importedModelParams
def runIoThroughNupic(inputData, model, gymName, plot):
"""
Handles looping over the input data and passing each row into the given model
object, as well as extracting the result object and passing it into an output
handler.
:param inputData: file path to input data CSV
:param model: OPF Model object
:param gymName: Gym name, used for output handler naming
:param plot: Whether to use matplotlib or not. If false, uses file output.
"""
inputFile = open(inputData, "rb")
csvReader = csv.reader(inputFile)
# skip header rows
csvReader.next()
csvReader.next()
csvReader.next()
shifter = InferenceShifter()
if plot:
output = nupic_anomaly_output.NuPICPlotOutput(gymName)
else:
output = nupic_anomaly_output.NuPICFileOutput(gymName)
counter = 0
for row in csvReader:
counter += 1
if (counter % 100 == 0):
print "Read %i lines..." % counter
timestamp = datetime.datetime.strptime(row[0], DATE_FORMAT)
consumption = float(row[1])
result = model.run({
"timestamp": timestamp,
"kw_energy_consumption": consumption
})
if plot:
result = shifter.shift(result)
prediction = result.inferences["multiStepBestPredictions"][1]
anomalyScore = result.inferences["anomalyScore"]
output.write(timestamp, consumption, prediction, anomalyScore)
inputFile.close()
output.close()
def runModel(gymName, plot=False):
"""
Assumes the gymName corresponds to both a like-named model_params file in the
model_params directory, and that the data exists in a like-named CSV file in
the current directory.
:param gymName: Important for finding model params and input CSV file
:param plot: Plot in matplotlib? Don't use this unless matplotlib is
installed.
"""
print "Creating model from %s..." % gymName
model = createModel(getModelParamsFromName(gymName))
inputData = "%s/%s.csv" % (DATA_DIR, gymName.replace(" ", "_"))
runIoThroughNupic(inputData, model, gymName, plot)
if __name__ == "__main__":
print DESCRIPTION
plot = False
args = sys.argv[1:]
if "--plot" in args:
plot = True
runModel(GYM_NAME, plot=plot)
| gpl-3.0 |
martinsch/vigra | vigranumpy/lib/__init__.py | 1 | 17175 | #######################################################################
#
# Copyright 2009-2010 by Ullrich Koethe
#
# This file is part of the VIGRA computer vision library.
# The VIGRA Website is
# http://hci.iwr.uni-heidelberg.de/vigra/
# Please direct questions, bug reports, and contributions to
# [email protected] or
# [email protected]
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
#######################################################################
import sys, os
_vigra_path = os.path.abspath(os.path.dirname(__file__))
_vigra_doc_path = _vigra_path + '/doc/vigranumpy/index.html'
if sys.platform.startswith('win'):
# On Windows, add subdirectory 'dlls' to the PATH in order to find
# the DLLs vigranumpy depends upon. Since this directory appears
# at the end of PATH, already installed DLLs are always preferred.
_vigra_dll_path = _vigra_path + '/dlls'
if os.path.exists(_vigra_dll_path):
os.putenv('PATH', os.getenv('PATH') + os.pathsep + _vigra_dll_path)
def _fallbackModule(moduleName, message):
'''This function installs a fallback module with the given 'moduleName'.
All function calls into this module raise an ImportError with the
given 'message' that hopefully tells the user why the real module
was not available.
'''
import sys
moduleClass = vigranumpycore.__class__
class FallbackModule(moduleClass):
def __init__(self, name):
moduleClass.__init__(self, name)
self.__name__ = name
def __getattr__(self, name):
if name.startswith('__'):
return moduleClass.__getattribute__(self, name)
try:
return moduleClass.__getattribute__(self, name)
except AttributeError:
raise ImportError("""%s.%s: %s""" % (self.__name__, name, self.__doc__))
module = FallbackModule(moduleName)
sys.modules[moduleName] = module
module.__doc__ = """Import of module '%s' failed.\n%s""" % (moduleName, message)
if not os.path.exists(_vigra_doc_path):
_vigra_doc_path = "http://hci.iwr.uni-heidelberg.de/vigra/doc/vigranumpy/index.html"
__doc__ = '''VIGRA Computer Vision Library
HTML documentation is available in
%s
Help on individual functions can be obtained via their doc strings
as usual.
The following sub-modules group related functionality:
* arraytypes (VigraArray and axistags, automatically imported into 'vigra')
* ufunc (improved array arithmetic, automatically used by VigraArray)
* impex (image and array I/O)
* colors (color space transformations)
* filters (spatial filtering, e.g. smoothing)
* sampling (image and array re-sampling and interpolation)
* fourier (Fourier transform and Fourier domain filters)
* analysis (image analysis and segmentation)
* learning (machine learning and classification)
* noise (noise estimation and normalization)
* geometry (geometric algorithms, e.g. convex hull)
''' % _vigra_doc_path
from __version__ import version
import vigranumpycore
import arraytypes
import impex
import sampling
import filters
import analysis
import learning
import colors
import noise
import geometry
import optimization
sampling.ImagePyramid = arraytypes.ImagePyramid
try:
import fourier
except Exception, e:
_fallbackModule('vigra.fourier',
'''
%s
Make sure that the fftw3 libraries are found during compilation and import.
They may be downloaded at http://www.fftw.org/.''' % str(e))
import fourier
# import most frequently used functions
from arraytypes import *
standardArrayType = arraytypes.VigraArray
defaultAxistags = arraytypes.VigraArray.defaultAxistags
from impex import readImage, readVolume
def readHDF5(filenameOrGroup, pathInFile, order=None):
'''Read an array from an HDF5 file.
'filenameOrGroup' can contain a filename or a group object
referring to an already open HDF5 file. 'pathInFile' is the name
of the dataset to be read, including intermediate groups. If the
first argument is a group object, the path is relative to this
group, otherwise it is relative to the file's root group.
If the dataset has an attribute 'axistags', the returned array
will have type :class:`~vigra.VigraArray` and will be transposed
into the given 'order' ('vigra.VigraArray.defaultOrder'
will be used if no order is given). Otherwise, the returned
array is a plain 'numpy.ndarray'. In this case, order='F' will
return the array transposed into Fortran order.
Requirements: the 'h5py' module must be installed.
'''
import h5py
if isinstance(filenameOrGroup, h5py.highlevel.Group):
file = None
group = filenameOrGroup
else:
file = h5py.File(filenameOrGroup, 'r')
group = file['/']
try:
dataset = group[pathInFile]
if not isinstance(dataset, h5py.highlevel.Dataset):
raise IOError("readHDF5(): '%s' is not a dataset" % pathInFile)
data = dataset.value
axistags = dataset.attrs.get('axistags', None)
if axistags is not None:
data = data.view(arraytypes.VigraArray)
data.axistags = arraytypes.AxisTags.fromJSON(axistags)
if order is None:
order = arraytypes.VigraArray.defaultOrder
data = data.transposeToOrder(order)
else:
if order == 'F':
data = data.transpose()
elif order not in [None, 'C', 'A']:
raise IOError("readHDF5(): unsupported order '%s'" % order)
finally:
if file is not None:
file.close()
return data
def writeHDF5(data, filenameOrGroup, pathInFile):
'''Write an array to an HDF5 file.
'filenameOrGroup' can contain a filename or a group object
referring to an already open HDF5 file. 'pathInFile' is the name of the
dataset to be written, including intermediate groups. If the first
argument is a group object, the path is relative to this group,
otherwise it is relative to the file's root group. If the dataset already
exists, it will be replaced without warning.
If 'data' has an attribute 'axistags', the array is transposed to
numpy order before writing. Moreover, the axistags will be
stored along with the data in an attribute 'axistags'.
Requirements: the 'h5py' module must be installed.
'''
import h5py
if isinstance(filenameOrGroup, h5py.highlevel.Group):
file = None
group = filenameOrGroup
else:
file = h5py.File(filenameOrGroup)
group = file['/']
try:
levels = pathInFile.split('/')
for groupname in levels[:-1]:
if groupname == '':
continue
g = group.get(groupname, default=None)
if g is None:
group = group.create_group(groupname)
elif not isinstance(g, h5py.highlevel.Group):
raise IOError("writeHDF5(): invalid path '%s'" % pathInFile)
else:
group = g
dataset = group.get(levels[-1], default=None)
if dataset is not None:
if isinstance(dataset, h5py.highlevel.Dataset):
del group[levels[-1]]
else:
raise IOError("writeHDF5(): cannot replace '%s' because it is not a dataset" % pathInFile)
try:
data = data.transposeToNumpyOrder()
except:
pass
dataset = group.create_dataset(levels[-1], data=data)
if hasattr(data, 'axistags'):
dataset.attrs['axistags'] = data.axistags.toJSON()
finally:
if file is not None:
file.close()
impex.readHDF5 = readHDF5
readHDF5.__module__ = 'vigra.impex'
impex.writeHDF5 = writeHDF5
writeHDF5.__module__ = 'vigra.impex'
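# Minimal round-trip sketch for the two helpers above (hypothetical temporary
# file; assumes the 'h5py' module is installed, as the docstrings already
# require). A plain numpy array is used so the sketch does not depend on any
# particular VigraArray factory; arrays carrying axistags round-trip the same
# way, with the tags stored in the 'axistags' attribute.
if __name__ == '__main__':
    import tempfile
    import numpy
    _tmp = os.path.join(tempfile.mkdtemp(), 'example.h5')
    _data = numpy.arange(12).reshape(3, 4)
    writeHDF5(_data, _tmp, 'group/dataset')   # stored under /group/dataset
    _back = readHDF5(_tmp, 'group/dataset')   # read back as a plain numpy array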
from filters import convolve, gaussianSmoothing
from sampling import resize
# import enums
CLOCKWISE = sampling.RotationDirection.CLOCKWISE
COUNTER_CLOCKWISE = sampling.RotationDirection.COUNTER_CLOCKWISE
UPSIDE_DOWN = sampling.RotationDirection.UPSIDE_DOWN
CompleteGrow = analysis.SRGType.CompleteGrow
KeepContours = analysis.SRGType.KeepContours
StopAtThreshold = analysis.SRGType.StopAtThreshold
_selfdict = globals()
def searchfor(searchstring):
'''Scan all vigra modules to find classes and functions containing
'searchstring' in their name.
'''
for attr in _selfdict.keys():
contents = dir(_selfdict[attr])
for cont in contents:
if ( cont.upper().find(searchstring.upper()) ) >= 0:
print attr+"."+cont
# FIXME: use axistags here
def imshow(image):
'''Display a scalar or RGB image by means of matplotlib.
If the image does not have one or three channels, an exception is raised.
The image will be automatically scaled to the range 0...255 when its dtype
is not already 'uint8'.
'''
import matplotlib.pylab
if not hasattr(image, 'axistags'):
return matplotlib.pyplot.imshow(image)
image = image.transposeToNumpyOrder()
if image.channels == 1:
image = image.dropChannelAxis().view(numpy.ndarray)
plot = matplotlib.pyplot.imshow(image, cmap=matplotlib.cm.gray, \
norm=matplotlib.cm.colors.Normalize())
matplotlib.pylab.show()
return plot
elif image.channels == 3:
if image.dtype != numpy.uint8:
out = image.__class__(image.shape, dtype=numpy.uint8, axistags=image.axistags)
image = colors.linearRangeMapping(image, newRange=(0.0, 255.0), out=out)
plot = matplotlib.pyplot.imshow(image.view(numpy.ndarray))
matplotlib.pylab.show()
return plot
else:
raise RuntimeError("vigra.imshow(): Image must have 1 or 3 channels.")
# auto-generate code for additional Kernel generators:
def _genKernelFactories(name):
for oldName in dir(eval('filters.'+name)):
if not oldName.startswith('init'):
continue
#remove init from beginning and start with lower case character
newPrefix = oldName[4].lower() + oldName[5:]
if newPrefix == "explicitly":
newPrefix = "explict"
newName = newPrefix + 'Kernel'
if name == 'Kernel2D':
newName += '2D'
code = '''def %(newName)s(*args):
k = filters.%(name)s()
k.%(oldName)s(*args)
return k
%(newName)s.__doc__ = filters.%(name)s.%(oldName)s.__doc__
filters.%(newName)s=%(newName)s
''' % {'oldName': oldName, 'newName': newName, 'name': name}
exec code
_genKernelFactories('Kernel1D')
_genKernelFactories('Kernel2D')
del _genKernelFactories
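# For example, an initializer such as filters.Kernel1D.initGaussian(...) is now also
# reachable as the generated factory filters.gaussianKernel(...); the factory name is
# derived automatically from the initializer name, as implemented above.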
# define watershedsUnionFind()
def _genWatershedsUnionFind():
def watershedsUnionFind(image, neighborhood=None, out = None):
'''Compute watersheds of an image using the union find algorithm.
If 'neighborhood' is 'None', it defaults to 8-neighborhood for 2D inputs
and 6-neighborhood for 3D inputs.
Calls :func:`watersheds` with parameters::\n\n
watersheds(image, neighborhood=neighborhood, method='UnionFind', out=out)
'''
if neighborhood is None:
neighborhood = 8 if image.spatialDimensions == 2 else 6
return analysis.watersheds(image, neighborhood=neighborhood, method='UnionFind', out=out)
watershedsUnionFind.__module__ = 'vigra.analysis'
analysis.watershedsUnionFind = watershedsUnionFind
_genWatershedsUnionFind()
del _genWatershedsUnionFind
# define tensor convenience functions
def _genTensorConvenienceFunctions():
def hessianOfGaussianEigenvalues(image, scale, out=None,
sigma_d=0.0, step_size=1.0, window_size=0.0, roi=None):
'''Compute the eigenvalues of the Hessian of Gaussian at the given scale
for a scalar image or volume.
Calls :func:`hessianOfGaussian` and :func:`tensorEigenvalues`.
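For example, on a scalar 2D image ``img`` (a hypothetical variable)::

    ev = vigra.filters.hessianOfGaussianEigenvalues(img, 2.0)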
'''
hessian = filters.hessianOfGaussian(image, scale,
sigma_d=sigma_d, step_size=step_size,
window_size=window_size, roi=roi)
return filters.tensorEigenvalues(hessian, out=out)
hessianOfGaussianEigenvalues.__module__ = 'vigra.filters'
filters.hessianOfGaussianEigenvalues = hessianOfGaussianEigenvalues
def structureTensorEigenvalues(image, innerScale, outerScale, out=None,
sigma_d=0.0, step_size=1.0, window_size=0.0, roi=None):
'''Compute the eigenvalues of the structure tensor at the given scales
for a scalar or multi-channel image or volume.
Calls :func:`structureTensor` and :func:`tensorEigenvalues`.
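For example, on an image ``img`` (a hypothetical variable; the scales are illustrative)::

    ev = vigra.filters.structureTensorEigenvalues(img, 1.0, 3.0)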
'''
st = filters.structureTensor(image, innerScale, outerScale,
sigma_d=sigma_d, step_size=step_size,
window_size=window_size, roi=roi)
return filters.tensorEigenvalues(st, out=out)
structureTensorEigenvalues.__module__ = 'vigra.filters'
filters.structureTensorEigenvalues = structureTensorEigenvalues
_genTensorConvenienceFunctions()
del _genTensorConvenienceFunctions
# define feature convenience functions
def _genFeaturConvenienceFunctions():
def supportedFeatures(array):
'''Return a list of feature names that are available for the given array. These feature
names are the valid inputs to a call of :func:`extractFeatures`. E.g., to compute
just the first two features in the list, use::
f = vigra.analysis.supportedFeatures(array)
print "Computing features:", f[:2]
r = vigra.analysis.extractFeatures(array, features=f[:2])
'''
return analysis.extractFeatures(array, None).supportedFeatures()
supportedFeatures.__module__ = 'vigra.analysis'
analysis.supportedFeatures = supportedFeatures
def supportedRegionFeatures(array, labels):
'''Return a list of feature names that are available for the given array and label array.
These feature names are the valid inputs to a call of
:func:`extractRegionFeatures`. E.g., to compute just the first two features in the
list, use::
f = vigra.analysis.supportedRegionFeatures(array, labels)
print "Computing features:", f[:2]
r = vigra.analysis.extractRegionFeatures(array, labels, features=f[:2])
'''
return analysis.extractRegionFeatures(array, labels, None).supportedFeatures()
supportedRegionFeatures.__module__ = 'vigra.analysis'
analysis.supportedRegionFeatures = supportedRegionFeatures
# implement the read-only part of the 'dict' API in FeatureAccumulator and RegionFeatureAccumulator
def __len__(self):
return len(self.keys())
def __iter__(self):
return self.keys().__iter__()
def has_key(self, key):
try:
return self.isActive(key)
except:
return False
def values(self):
return [self[k] for k in self.keys()]
def items(self):
return [(k, self[k]) for k in self.keys()]
def iterkeys(self):
return self.keys().__iter__()
def itervalues(self):
for k in self.keys():
yield self[k]
def iteritems(self):
for k in self.keys():
yield (k, self[k])
for k in ['__len__', '__iter__', 'has_key', 'values', 'items', 'iterkeys', 'itervalues', 'iteritems']:
setattr(analysis.FeatureAccumulator, k, eval(k))
setattr(analysis.RegionFeatureAccumulator, k, eval(k))
_genFeaturConvenienceFunctions()
del _genFeaturConvenienceFunctions
| mit |
tiregram/algo-E3FI | tp4/graph_time.py | 1 | 1706 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import fifo
import random
import exo1
import time
class grap_time:
def get_the_time_function(func,data):
start_time = time.time()
func(**data)
return time.time() - start_time
def get_data(value_min,value_max,quantity):
elem = fifo.Deque(1)
for a in range(0,quantity):
elem.push_last(random.randint(value_min,value_max))
return elem
def data_gen(t=0):
cnt = 1
while cnt < 4000:
data = dict()
cnt += 1000
data["elemToTry"] = grap_time.get_data(0,100000,cnt)
print(cnt)
yield cnt , grap_time.get_the_time_function(exo1.insertion_sort,data)
def init():
ax.set_ylim(0, 10)
ax.set_xlim(90, 1000)
del xdata[:]
del ydata[:]
line.set_data(xdata,
ydata)
return line,
def run(data):
# update the data
t, y = data
xdata.append(t)
ydata.append(y)
xmin, xmax = ax.get_xlim()
if t >= xmax:
ax.set_xlim(xmin, 2*xmax)
ax.figure.canvas.draw()
line.set_data(xdata,ydata)
return line,
fig, ax = plt.subplots()
xdata, ydata = [], []
line, = ax.plot([], [], lw=2)
def draw():
ax.grid()
ani = animation.FuncAnimation(
fig,
grap_time.run,
grap_time.data_gen,
blit=False,
interval=1,
repeat=False,
init_func=grap_time.init)
plt.show()
draw()
| gpl-3.0 |
clu8/SoilCarbon | analyze_utils.py | 1 | 1905 | import pandas as pd
def convert_units(layers):
layers['bottom'] /= 100 # cm to m TODO check if it's cm
layers['top'] /= 100 # cm to m TODO check if it's cm
layers['orgc_value_avg'] /= 1000 # g/kg to percent
layers['bdfi_value_avg'] *= 1000 # kg/dm^3 to kg/m^3
layers['bdws_value_avg'] *= 1000 # kg/dm^3 to kg/m^3
return layers
def drop_same_profile_layers(layers, bad_layers):
'''
Given bad_layers, drops all layers in the same profiles from layers and returns the result.
'''
bad_layers_mask = layers['profile_id'].isin(bad_layers['profile_id'])
print(f'Dropped layers: {sum(bad_layers_mask)}')
return layers[~bad_layers_mask]
def drop_bad_data(layers, profiles):
bad_layer_mask = layers['top'].isnull() | layers['bottom'].isnull() | layers['orgc_value_avg'].isnull()
print(f'Dropping {sum(bad_layer_mask)} layers with null top, bottom, or orgc_value_avg. ')
layers = layers[~bad_layer_mask]
print('Finding layers with bad data and dropping those layers and all other layers in the same profile. ')
bad_layer_mask = (layers['top'] < 0) | (layers['bottom'] <= 0)
print(f'Layers with top < 0 or bottom <= 0, and layers in same profiles: {sum(bad_layer_mask)}')
layers = drop_same_profile_layers(layers, layers[bad_layer_mask])
bad_layer_mask = layers['orgc_value_avg'] == 0
print(f'Layers with orgc_value_avg = 0: {sum(bad_layer_mask)}')
layers = drop_same_profile_layers(layers, layers[bad_layer_mask])
print('Layers in profiles <40 cm with all orgc > 17%.')
print(f'Dropped layers: {sum(layers["peatland_manual"] == "BadData")}')
layers = layers[layers['peatland_manual'] != 'BadData']
return layers
def preprocess(layers, profiles):
layers = convert_units(layers)
layers = pd.merge(layers, profiles, on='profile_id')
layers = drop_bad_data(layers, profiles)
return layers
| mit |
ctogle/grapeipm_support | plot_fungal_risk_models.py | 1 | 6545 | #!/usr/bin/python2.7
import run_fungal_risk_models
import convert
import pdb,os,argparse,numpy,datetime,collections
import matplotlib
matplotlib.use('Agg')
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
def plot_axes_xy(x = (-5,5),y = (-5,5),f = None,loc = '111',aspect = 'auto'):
if f is None:ax = plt.figure().add_subplot(loc)
else:ax = f.add_subplot(loc)
ax.set_xlim(*x)
ax.set_ylim(*y)
if aspect == 'equal':ax.set_aspect('equal')
return ax
def plot_sensor(hcfg,path,sensor,data,x_l = 'Time (Days)',y_l = 'Y',f = None):
timestamp_format = hcfg[sensor][1]
epoch = datetime.datetime.utcfromtimestamp(0)
begintime = data[0]['begintime'].strftime(timestamp_format)
endtime = data[-1]['endtime'].strftime(timestamp_format)
xs,yss = [],[[] for j in range(len(data[0].keys())-3)]
diseases = data[0].keys()
diseases.remove('begintime')
diseases.remove('endtime')
diseases.remove('hydrocode')
labels = ['None','Low','Moderate','High']
for dp in data:
st = (dp['begintime'] - epoch).total_seconds()
et = (dp['endtime'] - epoch).total_seconds()
xs.append(st);xs.append(et)
for j,d in enumerate(diseases):
y = dp[d] if not dp[d] in ('NULL','No-Read') else -1.0
if type(y) == type(''):y = labels.index(y)
yss[j].append(y);yss[j].append(y)
x = numpy.array(xs)/(24.0*60.0*60.0);ys = numpy.array(yss)  # convert seconds to days
x -= x.min()
diseasecount = ys.shape[0]//2
#my = (ys[1::2,:].min()-0.1,ys[1::2,:].max()+0.1)
my = (-1.1,1.1)
if f is None:f = plt.figure(figsize = (8,8))
else:f.clear()
ax2dl = plot_axes_xy(f = f,x = (x.min(),x.max()),y = my,loc = '211')
ax2dr = plot_axes_xy(f = f,x = (x.min(),x.max()),y = (-1.1,3.1),loc = '212')
colors = [matplotlib.cm.jet(k) for k in numpy.linspace(0,1,diseasecount)]
colors = [a for b in zip(colors,colors) for a in b]
for y,d,c in zip(ys,diseases,colors):
line = matplotlib.lines.Line2D(x,y,color = c,lw = 1)
if d.endswith('Index'):
line.set_label(d[:d.rfind(' Index')])
ax2dl.add_line(line)
elif d.endswith('Risk'):
line.set_label(d[:d.rfind(' Risk')])
ax2dr.add_line(line)
else:raise ValueError
mx = [x.min(),x.max()]
ax2dl.plot(mx,[0,0],ls = '--',lw = 1,color = 'black')
ax2dl.plot(mx,[0.01,0.01],ls = '--',lw = 1,color = 'black')
ax2dl.plot(mx,[0.5,0.5],ls = '--',lw = 1,color = 'black')
ax2dl.plot(mx,[1,1],ls = '--',lw = 1,color = 'black')
ax2dr.plot(mx,[-1,-1],ls = '--',lw = 1,color = 'black')
ax2dr.plot(mx,[ 0, 0],ls = '--',lw = 1,color = 'brown')
ax2dr.plot(mx,[ 1, 1],ls = '--',lw = 1,color = 'blue')
ax2dr.plot(mx,[ 2, 2],ls = '--',lw = 1,color = 'green')
ax2dr.plot(mx,[ 3, 3],ls = '--',lw = 1,color = 'red')
ax2dr.yaxis.set_label_position('right')
ax2dr.set_yticks([-1,0,1,2,3])
ax2dr.set_yticklabels(['No-Read']+labels)
ax2dl.legend();ax2dr.legend()
ax2dl.set_title('%s Disease Indices' % sensor)
ax2dr.set_title('%s Disease Risks' % sensor)
ax2dl.set_xlabel(x_l);ax2dr.set_xlabel(x_l)
ax2dl.set_ylabel('Index');ax2dr.set_ylabel('Risk')
fn = os.path.join(path,'%s.png' % sensor)
f.subplots_adjust(wspace = 0.2,hspace = 0.3)
f.subplots_adjust(left = 0.02,right = 0.98,top = 0.98,bottom = 0.2)
f.savefig(fn,dpi = 100,bbox_inches = 'tight')
def plot_measured(datapoints,hcfg,f = None):
pngpath = os.path.join(os.getcwd(),'measured_model_values')
if not os.path.exists(pngpath):os.mkdir(pngpath)
for hydrocode in datapoints:
plot_sensor(hcfg,pngpath,hydrocode,datapoints[hydrocode],f = f)
def plot_axes(f = None,x = (-5,5),y = (-5,5),z = (-5,5)):
if f is None:ax = plt.figure().add_subplot(111,projection = '3d')
else:ax = f.add_subplot(111,projection = '3d')
ax.set_xlim(*x)
ax.set_ylim(*y)
ax.set_zlim(*z)
#ax.set_zlim([-(9.0/16.0)*x,(9.0/16.0)*x])
return ax
def plot_model(path,model,x,y,p,zf,x_l = 'X',y_l = 'Y',z_l = 'Z',f = None):
X,Y = numpy.meshgrid(x,y)
z = numpy.array([zf(p,x,y) for x,y in zip(numpy.ravel(X),numpy.ravel(Y))])
Z = z.reshape(X.shape)
if f is None:f = plt.figure()
else:f.clear()
ax3d = plot_axes(f = f,
x = (X.min(),X.max()),
y = (Y.min(),Y.max()),
z = (Z.min(),Z.max()))
ax3d.plot_surface(X,Y,Z,rstride = 1,cstride = 1)
ax3d.set_xlabel(x_l)
ax3d.set_ylabel(y_l)
ax3d.set_zlabel(z_l)
#print('xbounds',(X.min(),X.max()))
#print('ybounds',(Y.min(),Y.max()))
#print('zbounds',(Z.min(),Z.max()))
for a in xrange(0,360,15):
ax3d.view_init(elev = 20,azim = a)
fn = '%s_%d.png' % (model.replace(' ','_').lower(),a)
fn = os.path.join(path,fn)
f.savefig(fn,dpi = 100)
def plot_theoreticals(f = None):
models = run_fungal_risk_models.models
wd,t = numpy.linspace(4,20,17),numpy.linspace(12,30,19)
wd_l,t_l = 'Wetness Duration (hrs)','Temperature (C)'
pngpath = os.path.join(os.getcwd(),'theoretical_model_values')
if not os.path.exists(pngpath):os.mkdir(pngpath)
for disease,model in models.items():
z_l = '%s Disease Index' % disease.title()
mf = models[disease].diseaseindex
p = models[disease].parameters
plot_model(pngpath,disease,wd,t,p,mf,wd_l,t_l,z_l,f = f)
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class = argparse.RawTextHelpFormatter)
parser.add_argument('configfile',
help = 'specify a parsing configuration file')
parser.add_argument('-i','--inputfiles',
help = 'optionally specify an input data file')
parser.add_argument('-s','--startdate',default = '2010-01-01 00:00:00',
help = 'specify a starting time stamp for relevant new data points')
parser.add_argument('-e','--enddate',default = '2020-01-01 00:00:00',
help = 'specify an ending time stamp for relevant new data points')
parser.add_argument('-c','--hydrocodes',
help = 'specify a hydrocode for all new data points being processed')
args = parser.parse_args()
cfg,hcfg = convert.parse_config(args.configfile)
f = plt.figure(figsize = (8,8))
plot_theoreticals(f)
datapoints = collections.OrderedDict()
ifiles = args.inputfiles.split(',')
for ifile in ifiles:
run_fungal_risk_models.load_data(ifile,datapoints,hcfg,args)
f = plt.figure(figsize = (8,8))
plot_measured(datapoints,hcfg,f)
| mit |
18padx08/PPTex | PPTexEnv_x86_64/lib/python2.7/site-packages/matplotlib/sankey.py | 11 | 40247 | #!/usr/bin/env python
"""
Module for creating Sankey diagrams using matplotlib
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import zip
__author__ = "Kevin L. Davies"
__credits__ = ["Yannick Copin"]
__license__ = "BSD"
__version__ = "2011/09/16"
# Original version by Yannick Copin ([email protected]) 10/2/2010, available
# at:
# http://matplotlib.org/examples/api/sankey_demo_old.html
# Modifications by Kevin Davies ([email protected]) 6/3/2011:
# --Used arcs for the curves (so that the widths of the paths are uniform)
# --Converted the function to a class and created methods to join multiple
# simple Sankey diagrams
# --Provided handling for cases where the total of the inputs isn't 100
# Now, the default layout is based on the assumption that the inputs sum to
# 1. A scaling parameter can be used in other cases.
# --The call structure was changed to be more explicit about layout,
# including the length of the trunk, length of the paths, gap between the
# paths, and the margin around the diagram.
# --Allowed the lengths of paths to be adjusted individually, with an option
# to automatically justify them
# --The call structure was changed to make the specification of path
# orientation more flexible. Flows are passed through one array, with
# inputs being positive and outputs being negative. An orientation
# argument specifies the direction of the arrows. The "main"
# inputs/outputs are now specified via an orientation of 0, and there may
# be several of each.
# --Added assertions to catch common calling errors
# --Added the physical unit as a string argument to be used in the labels, so
# that the values of the flows can usually be applied automatically
# --Added an argument for a minimum magnitude below which flows are not shown
# --Added a tapered trunk in the case that the flows do not sum to 0
# --Allowed the diagram to be rotated
import numpy as np
from matplotlib.cbook import iterable, Bunch
from matplotlib.path import Path
from matplotlib.patches import PathPatch
from matplotlib.transforms import Affine2D
from matplotlib import verbose
from matplotlib import docstring
# Angles [deg/90]
RIGHT = 0
UP = 1
# LEFT = 2
DOWN = 3
class Sankey:
"""
Sankey diagram in matplotlib
Sankey diagrams are a specific type of flow diagram, in which
the width of the arrows is shown proportionally to the flow
quantity. They are typically used to visualize energy or
material or cost transfers between processes.
`Wikipedia (6/1/2011) <http://en.wikipedia.org/wiki/Sankey_diagram>`_
"""
def __init__(self, ax=None, scale=1.0, unit='', format='%G', gap=0.25,
radius=0.1, shoulder=0.03, offset=0.15, head_angle=100,
margin=0.4, tolerance=1e-6, **kwargs):
"""
Create a new Sankey instance.
Optional keyword arguments:
=============== ===================================================
Field Description
=============== ===================================================
*ax* axes onto which the data should be plotted
If *ax* isn't provided, new axes will be created.
*scale* scaling factor for the flows
*scale* sizes the width of the paths in order to
maintain proper layout. The same scale is applied
to all subdiagrams. The value should be chosen
such that the product of the scale and the sum of
the inputs is approximately 1.0 (and the product of
the scale and the sum of the outputs is
approximately -1.0).
*unit* string representing the physical unit associated
with the flow quantities
If *unit* is None, then none of the quantities are
labeled.
*format* a Python number formatting string to be used in
labeling the flow as a quantity (i.e., a number
times a unit, where the unit is given)
*gap* space between paths that break in/break away
to/from the top or bottom
*radius* inner radius of the vertical paths
*shoulder* size of the shoulders of output arrows
*offset* text offset (from the dip or tip of the arrow)
*head_angle* angle of the arrow heads (and negative of the angle
of the tails) [deg]
*margin* minimum space between Sankey outlines and the edge
of the plot area
*tolerance* acceptable maximum of the magnitude of the sum of
flows
The magnitude of the sum of connected flows cannot
be greater than *tolerance*.
=============== ===================================================
The optional arguments listed above are applied to all subdiagrams so
that there is consistent alignment and formatting.
If :class:`Sankey` is instantiated with any keyword arguments other
than those explicitly listed above (``**kwargs``), they will be passed
to :meth:`add`, which will create the first subdiagram.
In order to draw a complex Sankey diagram, create an instance of
:class:`Sankey` by calling it without any kwargs::
sankey = Sankey()
Then add simple Sankey sub-diagrams::
sankey.add() # 1
sankey.add() # 2
#...
sankey.add() # n
Finally, create the full diagram::
sankey.finish()
Or, instead, simply daisy-chain those calls::
Sankey().add().add... .add().finish()
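For instance, a single self-contained diagram (the flow values, labels,
and orientations below are purely illustrative)::

    Sankey(flows=[0.25, 0.15, 0.60, -0.20, -0.15, -0.05, -0.50, -0.10],
           labels=['', '', '', 'First', 'Second', 'Third', 'Fourth', 'Fifth'],
           orientations=[-1, 1, 0, 1, 1, 1, 0, -1]).finish()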
.. seealso::
:meth:`add`
:meth:`finish`
**Examples:**
.. plot:: mpl_examples/api/sankey_demo_basics.py
"""
# Check the arguments.
assert gap >= 0, (
"The gap is negative.\nThis isn't allowed because it "
"would cause the paths to overlap.")
assert radius <= gap, (
"The inner radius is greater than the path spacing.\n"
"This isn't allowed because it would cause the paths to overlap.")
assert head_angle >= 0, (
"The angle is negative.\nThis isn't allowed "
"because it would cause inputs to look like "
"outputs and vice versa.")
assert tolerance >= 0, (
"The tolerance is negative.\nIt must be a magnitude.")
# Create axes if necessary.
if ax is None:
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, xticks=[], yticks=[])
self.diagrams = []
# Store the inputs.
self.ax = ax
self.unit = unit
self.format = format
self.scale = scale
self.gap = gap
self.radius = radius
self.shoulder = shoulder
self.offset = offset
self.margin = margin
self.pitch = np.tan(np.pi * (1 - head_angle / 180.0) / 2.0)
self.tolerance = tolerance
# Initialize the vertices of tight box around the diagram(s).
self.extent = np.array((np.inf, -np.inf, np.inf, -np.inf))
# If there are any kwargs, create the first subdiagram.
if len(kwargs):
self.add(**kwargs)
def _arc(self, quadrant=0, cw=True, radius=1, center=(0, 0)):
"""
Return the codes and vertices for a rotated, scaled, and translated
90 degree arc.
Optional keyword arguments:
=============== ==========================================
Keyword Description
=============== ==========================================
*quadrant* uses 0-based indexing (0, 1, 2, or 3)
*cw* if True, clockwise
*center* (x, y) tuple of the arc's center
=============== ==========================================
"""
# Note: It would be possible to use matplotlib's transforms to rotate,
# scale, and translate the arc, but since the angles are discrete,
# it's just as easy and maybe more efficient to do it here.
ARC_CODES = [Path.LINETO,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4]
# Vertices of a cubic Bezier curve approximating a 90 deg arc
# These can be determined by Path.arc(0,90).
ARC_VERTICES = np.array([[1.00000000e+00, 0.00000000e+00],
[1.00000000e+00, 2.65114773e-01],
[8.94571235e-01, 5.19642327e-01],
[7.07106781e-01, 7.07106781e-01],
[5.19642327e-01, 8.94571235e-01],
[2.65114773e-01, 1.00000000e+00],
# Insignificant
#[6.12303177e-17, 1.00000000e+00]])
[0.00000000e+00, 1.00000000e+00]])
if quadrant == 0 or quadrant == 2:
if cw:
vertices = ARC_VERTICES
else:
vertices = ARC_VERTICES[:, ::-1] # Swap x and y.
elif quadrant == 1 or quadrant == 3:
# Negate x.
if cw:
# Swap x and y.
vertices = np.column_stack((-ARC_VERTICES[:, 1],
ARC_VERTICES[:, 0]))
else:
vertices = np.column_stack((-ARC_VERTICES[:, 0],
ARC_VERTICES[:, 1]))
if quadrant > 1:
radius = -radius # Rotate 180 deg.
return list(zip(ARC_CODES, radius * vertices +
np.tile(center, (ARC_VERTICES.shape[0], 1))))
def _add_input(self, path, angle, flow, length):
"""
Add an input to a path and return its tip and label locations.
"""
if angle is None:
return [0, 0], [0, 0]
else:
x, y = path[-1][1] # Use the last point as a reference.
dipdepth = (flow / 2) * self.pitch
if angle == RIGHT:
x -= length
dip = [x + dipdepth, y + flow / 2.0]
path.extend([(Path.LINETO, [x, y]),
(Path.LINETO, dip),
(Path.LINETO, [x, y + flow]),
(Path.LINETO, [x + self.gap, y + flow])])
label_location = [dip[0] - self.offset, dip[1]]
else: # Vertical
x -= self.gap
if angle == UP:
sign = 1
else:
sign = -1
dip = [x - flow / 2, y - sign * (length - dipdepth)]
if angle == DOWN:
quadrant = 2
else:
quadrant = 1
# Inner arc isn't needed if inner radius is zero
if self.radius:
path.extend(self._arc(quadrant=quadrant,
cw=angle == UP,
radius=self.radius,
center=(x + self.radius,
y - sign * self.radius)))
else:
path.append((Path.LINETO, [x, y]))
path.extend([(Path.LINETO, [x, y - sign * length]),
(Path.LINETO, dip),
(Path.LINETO, [x - flow, y - sign * length])])
path.extend(self._arc(quadrant=quadrant,
cw=angle == DOWN,
radius=flow + self.radius,
center=(x + self.radius,
y - sign * self.radius)))
path.append((Path.LINETO, [x - flow, y + sign * flow]))
label_location = [dip[0], dip[1] - sign * self.offset]
return dip, label_location
def _add_output(self, path, angle, flow, length):
"""
Append an output to a path and return its tip and label locations.
.. note:: *flow* is negative for an output.
"""
if angle is None:
return [0, 0], [0, 0]
else:
x, y = path[-1][1] # Use the last point as a reference.
tipheight = (self.shoulder - flow / 2) * self.pitch
if angle == RIGHT:
x += length
tip = [x + tipheight, y + flow / 2.0]
path.extend([(Path.LINETO, [x, y]),
(Path.LINETO, [x, y + self.shoulder]),
(Path.LINETO, tip),
(Path.LINETO, [x, y - self.shoulder + flow]),
(Path.LINETO, [x, y + flow]),
(Path.LINETO, [x - self.gap, y + flow])])
label_location = [tip[0] + self.offset, tip[1]]
else: # Vertical
x += self.gap
if angle == UP:
sign = 1
else:
sign = -1
tip = [x - flow / 2.0, y + sign * (length + tipheight)]
if angle == UP:
quadrant = 3
else:
quadrant = 0
# Inner arc isn't needed if inner radius is zero
if self.radius:
path.extend(self._arc(quadrant=quadrant,
cw=angle == UP,
radius=self.radius,
center=(x - self.radius,
y + sign * self.radius)))
else:
path.append((Path.LINETO, [x, y]))
path.extend([(Path.LINETO, [x, y + sign * length]),
(Path.LINETO, [x - self.shoulder,
y + sign * length]),
(Path.LINETO, tip),
(Path.LINETO, [x + self.shoulder - flow,
y + sign * length]),
(Path.LINETO, [x - flow, y + sign * length])])
path.extend(self._arc(quadrant=quadrant,
cw=angle == DOWN,
radius=self.radius - flow,
center=(x - self.radius,
y + sign * self.radius)))
path.append((Path.LINETO, [x - flow, y + sign * flow]))
label_location = [tip[0], tip[1] + sign * self.offset]
return tip, label_location
def _revert(self, path, first_action=Path.LINETO):
"""
A path is not simply reversible by path[::-1] since the code
specifies an action to take from the **previous** point.
"""
reverse_path = []
next_code = first_action
for code, position in path[::-1]:
reverse_path.append((next_code, position))
next_code = code
return reverse_path
# This might be more efficient, but it fails because 'tuple' object
# doesn't support item assignment:
#path[1] = path[1][-1:0:-1]
#path[1][0] = first_action
#path[2] = path[2][::-1]
#return path
@docstring.dedent_interpd
def add(self, patchlabel='', flows=None, orientations=None, labels='',
trunklength=1.0, pathlengths=0.25, prior=None, connect=(0, 0),
rotation=0, **kwargs):
"""
Add a simple Sankey diagram with flows at the same hierarchical level.
Return value is the instance of :class:`Sankey`.
Optional keyword arguments:
=============== ===================================================
Keyword Description
=============== ===================================================
*patchlabel* label to be placed at the center of the diagram
Note: *label* (not *patchlabel*) will be passed to
the patch through ``**kwargs`` and can be used to
create an entry in the legend.
*flows* array of flow values
By convention, inputs are positive and outputs are
negative.
*orientations* list of orientations of the paths
Valid values are 1 (from/to the top), 0 (from/to
the left or right), or -1 (from/to the bottom). If
*orientations* == 0, inputs will break in from the
left and outputs will break away to the right.
*labels* list of specifications of the labels for the flows
Each value may be *None* (no labels), '' (just
label the quantities), or a labeling string. If a
single value is provided, it will be applied to all
flows. If an entry is a non-empty string, then the
quantity for the corresponding flow will be shown
below the string. However, if the *unit* of the
main diagram is None, then quantities are never
shown, regardless of the value of this argument.
*trunklength* length between the bases of the input and output
groups
*pathlengths* list of lengths of the arrows before break-in or
after break-away
If a single value is given, then it will be applied
to the first (inside) paths on the top and bottom,
and the length of all other arrows will be
justified accordingly. The *pathlengths* are not
applied to the horizontal inputs and outputs.
*prior* index of the prior diagram to which this diagram
should be connected
*connect* a (prior, this) tuple indexing the flow of the
prior diagram and the flow of this diagram which
should be connected
If this is the first diagram or *prior* is *None*,
*connect* will be ignored.
*rotation* angle of rotation of the diagram [deg]
*rotation* is ignored if this diagram is connected
to an existing one (using *prior* and *connect*).
The interpretation of the *orientations* argument
will be rotated accordingly (e.g., if *rotation*
== 90, an *orientations* entry of 1 means to/from
the left).
=============== ===================================================
Valid kwargs are :meth:`matplotlib.patches.PathPatch` arguments:
%(Patch)s
As examples, ``fill=False`` and ``label='A legend entry'``.
By default, ``facecolor='#bfd1d4'`` (light blue) and
``linewidth=0.5``.
The indexing parameters (*prior* and *connect*) are zero-based.
The flows are placed along the top of the diagram from the inside out
in order of their index within the *flows* list or array. They are
placed along the sides of the diagram from the top down and along the
bottom from the outside in.
If the sum of the inputs and outputs is nonzero, the discrepancy
will appear as a cubic Bezier curve along the top and bottom edges of
the trunk.
.. seealso::
:meth:`finish`
"""
# Check and preprocess the arguments.
if flows is None:
flows = np.array([1.0, -1.0])
else:
flows = np.array(flows)
n = flows.shape[0] # Number of flows
if rotation is None:
rotation = 0
else:
# In the code below, angles are expressed in deg/90.
rotation /= 90.0
if orientations is None:
orientations = [0, 0]
assert len(orientations) == n, (
"orientations and flows must have the same length.\n"
"orientations has length %d, but flows has length %d."
% (len(orientations), n))
if labels != '' and getattr(labels, '__iter__', False):
# iterable() isn't used because it would give True if labels is a
# string
assert len(labels) == n, (
"If labels is a list, then labels and flows must have the "
"same length.\nlabels has length %d, but flows has length %d."
% (len(labels), n))
else:
labels = [labels] * n
assert trunklength >= 0, (
"trunklength is negative.\nThis isn't allowed, because it would "
"cause poor layout.")
if np.absolute(np.sum(flows)) > self.tolerance:
verbose.report(
"The sum of the flows is nonzero (%f).\nIs the "
"system not at steady state?" % np.sum(flows), 'helpful')
scaled_flows = self.scale * flows
gain = sum(max(flow, 0) for flow in scaled_flows)
loss = sum(min(flow, 0) for flow in scaled_flows)
if not (0.5 <= gain <= 2.0):
verbose.report(
"The scaled sum of the inputs is %f.\nThis may "
"cause poor layout.\nConsider changing the scale so"
" that the scaled sum is approximately 1.0." % gain, 'helpful')
if not (-2.0 <= loss <= -0.5):
verbose.report(
"The scaled sum of the outputs is %f.\nThis may "
"cause poor layout.\nConsider changing the scale so"
" that the scaled sum is approximately 1.0." % gain, 'helpful')
if prior is not None:
assert prior >= 0, "The index of the prior diagram is negative."
assert min(connect) >= 0, (
"At least one of the connection indices is negative.")
assert prior < len(self.diagrams), (
"The index of the prior diagram is %d, but there are "
"only %d other diagrams.\nThe index is zero-based."
% (prior, len(self.diagrams)))
assert connect[0] < len(self.diagrams[prior].flows), (
"The connection index to the source diagram is %d, but "
"that diagram has only %d flows.\nThe index is zero-based."
% (connect[0], len(self.diagrams[prior].flows)))
assert connect[1] < n, (
"The connection index to this diagram is %d, but this diagram"
"has only %d flows.\n The index is zero-based."
% (connect[1], n))
assert self.diagrams[prior].angles[connect[0]] is not None, (
"The connection cannot be made. Check that the magnitude "
"of flow %d of diagram %d is greater than or equal to the "
"specified tolerance." % (connect[0], prior))
flow_error = (self.diagrams[prior].flows[connect[0]] +
flows[connect[1]])
assert abs(flow_error) < self.tolerance, (
"The scaled sum of the connected flows is %f, which is not "
"within the tolerance (%f)." % (flow_error, self.tolerance))
# Determine if the flows are inputs.
are_inputs = [None] * n
for i, flow in enumerate(flows):
if flow >= self.tolerance:
are_inputs[i] = True
elif flow <= -self.tolerance:
are_inputs[i] = False
else:
verbose.report(
"The magnitude of flow %d (%f) is below the "
"tolerance (%f).\nIt will not be shown, and it "
"cannot be used in a connection."
% (i, flow, self.tolerance), 'helpful')
# Determine the angles of the arrows (before rotation).
angles = [None] * n
for i, (orient, is_input) in enumerate(zip(orientations, are_inputs)):
if orient == 1:
if is_input:
angles[i] = DOWN
elif not is_input:
# Be specific since is_input can be None.
angles[i] = UP
elif orient == 0:
if is_input is not None:
angles[i] = RIGHT
else:
assert orient == -1, (
"The value of orientations[%d] is %d, "
"but it must be -1, 0, or 1." % (i, orient))
if is_input:
angles[i] = UP
elif not is_input:
angles[i] = DOWN
# Justify the lengths of the paths.
if iterable(pathlengths):
assert len(pathlengths) == n, (
"If pathlengths is a list, then pathlengths and flows must "
"have the same length.\npathlengths has length %d, but flows "
"has length %d." % (len(pathlengths), n))
else: # Make pathlengths into a list.
urlength = pathlengths
ullength = pathlengths
lrlength = pathlengths
lllength = pathlengths
d = dict(RIGHT=pathlengths)
pathlengths = [d.get(angle, 0) for angle in angles]
# Determine the lengths of the top-side arrows
# from the middle outwards.
for i, (angle, is_input, flow) in enumerate(zip(angles, are_inputs,
scaled_flows)):
if angle == DOWN and is_input:
pathlengths[i] = ullength
ullength += flow
elif angle == UP and not is_input:
pathlengths[i] = urlength
urlength -= flow # Flow is negative for outputs.
# Determine the lengths of the bottom-side arrows
# from the middle outwards.
for i, (angle, is_input, flow) in enumerate(reversed(list(zip(
angles, are_inputs, scaled_flows)))):
if angle == UP and is_input:
pathlengths[n - i - 1] = lllength
lllength += flow
elif angle == DOWN and not is_input:
pathlengths[n - i - 1] = lrlength
lrlength -= flow
# Determine the lengths of the left-side arrows
# from the bottom upwards.
has_left_input = False
for i, (angle, is_input, spec) in enumerate(reversed(list(zip(
angles, are_inputs, zip(scaled_flows, pathlengths))))):
if angle == RIGHT:
if is_input:
if has_left_input:
pathlengths[n - i - 1] = 0
else:
has_left_input = True
# Determine the lengths of the right-side arrows
# from the top downwards.
has_right_output = False
for i, (angle, is_input, spec) in enumerate(zip(
angles, are_inputs, list(zip(scaled_flows, pathlengths)))):
if angle == RIGHT:
if not is_input:
if has_right_output:
pathlengths[i] = 0
else:
has_right_output = True
# Begin the subpaths, and smooth the transition if the sum of the flows
# is nonzero.
urpath = [(Path.MOVETO, [(self.gap - trunklength / 2.0), # Upper right
gain / 2.0]),
(Path.LINETO, [(self.gap - trunklength / 2.0) / 2.0,
gain / 2.0]),
(Path.CURVE4, [(self.gap - trunklength / 2.0) / 8.0,
gain / 2.0]),
(Path.CURVE4, [(trunklength / 2.0 - self.gap) / 8.0,
-loss / 2.0]),
(Path.LINETO, [(trunklength / 2.0 - self.gap) / 2.0,
-loss / 2.0]),
(Path.LINETO, [(trunklength / 2.0 - self.gap),
-loss / 2.0])]
llpath = [(Path.LINETO, [(trunklength / 2.0 - self.gap), # Lower left
loss / 2.0]),
(Path.LINETO, [(trunklength / 2.0 - self.gap) / 2.0,
loss / 2.0]),
(Path.CURVE4, [(trunklength / 2.0 - self.gap) / 8.0,
loss / 2.0]),
(Path.CURVE4, [(self.gap - trunklength / 2.0) / 8.0,
-gain / 2.0]),
(Path.LINETO, [(self.gap - trunklength / 2.0) / 2.0,
-gain / 2.0]),
(Path.LINETO, [(self.gap - trunklength / 2.0),
-gain / 2.0])]
lrpath = [(Path.LINETO, [(trunklength / 2.0 - self.gap), # Lower right
loss / 2.0])]
ulpath = [(Path.LINETO, [self.gap - trunklength / 2.0, # Upper left
gain / 2.0])]
# Add the subpaths and assign the locations of the tips and labels.
tips = np.zeros((n, 2))
label_locations = np.zeros((n, 2))
# Add the top-side inputs and outputs from the middle outwards.
for i, (angle, is_input, spec) in enumerate(zip(
angles, are_inputs, list(zip(scaled_flows, pathlengths)))):
if angle == DOWN and is_input:
tips[i, :], label_locations[i, :] = self._add_input(
ulpath, angle, *spec)
elif angle == UP and not is_input:
tips[i, :], label_locations[i, :] = self._add_output(
urpath, angle, *spec)
# Add the bottom-side inputs and outputs from the middle outwards.
for i, (angle, is_input, spec) in enumerate(reversed(list(zip(
angles, are_inputs, list(zip(scaled_flows, pathlengths)))))):
if angle == UP and is_input:
tip, label_location = self._add_input(llpath, angle, *spec)
tips[n - i - 1, :] = tip
label_locations[n - i - 1, :] = label_location
elif angle == DOWN and not is_input:
tip, label_location = self._add_output(lrpath, angle, *spec)
tips[n - i - 1, :] = tip
label_locations[n - i - 1, :] = label_location
# Add the left-side inputs from the bottom upwards.
has_left_input = False
for i, (angle, is_input, spec) in enumerate(reversed(list(zip(
angles, are_inputs, list(zip(scaled_flows, pathlengths)))))):
if angle == RIGHT and is_input:
if not has_left_input:
# Make sure the lower path extends
# at least as far as the upper one.
if llpath[-1][1][0] > ulpath[-1][1][0]:
llpath.append((Path.LINETO, [ulpath[-1][1][0],
llpath[-1][1][1]]))
has_left_input = True
tip, label_location = self._add_input(llpath, angle, *spec)
tips[n - i - 1, :] = tip
label_locations[n - i - 1, :] = label_location
# Add the right-side outputs from the top downwards.
has_right_output = False
for i, (angle, is_input, spec) in enumerate(zip(
angles, are_inputs, list(zip(scaled_flows, pathlengths)))):
if angle == RIGHT and not is_input:
if not has_right_output:
# Make sure the upper path extends
# at least as far as the lower one.
if urpath[-1][1][0] < lrpath[-1][1][0]:
urpath.append((Path.LINETO, [lrpath[-1][1][0],
urpath[-1][1][1]]))
has_right_output = True
tips[i, :], label_locations[i, :] = self._add_output(
urpath, angle, *spec)
# Trim any hanging vertices.
if not has_left_input:
ulpath.pop()
llpath.pop()
if not has_right_output:
lrpath.pop()
urpath.pop()
# Concatenate the subpaths in the correct order (clockwise from top).
path = (urpath + self._revert(lrpath) + llpath + self._revert(ulpath) +
[(Path.CLOSEPOLY, urpath[0][1])])
# Create a patch with the Sankey outline.
codes, vertices = list(zip(*path))
vertices = np.array(vertices)
def _get_angle(a, r):
if a is None:
return None
else:
return a + r
if prior is None:
if rotation != 0: # By default, none of this is needed.
angles = [_get_angle(angle, rotation) for angle in angles]
rotate = Affine2D().rotate_deg(rotation * 90).transform_point
tips = rotate(tips)
label_locations = rotate(label_locations)
vertices = rotate(vertices)
text = self.ax.text(0, 0, s=patchlabel, ha='center', va='center')
else:
rotation = (self.diagrams[prior].angles[connect[0]] -
angles[connect[1]])
angles = [_get_angle(angle, rotation) for angle in angles]
rotate = Affine2D().rotate_deg(rotation * 90).transform_point
tips = rotate(tips)
offset = self.diagrams[prior].tips[connect[0]] - tips[connect[1]]
translate = Affine2D().translate(*offset).transform_point
tips = translate(tips)
label_locations = translate(rotate(label_locations))
vertices = translate(rotate(vertices))
kwds = dict(s=patchlabel, ha='center', va='center')
text = self.ax.text(*offset, **kwds)
if False: # Debug
print("llpath\n", llpath)
print("ulpath\n", self._revert(ulpath))
print("urpath\n", urpath)
print("lrpath\n", self._revert(lrpath))
xs, ys = list(zip(*vertices))
self.ax.plot(xs, ys, 'go-')
patch = PathPatch(Path(vertices, codes),
fc=kwargs.pop('fc', kwargs.pop('facecolor',
'#bfd1d4')), # Custom defaults
lw=kwargs.pop('lw', kwargs.pop('linewidth', 0.5)),
**kwargs)
self.ax.add_patch(patch)
# Add the path labels.
texts = []
for number, angle, label, location in zip(flows, angles, labels,
label_locations):
if label is None or angle is None:
label = ''
elif self.unit is not None:
quantity = self.format % abs(number) + self.unit
if label != '':
label += "\n"
label += quantity
texts.append(self.ax.text(x=location[0], y=location[1],
s=label,
ha='center', va='center'))
# Text objects are placed even if they are empty (as long as the magnitude
# of the corresponding flow is larger than the tolerance) in case the
# user wants to provide labels later.
# Expand the size of the diagram if necessary.
self.extent = (min(np.min(vertices[:, 0]),
np.min(label_locations[:, 0]),
self.extent[0]),
max(np.max(vertices[:, 0]),
np.max(label_locations[:, 0]),
self.extent[1]),
min(np.min(vertices[:, 1]),
np.min(label_locations[:, 1]),
self.extent[2]),
max(np.max(vertices[:, 1]),
np.max(label_locations[:, 1]),
self.extent[3]))
# Include both vertices _and_ label locations in the extents; there are
# cases where either could determine the margins (e.g., arrow shoulders).
# Add this diagram as a subdiagram.
self.diagrams.append(Bunch(patch=patch, flows=flows, angles=angles,
tips=tips, text=text, texts=texts))
# Allow a daisy-chained call structure (see docstring for the class).
return self
def finish(self):
"""
Adjust the axes and return a list of information about the Sankey
subdiagram(s).
Return value is a list of subdiagrams represented with the following
fields:
=============== ===================================================
Field Description
=============== ===================================================
*patch* Sankey outline (an instance of
:class:`~matplotlib.patches.PathPatch`)
*flows* values of the flows (positive for input, negative
for output)
*angles* list of angles of the arrows [deg/90]
For example, if the diagram has not been rotated,
an input to the top side will have an angle of 3
(DOWN), and an output from the top side will have
an angle of 1 (UP). If a flow has been skipped
(because its magnitude is less than *tolerance*),
then its angle will be *None*.
*tips* array in which each row is an [x, y] pair
indicating the positions of the tips (or "dips") of
the flow paths
If the magnitude of a flow is less than the *tolerance*
for the instance of :class:`Sankey`, the flow is
skipped and its tip will be at the center of the
diagram.
*text* :class:`~matplotlib.text.Text` instance for the
label of the diagram
*texts* list of :class:`~matplotlib.text.Text` instances
for the labels of flows
=============== ===================================================
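For example, assuming ``sankey`` was built as shown in the class
docstring, the returned objects can be restyled after the fact (the
colors here are arbitrary)::

    diagrams = sankey.finish()
    diagrams[0].patch.set_facecolor('#37c959')
    diagrams[0].texts[-1].set_color('r')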
.. seealso::
:meth:`add`
"""
self.ax.axis([self.extent[0] - self.margin,
self.extent[1] + self.margin,
self.extent[2] - self.margin,
self.extent[3] + self.margin])
self.ax.set_aspect('equal', adjustable='datalim')
return self.diagrams
| mit |
MJuddBooth/pandas | pandas/tests/indexes/interval/test_construction.py | 2 | 15259 | from __future__ import division
from functools import partial
import numpy as np
import pytest
from pandas.compat import lzip
from pandas.core.dtypes.common import is_categorical_dtype
from pandas.core.dtypes.dtypes import IntervalDtype
from pandas import (
Categorical, CategoricalIndex, Float64Index, Index, Int64Index, Interval,
IntervalIndex, date_range, notna, period_range, timedelta_range)
from pandas.core.arrays import IntervalArray
import pandas.core.common as com
import pandas.util.testing as tm
@pytest.fixture(params=[None, 'foo'])
def name(request):
return request.param
class Base(object):
"""
Common tests for all variations of IntervalIndex construction. Input data
to be supplied in breaks format, then converted by the subclass method
get_kwargs_from_breaks to the expected format.
"""
@pytest.mark.parametrize('breaks', [
[3, 14, 15, 92, 653],
np.arange(10, dtype='int64'),
Int64Index(range(-10, 11)),
Float64Index(np.arange(20, 30, 0.5)),
date_range('20180101', periods=10),
date_range('20180101', periods=10, tz='US/Eastern'),
timedelta_range('1 day', periods=10)])
def test_constructor(self, constructor, breaks, closed, name):
result_kwargs = self.get_kwargs_from_breaks(breaks, closed)
result = constructor(closed=closed, name=name, **result_kwargs)
assert result.closed == closed
assert result.name == name
assert result.dtype.subtype == getattr(breaks, 'dtype', 'int64')
tm.assert_index_equal(result.left, Index(breaks[:-1]))
tm.assert_index_equal(result.right, Index(breaks[1:]))
@pytest.mark.parametrize('breaks, subtype', [
(Int64Index([0, 1, 2, 3, 4]), 'float64'),
(Int64Index([0, 1, 2, 3, 4]), 'datetime64[ns]'),
(Int64Index([0, 1, 2, 3, 4]), 'timedelta64[ns]'),
(Float64Index([0, 1, 2, 3, 4]), 'int64'),
(date_range('2017-01-01', periods=5), 'int64'),
(timedelta_range('1 day', periods=5), 'int64')])
def test_constructor_dtype(self, constructor, breaks, subtype):
# GH 19262: conversion via dtype parameter
expected_kwargs = self.get_kwargs_from_breaks(breaks.astype(subtype))
expected = constructor(**expected_kwargs)
result_kwargs = self.get_kwargs_from_breaks(breaks)
iv_dtype = IntervalDtype(subtype)
for dtype in (iv_dtype, str(iv_dtype)):
result = constructor(dtype=dtype, **result_kwargs)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('breaks', [
[np.nan] * 2, [np.nan] * 4, [np.nan] * 50])
def test_constructor_nan(self, constructor, breaks, closed):
# GH 18421
result_kwargs = self.get_kwargs_from_breaks(breaks)
result = constructor(closed=closed, **result_kwargs)
expected_subtype = np.float64
expected_values = np.array(breaks[:-1], dtype=object)
assert result.closed == closed
assert result.dtype.subtype == expected_subtype
tm.assert_numpy_array_equal(result._ndarray_values, expected_values)
@pytest.mark.parametrize('breaks', [
[],
np.array([], dtype='int64'),
np.array([], dtype='float64'),
np.array([], dtype='datetime64[ns]'),
np.array([], dtype='timedelta64[ns]')])
def test_constructor_empty(self, constructor, breaks, closed):
# GH 18421
result_kwargs = self.get_kwargs_from_breaks(breaks)
result = constructor(closed=closed, **result_kwargs)
expected_values = np.array([], dtype=object)
expected_subtype = getattr(breaks, 'dtype', np.int64)
assert result.empty
assert result.closed == closed
assert result.dtype.subtype == expected_subtype
tm.assert_numpy_array_equal(result._ndarray_values, expected_values)
@pytest.mark.parametrize('breaks', [
tuple('0123456789'),
list('abcdefghij'),
np.array(list('abcdefghij'), dtype=object),
np.array(list('abcdefghij'), dtype='<U1')])
def test_constructor_string(self, constructor, breaks):
# GH 19016
msg = ('category, object, and string subtypes are not supported '
'for IntervalIndex')
with pytest.raises(TypeError, match=msg):
constructor(**self.get_kwargs_from_breaks(breaks))
@pytest.mark.parametrize('cat_constructor', [
Categorical, CategoricalIndex])
def test_constructor_categorical_valid(self, constructor, cat_constructor):
# GH 21243/21253
if isinstance(constructor, partial) and constructor.func is Index:
# Index is defined to create CategoricalIndex from categorical data
pytest.skip()
breaks = np.arange(10, dtype='int64')
expected = IntervalIndex.from_breaks(breaks)
cat_breaks = cat_constructor(breaks)
result_kwargs = self.get_kwargs_from_breaks(cat_breaks)
result = constructor(**result_kwargs)
tm.assert_index_equal(result, expected)
def test_generic_errors(self, constructor):
# filler input data to be used when supplying invalid kwargs
filler = self.get_kwargs_from_breaks(range(10))
# invalid closed
msg = "invalid option for 'closed': invalid"
with pytest.raises(ValueError, match=msg):
constructor(closed='invalid', **filler)
# unsupported dtype
msg = 'dtype must be an IntervalDtype, got int64'
with pytest.raises(TypeError, match=msg):
constructor(dtype='int64', **filler)
# invalid dtype
msg = "data type 'invalid' not understood"
with pytest.raises(TypeError, match=msg):
constructor(dtype='invalid', **filler)
# no point in nesting periods in an IntervalIndex
periods = period_range('2000-01-01', periods=10)
periods_kwargs = self.get_kwargs_from_breaks(periods)
msg = 'Period dtypes are not supported, use a PeriodIndex instead'
with pytest.raises(ValueError, match=msg):
constructor(**periods_kwargs)
# decreasing values
decreasing_kwargs = self.get_kwargs_from_breaks(range(10, -1, -1))
msg = 'left side of interval must be <= right side'
with pytest.raises(ValueError, match=msg):
constructor(**decreasing_kwargs)
class TestFromArrays(Base):
"""Tests specific to IntervalIndex.from_arrays"""
@pytest.fixture
def constructor(self):
return IntervalIndex.from_arrays
def get_kwargs_from_breaks(self, breaks, closed='right'):
"""
converts intervals in breaks format to a dictionary of kwargs
specific to the format expected by IntervalIndex.from_arrays
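(e.g., breaks [0, 1, 2] become {'left': [0, 1], 'right': [1, 2]})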
"""
return {'left': breaks[:-1], 'right': breaks[1:]}
def test_constructor_errors(self):
# GH 19016: categorical data
data = Categorical(list('01234abcde'), ordered=True)
msg = ('category, object, and string subtypes are not supported '
'for IntervalIndex')
with pytest.raises(TypeError, match=msg):
IntervalIndex.from_arrays(data[:-1], data[1:])
# unequal length
left = [0, 1, 2]
right = [2, 3]
msg = 'left and right must have the same length'
with pytest.raises(ValueError, match=msg):
IntervalIndex.from_arrays(left, right)
@pytest.mark.parametrize('left_subtype, right_subtype', [
(np.int64, np.float64), (np.float64, np.int64)])
def test_mixed_float_int(self, left_subtype, right_subtype):
"""mixed int/float left/right results in float for both sides"""
left = np.arange(9, dtype=left_subtype)
right = np.arange(1, 10, dtype=right_subtype)
result = IntervalIndex.from_arrays(left, right)
expected_left = Float64Index(left)
expected_right = Float64Index(right)
expected_subtype = np.float64
tm.assert_index_equal(result.left, expected_left)
tm.assert_index_equal(result.right, expected_right)
assert result.dtype.subtype == expected_subtype
class TestFromBreaks(Base):
"""Tests specific to IntervalIndex.from_breaks"""
@pytest.fixture
def constructor(self):
return IntervalIndex.from_breaks
def get_kwargs_from_breaks(self, breaks, closed='right'):
"""
converts intervals in breaks format to a dictionary of kwargs
specific to the format expected by IntervalIndex.from_breaks
"""
return {'breaks': breaks}
def test_constructor_errors(self):
# GH 19016: categorical data
data = Categorical(list('01234abcde'), ordered=True)
msg = ('category, object, and string subtypes are not supported '
'for IntervalIndex')
with pytest.raises(TypeError, match=msg):
IntervalIndex.from_breaks(data)
def test_length_one(self):
"""breaks of length one produce an empty IntervalIndex"""
breaks = [0]
result = IntervalIndex.from_breaks(breaks)
expected = IntervalIndex.from_breaks([])
tm.assert_index_equal(result, expected)
class TestFromTuples(Base):
"""Tests specific to IntervalIndex.from_tuples"""
@pytest.fixture
def constructor(self):
return IntervalIndex.from_tuples
def get_kwargs_from_breaks(self, breaks, closed='right'):
"""
converts intervals in breaks format to a dictionary of kwargs
specific to the format expected by IntervalIndex.from_tuples
"""
if len(breaks) == 0:
return {'data': breaks}
tuples = lzip(breaks[:-1], breaks[1:])
if isinstance(breaks, (list, tuple)):
return {'data': tuples}
elif is_categorical_dtype(breaks):
return {'data': breaks._constructor(tuples)}
return {'data': com.asarray_tuplesafe(tuples)}
def test_constructor_errors(self):
# non-tuple
tuples = [(0, 1), 2, (3, 4)]
msg = 'IntervalIndex.from_tuples received an invalid item, 2'
with pytest.raises(TypeError, match=msg.format(t=tuples)):
IntervalIndex.from_tuples(tuples)
# too few/many items
tuples = [(0, 1), (2,), (3, 4)]
msg = 'IntervalIndex.from_tuples requires tuples of length 2, got {t}'
with pytest.raises(ValueError, match=msg.format(t=tuples)):
IntervalIndex.from_tuples(tuples)
tuples = [(0, 1), (2, 3, 4), (5, 6)]
with pytest.raises(ValueError, match=msg.format(t=tuples)):
IntervalIndex.from_tuples(tuples)
def test_na_tuples(self):
# tuple (NA, NA) evaluates the same as NA as an element
na_tuple = [(0, 1), (np.nan, np.nan), (2, 3)]
idx_na_tuple = IntervalIndex.from_tuples(na_tuple)
idx_na_element = IntervalIndex.from_tuples([(0, 1), np.nan, (2, 3)])
tm.assert_index_equal(idx_na_tuple, idx_na_element)
class TestClassConstructors(Base):
"""Tests specific to the IntervalIndex/Index constructors"""
@pytest.fixture(params=[IntervalIndex, partial(Index, dtype='interval')],
ids=['IntervalIndex', 'Index'])
def constructor(self, request):
return request.param
def get_kwargs_from_breaks(self, breaks, closed='right'):
"""
converts intervals in breaks format to a dictionary of kwargs
specific to the format expected by the IntervalIndex/Index constructors
"""
if len(breaks) == 0:
return {'data': breaks}
ivs = [Interval(l, r, closed) if notna(l) else l
for l, r in zip(breaks[:-1], breaks[1:])]
if isinstance(breaks, list):
return {'data': ivs}
elif is_categorical_dtype(breaks):
return {'data': breaks._constructor(ivs)}
return {'data': np.array(ivs, dtype=object)}
def test_generic_errors(self, constructor):
"""
override the base class implementation since errors are handled
differently; checks unnecessary since caught at the Interval level
"""
pass
def test_constructor_errors(self, constructor):
# mismatched closed within intervals with no constructor override
ivs = [Interval(0, 1, closed='right'), Interval(2, 3, closed='left')]
msg = 'intervals must all be closed on the same side'
with pytest.raises(ValueError, match=msg):
constructor(ivs)
# scalar
msg = (r'IntervalIndex\(...\) must be called with a collection of '
'some kind, 5 was passed')
with pytest.raises(TypeError, match=msg):
constructor(5)
# not an interval
msg = ("type <(class|type) 'numpy.int64'> with value 0 "
"is not an interval")
with pytest.raises(TypeError, match=msg):
constructor([0, 1])
@pytest.mark.parametrize('data, closed', [
([], 'both'),
([np.nan, np.nan], 'neither'),
([Interval(0, 3, closed='neither'),
Interval(2, 5, closed='neither')], 'left'),
([Interval(0, 3, closed='left'),
Interval(2, 5, closed='right')], 'neither'),
(IntervalIndex.from_breaks(range(5), closed='both'), 'right')])
def test_override_inferred_closed(self, constructor, data, closed):
# GH 19370
if isinstance(data, IntervalIndex):
tuples = data.to_tuples()
else:
tuples = [(iv.left, iv.right) if notna(iv) else iv for iv in data]
expected = IntervalIndex.from_tuples(tuples, closed=closed)
result = constructor(data, closed=closed)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('values_constructor', [
list, np.array, IntervalIndex, IntervalArray])
def test_index_object_dtype(self, values_constructor):
# Index(intervals, dtype=object) is an Index (not an IntervalIndex)
intervals = [Interval(0, 1), Interval(1, 2), Interval(2, 3)]
values = values_constructor(intervals)
result = Index(values, dtype=object)
assert type(result) is Index
tm.assert_numpy_array_equal(result.values, np.array(values))
class TestFromIntervals(TestClassConstructors):
"""
Tests for IntervalIndex.from_intervals, which is deprecated in favor of the
IntervalIndex constructor. Same tests as the IntervalIndex constructor,
plus deprecation test. Should only need to delete this class when removed.
"""
@pytest.fixture
def constructor(self):
def from_intervals_ignore_warnings(*args, **kwargs):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
return IntervalIndex.from_intervals(*args, **kwargs)
return from_intervals_ignore_warnings
def test_deprecated(self):
ivs = [Interval(0, 1), Interval(1, 2)]
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
IntervalIndex.from_intervals(ivs)
@pytest.mark.skip(reason='parent class test that is not applicable')
def test_index_object_dtype(self):
pass
| bsd-3-clause |
datapythonista/pandas | pandas/tests/reshape/merge/test_join.py | 2 | 31165 | import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
concat,
merge,
)
import pandas._testing as tm
from pandas.tests.reshape.merge.test_merge import (
NGROUPS,
N,
get_test_data,
)
a_ = np.array
class TestJoin:
def setup_method(self, method):
# aggregate multiple columns
self.df = DataFrame(
{
"key1": get_test_data(),
"key2": get_test_data(),
"data1": np.random.randn(N),
"data2": np.random.randn(N),
}
)
# exclude a couple keys for fun
self.df = self.df[self.df["key2"] > 1]
self.df2 = DataFrame(
{
"key1": get_test_data(n=N // 5),
"key2": get_test_data(ngroups=NGROUPS // 2, n=N // 5),
"value": np.random.randn(N // 5),
}
)
index, data = tm.getMixedTypeDict()
self.target = DataFrame(data, index=index)
# Join on string value
self.source = DataFrame(
{"MergedA": data["A"], "MergedD": data["D"]}, index=data["C"]
)
def test_left_outer_join(self):
joined_key2 = merge(self.df, self.df2, on="key2")
_check_join(self.df, self.df2, joined_key2, ["key2"], how="left")
joined_both = merge(self.df, self.df2)
_check_join(self.df, self.df2, joined_both, ["key1", "key2"], how="left")
def test_right_outer_join(self):
joined_key2 = merge(self.df, self.df2, on="key2", how="right")
_check_join(self.df, self.df2, joined_key2, ["key2"], how="right")
joined_both = merge(self.df, self.df2, how="right")
_check_join(self.df, self.df2, joined_both, ["key1", "key2"], how="right")
def test_full_outer_join(self):
joined_key2 = merge(self.df, self.df2, on="key2", how="outer")
_check_join(self.df, self.df2, joined_key2, ["key2"], how="outer")
joined_both = merge(self.df, self.df2, how="outer")
_check_join(self.df, self.df2, joined_both, ["key1", "key2"], how="outer")
def test_inner_join(self):
joined_key2 = merge(self.df, self.df2, on="key2", how="inner")
_check_join(self.df, self.df2, joined_key2, ["key2"], how="inner")
joined_both = merge(self.df, self.df2, how="inner")
_check_join(self.df, self.df2, joined_both, ["key1", "key2"], how="inner")
def test_handle_overlap(self):
joined = merge(self.df, self.df2, on="key2", suffixes=(".foo", ".bar"))
assert "key1.foo" in joined
assert "key1.bar" in joined
def test_handle_overlap_arbitrary_key(self):
joined = merge(
self.df,
self.df2,
left_on="key2",
right_on="key1",
suffixes=(".foo", ".bar"),
)
assert "key1.foo" in joined
assert "key2.bar" in joined
def test_join_on(self):
target = self.target
source = self.source
merged = target.join(source, on="C")
tm.assert_series_equal(merged["MergedA"], target["A"], check_names=False)
tm.assert_series_equal(merged["MergedD"], target["D"], check_names=False)
# join with duplicates (fix regression from DataFrame/Matrix merge)
df = DataFrame({"key": ["a", "a", "b", "b", "c"]})
df2 = DataFrame({"value": [0, 1, 2]}, index=["a", "b", "c"])
joined = df.join(df2, on="key")
expected = DataFrame(
{"key": ["a", "a", "b", "b", "c"], "value": [0, 0, 1, 1, 2]}
)
tm.assert_frame_equal(joined, expected)
# Test when some are missing
df_a = DataFrame([[1], [2], [3]], index=["a", "b", "c"], columns=["one"])
df_b = DataFrame([["foo"], ["bar"]], index=[1, 2], columns=["two"])
df_c = DataFrame([[1], [2]], index=[1, 2], columns=["three"])
joined = df_a.join(df_b, on="one")
joined = joined.join(df_c, on="one")
assert np.isnan(joined["two"]["c"])
assert np.isnan(joined["three"]["c"])
        # merge column not present
with pytest.raises(KeyError, match="^'E'$"):
target.join(source, on="E")
# overlap
source_copy = source.copy()
source_copy["A"] = 0
msg = (
"You are trying to merge on float64 and object columns. If "
"you wish to proceed you should use pd.concat"
)
with pytest.raises(ValueError, match=msg):
target.join(source_copy, on="A")
def test_join_on_fails_with_different_right_index(self):
df = DataFrame(
{"a": np.random.choice(["m", "f"], size=3), "b": np.random.randn(3)}
)
df2 = DataFrame(
{"a": np.random.choice(["m", "f"], size=10), "b": np.random.randn(10)},
index=tm.makeCustomIndex(10, 2),
)
msg = r'len\(left_on\) must equal the number of levels in the index of "right"'
with pytest.raises(ValueError, match=msg):
merge(df, df2, left_on="a", right_index=True)
def test_join_on_fails_with_different_left_index(self):
df = DataFrame(
{"a": np.random.choice(["m", "f"], size=3), "b": np.random.randn(3)},
index=tm.makeCustomIndex(3, 2),
)
df2 = DataFrame(
{"a": np.random.choice(["m", "f"], size=10), "b": np.random.randn(10)}
)
msg = r'len\(right_on\) must equal the number of levels in the index of "left"'
with pytest.raises(ValueError, match=msg):
merge(df, df2, right_on="b", left_index=True)
def test_join_on_fails_with_different_column_counts(self):
df = DataFrame(
{"a": np.random.choice(["m", "f"], size=3), "b": np.random.randn(3)}
)
df2 = DataFrame(
{"a": np.random.choice(["m", "f"], size=10), "b": np.random.randn(10)},
index=tm.makeCustomIndex(10, 2),
)
msg = r"len\(right_on\) must equal len\(left_on\)"
with pytest.raises(ValueError, match=msg):
merge(df, df2, right_on="a", left_on=["a", "b"])
@pytest.mark.parametrize("wrong_type", [2, "str", None, np.array([0, 1])])
def test_join_on_fails_with_wrong_object_type(self, wrong_type):
# GH12081 - original issue
# GH21220 - merging of Series and DataFrame is now allowed
# Edited test to remove the Series object from test parameters
df = DataFrame({"a": [1, 1]})
msg = (
"Can only merge Series or DataFrame objects, "
f"a {type(wrong_type)} was passed"
)
with pytest.raises(TypeError, match=msg):
merge(wrong_type, df, left_on="a", right_on="a")
with pytest.raises(TypeError, match=msg):
merge(df, wrong_type, left_on="a", right_on="a")
def test_join_on_pass_vector(self):
expected = self.target.join(self.source, on="C")
del expected["C"]
join_col = self.target.pop("C")
result = self.target.join(self.source, on=join_col)
tm.assert_frame_equal(result, expected)
def test_join_with_len0(self):
# nothing to merge
merged = self.target.join(self.source.reindex([]), on="C")
for col in self.source:
assert col in merged
assert merged[col].isna().all()
merged2 = self.target.join(self.source.reindex([]), on="C", how="inner")
tm.assert_index_equal(merged2.columns, merged.columns)
assert len(merged2) == 0
def test_join_on_inner(self):
df = DataFrame({"key": ["a", "a", "d", "b", "b", "c"]})
df2 = DataFrame({"value": [0, 1]}, index=["a", "b"])
joined = df.join(df2, on="key", how="inner")
expected = df.join(df2, on="key")
expected = expected[expected["value"].notna()]
tm.assert_series_equal(joined["key"], expected["key"])
tm.assert_series_equal(joined["value"], expected["value"], check_dtype=False)
tm.assert_index_equal(joined.index, expected.index)
def test_join_on_singlekey_list(self):
df = DataFrame({"key": ["a", "a", "b", "b", "c"]})
df2 = DataFrame({"value": [0, 1, 2]}, index=["a", "b", "c"])
# corner cases
joined = df.join(df2, on=["key"])
expected = df.join(df2, on="key")
tm.assert_frame_equal(joined, expected)
def test_join_on_series(self):
result = self.target.join(self.source["MergedA"], on="C")
expected = self.target.join(self.source[["MergedA"]], on="C")
tm.assert_frame_equal(result, expected)
def test_join_on_series_buglet(self):
# GH #638
df = DataFrame({"a": [1, 1]})
ds = Series([2], index=[1], name="b")
result = df.join(ds, on="a")
expected = DataFrame({"a": [1, 1], "b": [2, 2]}, index=df.index)
tm.assert_frame_equal(result, expected)
def test_join_index_mixed(self, join_type):
# no overlapping blocks
df1 = DataFrame(index=np.arange(10))
df1["bool"] = True
df1["string"] = "foo"
df2 = DataFrame(index=np.arange(5, 15))
df2["int"] = 1
df2["float"] = 1.0
joined = df1.join(df2, how=join_type)
expected = _join_by_hand(df1, df2, how=join_type)
tm.assert_frame_equal(joined, expected)
joined = df2.join(df1, how=join_type)
expected = _join_by_hand(df2, df1, how=join_type)
tm.assert_frame_equal(joined, expected)
def test_join_index_mixed_overlap(self):
df1 = DataFrame(
{"A": 1.0, "B": 2, "C": "foo", "D": True},
index=np.arange(10),
columns=["A", "B", "C", "D"],
)
assert df1["B"].dtype == np.int64
assert df1["D"].dtype == np.bool_
df2 = DataFrame(
{"A": 1.0, "B": 2, "C": "foo", "D": True},
index=np.arange(0, 10, 2),
columns=["A", "B", "C", "D"],
)
# overlap
joined = df1.join(df2, lsuffix="_one", rsuffix="_two")
expected_columns = [
"A_one",
"B_one",
"C_one",
"D_one",
"A_two",
"B_two",
"C_two",
"D_two",
]
df1.columns = expected_columns[:4]
df2.columns = expected_columns[4:]
expected = _join_by_hand(df1, df2)
tm.assert_frame_equal(joined, expected)
def test_join_empty_bug(self):
# generated an exception in 0.4.3
x = DataFrame()
x.join(DataFrame([3], index=[0], columns=["A"]), how="outer")
def test_join_unconsolidated(self):
# GH #331
a = DataFrame(np.random.randn(30, 2), columns=["a", "b"])
c = Series(np.random.randn(30))
a["c"] = c
d = DataFrame(np.random.randn(30, 1), columns=["q"])
# it works!
a.join(d)
d.join(a)
def test_join_multiindex(self):
index1 = MultiIndex.from_arrays(
[["a", "a", "a", "b", "b", "b"], [1, 2, 3, 1, 2, 3]],
names=["first", "second"],
)
index2 = MultiIndex.from_arrays(
[["b", "b", "b", "c", "c", "c"], [1, 2, 3, 1, 2, 3]],
names=["first", "second"],
)
df1 = DataFrame(data=np.random.randn(6), index=index1, columns=["var X"])
df2 = DataFrame(data=np.random.randn(6), index=index2, columns=["var Y"])
df1 = df1.sort_index(level=0)
df2 = df2.sort_index(level=0)
joined = df1.join(df2, how="outer")
ex_index = Index(index1.values).union(Index(index2.values))
expected = df1.reindex(ex_index).join(df2.reindex(ex_index))
expected.index.names = index1.names
tm.assert_frame_equal(joined, expected)
assert joined.index.names == index1.names
df1 = df1.sort_index(level=1)
df2 = df2.sort_index(level=1)
joined = df1.join(df2, how="outer").sort_index(level=0)
ex_index = Index(index1.values).union(Index(index2.values))
expected = df1.reindex(ex_index).join(df2.reindex(ex_index))
expected.index.names = index1.names
tm.assert_frame_equal(joined, expected)
assert joined.index.names == index1.names
def test_join_inner_multiindex(self):
key1 = ["bar", "bar", "bar", "foo", "foo", "baz", "baz", "qux", "qux", "snap"]
key2 = [
"two",
"one",
"three",
"one",
"two",
"one",
"two",
"two",
"three",
"one",
]
data = np.random.randn(len(key1))
data = DataFrame({"key1": key1, "key2": key2, "data": data})
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
to_join = DataFrame(
np.random.randn(10, 3), index=index, columns=["j_one", "j_two", "j_three"]
)
joined = data.join(to_join, on=["key1", "key2"], how="inner")
expected = merge(
data,
to_join.reset_index(),
left_on=["key1", "key2"],
right_on=["first", "second"],
how="inner",
sort=False,
)
expected2 = merge(
to_join,
data,
right_on=["key1", "key2"],
left_index=True,
how="inner",
sort=False,
)
tm.assert_frame_equal(joined, expected2.reindex_like(joined))
expected2 = merge(
to_join,
data,
right_on=["key1", "key2"],
left_index=True,
how="inner",
sort=False,
)
expected = expected.drop(["first", "second"], axis=1)
expected.index = joined.index
assert joined.index.is_monotonic
tm.assert_frame_equal(joined, expected)
# _assert_same_contents(expected, expected2.loc[:, expected.columns])
def test_join_hierarchical_mixed(self):
# GH 2024
df = DataFrame([(1, 2, 3), (4, 5, 6)], columns=["a", "b", "c"])
new_df = df.groupby(["a"]).agg({"b": [np.mean, np.sum]})
other_df = DataFrame([(1, 2, 3), (7, 10, 6)], columns=["a", "b", "d"])
other_df.set_index("a", inplace=True)
# GH 9455, 12219
with tm.assert_produces_warning(FutureWarning):
result = merge(new_df, other_df, left_index=True, right_index=True)
assert ("b", "mean") in result
assert "b" in result
def test_join_float64_float32(self):
a = DataFrame(np.random.randn(10, 2), columns=["a", "b"], dtype=np.float64)
b = DataFrame(np.random.randn(10, 1), columns=["c"], dtype=np.float32)
joined = a.join(b)
assert joined.dtypes["a"] == "float64"
assert joined.dtypes["b"] == "float64"
assert joined.dtypes["c"] == "float32"
a = np.random.randint(0, 5, 100).astype("int64")
b = np.random.random(100).astype("float64")
c = np.random.random(100).astype("float32")
df = DataFrame({"a": a, "b": b, "c": c})
xpdf = DataFrame({"a": a, "b": b, "c": c})
s = DataFrame(np.random.random(5).astype("float32"), columns=["md"])
rs = df.merge(s, left_on="a", right_index=True)
assert rs.dtypes["a"] == "int64"
assert rs.dtypes["b"] == "float64"
assert rs.dtypes["c"] == "float32"
assert rs.dtypes["md"] == "float32"
xp = xpdf.merge(s, left_on="a", right_index=True)
tm.assert_frame_equal(rs, xp)
def test_join_many_non_unique_index(self):
df1 = DataFrame({"a": [1, 1], "b": [1, 1], "c": [10, 20]})
df2 = DataFrame({"a": [1, 1], "b": [1, 2], "d": [100, 200]})
df3 = DataFrame({"a": [1, 1], "b": [1, 2], "e": [1000, 2000]})
idf1 = df1.set_index(["a", "b"])
idf2 = df2.set_index(["a", "b"])
idf3 = df3.set_index(["a", "b"])
result = idf1.join([idf2, idf3], how="outer")
df_partially_merged = merge(df1, df2, on=["a", "b"], how="outer")
expected = merge(df_partially_merged, df3, on=["a", "b"], how="outer")
result = result.reset_index()
expected = expected[result.columns]
expected["a"] = expected.a.astype("int64")
expected["b"] = expected.b.astype("int64")
tm.assert_frame_equal(result, expected)
df1 = DataFrame({"a": [1, 1, 1], "b": [1, 1, 1], "c": [10, 20, 30]})
df2 = DataFrame({"a": [1, 1, 1], "b": [1, 1, 2], "d": [100, 200, 300]})
df3 = DataFrame({"a": [1, 1, 1], "b": [1, 1, 2], "e": [1000, 2000, 3000]})
idf1 = df1.set_index(["a", "b"])
idf2 = df2.set_index(["a", "b"])
idf3 = df3.set_index(["a", "b"])
result = idf1.join([idf2, idf3], how="inner")
df_partially_merged = merge(df1, df2, on=["a", "b"], how="inner")
expected = merge(df_partially_merged, df3, on=["a", "b"], how="inner")
result = result.reset_index()
tm.assert_frame_equal(result, expected.loc[:, result.columns])
# GH 11519
df = DataFrame(
{
"A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
"B": ["one", "one", "two", "three", "two", "two", "one", "three"],
"C": np.random.randn(8),
"D": np.random.randn(8),
}
)
s = Series(
np.repeat(np.arange(8), 2), index=np.repeat(np.arange(8), 2), name="TEST"
)
inner = df.join(s, how="inner")
outer = df.join(s, how="outer")
left = df.join(s, how="left")
right = df.join(s, how="right")
tm.assert_frame_equal(inner, outer)
tm.assert_frame_equal(inner, left)
tm.assert_frame_equal(inner, right)
def test_join_sort(self):
left = DataFrame({"key": ["foo", "bar", "baz", "foo"], "value": [1, 2, 3, 4]})
right = DataFrame({"value2": ["a", "b", "c"]}, index=["bar", "baz", "foo"])
joined = left.join(right, on="key", sort=True)
expected = DataFrame(
{
"key": ["bar", "baz", "foo", "foo"],
"value": [2, 3, 1, 4],
"value2": ["a", "b", "c", "c"],
},
index=[1, 2, 0, 3],
)
tm.assert_frame_equal(joined, expected)
# smoke test
joined = left.join(right, on="key", sort=False)
tm.assert_index_equal(joined.index, Index(range(4)), exact=True)
def test_join_mixed_non_unique_index(self):
# GH 12814, unorderable types in py3 with a non-unique index
df1 = DataFrame({"a": [1, 2, 3, 4]}, index=[1, 2, 3, "a"])
df2 = DataFrame({"b": [5, 6, 7, 8]}, index=[1, 3, 3, 4])
result = df1.join(df2)
expected = DataFrame(
{"a": [1, 2, 3, 3, 4], "b": [5, np.nan, 6, 7, np.nan]},
index=[1, 2, 3, 3, "a"],
)
tm.assert_frame_equal(result, expected)
df3 = DataFrame({"a": [1, 2, 3, 4]}, index=[1, 2, 2, "a"])
df4 = DataFrame({"b": [5, 6, 7, 8]}, index=[1, 2, 3, 4])
result = df3.join(df4)
expected = DataFrame(
{"a": [1, 2, 3, 4], "b": [5, 6, 6, np.nan]}, index=[1, 2, 2, "a"]
)
tm.assert_frame_equal(result, expected)
def test_join_non_unique_period_index(self):
# GH #16871
index = pd.period_range("2016-01-01", periods=16, freq="M")
df = DataFrame(list(range(len(index))), index=index, columns=["pnum"])
df2 = concat([df, df])
result = df.join(df2, how="inner", rsuffix="_df2")
expected = DataFrame(
np.tile(np.arange(16, dtype=np.int64).repeat(2).reshape(-1, 1), 2),
columns=["pnum", "pnum_df2"],
index=df2.sort_index().index,
)
tm.assert_frame_equal(result, expected)
def test_mixed_type_join_with_suffix(self):
# GH #916
df = DataFrame(np.random.randn(20, 6), columns=["a", "b", "c", "d", "e", "f"])
df.insert(0, "id", 0)
df.insert(5, "dt", "foo")
grouped = df.groupby("id")
mn = grouped.mean()
cn = grouped.count()
# it works!
mn.join(cn, rsuffix="_right")
def test_join_many(self):
df = DataFrame(np.random.randn(10, 6), columns=list("abcdef"))
df_list = [df[["a", "b"]], df[["c", "d"]], df[["e", "f"]]]
joined = df_list[0].join(df_list[1:])
tm.assert_frame_equal(joined, df)
df_list = [df[["a", "b"]][:-2], df[["c", "d"]][2:], df[["e", "f"]][1:9]]
def _check_diff_index(df_list, result, exp_index):
reindexed = [x.reindex(exp_index) for x in df_list]
expected = reindexed[0].join(reindexed[1:])
tm.assert_frame_equal(result, expected)
# different join types
joined = df_list[0].join(df_list[1:], how="outer")
_check_diff_index(df_list, joined, df.index)
joined = df_list[0].join(df_list[1:])
_check_diff_index(df_list, joined, df_list[0].index)
joined = df_list[0].join(df_list[1:], how="inner")
_check_diff_index(df_list, joined, df.index[2:8])
msg = "Joining multiple DataFrames only supported for joining on index"
with pytest.raises(ValueError, match=msg):
df_list[0].join(df_list[1:], on="a")
def test_join_many_mixed(self):
df = DataFrame(np.random.randn(8, 4), columns=["A", "B", "C", "D"])
df["key"] = ["foo", "bar"] * 4
df1 = df.loc[:, ["A", "B"]]
df2 = df.loc[:, ["C", "D"]]
df3 = df.loc[:, ["key"]]
result = df1.join([df2, df3])
tm.assert_frame_equal(result, df)
def test_join_dups(self):
# joining dups
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
expected = concat([df, df], axis=1)
result = df.join(df, rsuffix="_2")
result.columns = expected.columns
tm.assert_frame_equal(result, expected)
# GH 4975, invalid join on dups
w = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
x = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
y = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
z = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
dta = x.merge(y, left_index=True, right_index=True).merge(
z, left_index=True, right_index=True, how="outer"
)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
dta = dta.merge(w, left_index=True, right_index=True)
expected = concat([x, y, z, w], axis=1)
expected.columns = ["x_x", "y_x", "x_y", "y_y", "x_x", "y_x", "x_y", "y_y"]
tm.assert_frame_equal(dta, expected)
def test_join_multi_to_multi(self, join_type):
# GH 20475
leftindex = MultiIndex.from_product(
[list("abc"), list("xy"), [1, 2]], names=["abc", "xy", "num"]
)
left = DataFrame({"v1": range(12)}, index=leftindex)
rightindex = MultiIndex.from_product(
[list("abc"), list("xy")], names=["abc", "xy"]
)
right = DataFrame({"v2": [100 * i for i in range(1, 7)]}, index=rightindex)
result = left.join(right, on=["abc", "xy"], how=join_type)
expected = (
left.reset_index()
.merge(right.reset_index(), on=["abc", "xy"], how=join_type)
.set_index(["abc", "xy", "num"])
)
tm.assert_frame_equal(expected, result)
msg = r'len\(left_on\) must equal the number of levels in the index of "right"'
with pytest.raises(ValueError, match=msg):
left.join(right, on="xy", how=join_type)
with pytest.raises(ValueError, match=msg):
right.join(left, on=["abc", "xy"], how=join_type)
def test_join_on_tz_aware_datetimeindex(self):
# GH 23931, 26335
df1 = DataFrame(
{
"date": pd.date_range(
start="2018-01-01", periods=5, tz="America/Chicago"
),
"vals": list("abcde"),
}
)
df2 = DataFrame(
{
"date": pd.date_range(
start="2018-01-03", periods=5, tz="America/Chicago"
),
"vals_2": list("tuvwx"),
}
)
result = df1.join(df2.set_index("date"), on="date")
expected = df1.copy()
expected["vals_2"] = Series([np.nan] * 2 + list("tuv"), dtype=object)
tm.assert_frame_equal(result, expected)
def test_join_datetime_string(self):
# GH 5647
dfa = DataFrame(
[
["2012-08-02", "L", 10],
["2012-08-02", "J", 15],
["2013-04-06", "L", 20],
["2013-04-06", "J", 25],
],
columns=["x", "y", "a"],
)
dfa["x"] = pd.to_datetime(dfa["x"])
dfb = DataFrame(
[["2012-08-02", "J", 1], ["2013-04-06", "L", 2]],
columns=["x", "y", "z"],
index=[2, 4],
)
dfb["x"] = pd.to_datetime(dfb["x"])
result = dfb.join(dfa.set_index(["x", "y"]), on=["x", "y"])
expected = DataFrame(
[
[Timestamp("2012-08-02 00:00:00"), "J", 1, 15],
[Timestamp("2013-04-06 00:00:00"), "L", 2, 20],
],
index=[2, 4],
columns=["x", "y", "z", "a"],
)
tm.assert_frame_equal(result, expected)
def _check_join(left, right, result, join_col, how="left", lsuffix="_x", rsuffix="_y"):
# some smoke tests
for c in join_col:
assert result[c].notna().all()
left_grouped = left.groupby(join_col)
right_grouped = right.groupby(join_col)
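    # For each key group of the joined result, the columns that came from
    # `left`/`right` must match the corresponding source group, or be all-NA
    # when that key does not exist on that side (outer-style joins).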
for group_key, group in result.groupby(join_col):
l_joined = _restrict_to_columns(group, left.columns, lsuffix)
r_joined = _restrict_to_columns(group, right.columns, rsuffix)
try:
lgroup = left_grouped.get_group(group_key)
except KeyError as err:
if how in ("left", "inner"):
raise AssertionError(
f"key {group_key} should not have been in the join"
) from err
_assert_all_na(l_joined, left.columns, join_col)
else:
_assert_same_contents(l_joined, lgroup)
try:
rgroup = right_grouped.get_group(group_key)
except KeyError as err:
if how in ("right", "inner"):
raise AssertionError(
f"key {group_key} should not have been in the join"
) from err
_assert_all_na(r_joined, right.columns, join_col)
else:
_assert_same_contents(r_joined, rgroup)
def _restrict_to_columns(group, columns, suffix):
found = [
c for c in group.columns if c in columns or c.replace(suffix, "") in columns
]
# filter
group = group.loc[:, found]
# get rid of suffixes, if any
group = group.rename(columns=lambda x: x.replace(suffix, ""))
# put in the right order...
group = group.loc[:, columns]
return group
def _assert_same_contents(join_chunk, source):
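    # Check that a join chunk reproduces the rows of its source frame; NaNs are
    # replaced by a sentinel so the row-wise set comparison below is well defined.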
NA_SENTINEL = -1234567 # drop_duplicates not so NA-friendly...
jvalues = join_chunk.fillna(NA_SENTINEL).drop_duplicates().values
svalues = source.fillna(NA_SENTINEL).drop_duplicates().values
rows = {tuple(row) for row in jvalues}
assert len(rows) == len(source)
assert all(tuple(row) in rows for row in svalues)
def _assert_all_na(join_chunk, source_columns, join_col):
for c in source_columns:
if c in join_col:
continue
assert join_chunk[c].isna().all()
def _join_by_hand(a, b, how="left"):
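    # Expected-result builder: join the two indexes, reindex both frames onto the
    # shared index, then copy b's columns into a's reindexed copy.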
join_index = a.index.join(b.index, how=how)
a_re = a.reindex(join_index)
b_re = b.reindex(join_index)
result_columns = a.columns.append(b.columns)
for col, s in b_re.items():
a_re[col] = s
return a_re.reindex(columns=result_columns)
def test_join_inner_multiindex_deterministic_order():
# GH: 36910
left = DataFrame(
data={"e": 5},
index=MultiIndex.from_tuples([(1, 2, 4)], names=("a", "b", "d")),
)
right = DataFrame(
data={"f": 6}, index=MultiIndex.from_tuples([(2, 3)], names=("b", "c"))
)
result = left.join(right, how="inner")
expected = DataFrame(
{"e": [5], "f": [6]},
index=MultiIndex.from_tuples([(2, 1, 4, 3)], names=("b", "a", "d", "c")),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
("input_col", "output_cols"), [("b", ["a", "b"]), ("a", ["a_x", "a_y"])]
)
def test_join_cross(input_col, output_cols):
# GH#5401
left = DataFrame({"a": [1, 3]})
right = DataFrame({input_col: [3, 4]})
result = left.join(right, how="cross", lsuffix="_x", rsuffix="_y")
expected = DataFrame({output_cols[0]: [1, 1, 3, 3], output_cols[1]: [3, 4, 3, 4]})
tm.assert_frame_equal(result, expected)
def test_join_multiindex_one_level(join_type):
# GH#36909
left = DataFrame(
data={"c": 3}, index=MultiIndex.from_tuples([(1, 2)], names=("a", "b"))
)
right = DataFrame(data={"d": 4}, index=MultiIndex.from_tuples([(2,)], names=("b",)))
result = left.join(right, how=join_type)
expected = DataFrame(
{"c": [3], "d": [4]},
index=MultiIndex.from_tuples([(2, 1)], names=["b", "a"]),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"categories, values",
[
(["Y", "X"], ["Y", "X", "X"]),
([2, 1], [2, 1, 1]),
([2.5, 1.5], [2.5, 1.5, 1.5]),
(
[Timestamp("2020-12-31"), Timestamp("2019-12-31")],
[Timestamp("2020-12-31"), Timestamp("2019-12-31"), Timestamp("2019-12-31")],
),
],
)
def test_join_multiindex_not_alphabetical_categorical(categories, values):
# GH#38502
left = DataFrame(
{
"first": ["A", "A"],
"second": Categorical(categories, categories=categories),
"value": [1, 2],
}
).set_index(["first", "second"])
right = DataFrame(
{
"first": ["A", "A", "B"],
"second": Categorical(values, categories=categories),
"value": [3, 4, 5],
}
).set_index(["first", "second"])
result = left.join(right, lsuffix="_left", rsuffix="_right")
expected = DataFrame(
{
"first": ["A", "A"],
"second": Categorical(categories, categories=categories),
"value_left": [1, 2],
"value_right": [3, 4],
}
).set_index(["first", "second"])
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
xzturn/caffe2 | caffe2/contrib/cuda-convnet2/shownet.py | 180 | 18206 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from tarfile import TarFile, TarInfo
from matplotlib import pylab as pl
import numpy as n
import getopt as opt
from python_util.util import *
from math import sqrt, ceil, floor
from python_util.gpumodel import IGPUModel
import random as r
import numpy.random as nr
from convnet import ConvNet
from python_util.options import *
from PIL import Image
from time import sleep
class ShowNetError(Exception):
pass
class ShowConvNet(ConvNet):
def __init__(self, op, load_dic):
ConvNet.__init__(self, op, load_dic)
def init_data_providers(self):
self.need_gpu = self.op.get_value('show_preds')
class Dummy:
def advance_batch(self):
pass
if self.need_gpu:
ConvNet.init_data_providers(self)
else:
self.train_data_provider = self.test_data_provider = Dummy()
def import_model(self):
if self.need_gpu:
ConvNet.import_model(self)
def init_model_state(self):
if self.op.get_value('show_preds'):
self.softmax_name = self.op.get_value('show_preds')
def init_model_lib(self):
if self.need_gpu:
ConvNet.init_model_lib(self)
def plot_cost(self):
if self.show_cost not in self.train_outputs[0][0]:
raise ShowNetError("Cost function with name '%s' not defined by given convnet." % self.show_cost)
# print self.test_outputs
train_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.train_outputs]
test_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.test_outputs]
if self.smooth_test_errors:
test_errors = [sum(test_errors[max(0,i-len(self.test_batch_range)):i])/(i-max(0,i-len(self.test_batch_range))) for i in xrange(1,len(test_errors)+1)]
numbatches = len(self.train_batch_range)
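        # Test errors are only recorded every `testing_freq` batches, so tile and
        # pad them below to line up with the per-batch training error curve.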
test_errors = n.row_stack(test_errors)
test_errors = n.tile(test_errors, (1, self.testing_freq))
test_errors = list(test_errors.flatten())
test_errors += [test_errors[-1]] * max(0,len(train_errors) - len(test_errors))
test_errors = test_errors[:len(train_errors)]
numepochs = len(train_errors) / float(numbatches)
pl.figure(1)
x = range(0, len(train_errors))
pl.plot(x, train_errors, 'k-', label='Training set')
pl.plot(x, test_errors, 'r-', label='Test set')
pl.legend()
ticklocs = range(numbatches, len(train_errors) - len(train_errors) % numbatches + 1, numbatches)
epoch_label_gran = int(ceil(numepochs / 20.))
epoch_label_gran = int(ceil(float(epoch_label_gran) / 10) * 10) if numepochs >= 10 else epoch_label_gran
ticklabels = map(lambda x: str((x[1] / numbatches)) if x[0] % epoch_label_gran == epoch_label_gran-1 else '', enumerate(ticklocs))
pl.xticks(ticklocs, ticklabels)
pl.xlabel('Epoch')
# pl.ylabel(self.show_cost)
pl.title('%s[%d]' % (self.show_cost, self.cost_idx))
# print "plotted cost"
def make_filter_fig(self, filters, filter_start, fignum, _title, num_filters, combine_chans, FILTERS_PER_ROW=16):
MAX_ROWS = 24
MAX_FILTERS = FILTERS_PER_ROW * MAX_ROWS
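        # The filters are tiled into one big image ("bigpic"): grayscale channel
        # tiles laid out side by side, or a single RGB image when channels are
        # combined, with 1-pixel borders separating the tiles.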
num_colors = filters.shape[0]
f_per_row = int(ceil(FILTERS_PER_ROW / float(1 if combine_chans else num_colors)))
filter_end = min(filter_start+MAX_FILTERS, num_filters)
filter_rows = int(ceil(float(filter_end - filter_start) / f_per_row))
filter_pixels = filters.shape[1]
filter_size = int(sqrt(filters.shape[1]))
fig = pl.figure(fignum)
fig.text(.5, .95, '%s %dx%d filters %d-%d' % (_title, filter_size, filter_size, filter_start, filter_end-1), horizontalalignment='center')
num_filters = filter_end - filter_start
if not combine_chans:
bigpic = n.zeros((filter_size * filter_rows + filter_rows + 1, filter_size*num_colors * f_per_row + f_per_row + 1), dtype=n.single)
else:
bigpic = n.zeros((3, filter_size * filter_rows + filter_rows + 1, filter_size * f_per_row + f_per_row + 1), dtype=n.single)
for m in xrange(filter_start,filter_end ):
filter = filters[:,:,m]
y, x = (m - filter_start) / f_per_row, (m - filter_start) % f_per_row
if not combine_chans:
for c in xrange(num_colors):
filter_pic = filter[c,:].reshape((filter_size,filter_size))
bigpic[1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
1 + (1 + filter_size*num_colors) * x + filter_size*c:1 + (1 + filter_size*num_colors) * x + filter_size*(c+1)] = filter_pic
else:
filter_pic = filter.reshape((3, filter_size,filter_size))
bigpic[:,
1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
1 + (1 + filter_size) * x:1 + (1 + filter_size) * x + filter_size] = filter_pic
pl.xticks([])
pl.yticks([])
if not combine_chans:
pl.imshow(bigpic, cmap=pl.cm.gray, interpolation='nearest')
else:
bigpic = bigpic.swapaxes(0,2).swapaxes(0,1)
pl.imshow(bigpic, interpolation='nearest')
def plot_filters(self):
FILTERS_PER_ROW = 16
filter_start = 0 # First filter to show
if self.show_filters not in self.layers:
raise ShowNetError("Layer with name '%s' not defined by given convnet." % self.show_filters)
layer = self.layers[self.show_filters]
filters = layer['weights'][self.input_idx]
# filters = filters - filters.min()
# filters = filters / filters.max()
if layer['type'] == 'fc': # Fully-connected layer
num_filters = layer['outputs']
channels = self.channels
filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
elif layer['type'] in ('conv', 'local'): # Conv layer
num_filters = layer['filters']
channels = layer['filterChannels'][self.input_idx]
if layer['type'] == 'local':
filters = filters.reshape((layer['modules'], channels, layer['filterPixels'][self.input_idx], num_filters))
filters = filters[:, :, :, self.local_plane] # first map for now (modules, channels, pixels)
filters = filters.swapaxes(0,2).swapaxes(0,1)
num_filters = layer['modules']
# filters = filters.swapaxes(0,1).reshape(channels * layer['filterPixels'][self.input_idx], num_filters * layer['modules'])
# num_filters *= layer['modules']
FILTERS_PER_ROW = layer['modulesX']
else:
filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
# Convert YUV filters to RGB
if self.yuv_to_rgb and channels == 3:
R = filters[0,:,:] + 1.28033 * filters[2,:,:]
G = filters[0,:,:] + -0.21482 * filters[1,:,:] + -0.38059 * filters[2,:,:]
B = filters[0,:,:] + 2.12798 * filters[1,:,:]
filters[0,:,:], filters[1,:,:], filters[2,:,:] = R, G, B
combine_chans = not self.no_rgb and channels == 3
# Make sure you don't modify the backing array itself here -- so no -= or /=
if self.norm_filters:
#print filters.shape
filters = filters - n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).mean(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1))
filters = filters / n.sqrt(n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).var(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1)))
#filters = filters - n.tile(filters.min(axis=0).min(axis=0), (3, filters.shape[1], 1))
#filters = filters / n.tile(filters.max(axis=0).max(axis=0), (3, filters.shape[1], 1))
#else:
filters = filters - filters.min()
filters = filters / filters.max()
self.make_filter_fig(filters, filter_start, 2, 'Layer %s' % self.show_filters, num_filters, combine_chans, FILTERS_PER_ROW=FILTERS_PER_ROW)
def plot_predictions(self):
epoch, batch, data = self.get_next_batch(train=False) # get a test batch
num_classes = self.test_data_provider.get_num_classes()
NUM_ROWS = 2
NUM_COLS = 4
NUM_IMGS = NUM_ROWS * NUM_COLS if not self.save_preds else data[0].shape[1]
NUM_TOP_CLASSES = min(num_classes, 5) # show this many top labels
NUM_OUTPUTS = self.model_state['layers'][self.softmax_name]['outputs']
PRED_IDX = 1
label_names = [lab.split(',')[0] for lab in self.test_data_provider.batch_meta['label_names']]
if self.only_errors:
preds = n.zeros((data[0].shape[1], NUM_OUTPUTS), dtype=n.single)
else:
preds = n.zeros((NUM_IMGS, NUM_OUTPUTS), dtype=n.single)
#rand_idx = nr.permutation(n.r_[n.arange(1), n.where(data[1] == 552)[1], n.where(data[1] == 795)[1], n.where(data[1] == 449)[1], n.where(data[1] == 274)[1]])[:NUM_IMGS]
rand_idx = nr.randint(0, data[0].shape[1], NUM_IMGS)
if NUM_IMGS < data[0].shape[1]:
data = [n.require(d[:,rand_idx], requirements='C') for d in data]
# data += [preds]
# Run the model
print [d.shape for d in data], preds.shape
self.libmodel.startFeatureWriter(data, [preds], [self.softmax_name])
IGPUModel.finish_batch(self)
print preds
data[0] = self.test_data_provider.get_plottable_data(data[0])
if self.save_preds:
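            # NOTE: `gfile` (and StringIO further down) are not imported in this
            # file; they are assumed to be provided by the `python_util.util`
            # star import or by the surrounding environment.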
if not gfile.Exists(self.save_preds):
gfile.MakeDirs(self.save_preds)
preds_thresh = preds > 0.5 # Binarize predictions
data[0] = data[0] * 255.0
data[0][data[0]<0] = 0
data[0][data[0]>255] = 255
data[0] = n.require(data[0], dtype=n.uint8)
dir_name = '%s_predictions_batch_%d' % (os.path.basename(self.save_file), batch)
tar_name = os.path.join(self.save_preds, '%s.tar' % dir_name)
tfo = gfile.GFile(tar_name, "w")
tf = TarFile(fileobj=tfo, mode='w')
for img_idx in xrange(NUM_IMGS):
img = data[0][img_idx,:,:,:]
imsave = Image.fromarray(img)
prefix = "CORRECT" if data[1][0,img_idx] == preds_thresh[img_idx,PRED_IDX] else "FALSE_POS" if preds_thresh[img_idx,PRED_IDX] == 1 else "FALSE_NEG"
file_name = "%s_%.2f_%d_%05d_%d.png" % (prefix, preds[img_idx,PRED_IDX], batch, img_idx, data[1][0,img_idx])
# gf = gfile.GFile(file_name, "w")
file_string = StringIO()
imsave.save(file_string, "PNG")
tarinf = TarInfo(os.path.join(dir_name, file_name))
tarinf.size = file_string.tell()
file_string.seek(0)
tf.addfile(tarinf, file_string)
tf.close()
tfo.close()
# gf.close()
print "Wrote %d prediction PNGs to %s" % (preds.shape[0], tar_name)
else:
fig = pl.figure(3, figsize=(12,9))
fig.text(.4, .95, '%s test samples' % ('Mistaken' if self.only_errors else 'Random'))
if self.only_errors:
# what the net got wrong
if NUM_OUTPUTS > 1:
err_idx = [i for i,p in enumerate(preds.argmax(axis=1)) if p not in n.where(data[2][:,i] > 0)[0]]
else:
err_idx = n.where(data[1][0,:] != preds[:,0].T)[0]
print err_idx
err_idx = r.sample(err_idx, min(len(err_idx), NUM_IMGS))
data[0], data[1], preds = data[0][:,err_idx], data[1][:,err_idx], preds[err_idx,:]
import matplotlib.gridspec as gridspec
import matplotlib.colors as colors
cconv = colors.ColorConverter()
gs = gridspec.GridSpec(NUM_ROWS*2, NUM_COLS,
width_ratios=[1]*NUM_COLS, height_ratios=[2,1]*NUM_ROWS )
#print data[1]
for row in xrange(NUM_ROWS):
for col in xrange(NUM_COLS):
img_idx = row * NUM_COLS + col
if data[0].shape[0] <= img_idx:
break
pl.subplot(gs[(row * 2) * NUM_COLS + col])
#pl.subplot(NUM_ROWS*2, NUM_COLS, row * 2 * NUM_COLS + col + 1)
pl.xticks([])
pl.yticks([])
img = data[0][img_idx,:,:,:]
pl.imshow(img, interpolation='lanczos')
show_title = data[1].shape[0] == 1
true_label = [int(data[1][0,img_idx])] if show_title else n.where(data[1][:,img_idx]==1)[0]
#print true_label
#print preds[img_idx,:].shape
#print preds[img_idx,:].max()
true_label_names = [label_names[i] for i in true_label]
img_labels = sorted(zip(preds[img_idx,:], label_names), key=lambda x: x[0])[-NUM_TOP_CLASSES:]
#print img_labels
axes = pl.subplot(gs[(row * 2 + 1) * NUM_COLS + col])
height = 0.5
ylocs = n.array(range(NUM_TOP_CLASSES))*height
pl.barh(ylocs, [l[0] for l in img_labels], height=height, \
color=['#ffaaaa' if l[1] in true_label_names else '#aaaaff' for l in img_labels])
#pl.title(", ".join(true_labels))
if show_title:
pl.title(", ".join(true_label_names), fontsize=15, fontweight='bold')
else:
print true_label_names
pl.yticks(ylocs + height/2, [l[1] for l in img_labels], x=1, backgroundcolor=cconv.to_rgba('0.65', alpha=0.5), weight='bold')
for line in enumerate(axes.get_yticklines()):
line[1].set_visible(False)
#pl.xticks([width], [''])
#pl.yticks([])
pl.xticks([])
pl.ylim(0, ylocs[-1] + height)
pl.xlim(0, 1)
def start(self):
self.op.print_values()
# print self.show_cost
if self.show_cost:
self.plot_cost()
if self.show_filters:
self.plot_filters()
if self.show_preds:
self.plot_predictions()
if pl:
pl.show()
sys.exit(0)
@classmethod
def get_options_parser(cls):
op = ConvNet.get_options_parser()
for option in list(op.options):
if option not in ('gpu', 'load_file', 'inner_size', 'train_batch_range', 'test_batch_range', 'multiview_test', 'data_path', 'pca_noise', 'scalar_mean'):
op.delete_option(option)
op.add_option("show-cost", "show_cost", StringOptionParser, "Show specified objective function", default="")
op.add_option("show-filters", "show_filters", StringOptionParser, "Show learned filters in specified layer", default="")
op.add_option("norm-filters", "norm_filters", BooleanOptionParser, "Individually normalize filters shown with --show-filters", default=0)
op.add_option("input-idx", "input_idx", IntegerOptionParser, "Input index for layer given to --show-filters", default=0)
op.add_option("cost-idx", "cost_idx", IntegerOptionParser, "Cost function return value index for --show-cost", default=0)
op.add_option("no-rgb", "no_rgb", BooleanOptionParser, "Don't combine filter channels into RGB in layer given to --show-filters", default=False)
op.add_option("yuv-to-rgb", "yuv_to_rgb", BooleanOptionParser, "Convert RGB filters to YUV in layer given to --show-filters", default=False)
op.add_option("channels", "channels", IntegerOptionParser, "Number of channels in layer given to --show-filters (fully-connected layers only)", default=0)
op.add_option("show-preds", "show_preds", StringOptionParser, "Show predictions made by given softmax on test set", default="")
op.add_option("save-preds", "save_preds", StringOptionParser, "Save predictions to given path instead of showing them", default="")
op.add_option("only-errors", "only_errors", BooleanOptionParser, "Show only mistaken predictions (to be used with --show-preds)", default=False, requires=['show_preds'])
op.add_option("local-plane", "local_plane", IntegerOptionParser, "Local plane to show", default=0)
op.add_option("smooth-test-errors", "smooth_test_errors", BooleanOptionParser, "Use running average for test error plot?", default=1)
op.options['load_file'].default = None
return op
if __name__ == "__main__":
#nr.seed(6)
try:
op = ShowConvNet.get_options_parser()
op, load_dic = IGPUModel.parse_options(op)
model = ShowConvNet(op, load_dic)
model.start()
except (UnpickleError, ShowNetError, opt.GetoptError), e:
print "----------------"
print "Error:"
print e
| apache-2.0 |
danielru/pySDC | projects/FastWaveSlowWave/plot_stability.py | 1 | 5867 | import matplotlib
matplotlib.use('Agg')
import numpy as np
from matplotlib import pyplot as plt
from pylab import rcParams
from matplotlib.patches import Polygon
from pySDC.implementations.problem_classes.FastWaveSlowWave_0D import swfw_scalar
from pySDC.implementations.datatype_classes.complex_mesh import mesh, rhs_imex_mesh
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.implementations.collocation_classes.gauss_radau_right import CollGaussRadau_Right
from pySDC.core.Step import step
# noinspection PyShadowingNames
def compute_stability():
"""
Routine to compute the stability domains of different configurations of fwsw-SDC
Returns:
numpy.ndarray: lambda_slow
numpy.ndarray: lambda_fast
int: number of collocation nodes
int: number of iterations
numpy.ndarray: stability numbers
"""
N_s = 100
N_f = 400
lam_s_max = 5.0
lam_f_max = 12.0
lambda_s = 1j * np.linspace(0.0, lam_s_max, N_s)
lambda_f = 1j * np.linspace(0.0, lam_f_max, N_f)
problem_params = dict()
# SET VALUE FOR lambda_slow AND VALUES FOR lambda_fast ###
problem_params['lambda_s'] = np.array([0.0])
problem_params['lambda_f'] = np.array([0.0])
problem_params['u0'] = 1.0
# initialize sweeper parameters
sweeper_params = dict()
# SET TYPE AND NUMBER OF QUADRATURE NODES ###
sweeper_params['collocation_class'] = CollGaussRadau_Right
sweeper_params['num_nodes'] = 3
sweeper_params['do_coll_update'] = True
# initialize level parameters
level_params = dict()
level_params['dt'] = 1.0
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = swfw_scalar # pass problem class
description['problem_params'] = problem_params # pass problem parameters
description['dtype_u'] = mesh # pass data type for u
description['dtype_f'] = rhs_imex_mesh # pass data type for f
description['sweeper_class'] = imex_1st_order # pass sweeper (see part B)
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = dict() # pass step parameters
# SET NUMBER OF ITERATIONS - SET K=0 FOR COLLOCATION SOLUTION ###
K = 3
# now the description contains more or less everything we need to create a step
S = step(description=description)
L = S.levels[0]
Q = L.sweep.coll.Qmat[1:, 1:]
nnodes = L.sweep.coll.num_nodes
dt = L.params.dt
stab = np.zeros((N_f, N_s), dtype='complex')
for i in range(0, N_s):
for j in range(0, N_f):
lambda_fast = lambda_f[j]
lambda_slow = lambda_s[i]
            if K != 0:
lambdas = [lambda_fast, lambda_slow]
# LHS, RHS = L.sweep.get_scalar_problems_sweeper_mats(lambdas=lambdas)
Mat_sweep = L.sweep.get_scalar_problems_manysweep_mat(nsweeps=K, lambdas=lambdas)
else:
# Compute stability function of collocation solution
Mat_sweep = np.linalg.inv(np.eye(nnodes) - dt * (lambda_fast + lambda_slow) * Q)
if L.sweep.params.do_coll_update:
stab_fh = 1.0 + (lambda_fast + lambda_slow) * L.sweep.coll.weights.dot(
Mat_sweep.dot(np.ones(nnodes)))
else:
q = np.zeros(nnodes)
q[nnodes - 1] = 1.0
stab_fh = q.dot(Mat_sweep.dot(np.ones(nnodes)))
stab[j, i] = stab_fh
return lambda_s, lambda_f, sweeper_params['num_nodes'], K, stab
# noinspection PyShadowingNames
def plot_stability(lambda_s, lambda_f, num_nodes, K, stab):
"""
Plotting routine of the stability domains
Args:
lambda_s (numpy.ndarray): lambda_slow
lambda_f (numpy.ndarray): lambda_fast
num_nodes (int): number of collocation nodes
K (int): number of iterations
stab (numpy.ndarray): stability numbers
"""
lam_s_max = np.amax(lambda_s.imag)
lam_f_max = np.amax(lambda_f.imag)
rcParams['figure.figsize'] = 1.5, 1.5
fs = 8
fig = plt.figure()
levels = np.array([0.25, 0.5, 0.75, 0.9, 1.1])
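    # Contours of |R| over the (lambda_slow, lambda_fast) plane; the solid 1.0
    # isoline is the stability boundary. The grey triangle added below masks the
    # region lambda_fast < lambda_slow, where the fast/slow splitting assumption
    # does not apply.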
CS1 = plt.contour(lambda_s.imag, lambda_f.imag, np.absolute(stab), levels, colors='k', linestyles='dashed')
CS2 = plt.contour(lambda_s.imag, lambda_f.imag, np.absolute(stab), [1.0], colors='k')
# Set markers at points used in plot_stab_vs_k
plt.plot(4, 10, 'x', color='k', markersize=fs - 4)
plt.plot(1, 10, 'x', color='k', markersize=fs - 4)
plt.clabel(CS1, inline=True, fmt='%3.2f', fontsize=fs - 2)
manual_locations = [(1.5, 2.5)]
    if K > 0:  # for K=0 there is no 1.0 isoline and labelling it crashes Matplotlib for some reason
plt.clabel(CS2, inline=True, fmt='%3.2f', fontsize=fs - 2, manual=manual_locations)
plt.gca().add_patch(Polygon([[0, 0], [lam_s_max, 0], [lam_s_max, lam_s_max]], visible=True, fill=True,
facecolor='.75', edgecolor='k', linewidth=1.0, zorder=11))
plt.gca().set_xticks(np.arange(0, int(lam_s_max) + 1))
plt.gca().set_yticks(np.arange(0, int(lam_f_max) + 2, 2))
plt.gca().tick_params(axis='both', which='both', labelsize=fs)
plt.xlim([0.0, lam_s_max])
plt.ylim([0.0, lam_f_max])
    plt.xlabel(r'$\Delta t \lambda_{slow}$', fontsize=fs, labelpad=0.0)
    plt.ylabel(r'$\Delta t \lambda_{fast}$', fontsize=fs, labelpad=0.0)
plt.title(r'$M=%1i$, $K=%1i$' % (num_nodes, K), fontsize=fs)
filename = 'data/stability-K' + str(K) + '-M' + str(num_nodes) + '.png'
fig.savefig(filename, bbox_inches='tight')
if __name__ == "__main__":
lambda_s, lambda_f, num_nodes, K, stab = compute_stability()
plot_stability(lambda_s, lambda_f, num_nodes, K, stab)
| bsd-2-clause |
TiKeil/Master-thesis-LOD | python_files/generate_figures/8.1_MsExampleLOD.py | 1 | 5777 | # This file is part of the master thesis "Variational crimes in the Localized orthogonal decomposition method":
# https://github.com/TiKeil/Masterthesis-LOD.git
# Copyright holder: Tim Keil
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
import numpy as np
import scipy.sparse as sparse
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import LinearLocator, FormatStrFormatter, MultipleLocator
from matplotlib import cm
from gridlod import util, world, fem, coef, interp
from gridlod.world import World
import pg_rand
import femsolverCoarse
import buildcoef2d
def PGsolver(world, ABase, f, k):
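    # Petrov-Galerkin LOD solve: assemble the multiscale stiffness matrix from the
    # corrected coarse basis, solve on the free (interior) coarse nodes, and map
    # the coarse solution to the fine mesh with (basis - basisCorrectors).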
NWorldFine = world.NWorldFine
NWorldCoarse = world.NWorldCoarse
NCoarseElement = world.NCoarseElement
boundaryConditions = world.boundaryConditions
NpFine = np.prod(NWorldFine+1)
NpCoarse = np.prod(NWorldCoarse+1)
#interpolant
IPatchGenerator = lambda i, N: interp.L2ProjectionPatchMatrix(i, N, NWorldCoarse, NCoarseElement, boundaryConditions)
#Coefficient (need flatten form)
aCoef = coef.coefficientFine(NWorldCoarse, NCoarseElement, ABase)
pglod = pg_rand.VcPetrovGalerkinLOD(aCoef, world, k, IPatchGenerator, 0)
pglod.originCorrectors(clearFineQuantities=False)
KFull = pglod.assembleMsStiffnessMatrix()
MFull = fem.assemblePatchMatrix(NWorldCoarse, world.MLocCoarse)
free = util.interiorpIndexMap(NWorldCoarse)
bFull = MFull*f
KFree = KFull[free][:,free]
bFree = bFull[free]
xFree = sparse.linalg.spsolve(KFree, bFree)
basis = fem.assembleProlongationMatrix(NWorldCoarse, NCoarseElement)
basisCorrectors = pglod.assembleBasisCorrectors()
modifiedBasis = basis - basisCorrectors
xFull = np.zeros(NpCoarse)
xFull[free] = xFree
uLodCoarse = xFull
uLodFine = modifiedBasis*xFull
return uLodCoarse, uLodFine
# Example from Peterseim, Variational Multiscale Stabilization and the Exponential Decay of correctors, p. 2
# Two modifications: A with minus and u(here) = 1/4*u(paper).
fine = 1024
NFine = np.array([fine])
NpFine = np.prod(NFine+1)
NList = [2,4,8,16,32,64]
epsilon = 2**(-5)
pi = np.pi
xt = util.tCoordinates(NFine).flatten()  # element midpoints, i.e. the mean values of the intervals defined by xp
xp = util.pCoordinates(NFine).flatten()
aFine = (2 - np.cos(2*pi*xt/epsilon))**(-1)
uSol = 4*(xp - xp**2) - 4*epsilon*(1/(4*pi)*np.sin(2*pi*xp/epsilon) -
1/(2*pi)*xp*np.sin(2*pi*xp/epsilon) -
epsilon/(4*pi**2)*np.cos(2*pi*xp/epsilon) +
epsilon/(4*pi**2))
uSol = uSol/4
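# uSol is the closed-form reference solution from the Peterseim example (rescaled
# by 1/4 as noted above); the energy-norm errors below are measured against it.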
plt.figure('Coefficient')
plt.plot(xt, aFine, label=r'$A_{\epsilon}(x)$')
plt.yticks((0,np.max(aFine)+np.min(aFine)),fontsize="small")
plt.ylabel('$y$', fontsize="small")
plt.xlabel('$x$', fontsize="small")
plt.legend(frameon=False,fontsize="large")
newErrorFine = []
x = []
y = []
for k in range(2,5):
newErrorFine = []
x = []
y = []
for N in NList:
NWorldCoarse = np.array([N])
boundaryConditions = np.array([[0, 0]])
NCoarseElement = NFine/NWorldCoarse
world = World(NWorldCoarse, NCoarseElement, boundaryConditions)
AFine = fem.assemblePatchMatrix(NFine, world.ALocFine, aFine)
#grid nodes
xpCoarse = util.pCoordinates(NWorldCoarse).flatten()
NpCoarse = np.prod(NWorldCoarse+1)
f = np.ones(NpCoarse)
uCoarseFull, uLodCoarse = PGsolver(world,aFine,f,k)
newErrorFine.append(np.sqrt(np.dot(uSol - uLodCoarse, AFine*(uSol - uLodCoarse))))
x.append(N)
y.append(1./N)
if k == 4:
if np.size(x)==1:
plt.figure('FEM-Solutions')
plt.subplots_adjust(left=0.01,bottom=0.04,right=0.99,top=0.95,wspace=0,hspace=0.2)
plt.subplot(231)
elif np.size(x)==2:
plt.subplot(232)
elif np.size(x)==3:
plt.subplot(233)
elif np.size(x)==4:
plt.subplot(234)
elif np.size(x)==5:
plt.subplot(235)
elif np.size(x)==6:
plt.subplot(236)
            plt.plot(xp, uSol, 'k', label=r'$u_{\epsilon}(x)$')
            plt.plot(xpCoarse, uCoarseFull, 'o--', label=r'$u^{PG}(x)$')
plt.title('1/H= ' + str(N),fontsize="small")
plt.tick_params(axis='both', which='both', bottom='off', top='off', labelbottom='off', right='off', left='off', labelleft='off')
plt.legend(frameon=False,fontsize="small")
#extension of the plot
x1 = []
y = []
for i in [2,4,8,16,32,64,128,256,512,1024]:
x1.append(i)
y.append(1./i)
plt.figure("Error")
if k == 1:
plt.loglog(x,newErrorFine,'-o', basex=2, basey=2, label = '$k=1$')
plt.loglog(x1,y,'--k',basex=2, basey=2, linewidth=1, alpha=0.3)
if k == 2:
plt.loglog(x,newErrorFine,'-o', basex=2, basey=2, label = '$k=2$')
if k == 3:
plt.loglog(x,newErrorFine,'-*', basex=2, basey=2, label = '$k=3$')
if k == 4:
plt.loglog(x,newErrorFine,'--o', basex=2, basey=2, label = '$k=4$')
plt.loglog(x1,y,'--k',basex=2, basey=2, linewidth=1, alpha=0.3)
if k == 5:
plt.loglog(x,newErrorFine,'-o', basex=2, basey=2, label = '$k=5$')
if k == 6:
plt.loglog(x,newErrorFine,'-o', basex=2, basey=2, label = '$k=6$')
plt.grid(True,which="both",ls="--")
plt.ylabel('Error', fontsize="small")
plt.xlabel('1/H', fontsize="small")
plt.legend(frameon=False,fontsize="small") #Legende
plt.show() | apache-2.0 |
abhisg/scikit-learn | examples/feature_selection/plot_feature_selection.py | 249 | 2827 | """
===============================
Univariate Feature Selection
===============================
An example showing univariate feature selection.
Noisy (non informative) features are added to the iris data and
univariate feature selection is applied. For each feature, we plot the
p-values for the univariate feature selection and the corresponding
weights of an SVM. We can see that univariate feature selection
selects the informative features and that these have larger SVM weights.
In the total set of features, only the first four are significant. We
can see that they have the highest score with univariate feature
selection. The SVM assigns a large weight to one of these features, but also
selects many of the non-informative features.
Applying univariate feature selection before the SVM
increases the SVM weight attributed to the significant features, and will
thus improve classification.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
from sklearn.feature_selection import SelectPercentile, f_classif
###############################################################################
# import some data to play with
# The iris dataset
iris = datasets.load_iris()
# Some noisy data not correlated
E = np.random.uniform(0, 0.1, size=(len(iris.data), 20))
# Add the noisy data to the informative features
X = np.hstack((iris.data, E))
y = iris.target
###############################################################################
plt.figure(1)
plt.clf()
X_indices = np.arange(X.shape[-1])
###############################################################################
# Univariate feature selection with F-test for feature scoring
# We use the default selection function: the 10% most significant features
selector = SelectPercentile(f_classif, percentile=10)
selector.fit(X, y)
scores = -np.log10(selector.pvalues_)
scores /= scores.max()
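# Normalize the scores to [0, 1] so they can share one bar chart with the
# (equally normalized) SVM weights below.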
plt.bar(X_indices - .45, scores, width=.2,
label=r'Univariate score ($-Log(p_{value})$)', color='g')
###############################################################################
# Compare to the weights of an SVM
clf = svm.SVC(kernel='linear')
clf.fit(X, y)
svm_weights = (clf.coef_ ** 2).sum(axis=0)
svm_weights /= svm_weights.max()
plt.bar(X_indices - .25, svm_weights, width=.2, label='SVM weight', color='r')
clf_selected = svm.SVC(kernel='linear')
clf_selected.fit(selector.transform(X), y)
svm_weights_selected = (clf_selected.coef_ ** 2).sum(axis=0)
svm_weights_selected /= svm_weights_selected.max()
plt.bar(X_indices[selector.get_support()] - .05, svm_weights_selected,
width=.2, label='SVM weights after selection', color='b')
plt.title("Comparing feature selection")
plt.xlabel('Feature number')
plt.yticks(())
plt.axis('tight')
plt.legend(loc='upper right')
plt.show()
| bsd-3-clause |
PatrickOReilly/scikit-learn | examples/applications/plot_prediction_latency.py | 85 | 11395 | """
==================
Prediction Latency
==================
This is an example showing the prediction latency of various scikit-learn
estimators.
The goal is to measure the latency one can expect when doing predictions
either in bulk or atomic (i.e. one by one) mode.
The plots represent the distribution of the prediction latency as a boxplot.
"""
# Authors: Eustache Diemert <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import time
import gc
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from scipy.stats import scoreatpercentile
from sklearn.datasets.samples_generator import make_regression
from sklearn.ensemble.forest import RandomForestRegressor
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.svm.classes import SVR
from sklearn.utils import shuffle
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
def atomic_benchmark_estimator(estimator, X_test, verbose=False):
"""Measure runtime prediction of each instance."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_instances, dtype=np.float)
for i in range(n_instances):
instance = X_test[[i], :]
start = time.time()
estimator.predict(instance)
runtimes[i] = time.time() - start
if verbose:
print("atomic_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose):
"""Measure runtime prediction of the whole input."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_bulk_repeats, dtype=np.float)
for i in range(n_bulk_repeats):
start = time.time()
estimator.predict(X_test)
runtimes[i] = time.time() - start
runtimes = np.array(list(map(lambda x: x / float(n_instances), runtimes)))
if verbose:
print("bulk_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def benchmark_estimator(estimator, X_test, n_bulk_repeats=30, verbose=False):
"""
Measure runtimes of prediction in both atomic and bulk mode.
Parameters
----------
estimator : already trained estimator supporting `predict()`
X_test : test input
n_bulk_repeats : how many times to repeat when evaluating bulk mode
Returns
-------
atomic_runtimes, bulk_runtimes : a pair of `np.array` which contain the
runtimes in seconds.
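    Example
    -------
    A sketch, assuming a fitted `estimator` and a test matrix `X_test`:
        atomic_s, bulk_s = benchmark_estimator(estimator, X_test, n_bulk_repeats=10)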
"""
atomic_runtimes = atomic_benchmark_estimator(estimator, X_test, verbose)
bulk_runtimes = bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats,
verbose)
return atomic_runtimes, bulk_runtimes
def generate_dataset(n_train, n_test, n_features, noise=0.1, verbose=False):
"""Generate a regression dataset with the given parameters."""
if verbose:
print("generating dataset...")
X, y, coef = make_regression(n_samples=n_train + n_test,
n_features=n_features, noise=noise, coef=True)
random_seed = 13
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=n_train, random_state=random_seed)
X_train, y_train = shuffle(X_train, y_train, random_state=random_seed)
X_scaler = StandardScaler()
X_train = X_scaler.fit_transform(X_train)
X_test = X_scaler.transform(X_test)
y_scaler = StandardScaler()
y_train = y_scaler.fit_transform(y_train[:, None])[:, 0]
y_test = y_scaler.transform(y_test[:, None])[:, 0]
gc.collect()
if verbose:
print("ok")
return X_train, y_train, X_test, y_test
def boxplot_runtimes(runtimes, pred_type, configuration):
"""
Plot a new `Figure` with boxplots of prediction runtimes.
Parameters
----------
runtimes : list of `np.array` of latencies in micro-seconds
    pred_type : 'bulk' or 'atomic'
    configuration : benchmark configuration dict (provides the estimator names
        and the number of features used in the plot title)
"""
fig, ax1 = plt.subplots(figsize=(10, 6))
    bp = plt.boxplot(runtimes)
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
plt.setp(ax1, xticklabels=cls_infos)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Prediction Time per Instance - %s, %d feats.' % (
pred_type.capitalize(),
configuration['n_features']))
ax1.set_ylabel('Prediction Time (us)')
plt.show()
def benchmark(configuration):
"""Run the whole benchmark."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
stats = {}
for estimator_conf in configuration['estimators']:
print("Benchmarking", estimator_conf['instance'])
estimator_conf['instance'].fit(X_train, y_train)
gc.collect()
a, b = benchmark_estimator(estimator_conf['instance'], X_test)
stats[estimator_conf['name']] = {'atomic': a, 'bulk': b}
cls_names = [estimator_conf['name'] for estimator_conf in configuration[
'estimators']]
runtimes = [1e6 * stats[clf_name]['atomic'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'atomic', configuration)
runtimes = [1e6 * stats[clf_name]['bulk'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'bulk (%d)' % configuration['n_test'],
configuration)
def n_feature_influence(estimators, n_train, n_test, n_features, percentile):
"""
Estimate influence of the number of features on prediction time.
Parameters
----------
estimators : dict of (name (str), estimator) to benchmark
    n_train : number of training instances (int)
    n_test : number of testing instances (int)
n_features : list of feature-space dimensionality to test (int)
percentile : percentile at which to measure the speed (int [0-100])
Returns:
--------
percentiles : dict(estimator_name,
dict(n_features, percentile_perf_in_us))
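    Example (sketch, mirroring the call at the bottom of this script):
        percentiles = n_feature_influence({'ridge': Ridge()}, int(1e3), int(1e2),
                                          [100, 250, 500], 90)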
"""
percentiles = defaultdict(defaultdict)
for n in n_features:
print("benchmarking with %d features" % n)
X_train, y_train, X_test, y_test = generate_dataset(n_train, n_test, n)
for cls_name, estimator in estimators.items():
estimator.fit(X_train, y_train)
gc.collect()
runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False)
percentiles[cls_name][n] = 1e6 * scoreatpercentile(runtimes,
percentile)
return percentiles
def plot_n_features_influence(percentiles, percentile):
fig, ax1 = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
for i, cls_name in enumerate(percentiles.keys()):
x = np.array(sorted([n for n in percentiles[cls_name].keys()]))
y = np.array([percentiles[cls_name][n] for n in x])
plt.plot(x, y, color=colors[i], )
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Evolution of Prediction Time with #Features')
ax1.set_xlabel('#Features')
ax1.set_ylabel('Prediction Time at %d%%-ile (us)' % percentile)
plt.show()
def benchmark_throughputs(configuration, duration_secs=0.1):
"""benchmark throughput for different estimators."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
throughputs = dict()
for estimator_config in configuration['estimators']:
estimator_config['instance'].fit(X_train, y_train)
start_time = time.time()
n_predictions = 0
while (time.time() - start_time) < duration_secs:
estimator_config['instance'].predict(X_test[[0]])
n_predictions += 1
throughputs[estimator_config['name']] = n_predictions / duration_secs
return throughputs
def plot_benchmark_throughput(throughputs, configuration):
fig, ax = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
cls_values = [throughputs[estimator_conf['name']] for estimator_conf in
configuration['estimators']]
plt.bar(range(len(throughputs)), cls_values, width=0.5, color=colors)
ax.set_xticks(np.linspace(0.25, len(throughputs) - 0.75, len(throughputs)))
ax.set_xticklabels(cls_infos, fontsize=10)
ymax = max(cls_values) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('Throughput (predictions/sec)')
ax.set_title('Prediction Throughput for different estimators (%d '
'features)' % configuration['n_features'])
plt.show()
###############################################################################
# main code
start_time = time.time()
# benchmark bulk/atomic prediction speed for various regressors
configuration = {
'n_train': int(1e3),
'n_test': int(1e2),
'n_features': int(1e2),
'estimators': [
{'name': 'Linear Model',
'instance': SGDRegressor(penalty='elasticnet', alpha=0.01,
l1_ratio=0.25, fit_intercept=True),
'complexity_label': 'non-zero coefficients',
'complexity_computer': lambda clf: np.count_nonzero(clf.coef_)},
{'name': 'RandomForest',
'instance': RandomForestRegressor(),
'complexity_label': 'estimators',
'complexity_computer': lambda clf: clf.n_estimators},
{'name': 'SVR',
'instance': SVR(kernel='rbf'),
'complexity_label': 'support vectors',
'complexity_computer': lambda clf: len(clf.support_vectors_)},
]
}
benchmark(configuration)
# benchmark n_features influence on prediction speed
percentile = 90
percentiles = n_feature_influence({'ridge': Ridge()},
configuration['n_train'],
configuration['n_test'],
[100, 250, 500], percentile)
plot_n_features_influence(percentiles, percentile)
# benchmark throughput
throughputs = benchmark_throughputs(configuration)
plot_benchmark_throughput(throughputs, configuration)
stop_time = time.time()
print("example run in %.2fs" % (stop_time - start_time))
| bsd-3-clause |
geodynamics/burnman | examples/example_dataset_uncertainties.py | 2 | 2443 | # This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2019 by the BurnMan team, released under the GNU
# GPL v2 or later.
"""
example_holland_powell_uncertainties
------------------------------------
This extremely short example script shows how one can visualize
and manipulate uncertainties in zero-point energies as found in the
Holland and Powell dataset.
*Uses:*
* :doc:`mineral_database`
*Demonstrates:*
* extracting a block of the covariance matrix for selected endmembers
* transforming covariances for linear combinations of endmembers
* plotting the resulting covariance (error) ellipse
"""
from __future__ import absolute_import
# Here we import standard python modules that are required for
# usage of BurnMan.
import numpy as np
import matplotlib.pyplot as plt
import burnman_path # adds the local burnman directory to the path
# Here we import the relevant modules from BurnMan.
from burnman.minerals import HP_2011_ds62
from burnman.nonlinear_fitting import plot_cov_ellipse # for plotting
assert burnman_path # silence pyflakes warning
plt.style.use('ggplot')
# First, we read in the covariance matrix
cov = HP_2011_ds62.cov()
# Now we can find the desired rows and columns of the covariance matrix
# by cross-referencing against the list of mineral names
# (quartz, periclase, enstatite)
indices = [cov['endmember_names'].index(name) for name in ['q', 'per', 'en']]
# Slice the required rows and columns from the covariance matrix
Cov_selected = cov['covariance_matrix'][np.ix_(indices,indices)]
# The following line transforms the covariance matrix so that we can look
# at the uncertainties associated with the endmember reaction
# quartz + periclase = 0.5*enstatite
A = np.array([[1., 1., 0.],
[0., 0., 0.5]])
cov_transformed = A.dot(Cov_selected).dot(A.T)
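# (added note) This is standard linear error propagation: for linear
# combinations x = A.dot(e) of the endmember energies e, the covariance of x
# is A.dot(Cov_e).dot(A.T). The two rows of A above correspond to (q + per)
# and en/2, i.e. the two sides of the reaction per mole of MgSiO3.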
sigma_x = np.sqrt(cov_transformed[0][0])
sigma_y = np.sqrt(cov_transformed[1][1])
corr_xy = cov_transformed[0][1]/sigma_x/sigma_y
print('sigma(q+per) = {0:.2f} J/mol'.format(sigma_x))
print('sigma(en/2) = {0:.2f} J/mol'.format(sigma_y))
print('corr(q+per,en/2) = {0:.2f}'.format(corr_xy))
# Finally, we plot the covariance matrix
fig = plt.figure(figsize = (5, 5))
ax = fig.add_subplot(1, 1, 1)
plot_cov_ellipse(cov_transformed, [0., 0.], nstd=1., ax=ax)
ax.set_xlim(-500., 500.)
ax.set_ylim(-500., 500.)
ax.set_xlabel('$\sigma_{q+per}$ (J/mol MgSiO$_3$)')
ax.set_ylabel('$\sigma_{en/2}$ (J/mol MgSiO$_3$)')
plt.show()
| gpl-2.0 |
xiaoxq/apollo | modules/tools/mapshow/roadshow.py | 3 | 1332 | #!/usr/bin/env python3
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import argparse
import matplotlib.pyplot as plt
from modules.tools.mapshow.libs.map import Map
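# (added note) Typical invocation, assuming a map file is available locally:
# python roadshow.py -m /path/to/map_file.txt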
if __name__ == "__main__":
parser = argparse.ArgumentParser(
        description="Roadshow is a tool to display road info on a map.",
prog="roadshow.py")
parser.add_argument(
"-m", "--map", action="store", type=str, required=True,
help="Specify the map file in txt or binary format")
args = parser.parse_args()
map = Map()
map.load(args.map)
map.draw_roads(plt)
plt.axis('equal')
plt.show()
| apache-2.0 |
NWine/trading-with-python | lib/backtest.py | 74 | 7381 | #-------------------------------------------------------------------------------
# Name: backtest
# Purpose: perform routine backtesting tasks.
# This module should be usable as a stand-alone library outside of the TWP package.
#
# Author: Jev Kuznetsov
#
# Created: 03/07/2014
# Copyright: (c) Jev Kuznetsov 2013
# Licence: BSD
#-------------------------------------------------------------------------------
import pandas as pd
import matplotlib.pyplot as plt
import sys
import numpy as np
def tradeBracket(price,entryBar,upper=None, lower=None, timeout=None):
'''
trade a bracket on price series, return price delta and exit bar #
Input
------
price : numpy array of price values
entryBar: entry bar number, *determines entry price*
upper : high stop
lower : low stop
timeout : max number of periods to hold
Returns exit price and number of bars held
'''
assert isinstance(price, np.ndarray) , 'price must be a numpy array'
# create list of exit indices and add max trade duration. Exits are relative to entry bar
if timeout: # set trade length to timeout or series length
exits = [min(timeout,len(price)-entryBar-1)]
else:
exits = [len(price)-entryBar-1]
p = price[entryBar:entryBar+exits[0]+1] # subseries of price
# extend exits list with conditional exits
# check upper bracket
if upper:
assert upper>p[0] , 'Upper bracket must be higher than entry price '
idx = np.where(p>upper)[0] # find where price is higher than the upper bracket
if idx.any():
exits.append(idx[0]) # append first occurence
# same for lower bracket
if lower:
assert lower<p[0] , 'Lower bracket must be lower than entry price '
idx = np.where(p<lower)[0]
if idx.any():
exits.append(idx[0])
exitBar = min(exits) # choose first exit
return p[exitBar], exitBar
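# --- added illustration, not part of the original module ---------------------
# A minimal, hedged usage sketch of tradeBracket; the price series below is
# invented purely to demonstrate the bracket semantics documented above.
def _trade_bracket_example():
    demo_price = np.array([10.0, 10.5, 11.2, 9.8, 9.4, 10.1])
    exit_price, bars_held = tradeBracket(demo_price, entryBar=1,
                                         upper=11.0, lower=9.5, timeout=4)
    # entry is at bar 1 (price 10.5); the upper bracket is hit first, so this
    # returns (11.2, 1)
    return exit_price, bars_held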
class Backtest(object):
"""
Backtest class, simple vectorized one. Works with pandas objects.
"""
def __init__(self,price, signal, signalType='capital',initialCash = 0, roundShares=True):
"""
Arguments:
*price* Series with instrument price.
*signal* Series with capital to invest (long+,short-) or number of shares.
        *signalType* 'capital' (amount to bet) or 'shares' (number of shares); 'capital' mode is the default.
*initialCash* starting cash.
*roundShares* round off number of shares to integers
"""
#TODO: add auto rebalancing
# check for correct input
assert signalType in ['capital','shares'], "Wrong signal type provided, must be 'capital' or 'shares'"
#save internal settings to a dict
self.settings = {'signalType':signalType}
# first thing to do is to clean up the signal, removing nans and duplicate entries or exits
self.signal = signal.ffill().fillna(0)
# now find dates with a trade
tradeIdx = self.signal.diff().fillna(0) !=0 # days with trades are set to True
if signalType == 'shares':
self.trades = self.signal[tradeIdx] # selected rows where tradeDir changes value. trades are in Shares
elif signalType =='capital':
self.trades = (self.signal[tradeIdx]/price[tradeIdx])
if roundShares:
self.trades = self.trades.round()
# now create internal data structure
self.data = pd.DataFrame(index=price.index , columns = ['price','shares','value','cash','pnl'])
self.data['price'] = price
self.data['shares'] = self.trades.reindex(self.data.index).ffill().fillna(0)
self.data['value'] = self.data['shares'] * self.data['price']
delta = self.data['shares'].diff() # shares bought sold
self.data['cash'] = (-delta*self.data['price']).fillna(0).cumsum()+initialCash
self.data['pnl'] = self.data['cash']+self.data['value']-initialCash
@property
def sharpe(self):
''' return annualized sharpe ratio of the pnl '''
pnl = (self.data['pnl'].diff()).shift(-1)[self.data['shares']!=0] # use only days with position.
return sharpe(pnl) # need the diff here as sharpe works on daily returns.
@property
def pnl(self):
'''easy access to pnl data column '''
return self.data['pnl']
def plotTrades(self):
"""
visualise trades on the price chart
long entry : green triangle up
short entry : red triangle down
exit : black circle
"""
l = ['price']
p = self.data['price']
p.plot(style='x-')
# ---plot markers
# this works, but I rather prefer colored markers for each day of position rather than entry-exit signals
# indices = {'g^': self.trades[self.trades > 0].index ,
# 'ko':self.trades[self.trades == 0].index,
# 'rv':self.trades[self.trades < 0].index}
#
#
# for style, idx in indices.iteritems():
# if len(idx) > 0:
# p[idx].plot(style=style)
# --- plot trades
#colored line for long positions
idx = (self.data['shares'] > 0) | (self.data['shares'] > 0).shift(1)
if idx.any():
p[idx].plot(style='go')
l.append('long')
#colored line for short positions
idx = (self.data['shares'] < 0) | (self.data['shares'] < 0).shift(1)
if idx.any():
p[idx].plot(style='ro')
l.append('short')
plt.xlim([p.index[0],p.index[-1]]) # show full axis
plt.legend(l,loc='best')
plt.title('trades')
class ProgressBar:
def __init__(self, iterations):
self.iterations = iterations
self.prog_bar = '[]'
self.fill_char = '*'
self.width = 50
self.__update_amount(0)
def animate(self, iteration):
        sys.stdout.write('\r' + str(self))
sys.stdout.flush()
self.update_iteration(iteration + 1)
def update_iteration(self, elapsed_iter):
self.__update_amount((elapsed_iter / float(self.iterations)) * 100.0)
self.prog_bar += ' %d of %s complete' % (elapsed_iter, self.iterations)
def __update_amount(self, new_amount):
percent_done = int(round((new_amount / 100.0) * 100.0))
all_full = self.width - 2
num_hashes = int(round((percent_done / 100.0) * all_full))
self.prog_bar = '[' + self.fill_char * num_hashes + ' ' * (all_full - num_hashes) + ']'
pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))
pct_string = '%d%%' % percent_done
self.prog_bar = self.prog_bar[0:pct_place] + \
(pct_string + self.prog_bar[pct_place + len(pct_string):])
def __str__(self):
return str(self.prog_bar)
def sharpe(pnl):
return np.sqrt(250)*pnl.mean()/pnl.std()
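# --- added illustration, not part of the original module ---------------------
# Hedged sketch of how the Backtest class above is typically driven; 'price'
# and 'signal' are invented Series, nothing else about the TWP package is assumed.
def _backtest_example():
    idx = pd.date_range('2014-01-01', periods=6)
    price = pd.Series([10.0, 10.5, 11.0, 10.8, 11.2, 11.5], index=idx)
    signal = pd.Series([0, 100, 100, 0, 0, 0], index=idx)  # capital to invest
    bt = Backtest(price, signal, signalType='capital')
    return bt.pnl.iloc[-1], bt.sharpe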
| bsd-3-clause |
sumspr/scikit-learn | sklearn/mixture/tests/test_dpgmm.py | 261 | 4490 | import unittest
import sys
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less, assert_equal
from sklearn.mixture.tests.test_gmm import GMMTester
from sklearn.externals.six.moves import cStringIO as StringIO
np.seterr(all='warn')
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
def test_verbose_boolean():
# checks that the output for the verbose output is the same
# for the flag values '1' and 'True'
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm_bool = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=True)
dpgmm_int = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
# generate output with the boolean flag
dpgmm_bool.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
bool_output = verbose_output.readline()
# generate output with the int flag
dpgmm_int.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
int_output = verbose_output.readline()
assert_equal(bool_output, int_output)
finally:
sys.stdout = old_stdout
def test_verbose_first_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_log_normalize():
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
assert np.allclose(v, log_normalize(a), rtol=0.01)
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
| bsd-3-clause |
13anjou/Research-Internship | Extraction/SlopeStudy.py | 1 | 20006 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
import numpy #Module used for the linear regressions
from pylab import * #For plotting
import matplotlib.pyplot as plt
from decimal import*
import gc
import csv
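# (added summary, not in the original file) optimisation_decoupage below does a
# greedy piecewise-linear segmentation of a binned rank/abundance curve: points
# are appended to the current segment while the weighted fitting error stays
# below the threshold k, otherwise a new segment is started; it then plots the
# data with the fitted segments, saves the figure, and returns the segment
# boundaries, slopes, segment lengths and their weights.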
def optimisation_decoupage(data,k,tailleBin,nom,rank,sN,dictionnaire) : #data is the input vector, k is the weight variable
clf()
gc.collect()
donnees=list()
longueurs=list()
poidsDesPentesAvant = list()
data.pop()
tailleBin.pop()
rank.pop()
donnees=data
del data
gc.collect()
data=donnees
del donnees
#////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#MODIF LE 04/12
#/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
data.reverse()
rank.reverse()
tailleBin.reverse()
gc.collect()
longueur=len(data)
    w=0 #w is the total weight of the structure
#////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#MODIF LE 10/12
#/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    #solution = [0]*(longueur+1) #the list containing the boundaries of our solution
    #pentes = [0]*(2*(longueur+1)) #the list containing the slope and intercept of each curve segment
    #poids_segments = [0]*(longueur+1) #the weights of each segment
    solution = [0]*(longueur+2) #the list containing the boundaries of our solution
    pentes = [0]*(2*(longueur+2)) #the list containing the slope and intercept of each curve segment
    poids_segments = [0]*(longueur+2) #the weights of each segment
    i=1 #the first free index in solution; the first 0 is the start of the first segment
    n=0 #the index into dots
compteur=0
a_considerer=True
nb_sautes=0
while n<(longueur+1) :
#print(n)
compteur=compteur+1
p_avec=0
if a_considerer :
if n==0: #On initialise
print(longueur)
solution[i]=5 #Le premier point est la première borne du nouveau segment
#On récupère directement les 5 premiers points pour avoir un segment de courbe significatif
i=i+1
a_considerer=False
nb_sautes=0
w,a,b,poids_segments=poids(solution,k,data,poids_segments,tailleBin,rank)
else :
avec = solution
avec[i-1]=avec[i-1]+1 #On ajoute le point au segment en cours
poids_segments_avec=poids_segments
p_avec,a,b,poids_segments_avec=poids(avec,k,data,poids_segments_avec,tailleBin,rank) #On calcul le poids de la nouvelle solution
#print("on compare le poids relatif {} avec le poids précédent pondéré {}".format((p_avec+k*abs(pentes[2*i-4]-a)),w*k*10))
#if p_avec<w*k+3 : #Si la solution nouvelle a un poids plus faible. MODIFICATION 27/11
#////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#MODIF LE 27/11
#/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
if p_avec<k : #Si la solution nouvelle a un poids plus faible.
#print("on choisit de conserver ce nouveau vecteur")
solution = avec #On conserve cette solution
pentes[2*i-4]=a #On change la pente du segment qui lui colle le mieux avec la nouvelle valeur
pentes[2*i-3]=b #Idem avec l'ordonnée à l'origine
poids_segments=poids_segments_avec
#////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#MODIF LE 27/11
#/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#poids_segments[i-1]=poids_segments[i-1]/10 MODIFICATION 27/11
print("les donnees des poids sont : {}".format(poids_segments))
w=p_avec #On conserve le poids de la nouvelle solution
else :
#print("on conserve l'ancien segment et on cree une nouvelle courbe")
print("les poids valent {}".format(poids_segments))
solution[i-1]=solution[i-1]-1
solution[i]=min(n+5,longueur-1) #Sinon on démare un nouveau segment : n est l'indice du premier point de ce segment
i=i+1 #On va chercher la borne suivante
a_considerer=False
nb_sautes=0
n=n+1
else :
n=n+1
nb_sautes=nb_sautes+1
if (nb_sautes==5) :
w,a,b,poids_segments=poids(solution,k,data,poids_segments,tailleBin,rank)
pentes[2*i-4]=a #On change la pente du segment qui lui colle le mieux avec la nouvelle valeur
pentes[2*i-3]=b #Idem avec l'ordonnée à l'origine
a_considerer=True
elif n==longueur-1 :
solution[i-1]=solution[i-1]-1
w,a,b,poids_segments=poids(solution,k,data,poids_segments,tailleBin,rank)
pentes[2*i-4]=a #On change la pente du segment qui lui colle le mieux avec la nouvelle valeur
pentes[2*i-3]=b #Idem avec l'ordonnée à l'origine
a_considerer=True
if n==(longueur):
solution[i]=n
print("les donnees des courbes sont {}".format(pentes))
break
l=len(data) #Préparation des axes
indice=-1
for dot in solution : #On enleve les derniers points qui correspondent a des bins partiels
indice=indice+1
if dot==100 :
solution[indice]=98
indice=0
clf()
indice = 0
t=0
#z=scatter(rank,data)
longueur3=len(pentes) #On va supprimer lse zeros inutiles et artificiels
while indice<longueur3-1 :
if pentes[indice+1]==0:
break
indice=indice+1
t=(indice+1)/2
pentes=pentes[0:2*t]
solution=solution[0:t+1]
#////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#MODIF LE 04/12
#/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
print(pentes)
print(solution)
print(rank)
pentes = inverserPentes(pentes)
solution=inverserSol(solution)
data.reverse()
rank.reverse()
tailleBin.reverse()
#print(pentes)
print(solution)
print(data)
#print(rank)
z=scatter(rank,data) #On trace
indice=0 #Mauvais intervalle
#////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#MODIF LE 10/12
#/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
if solution != [] :
y1=linspace(0,rank[solution[1]-1],20)
print(0,rank[solution[1]-1],pentes[0],pentes[1],solution[1]-1)
plot(y1,pentes[0]*(y1)+pentes[1])
longueurs = longueurs + [rank[solution[1]-1]]
poidsDesPentesAvant = poidsDesPentesAvant + [poidsCalc(0,solution[1]-1,pentes[0],pentes[1],data,rank)]
if t>1 :
y2=linspace(rank[solution[1]-1],rank[solution[2]-1],20)
plot(y2,pentes[2]*(y2)+pentes[3])
longueurs = longueurs + [rank[solution[2]-1]-rank[solution[1]-1]]
poidsDesPentesAvant = poidsDesPentesAvant + [poidsCalc(solution[1]-1,solution[2]-1,pentes[2],pentes[3],data,rank)]
if t>2 :
y3=linspace(rank[solution[2]-1],rank[solution[3]-1],20)
plot(y3,pentes[4]*(y3)+pentes[5])
longueurs = longueurs + [rank[solution[3]-1]-rank[solution[2]-1]]
poidsDesPentesAvant = poidsDesPentesAvant + [poidsCalc(solution[2]-1,solution[3]-1,pentes[4],pentes[5],data,rank)]
if t>3 :
y4=linspace(rank[solution[3]-1],rank[solution[4]-1],20)
plot(y4,pentes[6]*(y4)+pentes[7])
longueurs = longueurs + [rank[solution[4]-1]-rank[solution[3]-1]]
poidsDesPentesAvant = poidsDesPentesAvant + [poidsCalc(solution[3]-1,solution[4]-1,pentes[6],pentes[7],data,rank)]
if t>4 :
y5=linspace(rank[solution[4]-1],rank[solution[5]-1],20)
plot(y5,pentes[8]*(y5)+pentes[9])
longueurs = longueurs + [rank[solution[5]-1]-rank[solution[4]-1]]
poidsDesPentesAvant = poidsDesPentesAvant + [poidsCalc(solution[4]-1,solution[5]-1,pentes[8],pentes[9],data,rank)]
if t>5 :
y6=linspace(rank[solution[5]-1],rank[solution[6]-1],20)
plot(y6,pentes[10]*(y6)+pentes[11])
longueurs = longueurs + [rank[solution[6]-1]-rank[solution[5]-1]]
poidsDesPentesAvant = poidsDesPentesAvant + [poidsCalc(solution[5]-1,solution[6]-1,pentes[10],pentes[11],data,rank)]
if t>6 :
y7=linspace(rank[solution[6]-1],rank[solution[7]-1],20)
plot(y7,pentes[12]*(y7)+pentes[13])
longueurs = longueurs + [rank[solution[7]-1]-rank[solution[6]-1]]
poidsDesPentesAvant = poidsDesPentesAvant + [poidsCalc(solution[6]-1,solution[7]-1,pentes[12],pentes[13],data,rank)]
if t>7 :
y8=linspace(rank[solution[7]-1],rank[solution[8]-1],20)
plot(y8,pentes[14]*(y8)+pentes[15])
longueurs = longueurs + [rank[solution[8]-1]-rank[solution[7]-1]]
poidsDesPentesAvant = poidsDesPentesAvant + [poidsCalc(solution[7]-1,solution[8]-1,pentes[14],pentes[15],data,rank)]
if t>8 :
y9=linspace(rank[solution[8]-1],rank[solution[9]-1],20)
plot(y9,pentes[16]*(y9)+pentes[17])
longueurs = longueurs + [rank[solution[9]-1]-rank[solution[8]-1]]
poidsDesPentesAvant = poidsDesPentesAvant + [poidsCalc(solution[8]-1,solution[9]-1,pentes[16],pentes[17],data,rank)]
if t>9 :
y10=linspace(rank[solution[9]-1],rank[solution[10]-1],20)
plot(y10,pentes[18]*(y10)+pentes[19])
longueurs = longueurs + [rank[solution[10]-1]-rank[solution[9]-1]]
poidsDesPentesAvant = poidsDesPentesAvant + [poidsCalc(solution[9]-1,solution[10]-1,pentes[18],pentes[19],data,rank)]
if t>10:
y11=linspace(rank[solution[10]-1],rank[solution[11]-1],20)
plot(y11,pentes[20]*(y11)+pentes[21])
longueurs = longueurs + [rank[solution[11]-1]-rank[solution[10]-1]]
poidsDesPentesAvant = poidsDesPentesAvant + [poidsCalc(solution[10]-1,solution[11]-1,pentes[20],pentes[21],data,rank)]
if t>11:
y11=linspace(rank[solution[11]-1],rank[solution[12]-1],20)
plot(y11,pentes[22]*(y11)+pentes[23])
longueurs = longueurs + [rank[solution[12]-1]-rank[solution[11]-1]]
poidsDesPentesAvant = poidsDesPentesAvant + [poidsCalc(solution[11]-1,solution[12]-1,pentes[22],pentes[23],data,rank)]
if t>12:
y11=linspace(rank[solution[12]-1],rank[solution[13]-1],20)
plot(y11,pentes[24]*(y11)+pentes[25])
longueurs = longueurs + [rank[solution[13]-1]-rank[solution[12]-1]]
poidsDesPentesAvant = poidsDesPentesAvant + [poidsCalc(solution[12]-1,solution[13]-1,pentes[24],pentes[25],data,rank)]
if t>13:
y11=linspace(rank[solution[13]-1],rank[solution[14]-1],20)
plot(y11,pentes[26]*(y11)+pentes[27])
longueurs = longueurs + [rank[solution[14]-1]-rank[solution[13]-1]]
poidsDesPentesAvant = poidsDesPentesAvant + [poidsCalc(solution[13]-1,solution[14]-1,pentes[26],pentes[27],data,rank)]
if t>14:
y11=linspace(rank[solution[14]-1],rank[solution[15]-1],20)
plot(y11,pentes[28]*(y11)+pentes[29])
longueurs = longueurs + [rank[solution[15]-1]-rank[solution[14]-1]]
poidsDesPentesAvant = poidsDesPentesAvant + [poidsCalc(solution[14]-1,solution[15]-1,pentes[28],pentes[29],data,rank)]
if t>15:
y11=linspace(rank[solution[15]-1],rank[solution[16]-1],20)
plot(y11,pentes[30]*(y11)+pentes[31])
longueurs = longueurs + [rank[solution[16]-1]-rank[solution[15]-1]]
poidsDesPentesAvant = poidsDesPentesAvant + [poidsCalc(solution[15]-1,solution[16]-1,pentes[30],pentes[31],data,rank)]
if t>16:
y11=linspace(rank[solution[16]-1],rank[solution[17]-1],20)
plot(y11,pentes[32]*(y11)+pentes[33])
longueurs = longueurs + [rank[solution[17]-1]-rank[solution[16]-1]]
poidsDesPentesAvant = poidsDesPentesAvant + [poidsCalc(solution[16]-1,solution[17]-1,pentes[32],pentes[33],data,rank)]
if t>17:
y11=linspace(rank[solution[17]-1],rank[solution[18]-1],20)
plot(y11,pentes[34]*(y11)+pentes[35])
longueurs = longueurs + [rank[solution[18]-1]-rank[solution[17]-1]]
poidsDesPentesAvant = poidsDesPentesAvant + [poidsCalc(solution[17]-1,solution[18]-1,pentes[34],pentes[35],data,rank)]
if t>18:
y11=linspace(rank[solution[18]-1],rank[solution[19]-1],20)
plot(y11,pentes[36]*(y11)+pentes[37])
longueurs = longueurs + [rank[solution[19]-1]-rank[solution[18]-1]]
poidsDesPentesAvant = poidsDesPentesAvant + [poidsCalc(solution[18]-1,solution[19]-1,pentes[36],pentes[37],data,rank)]
if t>19:
y11=linspace(rank[solution[19]-1],rank[solution[20]-1],20)
plot(y11,pentes[38]*(y11)+pentes[39])
longueurs = longueurs + [rank[solution[20]-1]-rank[solution[19]-1]]
poidsDesPentesAvant = poidsDesPentesAvant + [poidsCalc(solution[19]-1,solution[20]-1,pentes[38],pentes[39],data,rank)]
if t>20:
y11=linspace(rank[solution[20]-1],rank[solution[21]-1],20)
plot(y11,pentes[40]*(y11)+pentes[41])
longueurs = longueurs + [rank[solution[21]-1]-rank[solution[20]-1]]
poidsDesPentesAvant = poidsDesPentesAvant + [poidsCalc(solution[20]-1,solution[21]-1,pentes[40],pentes[41],data,rank)]
gc.collect()
yticks(fontsize = 20)
xticks(fontsize = 20)
plt.xlabel("Rank", fontsize = 20)
plt.ylabel("Abundance", fontsize = 20)
plt.legend()
plt.title('Rank/abundance distribution for the station {}'.format(sN),fontsize = 20)
try :
savefig('C:\\Users\\valentin\Desktop\\boulot_mines\\S3Recherche\\BDD\\ProtistTV9subtabs293\\analyse\\station_{}_{}_{}_{}_{}graph.png'.format(nom,k,sN,dictionnaire[sN][0],dictionnaire[sN][1]))
except IndexError:
savefig('C:\\Users\\valentin\Desktop\\boulot_mines\\S3Recherche\\BDD\\ProtistTV9subtabs293\\analyse\\station_{}_{}graph.png'.format(nom,k))
except KeyError:
savefig('C:\\Users\\valentin\Desktop\\boulot_mines\\S3Recherche\\BDD\\ProtistTV9subtabs293\\analyse\\station_{}_{}graph.png'.format(nom,k))
del z
clf()
del y1
if t>1 :
del y2
if t>2 :
del y3
if t>3 :
del y4
if t>4 :
del y5
if t>5 :
del y6
if t>6 :
del y7
if t>7 :
del y8
if t>8 :
del y9
if t>9 :
del y10
if t>10:
del y11
pentesAConsiderer,tR,sN1,longueursBis,poidsDesPentes = modifPentes(solution,pentes,t,sN,longueurs,poidsDesPentesAvant)
return solution,pentes,t,pentesAConsiderer,tR,sN1,longueurs,longueursBis,poidsDesPentesAvant,poidsDesPentes
def poids(solution,k,data,poids_liste,tailleBin,rank) :
w=0
    m=0 #an index
    l=0 #a second index
    j=0 #a third index
    n=0 #a fourth index
a=0
b=0
getcontext().prec=20
gc.collect()
try:
while solution[m+1]!=0 : #Tant qu'on n'est pas sur le dernier segment exclut
if solution[m+2]!=0 : #Tant qu'on n'est pas sur le denrier segment inclut
#////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#MODIF LE 27/11
#/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#w=poids_liste[m]+w
pass
else : #Si on est sur le dernier segment
l=solution[m]
j=solution[m+1]
vect_en_cours=data[l:j] #On récupère juste le morceau de courbe qui nous intéresse ici
a,b,c,d= fitting(vect_en_cours,tailleBin[l:j],rank[l:j])
nb=j-l #le nombre de points dans le segment étudié
while n<nb : #Tant qu'on est sur le segment
#////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#MODIF LE 27/11
#/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#MODIF IMPORTANTE ICI LE 27/11
#val_courbe=b+n*a #La valeur sur la courbe
val_courbe=b+rank[l+n]*a #La valeur sur la courbe
ecart = float(abs(val_courbe-vect_en_cours[n]))
ecart=ecart**2 #L'écart avec les données
ratio=abs(vect_en_cours[n])
if ratio==0 : #Juste au cas ou, ces points ayant normalement ete supprimes dans le binning
ratio=1
pos=l+n
coeff=abs(Decimal(tailleBin[pos]))
#////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#MODIF LE 02/12
#/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
poids_liste[m]=float(Decimal(poids_liste[m])+Decimal(ecart)*coeff/Decimal(ratio)) #Le poids ajouté pondéré par le nombre de points incluts dans le bining
#poids_liste[m]=float(Decimal(poids_liste[m])+Decimal(ecart)) #Le poids ajouté pondéré par le nombre de points incluts dans le bining
n=n+1
#////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#MODIF LE 05/12
#/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#w=poids_liste[m]+w #On obtient le poids finial
#w=poids_liste[m]+w/n #On obtient le poids finial
#////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#MODIF LE 27/11
#/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#w=poids_liste[m]+w/n #On obtient le poids finial
r=1
for nbBin in tailleBin[l:j] :
r=r+nbBin
w=poids_liste[m]+w/r
m=m+1
except IndexError:
#print("Les bornes des vecteurs sont depassees : k n'est surement pas adapte (on cree trop de segments)")
pass
return w,a,b,poids_liste
def fitting(vecteur_en_cours,tailleBinPartie,rank) :
i=0
ordonnee = vecteur_en_cours
for x in rank :
rank[i]=float(rank[i])
ordonnee[i]=float(ordonnee[i])
i=i+1
l=len(vecteur_en_cours)
k=0 #Un indice
j=0 #Un deuxième indice
#Les ordonnées servant à la regression linéaire
#vec=polyfit(rank,ordonnee,1,None,False,tailleBinPartie)
vec=polyfit(rank,ordonnee,1)
#print(vec)
a=vec[0]
b=vec[1]
gc.collect()
return a,b,0,l
def modifPentes(solution,pentes,t,sN,longueurs,poidsDesPentesAvant) :
inf=0 #La pos inf
debut=True #Initialisation
maxi=0 #La pos sup
tR=t
indice=-1
pentesAConsiderer=list()
sN1=list()
resLongueur=list()
poidsDesPentes = list()
#print(pentes)
for dot in solution :
if debut : #Initialisation
maxi=dot
debut = False
elif dot ==0 :
break
else :
inf=maxi
maxi=dot
if maxi-inf<10 : #On enlève la pente qui n'est pas intéressante
tR=tR-1
else :
#print(len(pentes))
pentesAConsiderer=pentesAConsiderer+[pentes[2*indice]] #On garde la pente
sN1=sN1+[sN]
resLongueur = resLongueur + [longueurs[indice]]
poidsDesPentes = poidsDesPentes + [poidsDesPentesAvant[indice]]
indice=indice+1
#////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#MODIF LE 01/12
#/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
"""test = True
while test :
test = False
indice = 0
l=len(pentesAConsiderer)
while indice < l :
if indice >0 and not test :
if (pentesAConsiderer[indice]-pentesAConsiderer[indice-1])/pentesAConsiderer[indice] < 0.1 * pentesAConsiderer[indice] :
pentesAConsiderer[indice-1] = (pentesAConsiderer[indice-1]*resLongueur[indice-1]+pentesAConsiderer[indice]/resLongueur[indice])/(resLongueur[indice-1]+resLongueur[indice])
pentesAConsiderer = retirer(pentesAConsiderer,indice)
tR = tR-1
resLongueur[indice-1] = resLongueur[indice-1]+resLongueur[indice]
resLongueur = retirer(resLongueur,indice)
l=l-1
sN1 = retirer(sN1,indice)
test = True
#print("MODIFICATION")
break
indice = indice + 1
if tR>2 :
tR=2
l=len(sN1)
pentesAConsiderer = pentesAConsiderer[l-2:l]
sN1 = sN1[l-2:l]
resLongueur=resLongueur[l-2:l]
poidsDesPentes=poidsDesPentes[l-2:l]"""
return(pentesAConsiderer,tR,sN1,resLongueur,poidsDesPentes)
def poidsCalc(rang1,rang2,a,b,data,rank) :
#y=ax+b
p = 0
i=0
a=float(a)
b=float(b)
data = data[rang1:rang2]
rank = rank[rang1:rang2]
for d in data :
p = p + (float(d)-(a*float(rank[i])+b))**2
i = i+1
return(p)
def inverserSol(liste) :
maximum = liste[-1]
liste=[maximum-x for x in liste]
liste.sort()
return(liste)
def inverserPentes(pentes) :
res= list()
p=0
coef1 = True
for p in pentes :
if coef1:
coef1 = False
a=p
else :
coef1 = True
res= [a,p]+res
return(res) | gpl-2.0 |
MadsJensen/malthe_alpha_project | stats_test_induced.py | 1 | 3802 | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 28 12:35:33 2015
@author: mje
"""
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.time_frequency import single_trial_power
from mne.stats import permutation_cluster_test
###############################################################################
# Set parameters
# 'data_path' is assumed to be the MNE sample-dataset root; one common way to get it:
from mne.datasets import sample
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
event_id = 1
tmin = 0.7
tmax = 2
# Setup for reading the raw data
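# NOTE (added): 'foo' is never defined in this script; it is assumed to be a
# pre-loaded mne.Epochs object containing the conditions used below
# ("ctl_L", "ctl_R", "ent_L", "ent_R"), e.g. something along the lines of
# foo = mne.read_epochs('<subject>-epo.fif')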
ch_name = foo.info['ch_names'][220]
# Load condition 1
data_condition_1 = foo["ctl_L", "ctl_R"].get_data() # as 3D matrix
data_condition_1 *= 1e13 # change unit to fT / cm
data_condition_2 = foo["ent_L", "ent_R"].get_data() # as 3D matrix
data_condition_2 *= 1e13 # change unit to fT / cm
# Take only one channel
data_condition_1 = data_condition_1[:, 220:221, :]
data_condition_2 = data_condition_2[:, 220:221, :]
# Time vector
times = 1e3 * foo.times # change unit to ms
# Factor to downsample the temporal dimension of the PSD computed by
# single_trial_power. Decimation occurs after frequency decomposition and can
# be used to reduce memory usage (and possibly computational time of downstream
# operations such as nonparametric statistics) if you don't need high
# spectrotemporal resolution.
decim = 1
frequencies = np.arange(8, 13, 1) # define frequencies of interest
sfreq = foo.info['sfreq'] # sampling in Hz
n_cycles = frequencies / 2.
foo_power_1 = single_trial_power(data_condition_1, sfreq=sfreq,
frequencies=frequencies,
n_cycles=n_cycles, decim=decim)
foo_power_2 = single_trial_power(data_condition_2, sfreq=sfreq,
frequencies=frequencies,
n_cycles=n_cycles, decim=decim)
foo_power_1 = foo_power_1[:, 0, :, :] # only 1 channel to get 3D matrix
foo_power_2 = foo_power_2[:, 0, :, :] # only 1 channel to get 3D matrix
# Compute ratio with baseline power (be sure to correct time vector with
# decimation factor)
baseline_mask = times[::decim] < 950
foo_baseline_1 = np.mean(foo_power_1[:, :, baseline_mask], axis=2)
foo_power_1 /= foo_baseline_1[..., np.newaxis]
foo_baseline_2 = np.mean(foo_power_2[:, :, baseline_mask], axis=2)
foo_power_2 /= foo_baseline_2[..., np.newaxis]
###############################################################################
# Compute statistic
threshold = 4
T_obs, clusters, cluster_p_values, H0 = \
permutation_cluster_test([foo_power_1, foo_power_2],
n_permutations=5000, threshold=threshold, tail=0)
###############################################################################
# View time-frequency plots
plt.clf()
plt.subplots_adjust(0.12, 0.08, 0.96, 0.94, 0.2, 0.43)
plt.subplot(2, 1, 1)
evoked_contrast = np.mean(data_condition_1, 0) - np.mean(data_condition_2, 0)
plt.plot(times, evoked_contrast.T)
plt.title('Contrast of evoked response (%s)' % ch_name)
plt.xlabel('time (ms)')
plt.ylabel('Magnetic Field (fT/cm)')
plt.xlim(times[0], times[-1])
plt.ylim(-100, 200)
plt.subplot(2, 1, 2)
# Create new stats image with only significant clusters
T_obs_plot = np.nan * np.ones_like(T_obs)
for c, p_val in zip(clusters, cluster_p_values):
if p_val <= 0.05:
T_obs_plot[c] = T_obs[c]
plt.imshow(T_obs,
extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
aspect='auto', origin='lower', cmap='RdBu_r')
plt.imshow(T_obs_plot,
extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
aspect='auto', origin='lower', cmap='RdBu_r')
plt.xlabel('time (ms)')
plt.ylabel('Frequency (Hz)')
plt.title('Induced power (%s)' % ch_name)
plt.show()
| mit |
gfrubi/GR | figuras-editables/fig-fermi-rel-masa-radio.py | 2 | 3841 | # -*- coding: utf-8 -*-
from matplotlib.pyplot import *
from numpy import *
from scipy.integrate import odeint, quad
def dm_dt(m_t, equis):
    #returns (dm, dt)
m = m_t[0]
t = m_t[1]
dmdx = equis**2*(sinh(t)- t)
dtdx = (-4.0/(equis*(equis-2.0*m)))*((equis**3/3.0)*(sinh(t)
- 8.0*sinh(t/2.0) + 3.0*t) + m)*((sinh(t) - 2.0*sinh(t/2.0))/(cosh(t)-4.0*cosh(t/2.0)+3.0))
#print(unoportres)
return (dmdx, dtdx)
####################################################################
####### Relativistic part ##########################################
####################################################################
te_0b = linspace(0.1,10.5,1200)
equis = linspace(1.0e-6, 15.0, 1000)
x1s = []
m1s = []
for t in te_0b:
m0_t0 = [0.0, t]
sol = odeint(dm_dt, m0_t0, equis)
    if len(where(sol[:,0]<0)[0]) != 0:
        pos = (where(sol[:,0] < 0)[0][0])-1 #has some NaNs, which is why it fails
    elif len(where(isnan(sol[:,0])==True)[0]) != 0:
        pos = where(isnan(sol[:,0])==True)[0][0]-1 #this handles the solution for t
    dte = dm_dt([sol[pos,0], sol[pos,1]], equis[pos])[1] #the value of dt at pos
    #here we can only do a linear extrapolation (a straight line), since we do not have the second derivative
    x1 = equis[pos] - sol[pos,1] / dte
    x1s.append(x1)
    dme = dm_dt([sol[pos,0], sol[pos,1]], equis[pos])[0] #the value of dm at pos
    m1 = dme * (x1 - equis[pos]) + sol[pos,0] #this is m(x1)
m1s.append(m1)
densidad = 5.725e17*(sinh(te_0b) - te_0b) # kg/m^3
radio = 13.683*array(x1s) # km
masa = 9.2648*array(m1s) #in solar masses
def dphialt(phi, eta, iyo):
return(phi[1], -2.0*phi[1]/eta-(abs(phi[0]**2 - iyo**2))**(1.5))
def dphi(phi, eta, iyo):
return(phi[1], -2.0*phi[1]/eta-(phi[0]**2 - iyo**2 )**(1.5))
####################################################################
####### Newtonian part ###########################################
####################################################################
phi0 = [1.0, 0.0] #Same initial conditions as in Lane-Emden
eta = linspace(1.0e-30, 40.0, 1000)
eta1s = []
eta2Tpx1s = []
radioNS = []
masaNS = []
yceros = linspace(0.0, 0.9997, 1000)
for y in yceros:
sol = odeint(dphialt, phi0, eta, args=(y,))
pos = (where(sol[:,0] - y < 0)[0][0])-1
    ddphi = dphi([sol[pos,0], sol[pos,1]], eta[pos], y)[1] #second derivative at the last position
#print(ddphi)
Deta0 = roots([0.5*ddphi,sol[pos,1],sol[pos,0]-y])
Deta = sort(Deta0[where(Deta0>0)])[0]
eta1 = eta[pos]+Deta
eta1s.append(eta1)
eta2Tpx1 = -eta1**2.0*(sol[pos,1] + Deta*ddphi)
eta2Tpx1s.append(eta2Tpx1)
radioNS = 4.18945*array(eta1s)*yceros
masaNS = 2.83673*array(eta2Tpx1s)
rhoNS = 6.10656e18 * (yceros**(-2.0) - 1.0)**(3.0/2)
erreNS = linspace(1.0e-30, 30, 10000)
emeNS = 3.4518*(10.0/erreNS)**3 #In solar masses
# Ultra-relativistic case, high density.
Mch = 5.7252
colores = ['blue','brown','red', 'purple']
dasheses = [[],[5,2],[5,2,2,2],[2,2]]
masita = linspace(0,6,10000)
radio_s = 2.953699653*masita #Schwarzschild radius
figure(figsize=(8,6))
plot(masaNS, radioNS, colores[2], dashes=dasheses[1], linewidth=1.50, label='Fermi $\mu=2$ newtoniano')
plot(emeNS, erreNS, colores[0], dashes=dasheses[2], linewidth=1.50, label='Lane-Emden $\gamma=5/3$ newtoniano')
plot(masa, radio, colores[3], dashes=dasheses[0], linewidth=1.50, label='Fermi relativista')
plot(masita, radio_s, colores[1], dashes=dasheses[3], linewidth=1.50, label='$R_{Sch}$')
vlines(Mch,0,30, label='$M_{Ch}/M_{solar}$',linestyles='dashdot')
xlim(0,6)
ylim(0,30)
xlabel('masa $[M/M_{\odot}]$',fontsize=15)
ylabel('radio $[km]$',fontsize=15)
scatter(1,10, c='black')
grid(linestyle='dotted')
legend()
savefig('../fig/fig-fermi-rel-masa-radio.pdf')
#show()
| gpl-3.0 |
chris1610/pbpython | code/create_ppt.py | 1 | 5119 | """
See http://pbpython.com/creating-powerpoint.html for details on this script
Requires https://python-pptx.readthedocs.org/en/latest/index.html
Example program showing how to read in Excel, process with pandas and
output to a PowerPoint file.
"""
from __future__ import print_function
from pptx import Presentation
from pptx.util import Inches
import argparse
import pandas as pd
import numpy as np
from datetime import date
import matplotlib.pyplot as plt
import seaborn as sns
def df_to_table(slide, df, left, top, width, height, colnames=None):
"""Converts a Pandas DataFrame to a PowerPoint table on the given
Slide of a PowerPoint presentation.
The table is a standard Powerpoint table, and can easily be modified with the Powerpoint tools,
for example: resizing columns, changing formatting etc.
Arguments:
- slide: slide object from the python-pptx library containing the slide on which you want the table to appear
- df: Pandas DataFrame with the data
Optional arguments:
- colnames
https://github.com/robintw/PandasToPowerpoint/blob/master/PandasToPowerpoint.py
"""
rows, cols = df.shape
res = slide.shapes.add_table(rows + 1, cols, left, top, width, height)
if colnames is None:
colnames = list(df.columns)
# Insert the column names
for col_index, col_name in enumerate(colnames):
# Column names can be tuples
if not isinstance(col_name, str):
col_name = " ".join(col_name)
res.table.cell(0, col_index).text = col_name
m = df.as_matrix()
for row in range(rows):
for col in range(cols):
val = m[row, col]
text = str(val)
res.table.cell(row + 1, col).text = text
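# --- added illustration, not part of the original script ----------------------
# Hedged sketch of df_to_table in isolation; the blank presentation, layout
# index and inch values are assumptions chosen only to make the call concrete.
def _df_to_table_example():
    prs = Presentation()  # default template
    slide = prs.slides.add_slide(prs.slide_layouts[5])  # a title-only layout
    df = pd.DataFrame({'Rep': ['A', 'B'], 'Total': [100, 250]})
    df_to_table(slide, df, Inches(0.5), Inches(1.5), Inches(6.0), Inches(2.0))
    return prs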
def parse_args():
""" Setup the input and output arguments for the script
Return the parsed input and output files
"""
parser = argparse.ArgumentParser(description='Create ppt report')
parser.add_argument('infile',
type=argparse.FileType('r'),
help='Powerpoint file used as the template')
parser.add_argument('report',
type=argparse.FileType('r'),
help='Excel file containing the raw report data')
parser.add_argument('outfile',
type=argparse.FileType('w'),
help='Output powerpoint report file')
return parser.parse_args()
def create_pivot(df, index_list=["Manager", "Rep", "Product"],
value_list=["Price", "Quantity"]):
"""
Take a DataFrame and create a pivot table
Return it as a DataFrame pivot table
"""
table = pd.pivot_table(df, index=index_list,
values=value_list,
aggfunc=[np.sum, np.mean], fill_value=0)
return table
def create_chart(df, filename):
""" Create a simple bar chart saved to the filename based on the dataframe
passed to the function
"""
df['total'] = df['Quantity'] * df['Price']
final_plot = df.groupby('Name')['total'].sum().order().plot(kind='barh')
fig = final_plot.get_figure()
fig.set_size_inches(6, 4.5)
fig.savefig(filename, bbox_inches='tight', dpi=600)
def create_ppt(input, output, report_data, chart):
""" Take the input powerpoint file and use it as the template for the output
file.
"""
prs = Presentation(input)
# Use the output from analyze_ppt to understand which layouts and placeholders
# to use
# Create a title slide first
title_slide_layout = prs.slide_layouts[0]
slide = prs.slides.add_slide(title_slide_layout)
title = slide.shapes.title
subtitle = slide.placeholders[1]
title.text = "Quarterly Report"
subtitle.text = "Generated on {:%m-%d-%Y}".format(date.today())
# Create the summary graph
graph_slide_layout = prs.slide_layouts[8]
slide = prs.slides.add_slide(graph_slide_layout)
title = slide.shapes.title
title.text = "Sales by account"
placeholder = slide.placeholders[1]
pic = placeholder.insert_picture(chart)
subtitle = slide.placeholders[2]
subtitle.text = "Results consistent with last quarter"
# Create a slide for each manager
for manager in report_data.index.get_level_values(0).unique():
#print(report_data.xs(manager, level=0).reset_index())
slide = prs.slides.add_slide(prs.slide_layouts[2])
title = slide.shapes.title
title.text = "Report for {}".format(manager)
top = Inches(1.5)
left = Inches(0.25)
width = Inches(9.25)
height = Inches(5.0)
# Flatten the pivot table by resetting the index
# Create a table on the slide
df_to_table(slide, report_data.xs(manager, level=0).reset_index(),
left, top, width, height)
prs.save(output)
if __name__ == "__main__":
args = parse_args()
df = pd.read_excel(args.report.name)
report_data = create_pivot(df)
create_chart(df, "report-image.png")
create_ppt(args.infile.name, args.outfile.name, report_data, "report-image.png")
| bsd-3-clause |
xiaoxiamii/scikit-learn | examples/mixture/plot_gmm_pdf.py | 284 | 1528 | """
=============================================
Density Estimation for a mixture of Gaussians
=============================================
Plot the density estimation of a mixture of two Gaussians. Data is
generated from two Gaussians with different centers and covariance
matrices.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from sklearn import mixture
n_samples = 300
# generate random sample, two components
np.random.seed(0)
# generate spherical data centered on (20, 20)
shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20])
# generate zero centered stretched Gaussian data
C = np.array([[0., -0.7], [3.5, .7]])
stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C)
# concatenate the two datasets into the final training set
X_train = np.vstack([shifted_gaussian, stretched_gaussian])
# fit a Gaussian Mixture Model with two components
clf = mixture.GMM(n_components=2, covariance_type='full')
clf.fit(X_train)
# display predicted scores by the model as a contour plot
x = np.linspace(-20.0, 30.0)
y = np.linspace(-20.0, 40.0)
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
Z = -clf.score_samples(XX)[0]
Z = Z.reshape(X.shape)
CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0),
levels=np.logspace(0, 3, 10))
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.scatter(X_train[:, 0], X_train[:, 1], .8)
plt.title('Negative log-likelihood predicted by a GMM')
plt.axis('tight')
plt.show()
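# (added, hedged) The fitted mixture can also be queried directly; these two
# lines are illustrative and not part of the original example.
labels = clf.predict(X_train)  # hard assignment of each sample to a component
component_means = clf.means_   # fitted component centers, shape (2, 2)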
| bsd-3-clause |
siutanwong/scikit-learn | examples/covariance/plot_sparse_cov.py | 300 | 5078 | """
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too much correlated (limiting the largest coefficient of the
precision matrix) and that there a no small coefficients in the
precision matrix that cannot be recovered. In addition, with a small
number of observations, it is easier to recover a correlation matrix
rather than a covariance, thus we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, as the number
of samples is small, we need to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision, that
is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimated correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that, the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso setting the sparsity of the model is
set by internal cross-validation in the GraphLassoCV. As can be
seen on figure 2, the grid to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
##############################################################################
# Generate the data
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(n_features, alpha=.98,
smallest_coef=.4,
largest_coef=.7,
random_state=prng)
cov = linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
##############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
##############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
('GraphLasso', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
plt.subplot(2, 4, i + 1)
plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
('GraphLasso', prec_), ('True', prec)]
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
ax = plt.subplot(2, 4, i + 5)
plt.imshow(np.ma.masked_equal(this_prec, 0),
interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s precision' % name)
ax.set_axis_bgcolor('.7')
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
plt.plot(model.cv_alphas_, np.mean(model.grid_scores, axis=1), 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
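# (added, hedged) The recovered conditional-independence graph is the non-zero
# pattern of the estimated precision matrix; the threshold below is only an
# assumption used to ignore numerically tiny entries.
estimated_graph = np.abs(prec_) > 1e-4
n_estimated_edges = (estimated_graph.sum() - n_features) // 2  # off-diagonal pairs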
| bsd-3-clause |
harisbal/pandas | pandas/tests/frame/test_replace.py | 2 | 45789 | # -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
from datetime import datetime
import re
from pandas.compat import (zip, range, lrange, StringIO)
from pandas import (DataFrame, Series, Index, date_range, compat,
Timestamp)
import pandas as pd
from numpy import nan
import numpy as np
from pandas.util.testing import (assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameReplace(TestData):
def test_replace_inplace(self):
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
tsframe = self.tsframe.copy()
tsframe.replace(nan, 0, inplace=True)
assert_frame_equal(tsframe, self.tsframe.fillna(0))
# mixed type
mf = self.mixed_frame
mf.iloc[5:20, mf.columns.get_loc('foo')] = nan
mf.iloc[-10:, mf.columns.get_loc('A')] = nan
result = self.mixed_frame.replace(np.nan, 0)
expected = self.mixed_frame.fillna(value=0)
assert_frame_equal(result, expected)
tsframe = self.tsframe.copy()
tsframe.replace([nan], [0], inplace=True)
assert_frame_equal(tsframe, self.tsframe.fillna(0))
def test_regex_replace_scalar(self):
obj = {'a': list('ab..'), 'b': list('efgh')}
dfobj = DataFrame(obj)
mix = {'a': lrange(4), 'b': list('ab..')}
dfmix = DataFrame(mix)
# simplest cases
# regex -> value
# obj frame
res = dfobj.replace(r'\s*\.\s*', nan, regex=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.replace(r'\s*\.\s*', nan, regex=True)
assert_frame_equal(dfmix, res.fillna('.'))
# regex -> regex
# obj frame
res = dfobj.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True)
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True)
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
# everything with compiled regexs as well
res = dfobj.replace(re.compile(r'\s*\.\s*'), nan, regex=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.replace(re.compile(r'\s*\.\s*'), nan, regex=True)
assert_frame_equal(dfmix, res.fillna('.'))
# regex -> regex
# obj frame
res = dfobj.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1')
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1')
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
res = dfmix.replace(regex=re.compile(r'\s*(\.)\s*'), value=r'\1\1\1')
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
res = dfmix.replace(regex=r'\s*(\.)\s*', value=r'\1\1\1')
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
def test_regex_replace_scalar_inplace(self):
obj = {'a': list('ab..'), 'b': list('efgh')}
dfobj = DataFrame(obj)
mix = {'a': lrange(4), 'b': list('ab..')}
dfmix = DataFrame(mix)
# simplest cases
# regex -> value
# obj frame
res = dfobj.copy()
res.replace(r'\s*\.\s*', nan, regex=True, inplace=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.copy()
res.replace(r'\s*\.\s*', nan, regex=True, inplace=True)
assert_frame_equal(dfmix, res.fillna('.'))
# regex -> regex
# obj frame
res = dfobj.copy()
res.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True, inplace=True)
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.copy()
res.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True, inplace=True)
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
# everything with compiled regexs as well
res = dfobj.copy()
res.replace(re.compile(r'\s*\.\s*'), nan, regex=True, inplace=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.copy()
res.replace(re.compile(r'\s*\.\s*'), nan, regex=True, inplace=True)
assert_frame_equal(dfmix, res.fillna('.'))
# regex -> regex
# obj frame
res = dfobj.copy()
res.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1', regex=True,
inplace=True)
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.copy()
res.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1', regex=True,
inplace=True)
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
res = dfobj.copy()
res.replace(regex=r'\s*\.\s*', value=nan, inplace=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.copy()
res.replace(regex=r'\s*\.\s*', value=nan, inplace=True)
assert_frame_equal(dfmix, res.fillna('.'))
# regex -> regex
# obj frame
res = dfobj.copy()
res.replace(regex=r'\s*(\.)\s*', value=r'\1\1\1', inplace=True)
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.copy()
res.replace(regex=r'\s*(\.)\s*', value=r'\1\1\1', inplace=True)
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
# everything with compiled regexs as well
res = dfobj.copy()
res.replace(regex=re.compile(r'\s*\.\s*'), value=nan, inplace=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.copy()
res.replace(regex=re.compile(r'\s*\.\s*'), value=nan, inplace=True)
assert_frame_equal(dfmix, res.fillna('.'))
# regex -> regex
# obj frame
res = dfobj.copy()
res.replace(regex=re.compile(r'\s*(\.)\s*'), value=r'\1\1\1',
inplace=True)
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.copy()
res.replace(regex=re.compile(r'\s*(\.)\s*'), value=r'\1\1\1',
inplace=True)
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
def test_regex_replace_list_obj(self):
obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}
dfobj = DataFrame(obj)
# lists of regexes and values
# list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
to_replace_res = [r'\s*\.\s*', r'e|f|g']
values = [nan, 'crap']
res = dfobj.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': ['a', 'b', nan, nan], 'b': ['crap'] * 3 +
['h'], 'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
to_replace_res = [r'\s*(\.)\s*', r'(e|f|g)']
values = [r'\1\1', r'\1_crap']
res = dfobj.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e_crap',
'f_crap',
'g_crap', 'h'],
'c': ['h', 'e_crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
# or vN)]
to_replace_res = [r'\s*(\.)\s*', r'e']
values = [r'\1\1', r'crap']
res = dfobj.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
'h'],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
to_replace_res = [r'\s*(\.)\s*', r'e']
values = [r'\1\1', r'crap']
res = dfobj.replace(value=values, regex=to_replace_res)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
'h'],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
def test_regex_replace_list_obj_inplace(self):
# same as above with inplace=True
# lists of regexes and values
obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}
dfobj = DataFrame(obj)
# lists of regexes and values
# list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
to_replace_res = [r'\s*\.\s*', r'e|f|g']
values = [nan, 'crap']
res = dfobj.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': ['a', 'b', nan, nan], 'b': ['crap'] * 3 +
['h'], 'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
to_replace_res = [r'\s*(\.)\s*', r'(e|f|g)']
values = [r'\1\1', r'\1_crap']
res = dfobj.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e_crap',
'f_crap',
'g_crap', 'h'],
'c': ['h', 'e_crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
# or vN)]
to_replace_res = [r'\s*(\.)\s*', r'e']
values = [r'\1\1', r'crap']
res = dfobj.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
'h'],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
to_replace_res = [r'\s*(\.)\s*', r'e']
values = [r'\1\1', r'crap']
res = dfobj.copy()
res.replace(value=values, regex=to_replace_res, inplace=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
'h'],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
def test_regex_replace_list_mixed(self):
# mixed frame to make sure this doesn't break things
mix = {'a': lrange(4), 'b': list('ab..')}
dfmix = DataFrame(mix)
# lists of regexes and values
# list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
to_replace_res = [r'\s*\.\s*', r'a']
values = [nan, 'crap']
mix2 = {'a': lrange(4), 'b': list('ab..'), 'c': list('halo')}
dfmix2 = DataFrame(mix2)
res = dfmix2.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': mix2['a'], 'b': ['crap', 'b', nan, nan],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
to_replace_res = [r'\s*(\.)\s*', r'(a|b)']
values = [r'\1\1', r'\1_crap']
res = dfmix.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['a_crap', 'b_crap', '..',
'..']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
# or vN)]
to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
values = [r'\1\1', r'crap', r'\1_crap']
res = dfmix.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
assert_frame_equal(res, expec)
to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
values = [r'\1\1', r'crap', r'\1_crap']
res = dfmix.replace(regex=to_replace_res, value=values)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
assert_frame_equal(res, expec)
def test_regex_replace_list_mixed_inplace(self):
mix = {'a': lrange(4), 'b': list('ab..')}
dfmix = DataFrame(mix)
# the same inplace
# lists of regexes and values
# list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
to_replace_res = [r'\s*\.\s*', r'a']
values = [nan, 'crap']
res = dfmix.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b', nan, nan]})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
to_replace_res = [r'\s*(\.)\s*', r'(a|b)']
values = [r'\1\1', r'\1_crap']
res = dfmix.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['a_crap', 'b_crap', '..',
'..']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
# or vN)]
to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
values = [r'\1\1', r'crap', r'\1_crap']
res = dfmix.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
assert_frame_equal(res, expec)
to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
values = [r'\1\1', r'crap', r'\1_crap']
res = dfmix.copy()
res.replace(regex=to_replace_res, value=values, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
assert_frame_equal(res, expec)
def test_regex_replace_dict_mixed(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
dfmix = DataFrame(mix)
# dicts
# single dict {re1: v1}, search the whole frame
# need test for this...
# list of dicts {re1: v1, re2: v2, ..., re3: v3}, search the whole
# frame
res = dfmix.replace({'b': r'\s*\.\s*'}, {'b': nan}, regex=True)
res2 = dfmix.copy()
res2.replace({'b': r'\s*\.\s*'}, {'b': nan}, inplace=True, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
# list of dicts {re1: re11, re2: re12, ..., reN: re1N}, search the
# whole frame
res = dfmix.replace({'b': r'\s*(\.)\s*'}, {'b': r'\1ty'}, regex=True)
res2 = dfmix.copy()
res2.replace({'b': r'\s*(\.)\s*'}, {'b': r'\1ty'}, inplace=True,
regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', '.ty', '.ty'], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
res = dfmix.replace(regex={'b': r'\s*(\.)\s*'}, value={'b': r'\1ty'})
res2 = dfmix.copy()
res2.replace(regex={'b': r'\s*(\.)\s*'}, value={'b': r'\1ty'},
inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', '.ty', '.ty'], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
# scalar -> dict
# to_replace regex, {value: value}
expec = DataFrame({'a': mix['a'], 'b': [nan, 'b', '.', '.'], 'c':
mix['c']})
res = dfmix.replace('a', {'b': nan}, regex=True)
res2 = dfmix.copy()
res2.replace('a', {'b': nan}, regex=True, inplace=True)
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
res = dfmix.replace('a', {'b': nan}, regex=True)
res2 = dfmix.copy()
res2.replace(regex='a', value={'b': nan}, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': [nan, 'b', '.', '.'], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
def test_regex_replace_dict_nested(self):
# nested dicts will not work until this is implemented for Series
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
dfmix = DataFrame(mix)
res = dfmix.replace({'b': {r'\s*\.\s*': nan}}, regex=True)
res2 = dfmix.copy()
res4 = dfmix.copy()
res2.replace({'b': {r'\s*\.\s*': nan}}, inplace=True, regex=True)
res3 = dfmix.replace(regex={'b': {r'\s*\.\s*': nan}})
res4.replace(regex={'b': {r'\s*\.\s*': nan}}, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
assert_frame_equal(res4, expec)
def test_regex_replace_dict_nested_gh4115(self):
df = pd.DataFrame({'Type': ['Q', 'T', 'Q', 'Q', 'T'], 'tmp': 2})
expected = DataFrame({'Type': [0, 1, 0, 0, 1], 'tmp': 2})
result = df.replace({'Type': {'Q': 0, 'T': 1}})
assert_frame_equal(result, expected)
def test_regex_replace_list_to_scalar(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
expec = DataFrame({'a': mix['a'], 'b': np.array([nan] * 4),
'c': [nan, nan, nan, 'd']})
res = df.replace([r'\s*\.\s*', 'a|b'], nan, regex=True)
res2 = df.copy()
res3 = df.copy()
res2.replace([r'\s*\.\s*', 'a|b'], nan, regex=True, inplace=True)
res3.replace(regex=[r'\s*\.\s*', 'a|b'], value=nan, inplace=True)
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
def test_regex_replace_str_to_numeric(self):
# what happens when you try to replace a numeric value with a regex?
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
res = df.replace(r'\s*\.\s*', 0, regex=True)
res2 = df.copy()
res2.replace(r'\s*\.\s*', 0, inplace=True, regex=True)
res3 = df.copy()
res3.replace(regex=r'\s*\.\s*', value=0, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', 0, 0], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
def test_regex_replace_regex_list_to_numeric(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
res = df.replace([r'\s*\.\s*', 'b'], 0, regex=True)
res2 = df.copy()
res2.replace([r'\s*\.\s*', 'b'], 0, regex=True, inplace=True)
res3 = df.copy()
res3.replace(regex=[r'\s*\.\s*', 'b'], value=0, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 0, 0, 0], 'c': ['a', 0,
nan,
'd']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
def test_regex_replace_series_of_regexes(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
s1 = Series({'b': r'\s*\.\s*'})
s2 = Series({'b': nan})
res = df.replace(s1, s2, regex=True)
res2 = df.copy()
res2.replace(s1, s2, inplace=True, regex=True)
res3 = df.copy()
res3.replace(regex=s1, value=s2, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
def test_regex_replace_numeric_to_object_conversion(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
expec = DataFrame({'a': ['a', 1, 2, 3], 'b': mix['b'], 'c': mix['c']})
res = df.replace(0, 'a')
assert_frame_equal(res, expec)
assert res.a.dtype == np.object_
@pytest.mark.parametrize('metachar', ['[]', '()', r'\d', r'\w', r'\s'])
def test_replace_regex_metachar(self, metachar):
df = DataFrame({'a': [metachar, 'else']})
result = df.replace({'a': {metachar: 'paren'}})
expected = DataFrame({'a': ['paren', 'else']})
assert_frame_equal(result, expected)
def test_replace(self):
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
zero_filled = self.tsframe.replace(nan, -1e8)
assert_frame_equal(zero_filled, self.tsframe.fillna(-1e8))
assert_frame_equal(zero_filled.replace(-1e8, nan), self.tsframe)
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
self.tsframe['B'][:5] = -1e8
# empty
df = DataFrame(index=['a', 'b'])
assert_frame_equal(df, df.replace(5, 7))
# GH 11698
# test for mixed data types.
df = pd.DataFrame([('-', pd.to_datetime('20150101')),
('a', pd.to_datetime('20150102'))])
df1 = df.replace('-', np.nan)
expected_df = pd.DataFrame([(np.nan, pd.to_datetime('20150101')),
('a', pd.to_datetime('20150102'))])
assert_frame_equal(df1, expected_df)
def test_replace_list(self):
obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}
dfobj = DataFrame(obj)
# lists of regexes and values
# list of [v1, v2, ..., vN] -> [v1, v2, ..., vN]
to_replace_res = [r'.', r'e']
values = [nan, 'crap']
res = dfobj.replace(to_replace_res, values)
expec = DataFrame({'a': ['a', 'b', nan, nan],
'b': ['crap', 'f', 'g', 'h'], 'c': ['h', 'crap',
'l', 'o']})
assert_frame_equal(res, expec)
# list of [v1, v2, ..., vN] -> [v1, v2, .., vN]
to_replace_res = [r'.', r'f']
values = [r'..', r'crap']
res = dfobj.replace(to_replace_res, values)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e', 'crap', 'g',
'h'],
'c': ['h', 'e', 'l', 'o']})
assert_frame_equal(res, expec)
def test_replace_with_empty_list(self):
# GH 21977
s = pd.Series([['a', 'b'], [], np.nan, [1]])
df = pd.DataFrame({'col': s})
expected = df
result = df.replace([], np.nan)
assert_frame_equal(result, expected)
# GH 19266
with tm.assert_raises_regex(ValueError, "cannot assign mismatch"):
df.replace({np.nan: []})
with tm.assert_raises_regex(ValueError, "cannot assign mismatch"):
df.replace({np.nan: ['dummy', 'alt']})
def test_replace_series_dict(self):
# from GH 3064
df = DataFrame({'zero': {'a': 0.0, 'b': 1}, 'one': {'a': 2.0, 'b': 0}})
result = df.replace(0, {'zero': 0.5, 'one': 1.0})
expected = DataFrame(
{'zero': {'a': 0.5, 'b': 1}, 'one': {'a': 2.0, 'b': 1.0}})
assert_frame_equal(result, expected)
result = df.replace(0, df.mean())
assert_frame_equal(result, expected)
# series to series/dict
df = DataFrame({'zero': {'a': 0.0, 'b': 1}, 'one': {'a': 2.0, 'b': 0}})
s = Series({'zero': 0.0, 'one': 2.0})
result = df.replace(s, {'zero': 0.5, 'one': 1.0})
expected = DataFrame(
{'zero': {'a': 0.5, 'b': 1}, 'one': {'a': 1.0, 'b': 0.0}})
assert_frame_equal(result, expected)
result = df.replace(s, df.mean())
assert_frame_equal(result, expected)
def test_replace_convert(self):
# gh 3907
df = DataFrame([['foo', 'bar', 'bah'], ['bar', 'foo', 'bah']])
m = {'foo': 1, 'bar': 2, 'bah': 3}
rep = df.replace(m)
expec = Series([np.int64] * 3)
res = rep.dtypes
assert_series_equal(expec, res)
def test_replace_mixed(self):
mf = self.mixed_frame
mf.iloc[5:20, mf.columns.get_loc('foo')] = nan
mf.iloc[-10:, mf.columns.get_loc('A')] = nan
result = self.mixed_frame.replace(np.nan, -18)
expected = self.mixed_frame.fillna(value=-18)
assert_frame_equal(result, expected)
assert_frame_equal(result.replace(-18, nan), self.mixed_frame)
result = self.mixed_frame.replace(np.nan, -1e8)
expected = self.mixed_frame.fillna(value=-1e8)
assert_frame_equal(result, expected)
assert_frame_equal(result.replace(-1e8, nan), self.mixed_frame)
# int block upcasting
df = DataFrame({'A': Series([1.0, 2.0], dtype='float64'),
'B': Series([0, 1], dtype='int64')})
expected = DataFrame({'A': Series([1.0, 2.0], dtype='float64'),
'B': Series([0.5, 1], dtype='float64')})
result = df.replace(0, 0.5)
assert_frame_equal(result, expected)
df.replace(0, 0.5, inplace=True)
assert_frame_equal(df, expected)
# int block splitting
df = DataFrame({'A': Series([1.0, 2.0], dtype='float64'),
'B': Series([0, 1], dtype='int64'),
'C': Series([1, 2], dtype='int64')})
expected = DataFrame({'A': Series([1.0, 2.0], dtype='float64'),
'B': Series([0.5, 1], dtype='float64'),
'C': Series([1, 2], dtype='int64')})
result = df.replace(0, 0.5)
assert_frame_equal(result, expected)
# to object block upcasting
df = DataFrame({'A': Series([1.0, 2.0], dtype='float64'),
'B': Series([0, 1], dtype='int64')})
expected = DataFrame({'A': Series([1, 'foo'], dtype='object'),
'B': Series([0, 1], dtype='int64')})
result = df.replace(2, 'foo')
assert_frame_equal(result, expected)
expected = DataFrame({'A': Series(['foo', 'bar'], dtype='object'),
'B': Series([0, 'foo'], dtype='object')})
result = df.replace([1, 2], ['foo', 'bar'])
assert_frame_equal(result, expected)
# test case from
df = DataFrame({'A': Series([3, 0], dtype='int64'),
'B': Series([0, 3], dtype='int64')})
result = df.replace(3, df.mean().to_dict())
expected = df.copy().astype('float64')
m = df.mean()
expected.iloc[0, 0] = m[0]
expected.iloc[1, 1] = m[1]
assert_frame_equal(result, expected)
def test_replace_simple_nested_dict(self):
df = DataFrame({'col': range(1, 5)})
expected = DataFrame({'col': ['a', 2, 3, 'b']})
result = df.replace({'col': {1: 'a', 4: 'b'}})
assert_frame_equal(expected, result)
# in this case, should be the same as the not nested version
result = df.replace({1: 'a', 4: 'b'})
assert_frame_equal(expected, result)
def test_replace_simple_nested_dict_with_nonexistent_value(self):
df = DataFrame({'col': range(1, 5)})
expected = DataFrame({'col': ['a', 2, 3, 'b']})
result = df.replace({-1: '-', 1: 'a', 4: 'b'})
assert_frame_equal(expected, result)
result = df.replace({'col': {-1: '-', 1: 'a', 4: 'b'}})
assert_frame_equal(expected, result)
def test_replace_value_is_none(self):
orig_value = self.tsframe.iloc[0, 0]
orig2 = self.tsframe.iloc[1, 0]
self.tsframe.iloc[0, 0] = nan
self.tsframe.iloc[1, 0] = 1
result = self.tsframe.replace(to_replace={nan: 0})
expected = self.tsframe.T.replace(to_replace={nan: 0}).T
assert_frame_equal(result, expected)
result = self.tsframe.replace(to_replace={nan: 0, 1: -1e8})
tsframe = self.tsframe.copy()
tsframe.iloc[0, 0] = 0
tsframe.iloc[1, 0] = -1e8
expected = tsframe
assert_frame_equal(expected, result)
self.tsframe.iloc[0, 0] = orig_value
self.tsframe.iloc[1, 0] = orig2
def test_replace_for_new_dtypes(self):
# dtypes
tsframe = self.tsframe.copy().astype(np.float32)
tsframe['A'][:5] = nan
tsframe['A'][-5:] = nan
zero_filled = tsframe.replace(nan, -1e8)
assert_frame_equal(zero_filled, tsframe.fillna(-1e8))
assert_frame_equal(zero_filled.replace(-1e8, nan), tsframe)
tsframe['A'][:5] = nan
tsframe['A'][-5:] = nan
tsframe['B'][:5] = -1e8
b = tsframe['B']
b[b == -1e8] = nan
tsframe['B'] = b
result = tsframe.fillna(method='bfill')
assert_frame_equal(result, tsframe.fillna(method='bfill'))
@pytest.mark.parametrize('frame, to_replace, value, expected', [
(DataFrame({'ints': [1, 2, 3]}), 1, 0,
DataFrame({'ints': [0, 2, 3]})),
(DataFrame({'ints': [1, 2, 3]}, dtype=np.int32), 1, 0,
DataFrame({'ints': [0, 2, 3]}, dtype=np.int32)),
(DataFrame({'ints': [1, 2, 3]}, dtype=np.int16), 1, 0,
DataFrame({'ints': [0, 2, 3]}, dtype=np.int16)),
(DataFrame({'bools': [True, False, True]}), False, True,
DataFrame({'bools': [True, True, True]})),
(DataFrame({'complex': [1j, 2j, 3j]}), 1j, 0,
DataFrame({'complex': [0j, 2j, 3j]})),
(DataFrame({'datetime64': Index([datetime(2018, 5, 28),
datetime(2018, 7, 28),
datetime(2018, 5, 28)])}),
datetime(2018, 5, 28), datetime(2018, 7, 28),
DataFrame({'datetime64': Index([datetime(2018, 7, 28)] * 3)})),
# GH 20380
(DataFrame({'dt': [datetime(3017, 12, 20)], 'str': ['foo']}),
'foo', 'bar',
DataFrame({'dt': [datetime(3017, 12, 20)], 'str': ['bar']})),
(DataFrame({'A': date_range('20130101', periods=3, tz='US/Eastern'),
'B': [0, np.nan, 2]}),
Timestamp('20130102', tz='US/Eastern'),
Timestamp('20130104', tz='US/Eastern'),
DataFrame({'A': [Timestamp('20130101', tz='US/Eastern'),
Timestamp('20130104', tz='US/Eastern'),
Timestamp('20130103', tz='US/Eastern')],
'B': [0, np.nan, 2]}))
])
def test_replace_dtypes(self, frame, to_replace, value, expected):
result = getattr(frame, 'replace')(to_replace, value)
assert_frame_equal(result, expected)
def test_replace_input_formats_listlike(self):
# both dicts
to_rep = {'A': np.nan, 'B': 0, 'C': ''}
values = {'A': 0, 'B': -1, 'C': 'missing'}
df = DataFrame({'A': [np.nan, 0, np.inf], 'B': [0, 2, 5],
'C': ['', 'asdf', 'fd']})
filled = df.replace(to_rep, values)
expected = {}
for k, v in compat.iteritems(df):
expected[k] = v.replace(to_rep[k], values[k])
assert_frame_equal(filled, DataFrame(expected))
result = df.replace([0, 2, 5], [5, 2, 0])
expected = DataFrame({'A': [np.nan, 5, np.inf], 'B': [5, 2, 0],
'C': ['', 'asdf', 'fd']})
assert_frame_equal(result, expected)
# scalar to dict
values = {'A': 0, 'B': -1, 'C': 'missing'}
df = DataFrame({'A': [np.nan, 0, np.nan], 'B': [0, 2, 5],
'C': ['', 'asdf', 'fd']})
filled = df.replace(np.nan, values)
expected = {}
for k, v in compat.iteritems(df):
expected[k] = v.replace(np.nan, values[k])
assert_frame_equal(filled, DataFrame(expected))
# list to list
to_rep = [np.nan, 0, '']
values = [-2, -1, 'missing']
result = df.replace(to_rep, values)
expected = df.copy()
for i in range(len(to_rep)):
expected.replace(to_rep[i], values[i], inplace=True)
assert_frame_equal(result, expected)
pytest.raises(ValueError, df.replace, to_rep, values[1:])
def test_replace_input_formats_scalar(self):
df = DataFrame({'A': [np.nan, 0, np.inf], 'B': [0, 2, 5],
'C': ['', 'asdf', 'fd']})
# dict to scalar
to_rep = {'A': np.nan, 'B': 0, 'C': ''}
filled = df.replace(to_rep, 0)
expected = {}
for k, v in compat.iteritems(df):
expected[k] = v.replace(to_rep[k], 0)
assert_frame_equal(filled, DataFrame(expected))
pytest.raises(TypeError, df.replace, to_rep, [np.nan, 0, ''])
# list to scalar
to_rep = [np.nan, 0, '']
result = df.replace(to_rep, -1)
expected = df.copy()
for i in range(len(to_rep)):
expected.replace(to_rep[i], -1, inplace=True)
assert_frame_equal(result, expected)
def test_replace_limit(self):
pass
def test_replace_dict_no_regex(self):
answer = Series({0: 'Strongly Agree', 1: 'Agree', 2: 'Neutral', 3:
'Disagree', 4: 'Strongly Disagree'})
weights = {'Agree': 4, 'Disagree': 2, 'Neutral': 3, 'Strongly Agree':
5, 'Strongly Disagree': 1}
expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1})
result = answer.replace(weights)
assert_series_equal(result, expected)
def test_replace_series_no_regex(self):
answer = Series({0: 'Strongly Agree', 1: 'Agree', 2: 'Neutral', 3:
'Disagree', 4: 'Strongly Disagree'})
weights = Series({'Agree': 4, 'Disagree': 2, 'Neutral': 3,
'Strongly Agree': 5, 'Strongly Disagree': 1})
expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1})
result = answer.replace(weights)
assert_series_equal(result, expected)
def test_replace_dict_tuple_list_ordering_remains_the_same(self):
df = DataFrame(dict(A=[nan, 1]))
res1 = df.replace(to_replace={nan: 0, 1: -1e8})
res2 = df.replace(to_replace=(1, nan), value=[-1e8, 0])
res3 = df.replace(to_replace=[1, nan], value=[-1e8, 0])
expected = DataFrame({'A': [0, -1e8]})
assert_frame_equal(res1, res2)
assert_frame_equal(res2, res3)
assert_frame_equal(res3, expected)
def test_replace_doesnt_replace_without_regex(self):
raw = """fol T_opp T_Dir T_Enh
0 1 0 0 vo
1 2 vr 0 0
2 2 0 0 0
3 3 0 bt 0"""
df = pd.read_csv(StringIO(raw), sep=r'\s+')
res = df.replace({r'\D': 1})
assert_frame_equal(df, res)
def test_replace_bool_with_string(self):
df = DataFrame({'a': [True, False], 'b': list('ab')})
result = df.replace(True, 'a')
expected = DataFrame({'a': ['a', False], 'b': df.b})
assert_frame_equal(result, expected)
def test_replace_pure_bool_with_string_no_op(self):
df = DataFrame(np.random.rand(2, 2) > 0.5)
result = df.replace('asdf', 'fdsa')
assert_frame_equal(df, result)
def test_replace_bool_with_bool(self):
df = DataFrame(np.random.rand(2, 2) > 0.5)
result = df.replace(False, True)
expected = DataFrame(np.ones((2, 2), dtype=bool))
assert_frame_equal(result, expected)
def test_replace_with_dict_with_bool_keys(self):
df = DataFrame({0: [True, False], 1: [False, True]})
with tm.assert_raises_regex(TypeError, 'Cannot compare types .+'):
df.replace({'asdf': 'asdb', True: 'yes'})
def test_replace_truthy(self):
df = DataFrame({'a': [True, True]})
r = df.replace([np.inf, -np.inf], np.nan)
e = df
assert_frame_equal(r, e)
def test_replace_int_to_int_chain(self):
df = DataFrame({'a': lrange(1, 5)})
with tm.assert_raises_regex(ValueError,
"Replacement not allowed .+"):
df.replace({'a': dict(zip(range(1, 5), range(2, 6)))})
def test_replace_str_to_str_chain(self):
a = np.arange(1, 5)
astr = a.astype(str)
bstr = np.arange(2, 6).astype(str)
df = DataFrame({'a': astr})
with tm.assert_raises_regex(ValueError,
"Replacement not allowed .+"):
df.replace({'a': dict(zip(astr, bstr))})
def test_replace_swapping_bug(self):
df = pd.DataFrame({'a': [True, False, True]})
res = df.replace({'a': {True: 'Y', False: 'N'}})
expect = pd.DataFrame({'a': ['Y', 'N', 'Y']})
assert_frame_equal(res, expect)
df = pd.DataFrame({'a': [0, 1, 0]})
res = df.replace({'a': {0: 'Y', 1: 'N'}})
expect = pd.DataFrame({'a': ['Y', 'N', 'Y']})
assert_frame_equal(res, expect)
def test_replace_period(self):
d = {
'fname': {
'out_augmented_AUG_2011.json':
pd.Period(year=2011, month=8, freq='M'),
'out_augmented_JAN_2011.json':
pd.Period(year=2011, month=1, freq='M'),
'out_augmented_MAY_2012.json':
pd.Period(year=2012, month=5, freq='M'),
'out_augmented_SUBSIDY_WEEK.json':
pd.Period(year=2011, month=4, freq='M'),
'out_augmented_AUG_2012.json':
pd.Period(year=2012, month=8, freq='M'),
'out_augmented_MAY_2011.json':
pd.Period(year=2011, month=5, freq='M'),
'out_augmented_SEP_2013.json':
pd.Period(year=2013, month=9, freq='M')}}
df = pd.DataFrame(['out_augmented_AUG_2012.json',
'out_augmented_SEP_2013.json',
'out_augmented_SUBSIDY_WEEK.json',
'out_augmented_MAY_2012.json',
'out_augmented_MAY_2011.json',
'out_augmented_AUG_2011.json',
'out_augmented_JAN_2011.json'], columns=['fname'])
assert set(df.fname.values) == set(d['fname'].keys())
# We don't support converting object -> specialized EA in
# replace yet.
expected = DataFrame({'fname': [d['fname'][k]
for k in df.fname.values]},
dtype=object)
result = df.replace(d)
assert_frame_equal(result, expected)
def test_replace_datetime(self):
d = {'fname':
{'out_augmented_AUG_2011.json': pd.Timestamp('2011-08'),
'out_augmented_JAN_2011.json': pd.Timestamp('2011-01'),
'out_augmented_MAY_2012.json': pd.Timestamp('2012-05'),
'out_augmented_SUBSIDY_WEEK.json': pd.Timestamp('2011-04'),
'out_augmented_AUG_2012.json': pd.Timestamp('2012-08'),
'out_augmented_MAY_2011.json': pd.Timestamp('2011-05'),
'out_augmented_SEP_2013.json': pd.Timestamp('2013-09')}}
df = pd.DataFrame(['out_augmented_AUG_2012.json',
'out_augmented_SEP_2013.json',
'out_augmented_SUBSIDY_WEEK.json',
'out_augmented_MAY_2012.json',
'out_augmented_MAY_2011.json',
'out_augmented_AUG_2011.json',
'out_augmented_JAN_2011.json'], columns=['fname'])
assert set(df.fname.values) == set(d['fname'].keys())
expected = DataFrame({'fname': [d['fname'][k]
for k in df.fname.values]})
result = df.replace(d)
assert_frame_equal(result, expected)
def test_replace_datetimetz(self):
# GH 11326
# behaving poorly when presented with a datetime64[ns, tz]
df = DataFrame({'A': date_range('20130101', periods=3,
tz='US/Eastern'),
'B': [0, np.nan, 2]})
result = df.replace(np.nan, 1)
expected = DataFrame({'A': date_range('20130101', periods=3,
tz='US/Eastern'),
'B': Series([0, 1, 2], dtype='float64')})
assert_frame_equal(result, expected)
result = df.fillna(1)
assert_frame_equal(result, expected)
result = df.replace(0, np.nan)
expected = DataFrame({'A': date_range('20130101', periods=3,
tz='US/Eastern'),
'B': [np.nan, np.nan, 2]})
assert_frame_equal(result, expected)
result = df.replace(Timestamp('20130102', tz='US/Eastern'),
Timestamp('20130104', tz='US/Eastern'))
expected = DataFrame({'A': [Timestamp('20130101', tz='US/Eastern'),
Timestamp('20130104', tz='US/Eastern'),
Timestamp('20130103', tz='US/Eastern')],
'B': [0, np.nan, 2]})
assert_frame_equal(result, expected)
result = df.copy()
result.iloc[1, 0] = np.nan
result = result.replace(
{'A': pd.NaT}, Timestamp('20130104', tz='US/Eastern'))
assert_frame_equal(result, expected)
# coerce to object
result = df.copy()
result.iloc[1, 0] = np.nan
result = result.replace(
{'A': pd.NaT}, Timestamp('20130104', tz='US/Pacific'))
expected = DataFrame({'A': [Timestamp('20130101', tz='US/Eastern'),
Timestamp('20130104', tz='US/Pacific'),
Timestamp('20130103', tz='US/Eastern')],
'B': [0, np.nan, 2]})
assert_frame_equal(result, expected)
result = df.copy()
result.iloc[1, 0] = np.nan
result = result.replace({'A': np.nan}, Timestamp('20130104'))
expected = DataFrame({'A': [Timestamp('20130101', tz='US/Eastern'),
Timestamp('20130104'),
Timestamp('20130103', tz='US/Eastern')],
'B': [0, np.nan, 2]})
assert_frame_equal(result, expected)
def test_replace_with_empty_dictlike(self):
# GH 15289
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
assert_frame_equal(df, df.replace({}))
assert_frame_equal(df, df.replace(Series([])))
assert_frame_equal(df, df.replace({'b': {}}))
assert_frame_equal(df, df.replace(Series({'b': {}})))
@pytest.mark.parametrize("to_replace, method, expected", [
(0, 'bfill', {'A': [1, 1, 2],
'B': [5, nan, 7],
'C': ['a', 'b', 'c']}),
(nan, 'bfill', {'A': [0, 1, 2],
'B': [5.0, 7.0, 7.0],
'C': ['a', 'b', 'c']}),
('d', 'ffill', {'A': [0, 1, 2],
'B': [5, nan, 7],
'C': ['a', 'b', 'c']}),
([0, 2], 'bfill', {'A': [1, 1, 2],
'B': [5, nan, 7],
'C': ['a', 'b', 'c']}),
([1, 2], 'pad', {'A': [0, 0, 0],
'B': [5, nan, 7],
'C': ['a', 'b', 'c']}),
((1, 2), 'bfill', {'A': [0, 2, 2],
'B': [5, nan, 7],
'C': ['a', 'b', 'c']}),
(['b', 'c'], 'ffill', {'A': [0, 1, 2],
'B': [5, nan, 7],
'C': ['a', 'a', 'a']}),
])
def test_replace_method(self, to_replace, method, expected):
# GH 19632
df = DataFrame({'A': [0, 1, 2],
'B': [5, nan, 7],
'C': ['a', 'b', 'c']})
result = df.replace(to_replace=to_replace, value=None, method=method)
expected = DataFrame(expected)
assert_frame_equal(result, expected)
| bsd-3-clause |
facepalm/kivy-colony-game | planetview.py | 1 | 6153 | import kivy
kivy.require('1.9.2')
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.anchorlayout import AnchorLayout
from kivy.uix.stacklayout import StackLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.relativelayout import RelativeLayout
from kivy.graphics import Line, Color, Rectangle
from kivy.graphics.texture import Texture
from kivy.uix.scrollview import ScrollView
from kivy.uix.widget import Widget
from kivy.uix.screenmanager import Screen
import math
import numpy as np
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from kivy.core.window import Window
from kivy.graphics.context_instructions import Scale
from kivy.lang import Builder
import globalvars
import matplotlib.pyplot as plt
import planetresources
import siteview
import util
from resource_views import RawResourceSquare
kv = '''
<PlanetPanel>:
size_hint: 0.9, 0.9
#size: app.root_window.width*0.8,app.root_window.height*0.8
pos_hint: {'center_x': .5, 'center_y': .5}
id: panel
StackLayout:
id: panel2
size_hint: 1, 1
canvas:
Color:
rgb: (0.05, 0.05, 0.05)
Rectangle:
size: self.size
pos: self.pos
Color:
rgb: (0.5, 0.5, 0.75)
BorderImage:
border: 10,10,10,10
source: 'images/kivy/button_white.png'
pos: self.pos
size: self.size
FloatLayout:
size_hint: 0.25, 0.25
canvas:
Color:
rgb: (0.5, 0.5, 0.5)
BorderImage:
border: 5,5,5,5
source: 'images/kivy/button_white.png'
pos: self.pos
size: self.size
Image:
size_hint: 1, 1
pos_hint: {'center_x': .5, 'center_y': .5}
source: panel.planet.image
Button:
size_hint: .2, .2
pos_hint: {'center_x': .88, 'center_y': .12}
text: "View"
on_press: panel.switch_system()
BoxLayout:
orientation: 'vertical'
size_hint: 0.5, 0.25
padding: 10, 10, 10, 10
Label:
text: panel.planet.type + ' "' + panel.planet.name + '"'
font_size: 24
Label:
text: "Orbit: %.2f AU Launch dV: %.2f km/s" % (panel.planet.orbit, panel.planet.launch_dv()/1000)
font_size: 16
Label:
text: "{0:.0f} % explored".format(100*panel.planet.explored)
font_size: 16
id: exploration_string
BoxLayout:
size_hint: 0.25, 0.25
id: resimg
#pos_hint: {'center_x': .5, 'center_y': .5}
ScrollView:
do_scroll_x: False
pos_hint: {'center_x': .5, 'center_y': .5}
size_hint: 1, 0.75
BoxLayout:
pos_hint: {'center_x': .5, 'center_y': .5}
orientation: 'vertical'
id: panel3
size_hint_y: None
height: 100*len(panel.planet.sites)
'''
Builder.load_string(kv)
class PlanetPanel(Screen):
def __init__(self, **kwargs):
self.planet = kwargs['planet']
self.name = util.short_id(self.planet.id)+'-planet'
nr = 1.0*planetresources.raw_num
pr = self.planet.resources.raw.squeeze()
x = np.arange(0,2*math.pi,(2/nr)*math.pi)
x = np.append(x,0)
pr = np.append(pr,pr[0])
print x, pr
'''plt.polar(x,pr,'b')
plt.fill_between(x,pr,color='#5c7de8',alpha=0.75)
plt.thetagrids(np.arange(0,360,360/nr),[])#planetresources.raw_names)
plt.ylim(0,1)
plt.yticks([2.0])
plt.savefig('temp.png',bbox_inches='tight',dpi=300)
plt.clf()'''
#im = plt.imread('temp.png')
#width=250
#self.imbuf = im[300-width:300+width,410-width:410+width,:]
#self.imtex = Texture.create(size=(500,500), colorfmt='rgba')
#self.imtex.blit_buffer(self.imbuf.tostring(), colorfmt='rgba', bufferfmt='ubyte')
#plt.imshow(im[300-width:300+width,410-width:410+width,:])
#plt.savefig('temp.png')
#plt.show()
super(PlanetPanel, self).__init__(**kwargs)
pr = RawResourceSquare(planetresources = self.planet.resources)
self.ids['resimg'].add_widget(pr)
for s in self.planet.sites:
b = BoxLayout(size_hint_y =None,height=100)
b.add_widget(s.small_view())
self.ids['panel3'].add_widget(b)
#Window.bind(on_keyboard=self.onBackBtn)
def switch_system(self):
print 'switching to',self.planet.name
globalvars.root.onNextScreen(util.short_id(self.planet.id)+"-system" )
def on_pre_enter(self):
for s in self.ids['panel3'].children: s.children[0].refresh_ships()
self.ids['exploration_string'].text = "{0:.0f} % explored".format(100*self.planet.explored)
'''def on_touch_down(self, touch):
touch.push()
touch.apply_transform_2d(self.to_widget)
touched = self.collide_point(*touch.pos)
touch.pop()
if not touched:
globalvars.root.remove_widget(self)
return True
else:
return super(PlanetPanel, self).on_touch_down(touch)
#return True
def onBackBtn(self, window, key, *args):
""" To be called whenever user presses Back/Esc Key """
# If user presses Back/Esc Key
if key == 27 or key == 1001:
if self in globalvars.root.children:
globalvars.root.remove_widget(self)
return True
return False '''
| gpl-3.0 |
ncos/hometasks | CompMath/task_1/task_1_manual.py | 1 | 1777 | #!/usr/bin/python
# y'' - 2y' - y = -2xe^x, 0 <= x <= 1, y'(0) - y(0) = 1, y(1) = e(2cosh(sqrt(2)) + 1)
import numpy as np
import matplotlib.pyplot as plt
from math import *
## Tri Diagonal Matrix Algorithm(a.k.a Thomas algorithm) solver
def TDMA(a, b, c, d):
'''
TDMA solver, a b c d can be NumPy array type or Python list type.
refer to http://en.wikipedia.org/wiki/Tridiagonal_matrix_algorithm
'''
nf = len(a) # number of equations
ac, bc, cc, dc = map(np.array, (a, b, c, d)) # copy the array
for it in xrange(1, nf):
mc = ac[it]/bc[it-1]
bc[it] = bc[it] - mc*cc[it-1]
dc[it] = dc[it] - mc*dc[it-1]
xc = ac
xc[-1] = dc[-1]/bc[-1]
for il in xrange(nf-2, -1, -1):
xc[il] = (dc[il]-cc[il]*xc[il+1])/bc[il]
del bc, cc, dc # delete variables from memory
return xc
'''
# Example:
M = [[1,2,0,0],
[5,3,2,0],
[0,9,3,5],
[0,0,3,5]]
a = [0.0, 5.0, 9.0, 3.0]
c = [2.0, 2.0, 5.0, 0.0]
b = [1.0, 3.0, 3.0, 5.0]
f = [1.0, 2.0, 3.0, 4.0]
x = TDMA(a, b, c, f)
print x
'''
N = 50
a = 0.0
b = 1.0
h = (b-a)/float(N)
ya = 1.0
X = [a+i*h for i in xrange(N+1)]
def _f(x):
return
print ("N = "+str(N)+"; solving on ("+str(a)+", "+str(b)+"); h = "+str(h))
print X
def q(x):
return 1.0
def p(x):
return x
def f(x):
return 0.0
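# The coefficient lists below follow the standard second-order central-difference
# scheme for y'' + p(x) y' - q(x) y = f(x) (a sketch read off the expressions that
# follow, not an authoritative statement of the assignment):
#   y''(x_i) ~ (y_{i-1} - 2*y_i + y_{i+1}) / h^2,   y'(x_i) ~ (y_{i+1} - y_{i-1}) / (2*h)
# Multiplying by h^2 gives, at each interior node i,
#   (1 - p_i*h/2)*y_{i-1} + (-2 - h^2*q_i)*y_i + (1 + p_i*h/2)*y_{i+1} = h^2*f_i
# which is exactly the tridiagonal system handed to TDMA below.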
b = [-2.0-h**2*q(X[i]) for i in range(1, N, 1)]
c = [1.0+p(X[i])*h/2.0 for i in range(1, N-1, 1)]
a = [0.0] + [1.0-p(X[i])*h/2.0 for i in range(2, N, 1)]
f = [h**2*f(X[i]) for i in range(1, N, 1)]
f[0] = f[0] - (1.0-p(X[1])*h/2.0)*ya
a += [-1.0/h]
b += [2.0+1.0/h]
c += [1.0+p(X[N-1])*h/2.0] + [0.0]
f += [0.0]
print b
print a
print c
print f
Y = [ya] + TDMA(a, b, c, f).tolist()
print "y =", Y
plt.plot(X, Y, 'ro')
plt.show()
| mit |
barbagroup/PetIBM | examples/navierstokes/liddrivencavity2dRe3200/scripts/plotCenterlineVelocities.py | 2 | 3782 | """
Plots the velocities along the centerlines of the 2D cavity at Reynolds number
3200 and compares with the numerical data reported in Ghia et al. (1982).
_References:_
* Ghia, U. K. N. G., Ghia, K. N., & Shin, C. T. (1982).
High-Re solutions for incompressible flow using the Navier-Stokes equations
and a multigrid method.
Journal of computational physics, 48(3), 387-411.
"""
import os
import pathlib
import numpy
import h5py
from matplotlib import pyplot
# User's parameters
Re = 3200.0 # Reynolds number
time_step = 25000 # Time step at which to read the solution
# End of user's parameters
simu_dir = pathlib.Path(__file__).absolute().parents[1]
data_dir = simu_dir / 'output'
root_dir = os.environ.get('PETIBM_EXAMPLES')
if not root_dir:
root_dir = simu_dir.parents[1]
def get_gridline_velocity(x_target, u, x, axis=0):
i = numpy.where(x < x_target)[0][-1]
x_a, x_b = x[i], x[i + 1]
if axis == 0:
u_a, u_b = u[:, i], u[:, i + 1]
elif axis == 1:
u_a, u_b = u[i], u[i + 1]
return (u_a * (x_b - x_target) + u_b * (x_target - x_a)) / (x_b - x_a)
def read_data_ghia_et_al_1982(filepath, Re):
with open(filepath, 'r') as infile:
data = numpy.loadtxt(infile, dtype=numpy.float64, unpack=True)
re2col = {100.0: (1, 7), 1000.0: (2, 8), 3200.0: (3, 9), 5000.0: (4, 10),
10000.0: (5, 11)}
return {'vertical': {'y': data[0], 'u': data[re2col[Re][0]]},
'horizontal': {'x': data[6], 'v': data[re2col[Re][1]]}}
def read_field_hdf5(name, fieldpath, gridpath):
field = {}
f = h5py.File(gridpath, 'r')
field['x'], field['y'] = f[name]['x'][:], f[name]['y'][:]
f = h5py.File(fieldpath, 'r')
field['values'] = f[name][:]
return field
# Reads data from Ghia et al. (1982).
filepath = root_dir / 'data' / 'ghia_et_al_1982_lid_driven_cavity.dat'
ghia = read_data_ghia_et_al_1982(filepath, Re)
# Reads gridlines and velocity fields.
gridpath = data_dir / 'grid.h5'
filepath = data_dir / '{:0>7}.h5'.format(time_step)
u = read_field_hdf5('u', filepath, gridpath)
v = read_field_hdf5('v', filepath, gridpath)
# Computes x-velocity along vertical gridline at mid-cavity.
x_target = 0.5
u['vertical'] = get_gridline_velocity(x_target, u['values'], u['x'], axis=0)
# Computes y-velocity along horizontal gridline at mid-cavity.
y_target = 0.5
v['horizontal'] = get_gridline_velocity(y_target, v['values'], v['y'], axis=1)
pyplot.rc('font', family='serif', size=16)
# Plots the centerline velocities.
simu_kwargs = {'label': 'PetIBM',
'color': '#336699', 'linestyle': '-', 'linewidth': 3,
'zorder': 10}
ghia_kwargs = {'label': 'Ghia et al. (1982)',
'color': '#993333', 'linewidth': 0,
'markeredgewidth': 2, 'markeredgecolor': '#993333',
'markerfacecolor': 'none',
'marker': 'o', 'markersize': 8,
'zorder': 10}
fig, ax = pyplot.subplots(nrows=2, figsize=(8.0, 8.0))
fig.suptitle('Re = {}'.format(int(Re)))
ax[0].grid()
ax[0].set_xlabel('y')
ax[0].set_ylabel('u (x={})'.format(x_target))
ax[0].plot(u['y'], u['vertical'], **simu_kwargs)
ax[0].plot(ghia['vertical']['y'], ghia['vertical']['u'], **ghia_kwargs)
ax[0].axis((0.0, 1.0, -0.75, 1.25))
ax[0].legend(loc='upper left')
ax[1].grid()
ax[1].set_xlabel('x')
ax[1].set_ylabel('v (y={})'.format(y_target))
ax[1].plot(v['x'], v['horizontal'], **simu_kwargs)
ax[1].plot(ghia['horizontal']['x'], ghia['horizontal']['v'], **ghia_kwargs)
ax[1].axis((0.0, 1.0, -0.75, 1.25))
ax[1].legend(loc='upper left')
pyplot.show()
# Save figure.
fig_dir = simu_dir / 'figures'
fig_dir.mkdir(parents=True, exist_ok=True)
filepath = fig_dir / 'centerlineVelocities{:0>7}.png'.format(time_step)
fig.savefig(str(filepath), dpi=300)
| bsd-3-clause |
clarka34/exploringShipLogbooks | exploringShipLogbooks/tests/test_basic_utils.py | 2 | 5023 | """ Unit tests for basic_utils.py """
import exploringShipLogbooks
import pep8
import unittest
import exploringShipLogbooks.basic_utils as bu
import numpy as np
import os.path as op
import pandas as pd
class TestBasicUtils(unittest.TestCase):
def setUp(self):
# set up the values to be used in the unit tests
d = {'ID': [0, 1, 2, 3], 'WindDirection': ['North', 'easT', 'sOuTh',
'west'], 'ProbWindDD': [4, 5, 6, 7]}
self.df = pd.DataFrame(d)
self.desired_columns = ['ID']
def testDataFolderExists(self):
# test to make sure the zip file containing the data exists
data_path = op.join(exploringShipLogbooks.__path__[0], 'data')
path_exists = op.exists(data_path + '/climate-data-from-ocean-ships.zip')
self.assertTrue(path_exists)
def testFilenameCorrect(self):
# test to make sure the function works if a valid filename is entered
test_data = bu.extract_logbook_data('Lookup_UK_WindDirection.csv')
self.assertTrue(test_data.columns.values[0], 'ID')
self.assertTrue(test_data.columns.values[0], 'WindDirection')
self.assertTrue(test_data.columns.values[0], 'ProbWindDD')
def testWrongFilename(self):
# test to make sure a KeyError is raised if an invalid filename is
# entered
self.assertRaises(KeyError, bu.extract_logbook_data('fake_filename'))
def testDesiredColumns(self):
# test to make sure isolate_columns returns a dataframe with the
# desired columns
df_copy = bu.isolate_columns(self.df.copy(), self.desired_columns)
self.assertTrue(df_copy.columns.values[0], 'ID')
def testUndesiredColumns(self):
# test to make sure the names of the undesired columns are returned
undesired_columns = bu.remove_undesired_columns(self.df.copy(),
self.desired_columns)
self.assertTrue(undesired_columns[0], 'ProbWindDD')
self.assertTrue(undesired_columns[1], 'WindDirection')
def testCleanData(self):
# test to make sure the string values are converted to lower case
df_copy = bu.clean_data(self.df.copy())
self.assertTrue(df_copy['WindDirection'][0], 'north')
self.assertTrue(df_copy['WindDirection'][1], 'east')
self.assertTrue(df_copy['WindDirection'][2], 'south')
self.assertTrue(df_copy['WindDirection'][3], 'west')
def testLabelEncoder(self):
# test to make sure the LabelEncoder converts categorical data to
# numerical data
encoded_data = bu.label_encoder(self.df['WindDirection'])
self.assertTrue(np.array_equal(encoded_data, np.array([0, 1, 2, 3])))
def testLabelEncoderKey(self):
        # test to check that the LabelEncoder returns the correct key
encoded_data_key = bu.label_encoder_key(self.df['WindDirection'])
self.assertTrue(encoded_data_key[0], 'North')
self.assertTrue(encoded_data_key[1], 'easT')
self.assertTrue(encoded_data_key[2], 'sOuTh')
self.assertTrue(encoded_data_key[3], 'west')
def testOneHotEncoder(self):
# test to make sure the OneHotEncoder converts numerical data to one
# hot encoded data
encoded_data = bu.one_hot_encoder(self.df['WindDirection'])
self.assertTrue(np.array_equal(encoded_data[:, 0],
np.array([1, 0, 0, 0])))
def TestEncodeDataNaiveBayes(self):
# test encoder using the Naive Bayes algorithm and one hot encoding
encoded_data, encoder = bu.encode_data(self.df, 'Naive Bayes')
self.assertTrue(np.array_equal(encoded_data[:, 2],
np.array([1, 0, 0, 0])))
def TestEncodeDataDecisionTree(self):
# test encoder using the decision tree algorithm and label encoding
encoded_data, encoder = bu.encode_data(self.df, 'Decision Tree')
self.assertTrue(np.array_equal(encoded_data[:, 2],
np.array([0, 1, 2, 3])))
def TestEncodeDataNaiveBayesDF(self):
        # test conversion of encoded data to a pandas dataframe using the
# Naive Bayes classification algorithm
encoded_df = bu.encode_data_df(self.df, 'Naive Bayes')
columns = encoded_df.columns.values
self.assertEqual(columns[0], 'ID')
self.assertEqual(columns[1], 'ProbWindDD')
self.assertEqual(columns[2], 'North')
self.assertEqual(columns[3], 'easT')
self.assertEqual(columns[4], 'sOuTh')
self.assertEqual(columns[5], 'west')
def TestEncodeDataDecisionTreeDF(self):
        # test conversion of encoded data to a pandas dataframe using the
# decision tree classification algorithm
encoded_df = bu.encode_data_df(self.df, 'Decision Tree')
columns = encoded_df.columns.values
self.assertEqual(columns[0], 'ID')
self.assertEqual(columns[1], 'ProbWindDD')
self.assertEqual(columns[2], 'WindDirection')
| mit |
trmznt/fatools | fatools/lib/fautil/algo2.py | 2 | 22359 | import numpy as np
import math
from fatools.lib.utils import cerr, cverr, is_verbosity
from fatools.lib import const
from fatools.lib.fautil.hcalign import align_hc
from fatools.lib.fautil.gmalign import align_gm, align_sh, align_de
from fatools.lib.fautil.pmalign import align_pm
from scipy import signal, ndimage
from scipy.optimize import curve_fit
from peakutils import indexes
from matplotlib import pyplot as plt
from sortedcontainers import SortedListWithKey
import attr
@attr.s(repr=False)
class Peak(object):
rtime = attr.ib(default=-1)
rfu = attr.ib(default=-1)
area = attr.ib(default=-1)
brtime = attr.ib(default=-1)
ertime = attr.ib(default=-1)
srtime = attr.ib(default=-1)
beta = attr.ib(default=-1)
theta = attr.ib(default=-1)
omega = attr.ib(default=-1)
size = attr.ib(default=-1)
bin = attr.ib(default=-1)
def __repr__(self):
return "<P: %4d | %4d | %5d | %2d | %+3.2f | b %4.1f | t %4.2f | o %3d>" % (
self.rtime, self.rfu, self.area, self.ertime - self.brtime, self.srtime,
self.beta, self.theta, self.omega)
@attr.s
class Channel(object):
data = attr.ib()
marker = attr.ib()
    alleles = attr.ib(default=attr.Factory(list))  # a fresh empty list per instance, not the list type itself
fsa = attr.ib(default=None)
def scan(self, params, offset=0):
if self.is_ladder():
alleles = scan_peaks(self, params.ladder)
else:
alleles = scan_peaks(self, params.ladder, offset)
cverr(1, "# scanning %s: %d peak(s)" % (self.marker, len(alleles)))
return alleles
def align(self, params):
if not self.is_ladder():
raise RuntimeError('ERR: cannot align non-ladder channel')
ladders, qcfunc = self.fsa.get_ladder_parameter()
result = align_peaks(self, params.ladder, ladders, qcfunc)
def scan_peaks(channel, params, offset=0):
"""
"""
cerr('I: scanning peaks for: %s' % channel)
# check if channel is ladder channel, and adjust expected_peak_number accordingly
expected_peak_number = params.expected_peak_number
if channel.is_ladder():
expected_peak_number = len(channel.fsa.panel.get_ladder()['sizes'])
else:
# otherwise, calculate min_rtime for offset
if len(channel.fsa.ztranspose) <= 0:
raise RuntimeError('ztranspose has not been calculated!')
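        # ztranspose holds polynomial coefficients that, via np.poly1d, map a
        # fragment size back to an approximate scan time; evaluating it at the
        # marker's minimum expected size gives the scan-time offset below which
        # peaks are ignored for this channel.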
min_size = channel.marker.min_size
f = np.poly1d( channel.fsa.ztranspose )
offset = int(round(f(min_size)))
channel.offset = offset
initial_peaks = find_peaks(channel.data, params, offset, expected_peak_number)
# create alleles based on these peaks
alleles = []
for p in initial_peaks:
allele = channel.Allele(
rtime = p.rtime,
rfu = p.rfu,
area = p.area,
brtime = p.brtime,
ertime = p.ertime,
wrtime = p.wrtime,
srtime = p.srtime,
beta = p.beta,
theta = p.theta,
omega = p.omega,
)
allele.type = const.peaktype.scanned
allele.method = const.binningmethod.notavailable
allele.marker = channel.marker
channel.add_allele( allele )
alleles.append( allele )
channel.status = const.channelstatus.scanned
return alleles
def align_peaks(channel, params, ladder, anchor_pairs=None):
"""
returns (score, rss, dp, aligned_peak_number)
"""
alleles = channel.get_alleles()
# reset all peaks first
for p in channel.get_alleles():
p.size = -1
p.type = const.peaktype.scanned
#anchor_pairs = pairs
alignresult = align_ladder( alleles, ladder, anchor_pairs)
f = np.poly1d( alignresult.dpresult.z )
for (size, allele) in alignresult.dpresult.sized_peaks:
allele.dev = abs( f(allele.rtime) - size)
allele.size = size
allele.type = const.peaktype.ladder
return alignresult
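# Alignment strategy, as read from align_ladder() below: with anchor pairs the
# alleles go straight to pair-matching (align_pm); otherwise, when there are at
# most a few more peaks than ladder sizes, hierarchical clustering (align_hc) is
# tried first and accepted if its score exceeds 0.9, with pair-matching as the
# fallback. The greedy-match / shift / differential-evolution branches placed
# after the "# end of function," marker are currently unreachable.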
def align_ladder( alleles, ladder, anchor_pairs):
if anchor_pairs:
return align_pm( alleles, ladder, anchor_pairs)
if len(alleles) <= len(ladder['sizes']) + 5:
result = align_hc( alleles, ladder )
if result.score > 0.9:
return result
return align_pm( alleles, ladder )
# end of function,
if result.initial_pairs:
result = align_gm( alleles, ladder, result.initial_pairs )
if result.score > 0.75:
return result
result = align_sh( alleles, ladder )
if result.score > 0.75:
return result
# perform differential evolution
return align_de( alleles, ladder )
raise RuntimeError
result = hclust_align( alleles, ladder )
# add relevant info to peaks
aligned_peaks = result[2][3]
f = np.poly1d( result[2][2] )
for (size, p) in aligned_peaks:
p.dev = abs( f(p.rtime) - size)
p.size = size
z, rss = estimate_z( [p[1].rtime for p in aligned_peaks],
[p[0] for p in aligned_peaks], 3)
print('>>> RSS:', rss)
#import pprint; pprint.pprint( aligned_peaks )
return result
def call_peaks(channel, params, func, min_rtime, max_rtime):
for allele in channel.alleles:
if not min_rtime < allele.rtime < max_rtime: continue
allele.size, allele.dev, allele.qcall, method = func(allele.rtime)
if allele.type == const.peaktype.scanned:
allele.type = const.peaktype.called
# helper functions
def find_raw_peaks(data, params, offset, expected_peak_number=0):
"""
params.min_dist
params.norm_thres
params.min_rfu
params.max_peak_number
"""
#print("expected:", expected_peak_number)
# cut and pad data to overcome peaks at the end of array
obs_data = np.append(data[offset:], [0,0,0])
if False: #expected_peak_number:
min_dist = params.min_dist
indices = []
norm_threshold = params.norm_thres
expected_peak_number = expected_peak_number * 1.8
while len(indices) <= expected_peak_number and norm_threshold > 1e-7:
indices = indexes( obs_data, norm_threshold, min_dist)
print(len(indices), norm_threshold)
norm_threshold *= 0.5
elif False:
indices = indexes( obs_data, params.norm_thres, params.min_dist)
indices = indexes( obs_data, 1e-7, params.min_dist)
cverr(5, '## indices: %s' % str(indices))
cverr(3, '## raw indices: %d' % len(indices))
if len(indices) == 0:
return []
# normalize indices
if offset > 0:
indices = indices + offset
# filter peaks by minimum rfu, and by maximum peak number after sorted by rfu
peaks = [Peak(int(i), int(data[i])) for i in indices
if data[i] >= params.min_rfu and params.min_rtime < i]
#peaks = sorted( peaks, key = lambda x: x.rfu )[:params.max_peak_number * 2]
#import pprint; pprint.pprint(peaks)
#print('======')
if expected_peak_number:
peaks.sort( key = lambda x: x.rfu, reverse = True )
peaks = peaks[: round(expected_peak_number * 2)]
peaks.sort( key = lambda x: x.rtime )
cverr(3, '## peak above min rfu: %d' % len(peaks))
return peaks
def find_peaks(data, params, offset=0, expected_peak_number=0):
peaks = find_raw_peaks(data, params, offset, expected_peak_number)
# check for any peaks
if not peaks:
return peaks
# measure peaks parameters
measure_peaks(peaks, data, offset)
#import pprint; pprint.pprint(peaks)
# filter artefact peaks if expected peak number is bigger
if expected_peak_number > 10:
non_artifact_peaks = filter_for_artifact(peaks, params, expected_peak_number)
else:
non_artifact_peaks = peaks
# for ladder, special filtering is applied
if params.expected_peak_number:
peaks = filter_for_ladder(non_artifact_peaks, params)
else:
peaks = non_artifact_peaks
return peaks
def measure_peaks(peaks, data, offset=0):
(q50, q70) = np.percentile( data[offset:], [50, 75] )
for p in peaks:
p.area, p.brtime, p.ertime, p.srtime, ls, rs = calculate_area( data,
p.rtime, 5e-2, q50 )
p.wrtime = p.ertime - p.brtime
p.beta = p.area / p.rfu
if p.wrtime == 0:
p.theta = 0
p.omega = 0
else:
p.theta = p.rfu / p.wrtime
p.omega = p.area / p.wrtime
def calculate_area(y, t, threshold, baseline):
""" return (area, brtime, ertime, srtime)
area: area
brtime: begin rtime
ertime: end rtime
"""
# right area
data = y[t:]
r_area, ertime, r_shared = half_area(data, threshold, baseline)
# left area
data = y[:t+1][::-1]
l_area, brtime, l_shared = half_area(data, threshold, baseline)
return ( l_area + r_area - y[t], t - brtime, ertime + t, math.log2(r_area / l_area),
l_shared, r_shared )
def half_area(y, threshold, baseline):
""" return (area, ertime, shared_status)
"""
winsize = 3
threshold = threshold/2
shared = False
area = y[0]
edge = float(np.sum(y[0:winsize]))/winsize
old_edge = 2 * edge
index = 1
limit = len(y)
while ( edge > area * threshold and edge < old_edge and
index < limit and y[index] >= baseline ):
old_edge = edge
area += y[index]
edge = float(np.sum(y[index:index+winsize]))/winsize
index += 1
if edge >= old_edge:
shared = True
index -= 1
return area, index, shared
def math_func(x, a, b):
#return a*np.exp(x*b)
return a*x + b
def quadratic_math_func(x, a, b, c):
return a*x**2 + b*x + c
def filter_for_artifact(peaks, params, expected_peak_number = 0):
"""
params.max_peak_number
params.artifact_ratio
params.artifact_dist ~ 5
"""
# the following code in this function performs the necessary acrobatic act
# to select the most likely peaks that can be considered as true signals,
# which is especially necessary for ladder - size assignment
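    # In short (based on measure_peaks() above): peaks are ranked by theta
    # (height/width), omega (area/width) and rfu (height); curve_fit then derives
    # adaptive, retention-time-dependent thresholds from the top-ranked peaks, and
    # anything below those thresholds - or too thin, or too close to a much taller
    # neighbour - is treated as an artifact and dropped.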
if len(peaks) == expected_peak_number:
return peaks
# we need to adapt to the noise level of current channel
if expected_peak_number > 0:
epn = expected_peak_number
theta_peaks = sorted(peaks, key = lambda x: x.theta, reverse=True)[round(epn/2)+3:epn-1]
#theta_peaks = theta_peaks[2:4] + theta_peaks[round(epn/2):epn-1]
omega_peaks = sorted(peaks, key = lambda x: x.omega, reverse=True)
omega_peaks = omega_peaks[2:4] + omega_peaks[round(epn/2):epn-1]
rfu_peaks = sorted(peaks, key = lambda x: x.rfu, reverse=True)[:epn-1]
if theta_peaks[-1].theta < 8:
theta_peaks.sort()
thetas = np.array([ p.theta for p in theta_peaks ])
rtimes = [ p.rtime for p in theta_peaks ]
#plt.scatter(rtimes, thetas)
#plt.show()
popt, pcov = curve_fit( math_func, rtimes, 0.5 * thetas, p0 = [ -1, 1 ])
if is_verbosity(4):
xx = np.linspace( rtimes[0], rtimes[-1]+2000, 100 )
yy = math_func(xx, *popt)
plt.plot(xx, yy)
plt.scatter( [p.rtime for p in peaks], [p.theta for p in peaks])
plt.show()
q_theta = lambda x: x.theta >= math_func(x.rtime, *popt) or x.theta > 100
else:
q_theta = lambda x: x.theta >= min(theta_peaks[-1].theta, params.min_theta)
if omega_peaks[-1].omega < 200:
omega_peaks.sort()
omegas = np.array([ p.omega for p in omega_peaks ])
rtimes = np.array([ p.rtime for p in omega_peaks ])
# generate a quadratic threshold for omega
# generate a quadratic ratio series first
popt, pcov = curve_fit( quadratic_math_func,
[rtimes[0], (rtimes[0] + rtimes[-1])/2, rtimes[-1]],
[0.05, 0.25, 0.05])
ratios = quadratic_math_func(rtimes, *popt)
if is_verbosity(4):
plt.plot(rtimes, ratios)
plt.show()
# use the ratios to enforce quadratic threshold
popt, pcov = curve_fit( quadratic_math_func, rtimes, ratios * omegas,
p0 = [ -1, 1, 0 ])
if popt[0] > 0:
# enforce small flat ratio
popt, pcov = curve_fit( math_func, rtimes, 0.25 * omegas, p0 = [ 1, 0 ])
popt = np.insert(popt, 0, 0.0) # convert to 3 params
if is_verbosity(4):
plt.scatter(rtimes, omegas)
xx = np.linspace( rtimes[0], rtimes[-1]+2000, 100 )
yy = quadratic_math_func(xx, *popt)
plt.plot(xx, yy)
plt.scatter( [p.rtime for p in peaks], [p.omega for p in peaks])
plt.show()
q_omega = lambda x: ( x.omega >= 100 or
x.omega >= quadratic_math_func(x.rtime, *popt) )
else:
q_omega = lambda x: x.omega >= min(omega_peaks[-1].omega, 50)
min_rfu = rfu_peaks[-1].rfu * 0.125
    else:
        min_theta = 0
        min_omega = 0
        min_theta_omega = 0
        min_rfu = 2
        # fall back to a permissive test so that q_omega is always defined
        # before it is used in the filtering loop below
        q_omega = lambda x: x.omega >= min_omega
# filter for too sharp/thin peaks
filtered_peaks = []
for p in peaks:
#filtered_peaks.append(p); continue
cverr(5, str(p))
if len(filtered_peaks) < 2 and p.area > 50:
# first two real peaks might be a bit lower
filtered_peaks.append(p)
continue
if not q_omega(p):
cverr(5, '! q_omega')
continue
#if not q_theta(p):
# print('! q_theta')
# continue
#if min_theta and min_omega and p.omega < min_omega and p.theta < min_theta:
# print('! omega & theta')
# continue
#if min_theta_omega and p.theta * p.omega < min_theta_omega:
# print('! theta_omega')
# continue
if p.theta < 1.0 and p.area < 25 and p.omega < 5:
cverr(5, '! extreme theta & area & omega')
continue
if p.rfu < min_rfu:
cverr(5, '! extreme min_rfu')
continue
if p.beta > 25 and p.theta < 0.5:
cverr(5, '! extreme beta')
continue
if p.wrtime < 3:
continue
if p.rfu >= 25 and p.beta * p.theta < 6:
continue
if p.rfu < 25 and p.beta * p.theta < 3:
continue
#if p.omega < 50:
# continue
#if p.omega < 100 and p.theta < 5:
# continue
#if ( params.max_beta and min_theta and
# (p.beta > params.max_beta and p.theta < min_theta) ):
# print('! max_beta')
# continue
filtered_peaks.append(p)
#import pprint; pprint.pprint(filtered_peaks)
# filter for distance between peaks and their rfu ratio
peaks = sorted(filtered_peaks, key = lambda x: x.rtime)
non_artifact_peaks = []
for idx in range(len(peaks)):
p = peaks[idx]
if idx > 0:
prev_p = peaks[idx-1]
if ( p.brtime - prev_p.ertime < params.artifact_dist
and p.rfu < params.artifact_ratio * prev_p.rfu ):
# we are artifact, just skip
print('artifact1:', p)
continue
if idx < len(peaks)-1:
next_p = peaks[idx+1]
if ( next_p.brtime - p.ertime < params.artifact_dist
and p.rfu < params.artifact_ratio * next_p.rfu ):
# we are artifact, just skip
                print('artifact2:', p)
continue
non_artifact_peaks.append( p )
#import pprint; pprint.pprint(non_artifact_peaks)
#print(len(non_artifact_peaks))
peaks = non_artifact_peaks
cverr(3, '## non artifact peaks: %d' % len(peaks))
return peaks
def filter_for_ladder(peaks, params):
"""
    we need to obtain enough peaks for ladder alignment purposes, but not too many, to avoid
    an excessive alignment process and potentially incorrect alignment
    peaks must be in rtime ascending order
"""
epn = params.expected_peak_number # this is the number of ladder peaks
#
return peaks
def baseline_als(y, lam, p, niter=10):
pass
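# baseline_als() above is left as a stub. A minimal sketch of the classic
# Eilers & Boelens asymmetric-least-squares baseline, which the signature
# suggests, could look like the following. This is an assumption for
# illustration, not the author's implementation.
def _baseline_als_sketch(y, lam, p, niter=10):
    from scipy import sparse
    from scipy.sparse.linalg import spsolve

    y = np.asarray(y, dtype=float)
    n = len(y)
    # second-difference operator used as the smoothness penalty
    D = sparse.diags([1.0, -2.0, 1.0], [0, -1, -2], shape=(n, n - 2))
    w = np.ones(n)
    z = y
    for _ in range(niter):
        W = sparse.spdiags(w, 0, n, n)
        Z = W + lam * D.dot(D.transpose())
        z = spsolve(Z, w * y)
        # asymmetric weights: points above the baseline get weight p,
        # points below get 1 - p
        w = p * (y > z) + (1 - p) * (y < z)
    return z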
@attr.s
class NormalizedTrace(object):
signal = attr.ib()
baseline = attr.ib()
def get_qc(self):
""" return tuple of qcfunc
"""
return tuple()
def normalize_baseline( raw, medwinsize=399, savgol_size=11, savgol_order=5,
tophat_factor = 0.01 ):
"""
params.medwin_size
params.savgol_order
params.savgol_size
"""
median_line = signal.medfilt(raw, [medwinsize])
baseline = signal.savgol_filter( median_line, medwinsize, savgol_order)
corrected_baseline = raw - baseline
np.maximum(corrected_baseline, 0, out=corrected_baseline)
savgol = signal.savgol_filter(corrected_baseline, savgol_size, savgol_order)
smooth = ndimage.white_tophat(savgol, None,
np.repeat([1], int(round(raw.size * tophat_factor))))
return NormalizedTrace( signal=smooth, baseline = baseline )
@attr.s
class TraceChannel(object):
dye_name = attr.ib()
dye_wavelength = attr.ib()
raw_channel = attr.ib()
smooth_channel = attr.ib()
def b(txt):
""" return a binary string aka bytes """
return txt.encode('UTF-8')
from fatools.lib.fautil.traceio import WAVELENGTH
def separate_channels( trace ):
# return a list of [ 'dye name', dye_wavelength, numpy_array, numpy_smooth_baseline ]
results = []
for (idx, data_idx) in [ (1,1), (2,2), (3,3), (4,4), (5,105) ]:
try:
dye_name = trace.get_data(b('DyeN%d' % idx)).decode('UTF-8')
# below is to workaround on some strange dye names
if dye_name == '6FAM': dye_name = '6-FAM'
elif dye_name == 'PAT': dye_name = 'PET'
elif dye_name == 'Bn Joda': dye_name = 'LIZ'
try:
dye_wavelength = trace.get_data(b('DyeW%d' % idx))
except KeyError:
dye_wavelength = WAVELENGTH[dye_name]
raw_channel = np.array( trace.get_data(b('DATA%d' % data_idx)) )
nt = normalize_baseline( raw_channel )
results.append(
TraceChannel(dye_name, dye_wavelength, raw_channel, nt.signal)
)
except KeyError:
pass
return results
def generate_scoring_function( strict_params, relax_params ):
def _scoring_func( dp_result, method ):
# alignment_result is (dp_score, dp_rss, dp_z, dp_peaks)
dp_score = dp_result.dpscore
dp_rss = dp_result.rss
dp_peaks = dp_result.sized_peaks
if method == 'strict':
if ( dp_score >= strict_params['min_dpscore'] and
dp_rss <= strict_params['max_rss'] and
len(dp_peaks) >= strict_params['min_sizes'] ):
return (1, None)
return (0, None)
elif method == 'relax':
msg = []
# scoring based on parts of results
# score based on DP score compared to minimum DP score
delta_score = relax_params['min_dpscore'] - dp_score
if delta_score <= 0:
dp_score_part = 1
else:
dp_score_part = 1e-2 ** (1e-2 * delta_score)
# score based on RSS compared to the maximum allowed RSS
delta_rss = dp_rss - relax_params['max_rss']
if delta_rss <= 0:
dp_rss_part = 1
else:
dp_rss_part = 1e-2 ** ( 1e-3 * delta_rss )
msg.append( 'RSS > %d' % ( relax_params['max_rss'] ) )
# score based on how many peaks we might miss compared to minimum number of peaks
delta_peaks = relax_params['min_sizes'] - len(dp_peaks)
if delta_peaks <= 0:
dp_peaks_part = 1
else:
dp_peaks_part = max( 0, - delta_peaks / 0.5 * relax_params['min_sizes'] - 1)
msg.append( 'Missing peaks = %d' % delta_peaks )
# total overall score
score = 0.3 * dp_score_part + 0.5 * dp_rss_part + 0.2 * dp_peaks_part
return (score, msg)
raise RuntimeError("Shouldn't be here!")
return _scoring_func
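# Hypothetical usage sketch of the scoring factory above; the parameter
# dictionaries and the fake alignment result are illustrative assumptions.
def _demo_scoring_function():
    from types import SimpleNamespace

    strict = dict(min_dpscore=100, max_rss=50, min_sizes=10)
    relax = dict(min_dpscore=50, max_rss=100, min_sizes=8)
    score_func = generate_scoring_function(strict, relax)
    dp_result = SimpleNamespace(dpscore=120, rss=40, sized_peaks=list(range(12)))
    strict_score = score_func(dp_result, 'strict')   # -> (1, None)
    relax_score = score_func(dp_result, 'relax')     # -> (1.0, [])
    return strict_score, relax_score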
def local_southern( ladder_alleles ):
""" southern local interpolation """
ladder_allele_sorted = SortedListWithKey( ladder_alleles, key = lambda k: k.rtime )
x = [ p.rtime for p in ladder_allele_sorted ]
y = [ p.size for p in ladder_allele_sorted ]
def _f( rtime ):
""" return (size, deviation)
deviation is calculated as delta square between curve1 and curve2
"""
idx = ladder_allele_sorted.bisect_key_right( rtime )
# left curve
z1 = np.polyfit( x[idx-2:idx+1], y[idx-2:idx+1], 2)
size1 = np.poly1d( z1 )(rtime)
min_score1 = min( x.qscore for x in ladder_allele_sorted[idx-2:idx+1] )
# right curve
z2 = np.polyfit( x[idx-1:idx+2], y[idx-1:idx+2], 2)
size2 = np.poly1d( z2 )(rtime)
min_score2 = min( x.qscore for x in ladder_allele_sorted[idx-1:idx+2] )
return ( (size1 + size2)/2, (size1 - size2) ** 2, (min_score1 + min_score2)/2,
const.allelemethod.localsouthern)
return _f
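# Hypothetical usage sketch; the ladder alleles below are synthetic stand-ins
# for objects carrying rtime, size and qscore attributes.
def _demo_local_southern():
    from types import SimpleNamespace

    ladder = [SimpleNamespace(rtime=r, size=s, qscore=1.0)
              for r, s in [(1000, 100), (2000, 150), (3000, 200),
                           (4000, 250), (5000, 300)]]
    size_of = local_southern(ladder)
    # returns (size, deviation, qscore estimate, allele method) for a query rtime
    return size_of(2500)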
## this is a new algorithm and steps to perform peak analysis
##
## fsa = import_fsa()
## ladder_channel = fsa.ladder_channel()
## alleles = scan_peaks(ladder_channel, params)
## alleles = preannotate_peaks(ladder_channel, params)
## result = align_ladder(ladder_channel, params, size_standards)
##
## for channel in fsa.non_ladder_channel():
## scan_peaks(channel, params)
## preannotate_peaks(channel, params)
## call_peaks(channel, params)
## bin_peaks(channel, params)
## postannotate_peaks(channel, params)
## the high level methods
##
## fsa = import_fsa()
## fsa.align_ladder(params.ladder)
## fsa.scan_peaks(params.nonladder, marker=None)
## fsa.preannotate_peaks(params.nonladder, marker=None)
## fsa.call_peaks(params.nonladder, marker=None)
## fsa.bin_peaks(params.nonladder, marker=None) | lgpl-3.0 |
abimannans/scikit-learn | examples/classification/plot_classification_probability.py | 242 | 2624 | """
===============================
Plot classification probability
===============================
Plot the classification probability for different classifiers. We use a 3
class dataset, and we classify it with a Support Vector classifier, L1
and L2 penalized logistic regression with either a One-Vs-Rest or multinomial
setting.
The logistic regression is not a multiclass classifier out of the box. As
a result it can identify only the first class.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 0:2] # we only take the first two features for visualization
y = iris.target
n_features = X.shape[1]
C = 1.0
# Create different classifiers. The logistic regression cannot do
# multiclass out of the box.
classifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'),
'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2'),
'Linear SVC': SVC(kernel='linear', C=C, probability=True,
random_state=0),
'L2 logistic (Multinomial)': LogisticRegression(
C=C, solver='lbfgs', multi_class='multinomial'
)}
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * 2, n_classifiers * 2))
plt.subplots_adjust(bottom=.2, top=.95)
xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]
for index, (name, classifier) in enumerate(classifiers.items()):
classifier.fit(X, y)
y_pred = classifier.predict(X)
classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100
print("classif_rate for %s : %f " % (name, classif_rate))
    # View probabilities
probas = classifier.predict_proba(Xfull)
n_classes = np.unique(y_pred).size
for k in range(n_classes):
plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
plt.title("Class %d" % k)
if k == 0:
plt.ylabel(name)
imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
extent=(3, 9, 1, 5), origin='lower')
plt.xticks(())
plt.yticks(())
idx = (y_pred == k)
if idx.any():
plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='k')
ax = plt.axes([0.15, 0.04, 0.7, 0.05])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')
plt.show()
| bsd-3-clause |
arabenjamin/scikit-learn | examples/cluster/plot_agglomerative_clustering.py | 343 | 2931 | """
Agglomerative clustering with and without structure
===================================================
This example shows the effect of imposing a connectivity graph to capture
local structure in the data. The graph is simply the graph of 20 nearest
neighbors.
Two consequences of imposing a connectivity can be seen. First, clustering
with a connectivity matrix is much faster.
Second, when using a connectivity matrix, average and complete linkage are
unstable and tend to create a few clusters that grow very quickly. Indeed,
average and complete linkage fight this percolation behavior by considering all
the distances between two clusters when merging them. The connectivity
graph breaks this mechanism. This effect is more pronounced for very
sparse graphs (try decreasing the number of neighbors in
kneighbors_graph) and with complete linkage. In particular, having a very
small number of neighbors in the graph imposes a geometry that is
close to that of single linkage, which is well known to have this
percolation instability.
"""
# Authors: Gael Varoquaux, Nelle Varoquaux
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph
# Generate sample data
n_samples = 1500
np.random.seed(0)
t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, n_samples))
x = t * np.cos(t)
y = t * np.sin(t)
X = np.concatenate((x, y))
X += .7 * np.random.randn(2, n_samples)
X = X.T
# Create a graph capturing local connectivity. Larger number of neighbors
# will give more homogeneous clusters at the cost of computation
# time. A very large number of neighbors gives more evenly distributed
# cluster sizes, but may not impose the local manifold structure of
# the data
knn_graph = kneighbors_graph(X, 30, include_self=False)
for connectivity in (None, knn_graph):
for n_clusters in (30, 3):
plt.figure(figsize=(10, 4))
for index, linkage in enumerate(('average', 'complete', 'ward')):
plt.subplot(1, 3, index + 1)
model = AgglomerativeClustering(linkage=linkage,
connectivity=connectivity,
n_clusters=n_clusters)
t0 = time.time()
model.fit(X)
elapsed_time = time.time() - t0
plt.scatter(X[:, 0], X[:, 1], c=model.labels_,
cmap=plt.cm.spectral)
plt.title('linkage=%s (time %.2fs)' % (linkage, elapsed_time),
fontdict=dict(verticalalignment='top'))
plt.axis('equal')
plt.axis('off')
plt.subplots_adjust(bottom=0, top=.89, wspace=0,
left=0, right=1)
plt.suptitle('n_cluster=%i, connectivity=%r' %
(n_clusters, connectivity is not None), size=17)
plt.show()
| bsd-3-clause |
depet/scikit-learn | examples/plot_permutation_test_for_classification.py | 8 | 2208 | """
=================================================================
Test with permutations the significance of a classification score
=================================================================
In order to test if a classification score is significant, a technique
consists of repeating the classification procedure after randomizing (permuting)
the labels. The p-value is then given by the percentage of runs for
which the score obtained is greater than the classification score
obtained in the first place.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import pylab as pl
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold, permutation_test_score
from sklearn import datasets
##############################################################################
# Loading a dataset
iris = datasets.load_iris()
X = iris.data
y = iris.target
n_classes = np.unique(y).size
# Some noisy data not correlated
random = np.random.RandomState(seed=0)
E = random.normal(size=(len(X), 2200))
# Add noisy data to the informative features to make the task harder
X = np.c_[X, E]
svm = SVC(kernel='linear')
cv = StratifiedKFold(y, 2)
score, permutation_scores, pvalue = permutation_test_score(
svm, X, y, scoring="accuracy", cv=cv, n_permutations=100, n_jobs=1)
print("Classification score %s (pvalue : %s)" % (score, pvalue))
###############################################################################
# View histogram of permutation scores
pl.hist(permutation_scores, 20, label='Permutation scores')
ylim = pl.ylim()
# BUG: vlines(..., linestyle='--') fails on older versions of matplotlib
#pl.vlines(score, ylim[0], ylim[1], linestyle='--',
# color='g', linewidth=3, label='Classification Score'
# ' (pvalue %s)' % pvalue)
#pl.vlines(1.0 / n_classes, ylim[0], ylim[1], linestyle='--',
# color='k', linewidth=3, label='Luck')
pl.plot(2 * [score], ylim, '--g', linewidth=3,
label='Classification Score'
' (pvalue %s)' % pvalue)
pl.plot(2 * [1. / n_classes], ylim, '--k', linewidth=3, label='Luck')
pl.ylim(ylim)
pl.legend()
pl.xlabel('Score')
pl.show()
| bsd-3-clause |
fgbs/metrics | lib/influxdb/dataframe_client.py | 1 | 5853 | # -*- coding: utf-8 -*-
"""
DataFrame client for InfluxDB
"""
import math
import warnings
from .client import InfluxDBClient
class DataFrameClient(InfluxDBClient):
"""
The ``DataFrameClient`` object holds information necessary to connect
to InfluxDB. Requests can be made to InfluxDB directly through the client.
The client reads and writes from pandas DataFrames.
"""
def __init__(self, *args, **kwargs):
super(DataFrameClient, self).__init__(*args, **kwargs)
try:
global pd
import pandas as pd
except ImportError as ex:
raise ImportError(
'DataFrameClient requires Pandas, "{ex}" problem importing'
.format(ex=str(ex))
)
self.EPOCH = pd.Timestamp('1970-01-01 00:00:00.000+00:00')
def write_points(self, data, *args, **kwargs):
"""
write_points()
Write to multiple time series names.
:param data: A dictionary mapping series names to pandas DataFrames
:param batch_size: [Optional] Value to write the points in batches
instead of all at one time. Useful for when doing data dumps from
one database to another or when doing a massive write operation
:type batch_size: int
"""
batch_size = kwargs.get('batch_size')
time_precision = kwargs.get('time_precision', 's')
if batch_size:
kwargs.pop('batch_size') # don't hand over to InfluxDBClient
for key, data_frame in data.items():
number_batches = int(math.ceil(len(data_frame)
/ float(batch_size)))
for batch in range(number_batches):
start_index = batch * batch_size
end_index = (batch + 1) * batch_size
data = [self._convert_dataframe_to_json(
name=key,
dataframe=data_frame.ix[start_index:end_index].copy(),
time_precision=time_precision)]
InfluxDBClient.write_points_with_precision(self, data,
*args, **kwargs)
return True
else:
data = [self._convert_dataframe_to_json(
name=key, dataframe=dataframe, time_precision=time_precision)
for key, dataframe in data.items()]
return InfluxDBClient.write_points_with_precision(self, data,
*args, **kwargs)
def write_points_with_precision(self, data, time_precision='s'):
"""
DEPRECATED. Write to multiple time series names
"""
warnings.warn(
"write_points_with_precision is deprecated, and will be removed "
"in future versions. Please use "
"``DataFrameClient.write_points(time_precision='..')`` instead.",
FutureWarning)
return self.write_points(data, time_precision='s')
def query(self, query, time_precision='s', chunked=False):
"""
        Querying data into a DataFrame.
:param time_precision: [Optional, default 's'] Either 's', 'm', 'ms'
or 'u'.
:param chunked: [Optional, default=False] True if the data shall be
retrieved in chunks, False otherwise.
"""
result = InfluxDBClient.query(self, query=query,
time_precision=time_precision,
chunked=chunked)
if len(result) > 0:
return self._to_dataframe(result[0], time_precision)
else:
return result
def _to_dataframe(self, json_result, time_precision):
dataframe = pd.DataFrame(data=json_result['points'],
columns=json_result['columns'])
if 'sequence_number' in dataframe.keys():
dataframe.sort(['time', 'sequence_number'], inplace=True)
else:
dataframe.sort(['time'], inplace=True)
pandas_time_unit = time_precision
if time_precision == 'm':
pandas_time_unit = 'ms'
elif time_precision == 'u':
pandas_time_unit = 'us'
dataframe.index = pd.to_datetime(list(dataframe['time']),
unit=pandas_time_unit,
utc=True)
del dataframe['time']
return dataframe
def _convert_dataframe_to_json(self, dataframe, name, time_precision='s'):
if not isinstance(dataframe, pd.DataFrame):
raise TypeError('Must be DataFrame, but type was: {}.'
.format(type(dataframe)))
if not (isinstance(dataframe.index, pd.tseries.period.PeriodIndex) or
isinstance(dataframe.index, pd.tseries.index.DatetimeIndex)):
raise TypeError('Must be DataFrame with DatetimeIndex or \
PeriodIndex.')
dataframe.index = dataframe.index.to_datetime()
if dataframe.index.tzinfo is None:
dataframe.index = dataframe.index.tz_localize('UTC')
dataframe['time'] = [self._datetime_to_epoch(dt, time_precision)
for dt in dataframe.index]
data = {'name': name,
'columns': [str(column) for column in dataframe.columns],
'points': list([list(x) for x in dataframe.values])}
return data
def _datetime_to_epoch(self, datetime, time_precision='s'):
seconds = (datetime - self.EPOCH).total_seconds()
if time_precision == 's':
return seconds
elif time_precision == 'm':
return seconds * 1000
elif time_precision == 'u':
return seconds * 1000000
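# Hypothetical usage sketch (not part of the client itself). Host, credentials,
# database and series names below are placeholder assumptions.
def _demo_write_points():
    import pandas as pd

    client = DataFrameClient(host='localhost', port=8086, username='root',
                             password='root', database='example_db')
    index = pd.date_range('2014-01-01', periods=3, freq='H')
    frame = pd.DataFrame({'value': [0.64, 0.67, 0.70]}, index=index)
    # write_points expects a dict mapping series names to DataFrames
    client.write_points({'cpu_load': frame})
    # and query() returns a DataFrame indexed by time
    return client.query('select * from cpu_load')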
| mit |
plotly/plotly.py | packages/python/plotly/plotly/graph_objs/volume/slices/_z.py | 2 | 6166 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Z(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "volume.slices"
_path_str = "volume.slices.z"
_valid_props = {"fill", "locations", "locationssrc", "show"}
# fill
# ----
@property
def fill(self):
"""
Sets the fill ratio of the `slices`. The default fill value of
the `slices` is 1 meaning that they are entirely shaded. On the
        other hand, applying a `fill` ratio less than one would allow
the creation of openings parallel to the edges.
The 'fill' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["fill"]
@fill.setter
def fill(self, val):
self["fill"] = val
# locations
# ---------
@property
def locations(self):
"""
Specifies the location(s) of slices on the axis. When not
specified slices would be created for all points of the axis z
except start and end.
The 'locations' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["locations"]
@locations.setter
def locations(self, val):
self["locations"] = val
# locationssrc
# ------------
@property
def locationssrc(self):
"""
Sets the source reference on Chart Studio Cloud for locations
.
The 'locationssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["locationssrc"]
@locationssrc.setter
def locationssrc(self, val):
self["locationssrc"] = val
# show
# ----
@property
def show(self):
"""
Determines whether or not slice planes about the z dimension
are drawn.
The 'show' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["show"]
@show.setter
def show(self, val):
self["show"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
fill
Sets the fill ratio of the `slices`. The default fill
value of the `slices` is 1 meaning that they are
            entirely shaded. On the other hand, applying a `fill`
ratio less than one would allow the creation of
openings parallel to the edges.
locations
Specifies the location(s) of slices on the axis. When
not specified slices would be created for all points of
the axis z except start and end.
locationssrc
Sets the source reference on Chart Studio Cloud for
locations .
show
Determines whether or not slice planes about the z
dimension are drawn.
"""
def __init__(
self,
arg=None,
fill=None,
locations=None,
locationssrc=None,
show=None,
**kwargs
):
"""
Construct a new Z object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.volume.slices.Z`
fill
Sets the fill ratio of the `slices`. The default fill
value of the `slices` is 1 meaning that they are
            entirely shaded. On the other hand, applying a `fill`
ratio less than one would allow the creation of
openings parallel to the edges.
locations
Specifies the location(s) of slices on the axis. When
not specified slices would be created for all points of
the axis z except start and end.
locationssrc
Sets the source reference on Chart Studio Cloud for
locations .
show
Determines whether or not slice planes about the z
dimension are drawn.
Returns
-------
Z
"""
super(Z, self).__init__("z")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.volume.slices.Z
constructor must be a dict or
an instance of :class:`plotly.graph_objs.volume.slices.Z`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("fill", None)
_v = fill if fill is not None else _v
if _v is not None:
self["fill"] = _v
_v = arg.pop("locations", None)
_v = locations if locations is not None else _v
if _v is not None:
self["locations"] = _v
_v = arg.pop("locationssrc", None)
_v = locationssrc if locationssrc is not None else _v
if _v is not None:
self["locationssrc"] = _v
_v = arg.pop("show", None)
_v = show if show is not None else _v
if _v is not None:
self["show"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
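# Hypothetical usage sketch (not part of the generated class): `slices.z`
# settings are normally supplied through a `go.Volume` trace. The toy grid and
# values below are illustrative assumptions.
def _demo_volume_slice_z():
    import numpy as np
    import plotly.graph_objects as go

    pts = np.linspace(-1, 1, 5)
    x, y, z = np.meshgrid(pts, pts, pts)
    values = x ** 2 + y ** 2 + z ** 2
    fig = go.Figure(
        go.Volume(
            x=x.ravel(), y=y.ravel(), z=z.ravel(), value=values.ravel(),
            # draw a single slice plane at z == 0, fully shaded
            slices=dict(z=dict(show=True, locations=[0.0], fill=1.0)),
        )
    )
    return fig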
| mit |
pstjohn/decay-methods | high_throughput/zhang/fit_sinusoids.py | 1 | 1614 | import pandas as pd
import numpy as np
from methods.Futures import map_
from methods.sinusoid_estimation import fit_data
# Load time-series data on all probes
lumin = pd.read_csv('lumin.csv')
names = pd.read_csv('names.csv')
# Prune the initial transients from the dataset.
times_considered = np.s_[2:] # Save slice for later importing
lumin = lumin.iloc[:, times_considered]
def fit(row):
""" Wrapper function of the exponential sinusoid fitting function """
return fit_data(row.values, names.iloc[row.name].sampling_period,
outliers=True)
# The following two functions allow this fitting operation to be easily
# distributed over several compute nodes using python's scoop module.
# The times data frame is sliced into sections of 100 experiments before
# being mapped onto the nodes
data_len = len(lumin)
def slice_data(section_size):
""" Function to return a generator of indicies to slice the data
array into smaller sections """
ind_start = 0
while ind_start + section_size < data_len:
yield (ind_start, ind_start + section_size)
ind_start += section_size
yield (ind_start, data_len)
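# Illustrative sketch of the slicing scheme (hypothetical numbers, independent
# of the loaded data): with a total of 250 experiments and a section size of
# 100, the generator above yields (0, 100), (100, 200), (200, 250), i.e.
# contiguous, non-overlapping index ranges covering every experiment once.
def _demo_slice_scheme(total=250, section_size=100):
    start = 0
    bounds = []
    while start + section_size < total:
        bounds.append((start, start + section_size))
        start += section_size
    bounds.append((start, total))
    return bounds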
def fit_section(slice_tuple):
fitted_section = lumin[slice(*slice_tuple)].apply(fit, axis=1)
return fitted_section
# Placing the majority of the computational work in the following if
# statement allows the other functions to be imported without triggering
# sinsoid fitting
if __name__ == "__main__":
results = list(map_(fit_section, slice_data(100)))
fitted_results = pd.concat(results)
fitted_results.to_csv('blfit.csv')
| gpl-3.0 |
justrypython/EAST | svm_model_neg_v1.py | 1 | 2837 | #encoding:UTF-8
import os
import numpy as np
import sys
import cv2
import matplotlib.pyplot as plt
from sklearn.svm import NuSVC, SVC
import datetime
import pickle
#calculate the area
def area(p):
p = p.reshape((-1, 2))
return 0.5 * abs(sum(x0*y1 - x1*y0
for ((x0, y0), (x1, y1)) in segments(p)))
def segments(p):
return zip(p, np.concatenate((p[1:], [p[0]])))
def calc_xy(p0, p1, p2):
cos = calc_cos(p0, p1, p2)
dis = calc_dis(p0, p2)
return dis * cos, dis * np.sqrt(1 - np.square(cos))
def calc_dis(p0, p1):
return np.sqrt(np.sum(np.square(p0-p1)))
def calc_cos(p0, p1, p2):
A = p1 - p0
B = p2 - p0
num = np.dot(A, B)
demon = np.linalg.norm(A) * np.linalg.norm(B)
return num / demon
def calc_new_xy(boxes):
box0 = boxes[:8]
box1 = boxes[8:]
x, y = calc_xy(box1[4:6], box1[6:], box0[:2])
dis = calc_dis(box1[4:6], box1[6:])
area0 = area(box0)
area1 = area(box1)
return x/dis, y/dis, area0/area1
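# Illustrative check of the shoelace area helper above (not part of the
# training pipeline): a unit square given as a flat [x0, y0, ..., x3, y3]
# array has area 1.0.
def _demo_shoelace_area():
    unit_square = np.array([0, 0, 1, 0, 1, 1, 0, 1])
    return area(unit_square)  # -> 1.0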
if __name__ == '__main__':
test = False
path = '/media/zhaoke/b0685ee4-63e3-4691-ae02-feceacff6996/data/'
paths = os.listdir(path)
paths = [i for i in paths if '.txt' in i]
boxes = np.empty((480000, 9))
cnt = 0
for txt in paths:
f = open(path+txt, 'r')
lines = f.readlines()
f.close()
lines = [i.replace('\n', '').split(',') for i in lines]
lines = np.array(lines).astype(np.uint32)
boxes[cnt*6:cnt*6+len(lines)] = lines
cnt += 1
#zeros = boxes==[0, 0, 0, 0, 0, 0, 0, 0, 0]
#zeros_labels = zeros.all(axis=1)
#zeros_labels = np.where(zeros_labels==True)
idboxes = boxes[boxes[:, 8]==16]
idboxes = np.tile(idboxes[:, :8], (1, 6))
idboxes = idboxes.reshape((-1, 8))
#boxes = np.delete(boxes, zeros_labels[0], axis=0)
#idboxes = np.delete(idboxes, zeros_labels[0], axis=0)
boxes_idboxes = np.concatenate((boxes[:, :8], idboxes), axis=1)
start_time = datetime.datetime.now()
print start_time
new_xy = np.apply_along_axis(calc_new_xy, 1, boxes_idboxes)
end_time = datetime.datetime.now()
print end_time - start_time
if test:
with open('clf_address_neg_v1.pickle', 'rb') as f:
clf = pickle.load(f)
cnt = 0
for i, xy in enumerate(new_xy):
cls = int(clf.predict([xy])[0])
if cls == int(boxes[i, 8]):
cnt += 1
if i % 10000 == 0 and i != 0:
print i, ':', float(cnt) / i
else:
clf = SVC()
start_time = datetime.datetime.now()
print start_time
clf.fit(new_xy[:], boxes[:, 8])
end_time = datetime.datetime.now()
print end_time - start_time
with open('clf_address_neg_v1.pickle', 'wb') as f:
pickle.dump(clf, f)
print 'end' | gpl-3.0 |
f3r/scikit-learn | sklearn/feature_extraction/tests/test_image.py | 28 | 10384 | # Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import scipy as sp
from scipy import ndimage
from nose.tools import assert_equal, assert_true
from numpy.testing import assert_raises
from sklearn.feature_extraction.image import (
img_to_graph, grid_to_graph, extract_patches_2d,
reconstruct_from_patches_2d, PatchExtractor, extract_patches)
from sklearn.utils.graph import connected_components
def test_img_to_graph():
x, y = np.mgrid[:4, :4] - 10
grad_x = img_to_graph(x)
grad_y = img_to_graph(y)
assert_equal(grad_x.nnz, grad_y.nnz)
# Negative elements are the diagonal: the elements of the original
# image. Positive elements are the values of the gradient, they
# should all be equal on grad_x and grad_y
np.testing.assert_array_equal(grad_x.data[grad_x.data > 0],
grad_y.data[grad_y.data > 0])
def test_grid_to_graph():
#Checking that the function works with graphs containing no edges
size = 2
roi_size = 1
# Generating two convex parts with one vertex
# Thus, edges will be empty in _to_graph
mask = np.zeros((size, size), dtype=np.bool)
mask[0:roi_size, 0:roi_size] = True
mask[-roi_size:, -roi_size:] = True
mask = mask.reshape(size ** 2)
A = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray)
assert_true(connected_components(A)[0] == 2)
# Checking that the function works whatever the type of mask is
mask = np.ones((size, size), dtype=np.int16)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask)
assert_true(connected_components(A)[0] == 1)
# Checking dtype of the graph
mask = np.ones((size, size))
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.bool)
assert_true(A.dtype == np.bool)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.int)
assert_true(A.dtype == np.int)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.float64)
assert_true(A.dtype == np.float64)
def test_connect_regions():
lena = sp.misc.lena()
for thr in (50, 150):
mask = lena > thr
graph = img_to_graph(lena, mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def test_connect_regions_with_grid():
lena = sp.misc.lena()
mask = lena > 50
graph = grid_to_graph(*lena.shape, mask=mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
mask = lena > 150
graph = grid_to_graph(*lena.shape, mask=mask, dtype=None)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def _downsampled_lena():
lena = sp.misc.lena().astype(np.float32)
lena = (lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2]
+ lena[1::2, 1::2])
lena = (lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2]
+ lena[1::2, 1::2])
lena = lena.astype(np.float32)
lena /= 16.0
return lena
def _orange_lena(lena=None):
lena = _downsampled_lena() if lena is None else lena
lena_color = np.zeros(lena.shape + (3,))
lena_color[:, :, 0] = 256 - lena
lena_color[:, :, 1] = 256 - lena / 2
lena_color[:, :, 2] = 256 - lena / 4
return lena_color
def _make_images(lena=None):
lena = _downsampled_lena() if lena is None else lena
# make a collection of lenas
images = np.zeros((3,) + lena.shape)
images[0] = lena
images[1] = lena + 1
images[2] = lena + 2
return images
downsampled_lena = _downsampled_lena()
orange_lena = _orange_lena(downsampled_lena)
lena_collection = _make_images(downsampled_lena)
def test_extract_patches_all():
lena = downsampled_lena
i_h, i_w = lena.shape
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_all_color():
lena = orange_lena
i_h, i_w = lena.shape[:2]
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_all_rect():
lena = downsampled_lena
lena = lena[:, 32:97]
i_h, i_w = lena.shape
p_h, p_w = 16, 12
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_max_patches():
lena = downsampled_lena
i_h, i_w = lena.shape
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w), max_patches=100)
assert_equal(patches.shape, (100, p_h, p_w))
expected_n_patches = int(0.5 * (i_h - p_h + 1) * (i_w - p_w + 1))
patches = extract_patches_2d(lena, (p_h, p_w), max_patches=0.5)
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
assert_raises(ValueError, extract_patches_2d, lena, (p_h, p_w),
max_patches=2.0)
assert_raises(ValueError, extract_patches_2d, lena, (p_h, p_w),
max_patches=-1.0)
def test_reconstruct_patches_perfect():
lena = downsampled_lena
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w))
lena_reconstructed = reconstruct_from_patches_2d(patches, lena.shape)
np.testing.assert_array_equal(lena, lena_reconstructed)
def test_reconstruct_patches_perfect_color():
lena = orange_lena
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w))
lena_reconstructed = reconstruct_from_patches_2d(patches, lena.shape)
np.testing.assert_array_equal(lena, lena_reconstructed)
def test_patch_extractor_fit():
lenas = lena_collection
extr = PatchExtractor(patch_size=(8, 8), max_patches=100, random_state=0)
assert_true(extr == extr.fit(lenas))
def test_patch_extractor_max_patches():
lenas = lena_collection
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
max_patches = 100
expected_n_patches = len(lenas) * max_patches
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
max_patches = 0.5
expected_n_patches = len(lenas) * int((i_h - p_h + 1) * (i_w - p_w + 1)
* max_patches)
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_max_patches_default():
lenas = lena_collection
extr = PatchExtractor(max_patches=100, random_state=0)
patches = extr.transform(lenas)
assert_equal(patches.shape, (len(lenas) * 100, 12, 12))
def test_patch_extractor_all_patches():
lenas = lena_collection
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(lenas) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_color():
lenas = _make_images(orange_lena)
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(lenas) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_strided():
image_shapes_1D = [(10,), (10,), (11,), (10,)]
patch_sizes_1D = [(1,), (2,), (3,), (8,)]
patch_steps_1D = [(1,), (1,), (4,), (2,)]
expected_views_1D = [(10,), (9,), (3,), (2,)]
last_patch_1D = [(10,), (8,), (8,), (2,)]
image_shapes_2D = [(10, 20), (10, 20), (10, 20), (11, 20)]
patch_sizes_2D = [(2, 2), (10, 10), (10, 11), (6, 6)]
patch_steps_2D = [(5, 5), (3, 10), (3, 4), (4, 2)]
expected_views_2D = [(2, 4), (1, 2), (1, 3), (2, 8)]
last_patch_2D = [(5, 15), (0, 10), (0, 8), (4, 14)]
image_shapes_3D = [(5, 4, 3), (3, 3, 3), (7, 8, 9), (7, 8, 9)]
patch_sizes_3D = [(2, 2, 3), (2, 2, 2), (1, 7, 3), (1, 3, 3)]
patch_steps_3D = [(1, 2, 10), (1, 1, 1), (2, 1, 3), (3, 3, 4)]
expected_views_3D = [(4, 2, 1), (2, 2, 2), (4, 2, 3), (3, 2, 2)]
last_patch_3D = [(3, 2, 0), (1, 1, 1), (6, 1, 6), (6, 3, 4)]
image_shapes = image_shapes_1D + image_shapes_2D + image_shapes_3D
patch_sizes = patch_sizes_1D + patch_sizes_2D + patch_sizes_3D
patch_steps = patch_steps_1D + patch_steps_2D + patch_steps_3D
expected_views = expected_views_1D + expected_views_2D + expected_views_3D
last_patches = last_patch_1D + last_patch_2D + last_patch_3D
for (image_shape, patch_size, patch_step, expected_view,
last_patch) in zip(image_shapes, patch_sizes, patch_steps,
expected_views, last_patches):
image = np.arange(np.prod(image_shape)).reshape(image_shape)
patches = extract_patches(image, patch_shape=patch_size,
extraction_step=patch_step)
ndim = len(image_shape)
assert_true(patches.shape[:ndim] == expected_view)
last_patch_slices = [slice(i, i + j, None) for i, j in
zip(last_patch, patch_size)]
assert_true((patches[[slice(-1, None, None)] * ndim] ==
image[last_patch_slices].squeeze()).all())
def test_extract_patches_square():
# test same patch size for all dimensions
lena = downsampled_lena
i_h, i_w = lena.shape
p = 8
expected_n_patches = ((i_h - p + 1), (i_w - p + 1))
patches = extract_patches(lena, patch_shape=p)
assert_true(patches.shape == (expected_n_patches[0], expected_n_patches[1],
p, p))
def test_width_patch():
# width and height of the patch should be less than the image
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert_raises(ValueError, extract_patches_2d, x, (4, 1))
assert_raises(ValueError, extract_patches_2d, x, (1, 4))
| bsd-3-clause |
adammenges/statsmodels | docs/sphinxext/numpy_ext/docscrape_sphinx.py | 62 | 7703 | import re, inspect, textwrap, pydoc
import sphinx
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param,param_type,desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc,8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
out += ['.. autosummary::', ' :toctree:', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default','')]
for section, references in idx.iteritems():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex','']
else:
out += ['.. latexonly::','']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Raises'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Attributes', 'Methods'):
out += self._str_member_list(param_list)
out = self._str_indent(out,indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
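# Hypothetical usage sketch (not part of the original module): render a
# numpydoc-style docstring to Sphinx-flavoured reST. numpy.ndarray.mean is an
# assumed example target; any object with a numpydoc docstring would do.
def _demo_render_docstring():
    import numpy as np
    doc = get_doc_object(np.ndarray.mean)
    return str(doc)  # reST text with rubric / field-list markup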
| bsd-3-clause |
McIntyre-Lab/papers | fear_sem_sd_2015/scripts/rank_ggm_network_analysis_shortest_path.py | 1 | 2602 | #!/usr/bin/env python
import os
import logging
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import pickle
import itertools
def setLogger(fname,loglevel):
""" Function to handle error logging """
logging.basicConfig(filename=fname, filemode='w', level=loglevel, format='%(asctime)s - %(levelname)s - %(message)s')
def readData(fname):
""" Importing a large DOT file is slow. This function will read a pickle
file if available. If no pickle, then read DOT and create a pickle for next
time. """
pname = os.path.splitext(fname)[0] + ".gpickle"
try:
# If there is a pickle, unpickle it
logging.info("Unpickling file")
nxGraph = nx.Graph(nx.read_gpickle(pname))
except:
logging.info("No Pickled file, will import DOT")
try:
# No pickle, try the dot file
logging.info("Importing dot file")
nxGraph = nx.Graph(nx.read_dot(fname))
# Make pickle for next time
logging.info("Pickle graph for later use.")
nx.write_gpickle(nxGraph,pname)
except Exception:
logging.exception("Please provide a DOT formated file.")
return(nxGraph)
def writeHeader(handle):
""" Write a header on to the csv file """
handle.write("gene1,gene2,shortest_distance\n")
def writeOutput(gene1, gene2, dist, handle):
handle.write("{0},{1},{2}\n".format(gene1, gene2, dist))
if __name__ == "__main__":
dname = '/home/jfear/mclab/cegs_sem_sd_paper/analysis_output/rank_ggm/dsrp_ggm_isoforms_FDR2.dot'
oname = '/home/jfear/mclab/cegs_sem_sd_paper/analysis_output/rank_ggm/dsrp_ggm_isoforms_FDR2_shortest_path_table.csv'
lname = '/home/jfear/mclab/cegs_sem_sd_paper/analysis_output/rank_ggm/dsrp_ggm_isoforms_FDR2_shortest_path_table.log'
# Turn on Logging if option --log was given
setLogger(lname,logging.INFO)
# Import Dot File
mygraph = readData(dname)
# Create gene list by pulling all genes that don't start with 'CG'
logging.info("Creating gene list")
geneList = [x for x in mygraph.nodes_iter(data=False) if not x.startswith('CG')]
    # Iterate through all pairs of genes of interest and output the
    # shortest-path distance between them
logging.info("Finding neighbors and writing output")
with open(oname, 'w') as OUT:
writeHeader(OUT)
for permut in itertools.combinations(geneList,2):
dist = nx.shortest_path_length(mygraph, permut[0], permut[1])
writeOutput(permut[0],permut[1],dist,OUT)
logging.info("Script Complete")
| lgpl-3.0 |
jlegendary/scikit-learn | sklearn/decomposition/base.py | 313 | 5647 | """Principal Component Analysis Base Classes"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
# Kyle Kastner <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted
from ..externals import six
from abc import ABCMeta, abstractmethod
class _BasePCA(six.with_metaclass(ABCMeta, BaseEstimator, TransformerMixin)):
"""Base class for PCA methods.
Warning: This class should not be used directly.
Use derived classes instead.
"""
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances, and sigma2 contains the
noise variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
@abstractmethod
def fit(X, y=None):
"""Placeholder for fit. Subclasses should implement this method!
Fit the model with X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
def transform(self, X, y=None):
"""Apply dimensionality reduction to X.
X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import IncrementalPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> ipca = IncrementalPCA(n_components=2, batch_size=3)
>>> ipca.fit(X)
IncrementalPCA(batch_size=3, copy=True, n_components=2, whiten=False)
>>> ipca.transform(X) # doctest: +SKIP
"""
check_is_fitted(self, ['mean_', 'components_'], all_or_any=all)
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
In other words, return an input X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform will compute the
exact inverse operation, which includes reversing whitening.
"""
if self.whiten:
return fast_dot(X, np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
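# Illustrative consistency check (not part of scikit-learn itself): for a
# fitted PCA estimator, which derives from _BasePCA, get_precision() should
# match the inverse of get_covariance() up to numerical error.
def _demo_covariance_precision_consistency():
    from sklearn.decomposition import PCA

    rng = np.random.RandomState(0)
    X = rng.randn(100, 5)
    pca = PCA(n_components=3).fit(X)
    cov = pca.get_covariance()
    prec = pca.get_precision()
    return np.allclose(np.dot(cov, prec), np.eye(5), atol=1e-6)  # -> True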
| bsd-3-clause |
joshloyal/scikit-learn | sklearn/datasets/tests/test_mldata.py | 384 | 5221 | """Test functionality of mldata fetching utilities."""
import os
import shutil
import tempfile
import scipy as sp
from sklearn import datasets
from sklearn.datasets import mldata_filename, fetch_mldata
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import mock_mldata_urlopen
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import with_setup
from sklearn.utils.testing import assert_array_equal
tmpdir = None
def setup_tmpdata():
# create temporary dir
global tmpdir
tmpdir = tempfile.mkdtemp()
os.makedirs(os.path.join(tmpdir, 'mldata'))
def teardown_tmpdata():
# remove temporary dir
if tmpdir is not None:
shutil.rmtree(tmpdir)
def test_mldata_filename():
cases = [('datasets-UCI iris', 'datasets-uci-iris'),
('news20.binary', 'news20binary'),
('book-crossing-ratings-1.0', 'book-crossing-ratings-10'),
('Nile Water Level', 'nile-water-level'),
('MNIST (original)', 'mnist-original')]
for name, desired in cases:
assert_equal(mldata_filename(name), desired)
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_download():
"""Test that fetch_mldata is able to download and cache a data set."""
_urlopen_ref = datasets.mldata.urlopen
datasets.mldata.urlopen = mock_mldata_urlopen({
'mock': {
'label': sp.ones((150,)),
'data': sp.ones((150, 4)),
},
})
try:
mock = fetch_mldata('mock', data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data"]:
assert_in(n, mock)
assert_equal(mock.target.shape, (150,))
assert_equal(mock.data.shape, (150, 4))
assert_raises(datasets.mldata.HTTPError,
fetch_mldata, 'not_existing_name')
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_one_column():
_urlopen_ref = datasets.mldata.urlopen
try:
dataname = 'onecol'
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
datasets.mldata.urlopen = mock_mldata_urlopen({dataname: {'x': x}})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "data"]:
assert_in(n, dset)
assert_not_in("target", dset)
assert_equal(dset.data.shape, (2, 3))
assert_array_equal(dset.data, x)
# transposing the data array
dset = fetch_mldata(dataname, transpose_data=False, data_home=tmpdir)
assert_equal(dset.data.shape, (3, 2))
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_multiple_column():
_urlopen_ref = datasets.mldata.urlopen
try:
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
y = sp.array([1, -1])
z = sp.arange(12).reshape(4, 3)
# by default
dataname = 'threecol-default'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: (
{
'label': y,
'data': x,
'z': z,
},
['z', 'data', 'label'],
),
})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by order
dataname = 'threecol-order'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['y', 'x', 'z']), })
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by number
dataname = 'threecol-number'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['z', 'x', 'y']),
})
dset = fetch_mldata(dataname, target_name=2, data_name=0,
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
assert_array_equal(dset.data, z)
assert_array_equal(dset.target, y)
# by name
dset = fetch_mldata(dataname, target_name='y', data_name='z',
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
finally:
datasets.mldata.urlopen = _urlopen_ref
| bsd-3-clause |
Edu-Glez/Bank_sentiment_analysis | env/lib/python3.6/site-packages/pandas/tests/series/test_misc_api.py | 7 | 11789 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
import numpy as np
import pandas as pd
from pandas import Index, Series, DataFrame, date_range
from pandas.tseries.index import Timestamp
from pandas.compat import range
from pandas import compat
import pandas.formats.printing as printing
from pandas.util.testing import (assert_series_equal,
ensure_clean)
import pandas.util.testing as tm
from .common import TestData
class SharedWithSparse(object):
def test_scalarop_preserve_name(self):
result = self.ts * 2
self.assertEqual(result.name, self.ts.name)
def test_copy_name(self):
result = self.ts.copy()
self.assertEqual(result.name, self.ts.name)
def test_copy_index_name_checking(self):
# don't want to be able to modify the index stored elsewhere after
# making a copy
self.ts.index.name = None
self.assertIsNone(self.ts.index.name)
self.assertIs(self.ts, self.ts)
cp = self.ts.copy()
cp.index.name = 'foo'
printing.pprint_thing(self.ts.index.name)
self.assertIsNone(self.ts.index.name)
def test_append_preserve_name(self):
result = self.ts[:5].append(self.ts[5:])
self.assertEqual(result.name, self.ts.name)
def test_binop_maybe_preserve_name(self):
# names match, preserve
result = self.ts * self.ts
self.assertEqual(result.name, self.ts.name)
result = self.ts.mul(self.ts)
self.assertEqual(result.name, self.ts.name)
result = self.ts * self.ts[:-2]
self.assertEqual(result.name, self.ts.name)
# names don't match, don't preserve
cp = self.ts.copy()
cp.name = 'something else'
result = self.ts + cp
self.assertIsNone(result.name)
result = self.ts.add(cp)
self.assertIsNone(result.name)
ops = ['add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow']
ops = ops + ['r' + op for op in ops]
for op in ops:
# names match, preserve
s = self.ts.copy()
result = getattr(s, op)(s)
self.assertEqual(result.name, self.ts.name)
# names don't match, don't preserve
cp = self.ts.copy()
cp.name = 'changed'
result = getattr(s, op)(cp)
self.assertIsNone(result.name)
def test_combine_first_name(self):
result = self.ts.combine_first(self.ts[:5])
self.assertEqual(result.name, self.ts.name)
def test_getitem_preserve_name(self):
result = self.ts[self.ts > 0]
self.assertEqual(result.name, self.ts.name)
result = self.ts[[0, 2, 4]]
self.assertEqual(result.name, self.ts.name)
result = self.ts[5:10]
self.assertEqual(result.name, self.ts.name)
def test_pickle(self):
unp_series = self._pickle_roundtrip(self.series)
unp_ts = self._pickle_roundtrip(self.ts)
assert_series_equal(unp_series, self.series)
assert_series_equal(unp_ts, self.ts)
def _pickle_roundtrip(self, obj):
with ensure_clean() as path:
obj.to_pickle(path)
unpickled = pd.read_pickle(path)
return unpickled
def test_argsort_preserve_name(self):
result = self.ts.argsort()
self.assertEqual(result.name, self.ts.name)
def test_sort_index_name(self):
result = self.ts.sort_index(ascending=False)
self.assertEqual(result.name, self.ts.name)
def test_to_sparse_pass_name(self):
result = self.ts.to_sparse()
self.assertEqual(result.name, self.ts.name)
class TestSeriesMisc(TestData, SharedWithSparse, tm.TestCase):
_multiprocess_can_split_ = True
def test_tab_completion(self):
# GH 9910
s = Series(list('abcd'))
# Series of str values should have .str but not .dt/.cat in __dir__
self.assertTrue('str' in dir(s))
self.assertTrue('dt' not in dir(s))
self.assertTrue('cat' not in dir(s))
        # similarly for .dt
s = Series(date_range('1/1/2015', periods=5))
self.assertTrue('dt' in dir(s))
self.assertTrue('str' not in dir(s))
self.assertTrue('cat' not in dir(s))
        # similarly for .cat, but with the twist that str and dt should be
        # there if the categories are of that type; first cat and str
s = Series(list('abbcd'), dtype="category")
self.assertTrue('cat' in dir(s))
self.assertTrue('str' in dir(s)) # as it is a string categorical
self.assertTrue('dt' not in dir(s))
# similar to cat and str
s = Series(date_range('1/1/2015', periods=5)).astype("category")
self.assertTrue('cat' in dir(s))
self.assertTrue('str' not in dir(s))
self.assertTrue('dt' in dir(s)) # as it is a datetime categorical
def test_not_hashable(self):
s_empty = Series()
s = Series([1])
self.assertRaises(TypeError, hash, s_empty)
self.assertRaises(TypeError, hash, s)
def test_contains(self):
tm.assert_contains_all(self.ts.index, self.ts)
def test_iter(self):
for i, val in enumerate(self.series):
self.assertEqual(val, self.series[i])
for i, val in enumerate(self.ts):
self.assertEqual(val, self.ts[i])
def test_iter_box(self):
vals = [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]
s = pd.Series(vals)
self.assertEqual(s.dtype, 'datetime64[ns]')
for res, exp in zip(s, vals):
self.assertIsInstance(res, pd.Timestamp)
self.assertEqual(res, exp)
self.assertIsNone(res.tz)
vals = [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern')]
s = pd.Series(vals)
self.assertEqual(s.dtype, 'datetime64[ns, US/Eastern]')
for res, exp in zip(s, vals):
self.assertIsInstance(res, pd.Timestamp)
self.assertEqual(res, exp)
self.assertEqual(res.tz, exp.tz)
# timedelta
vals = [pd.Timedelta('1 days'), pd.Timedelta('2 days')]
s = pd.Series(vals)
self.assertEqual(s.dtype, 'timedelta64[ns]')
for res, exp in zip(s, vals):
self.assertIsInstance(res, pd.Timedelta)
self.assertEqual(res, exp)
# period (object dtype, not boxed)
vals = [pd.Period('2011-01-01', freq='M'),
pd.Period('2011-01-02', freq='M')]
s = pd.Series(vals)
self.assertEqual(s.dtype, 'object')
for res, exp in zip(s, vals):
self.assertIsInstance(res, pd.Period)
self.assertEqual(res, exp)
self.assertEqual(res.freq, 'M')
def test_keys(self):
# HACK: By doing this in two stages, we avoid 2to3 wrapping the call
# to .keys() in a list()
getkeys = self.ts.keys
self.assertIs(getkeys(), self.ts.index)
def test_values(self):
self.assert_almost_equal(self.ts.values, self.ts, check_dtype=False)
def test_iteritems(self):
for idx, val in compat.iteritems(self.series):
self.assertEqual(val, self.series[idx])
for idx, val in compat.iteritems(self.ts):
self.assertEqual(val, self.ts[idx])
        # assert is lazy (generators don't define reverse, lists do)
self.assertFalse(hasattr(self.series.iteritems(), 'reverse'))
def test_raise_on_info(self):
s = Series(np.random.randn(10))
with tm.assertRaises(AttributeError):
s.info()
def test_copy(self):
for deep in [None, False, True]:
s = Series(np.arange(10), dtype='float64')
# default deep is True
if deep is None:
s2 = s.copy()
else:
s2 = s.copy(deep=deep)
s2[::2] = np.NaN
if deep is None or deep is True:
# Did not modify original Series
self.assertTrue(np.isnan(s2[0]))
self.assertFalse(np.isnan(s[0]))
else:
# we DID modify the original Series
self.assertTrue(np.isnan(s2[0]))
self.assertTrue(np.isnan(s[0]))
# GH 11794
# copy of tz-aware
expected = Series([Timestamp('2012/01/01', tz='UTC')])
expected2 = Series([Timestamp('1999/01/01', tz='UTC')])
for deep in [None, False, True]:
s = Series([Timestamp('2012/01/01', tz='UTC')])
if deep is None:
s2 = s.copy()
else:
s2 = s.copy(deep=deep)
s2[0] = pd.Timestamp('1999/01/01', tz='UTC')
# default deep is True
if deep is None or deep is True:
# Did not modify original Series
assert_series_equal(s2, expected2)
assert_series_equal(s, expected)
else:
# we DID modify the original Series
assert_series_equal(s2, expected2)
assert_series_equal(s, expected2)
def test_axis_alias(self):
s = Series([1, 2, np.nan])
assert_series_equal(s.dropna(axis='rows'), s.dropna(axis='index'))
self.assertEqual(s.dropna().sum('rows'), 3)
self.assertEqual(s._get_axis_number('rows'), 0)
self.assertEqual(s._get_axis_name('rows'), 'index')
def test_numpy_unique(self):
# it works!
np.unique(self.ts)
def test_ndarray_compat(self):
# test numpy compat with Series as sub-class of NDFrame
tsdf = DataFrame(np.random.randn(1000, 3), columns=['A', 'B', 'C'],
index=date_range('1/1/2000', periods=1000))
def f(x):
return x[x.argmax()]
result = tsdf.apply(f)
expected = tsdf.max()
assert_series_equal(result, expected)
# .item()
s = Series([1])
result = s.item()
self.assertEqual(result, 1)
self.assertEqual(s.item(), s.iloc[0])
# using an ndarray like function
s = Series(np.random.randn(10))
result = np.ones_like(s)
expected = Series(1, index=range(10), dtype='float64')
# assert_series_equal(result,expected)
# ravel
s = Series(np.random.randn(10))
tm.assert_almost_equal(s.ravel(order='F'), s.values.ravel(order='F'))
# compress
# GH 6658
s = Series([0, 1., -1], index=list('abc'))
result = np.compress(s > 0, s)
assert_series_equal(result, Series([1.], index=['b']))
result = np.compress(s < -1, s)
        # result is empty, with the same Index(dtype=object) as the original
exp = Series([], dtype='float64', index=Index([], dtype='object'))
assert_series_equal(result, exp)
s = Series([0, 1., -1], index=[.1, .2, .3])
result = np.compress(s > 0, s)
assert_series_equal(result, Series([1.], index=[.2]))
result = np.compress(s < -1, s)
        # result is empty, with the same Float64Index as the original
exp = Series([], dtype='float64', index=Index([], dtype='float64'))
assert_series_equal(result, exp)
def test_str_attribute(self):
# GH9068
methods = ['strip', 'rstrip', 'lstrip']
s = Series([' jack', 'jill ', ' jesse ', 'frank'])
for method in methods:
expected = Series([getattr(str, method)(x) for x in s.values])
assert_series_equal(getattr(Series.str, method)(s.str), expected)
# str accessor only valid with string values
s = Series(range(5))
with self.assertRaisesRegexp(AttributeError, 'only use .str accessor'):
s.str.repeat(2)
| apache-2.0 |
SimonSuster/semafor | src/main/python/semafor/framenet/pmi.py | 5 | 1525 | from itertools import chain, combinations, product
import codecs
import json
from math import log
import networkx as nx
import matplotlib.pyplot as plt
from nltk import FreqDist
from semafor.framenet.frames import FrameHierarchy
THRESHOLD = 4
def draw_graph(graph):
pos = nx.graphviz_layout(graph, prog='dot')
nx.draw(graph, pos, node_color='#A0CBE2', edge_color='#BB0000', width=2, edge_cmap=plt.cm.Blues,
with_labels=True)
def pmi(a, b):
return log(pairs[a, b]) - log(pairs.N()) - log(unigrams[a]) - log(unigrams[b]) + 2 * log(
unigrams.N())
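# pmi(a, b) computes pointwise mutual information from the frequency
# distributions built below: log( P(a, b) / (P(a) * P(b)) ), where the joint
# probability is estimated as pairs[a, b] / pairs.N() and each marginal as
# unigrams[x] / unigrams.N(); expanding the logarithms gives the expression above.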
h = FrameHierarchy.load()
# training data contains a bad frame
valid_names = {f.name for f in h._frames.values()}
with codecs.open("../../../training/data/naacl2012/cv.train.sentences.json", encoding="utf8") as train_file:
train = [json.loads(line) for line in train_file]
unsorted_frames = ([(f['target']['spans'][0]['start'], f['target']['name'])
for f in s['frames']] for s in train)
frames = [[name for start, name in sorted(s) if name in valid_names]
for s in unsorted_frames]
del unsorted_frames
unigrams = FreqDist(chain(*frames))
pairs = FreqDist(chain(*[[tuple(sorted(b)) for b in combinations(f, 2)] for f in frames]))
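# 'pairs' counts unordered co-occurrences: every 2-frame combination within a
# sentence is sorted so that (a, b) and (b, a) collapse into the same key.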
pmis = FreqDist({
(a, b): pmi(a, b)
for a, b in pairs.keys()
if unigrams[a] >= THRESHOLD and unigrams[b] >= THRESHOLD
})
unigrams_with_ancestors = FreqDist(unigrams)
for u in unigrams:
for a in h.ancestors(h._frames[u]):
unigrams_with_ancestors.inc(a.name) | gpl-3.0 |
Enopoletus/enopoletus.github.io | mpdinteractiveboth.py | 1 | 1878 | import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
xl=pd.ExcelFile("mpd2018.xlsx")
dfr=xl.parse('rgdpnapc')
dfc=xl.parse('cgdppc')
dfp=xl.parse('pop')
dfc.columns=dfc.iloc[0]
dfr.columns=dfr.iloc[0]
dfp.columns=dfp.iloc[0]
dfc.drop(dfc.index[0], inplace=True)
dfr.drop(dfr.index[0], inplace=True)
dfp.drop(dfp.index[0], inplace=True)
dfc.year=pd.to_numeric(dfc.year)
dfr.year=pd.to_numeric(dfr.year)
dfp.year=pd.to_numeric(dfp.year)
dfr.set_index('year', inplace=True)
dfc.set_index('year', inplace=True)
dfp.set_index('year', inplace=True)
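# The first spreadsheet row held the country codes, so it was promoted to the
# column headers and dropped; 'year' was coerced to numeric and set as the
# index so each per-country series can be sliced by year, e.g. ratir.loc[startd:endd].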
fig, ax = plt.subplots()
yournames=input("Enter 3-letter country codes; separate by space: ")
yournames=yournames.split(' ')
logorlin=input("log or linear? ")
startd=int(input("Enter start year: "))
endd=int(input("Enter end year: "))
plt.xticks(rotation=90)
axes = plt.gca()
axes.set_xlim([startd, endd])
axes.set_yscale(logorlin)
start, end = ax.get_xlim()
plt.xticks(np.arange(start, end, 10))
vol1=[]
for a in yournames:
ratic=dfc[a]
ratir=dfr[a]
ratic=ratic.astype(np.double)
ratir=ratir.astype(np.double)
maskc=np.isfinite(ratic)
maskr=np.isfinite(ratir)
ratic=ratic[maskc]
ratir=ratir[maskr]
max1=max(ratir.loc[startd:endd])
max2=max(ratic.loc[startd:endd])
maxx=max(max1,max2)
vol1.append(maxx)
min1=min(ratir.loc[startd:endd])
min2=min(ratic.loc[startd:endd])
minx=min(min1,min2)
vol1.append(minx)
qz=yournames.index(a)/(len(yournames))
qz=round(qz, 5)
plt.plot(ratic, color=[0, qz, 1], lw='1', label=a)
plt.plot(ratir, color=[1, qz, 0], lw='1')
volmax=max(vol1)
volmin=min(vol1)
axes.set_ylim([volmin,volmax])
plt.legend(loc='best', fontsize='xx-small', labelspacing=0.2)
plt.grid(which='minor',axis='y')
plt.grid(which='major',axis='y')
plt.grid(which='major',axis='x')
plt.ylabel("GDP per capita (PPP), 2011 USD")
plt.show()
| mit |
frankiecrouch/Bayesian-Decision-Trees | Scripts/Serial-BCART/SerialBCART.py | 1 | 6179 | from BayesianTree import Node, Tree, acceptance
import numpy as np
import pandas as pd
import copy
import time
start_time = time.time()
#****************************************************************************************************
# read in the data
#****************************************************************************************************
train_data = pd.read_csv("/Users/Frankie/Documents/Dissertation/Data/pancreatic/pancreatic_1_train.csv")
y_train = train_data['label'].as_matrix()
X_train = train_data.drop('label', axis=1).as_matrix()
test_data = pd.read_csv("/Users/Frankie/Documents/Dissertation/Data/pancreatic/pancreatic_1_test.csv")
y_test = test_data['label'].as_matrix()
X_test = test_data.drop('label', axis=1).as_matrix()
end_data_load = time.time()
#****************************************************************************************************
# set parameters: no. of iterations, no. of repeats, alpha and beta
#****************************************************************************************************
iterations = 5000
repeat = 500
alpha = 0.95
beta = 1.5
#****************************************************************************************************
# create arrays to store results:
# - AUC on the training data
# - AUC on the testing data
# - runtime
#****************************************************************************************************
results_auc_train = np.zeros((repeat , iterations), dtype = np.object)
results_auc_test = np.zeros((repeat , iterations), dtype = np.object)
results_runtime = np.zeros((repeat , iterations), dtype = np.float)
#****************************************************************************************************
# create the starting tree with just a root node
#****************************************************************************************************
starting_indices = np.arange(X_train.shape[0])
rootNode = Node(data_indices = starting_indices)
tree = Tree(root_node=rootNode, alpha = alpha, beta = beta, X=X_train, Y=y_train)
tree.calc_likelihood()
#****************************************************************************************************
# start the iterations
#****************************************************************************************************
for j in range(0,repeat):
# create a copy of the root node tree add the beginning of each MCMC chain
current_tree = copy.deepcopy(tree)
for i in range (0,iterations):
        # print progress as the script runs (comment out to silence)
print "repeat " +str(j) + " iteration " +str(i)
# start timer
start = time.time()
# generate the candidate tree
candidate_tree = copy.deepcopy(current_tree)
        # propose one of GROW, PRUNE, CHANGE or SWAP
random_proposal = np.random.randint(4)
if random_proposal == 0:
candidate_tree.grow()
elif random_proposal == 1:
candidate_tree.prune()
elif random_proposal == 2:
candidate_tree.change()
elif random_proposal == 3:
candidate_tree.swap()
# update the likelihood of the candidate tree
candidate_tree.calc_likelihood()
# calc acceptance
acpt = acceptance(current_tree, candidate_tree)
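        # Metropolis-Hastings style accept/reject (sketch of the logic, on the
        # assumption that acceptance() returns the acceptance ratio): the
        # candidate replaces the current tree when a U(0,1) draw falls below
        # 'acpt', so ratios >= 1 are always accepted.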
# generate random number
random_acceptance = np.random.uniform(0,1)
# update tree if accepting
if random_acceptance < acpt:
current_tree = copy.deepcopy(candidate_tree)
# uncomment to print the tree
# filename = "tree_" + str(i)
# current_tree.printTree(filename = filename)
#end timer
stop = time.time()
# record the results
auc_train = current_tree.train_auc()
auc_test = current_tree.test_auc(X_test, y_test)
results_auc_train[j][i] = auc_train
results_auc_test[j][i] = auc_test
results_runtime[j][i] = (stop-start)
end_total = time.time()
#****************************************************************************************************
# find the best tree from each chain by chosing the tree with the max AUC
#****************************************************************************************************
arg_max_auc = np.argmax(results_auc_train, axis = 1)
all_results = []
for i in range(0,repeat):
all_results.append(results_auc_test[i][arg_max_auc[i]])
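# For each chain, report the test AUC at the iteration where the training AUC
# peaked (arg_max_auc indexes into the per-iteration test results).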
# calculate the average AUC and stdv
mean_result = np.average(np.asarray(all_results))
std = np.std(np.asarray(all_results))
#****************************************************************************************************
# export results
#****************************************************************************************************
# raw data
np.savetxt("auc_test.txt", results_auc_test, delimiter=',')
np.savetxt("auc_train.txt", results_auc_train, delimiter=',')
np.savetxt("runtime.txt", results_runtime, delimiter=',')
# summary of the runtime results
total_iterations_time = (np.sum(results_runtime))/60
min_chain = (np.min(np.sum(results_runtime, axis=1)))/60
max_chain = (np.max(np.sum(results_runtime, axis=1)))/60
ave_chain = (np.mean(np.sum(results_runtime, axis=1)))/60
total_runtime = (end_total - start_time)/60
load_data_time = (end_data_load - start_time)/60
with open('time_results.txt', 'w' ) as f:
f.write("Total runtime was %f minutes" % total_runtime)
f.write(", which is %f hours \n" % (total_runtime/60))
f.write("The data load took %f minutes \n" % load_data_time)
f.write("The total time spent doing the MCMC chains was %f minutes \n" % total_iterations_time)
f.write("The min, max and average MCMC chain of length %d was: %f, %f, %f minutes" % (iterations, min_chain, max_chain,ave_chain))
# summary of the prediction results
with open(('results_summary.txt'), 'w') as f:
f.write('beta, AUC, stdv \n')
f.write(str(beta) + "," +
str(mean_result) + ","+
str(std))
| mit |
lazywei/scikit-learn | sklearn/tests/test_multiclass.py | 72 | 24581 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_greater
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.multiclass import fit_ovr
from sklearn.multiclass import fit_ovo
from sklearn.multiclass import fit_ecoc
from sklearn.multiclass import predict_ovr
from sklearn.multiclass import predict_ovo
from sklearn.multiclass import predict_ecoc
from sklearn.multiclass import predict_proba_ovr
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.preprocessing import LabelBinarizer
from sklearn.svm import LinearSVC, SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
Perceptron, LogisticRegression)
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn import svm
from sklearn import datasets
from sklearn.externals.six.moves import zip
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
def test_ovr_exceptions():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovr.predict, [])
with ignore_warnings():
assert_raises(ValueError, predict_ovr, [LinearSVC(), MultinomialNB()],
LabelBinarizer(), [])
# Fail on multioutput data
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1, 2], [3, 1]]))
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1.5, 2.4], [3.1, 0.8]]))
def test_ovr_fit_predict():
# A classifier which implements decision_function.
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
clf = LinearSVC(random_state=0)
pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2))
# A classifier which implements predict_proba.
ovr = OneVsRestClassifier(MultinomialNB())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_ovo_regressor():
# test that ovr and ovo work on regressors which don't have a decision_function
ovr = OneVsRestClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
ovr = OneVsOneClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes * (n_classes - 1) / 2)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
def test_ovr_fit_predict_sparse():
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
base_clf = MultinomialNB(alpha=1)
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train))
Y_pred_sprs = clf_sprs.predict(X_test)
assert_true(clf.multilabel_)
assert_true(sp.issparse(Y_pred_sprs))
assert_array_equal(Y_pred_sprs.toarray(), Y_pred)
# Test predict_proba
Y_proba = clf_sprs.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred_sprs.toarray())
# Test decision_function
clf_sprs = OneVsRestClassifier(svm.SVC()).fit(X_train, sparse(Y_train))
dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int)
assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray())
def test_ovr_always_present():
# Test that ovr works with classes that are always present or absent.
    # Note: this tests the case where _ConstantPredictor is utilised
X = np.ones((10, 2))
X[:5, :] = 0
# Build an indicator matrix where two features are always on.
# As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)]
y = np.zeros((10, 3))
y[5:, 0] = 1
y[:, 1] = 1
y[:, 2] = 1
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict(X)
assert_array_equal(np.array(y_pred), np.array(y))
y_pred = ovr.decision_function(X)
assert_equal(np.unique(y_pred[:, -2:]), 1)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.ones(X.shape[0]))
# y has a constantly absent label
y = np.zeros((10, 2))
y[5:, 0] = 1 # variable label
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0]))
def test_ovr_multiclass():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "ham", "eggs", "ham"]
Y = np.array([[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet()):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 0, 4]])[0]
assert_array_equal(y_pred, [0, 0, 1])
def test_ovr_binary():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "spam", "eggs", "spam"]
Y = np.array([[0, 1, 1, 0, 1]]).T
classes = set("eggs spam".split())
def conduct_test(base_clf, test_predict_proba=False):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
if test_predict_proba:
X_test = np.array([[0, 0, 4]])
probabilities = clf.predict_proba(X_test)
assert_equal(2, len(probabilities[0]))
assert_equal(clf.classes_[np.argmax(probabilities, axis=1)],
clf.predict(X_test))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[3, 0, 0]])[0]
assert_equal(y_pred, 1)
for base_clf in (LinearSVC(random_state=0), LinearRegression(),
Ridge(), ElasticNet()):
conduct_test(base_clf)
for base_clf in (MultinomialNB(), SVC(probability=True),
LogisticRegression()):
conduct_test(base_clf, test_predict_proba=True)
@ignore_warnings
def test_ovr_multilabel():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
y = [["spam", "eggs"], ["spam"], ["ham", "eggs", "spam"],
["ham", "eggs"], ["ham"]]
# y = [[1, 2], [1], [0, 1, 2], [0, 2], [0]]
Y = np.array([[0, 1, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet(), Lasso(alpha=0.5)):
# test input as lists of tuples
clf = assert_warns(DeprecationWarning,
OneVsRestClassifier(base_clf).fit,
X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_equal(set(y_pred), set(["spam", "eggs"]))
assert_true(clf.multilabel_)
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_array_equal(y_pred, [0, 1, 1])
assert_true(clf.multilabel_)
def test_ovr_fit_predict_svc():
ovr = OneVsRestClassifier(svm.SVC())
ovr.fit(iris.data, iris.target)
assert_equal(len(ovr.estimators_), 3)
assert_greater(ovr.score(iris.data, iris.target), .9)
def test_ovr_multilabel_dataset():
base_clf = MultinomialNB(alpha=1)
for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=au,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
assert_true(clf.multilabel_)
assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"),
prec,
decimal=2)
assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"),
recall,
decimal=2)
def test_ovr_multilabel_predict_proba():
base_clf = MultinomialNB(alpha=1)
for au in (False, True):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=au,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
# Estimator with predict_proba disabled, depending on parameters.
decision_only = OneVsRestClassifier(svm.SVC(probability=False))
decision_only.fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred)
def test_ovr_single_label_predict_proba():
base_clf = MultinomialNB(alpha=1)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
assert_almost_equal(Y_proba.sum(axis=1), 1.0)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = np.array([l.argmax() for l in Y_proba])
assert_false((pred - Y_pred).any())
def test_ovr_multilabel_decision_function():
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal((clf.decision_function(X_test) > 0).astype(int),
clf.predict(X_test))
def test_ovr_single_label_decision_function():
X, Y = datasets.make_classification(n_samples=100,
n_features=20,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal(clf.decision_function(X_test).ravel() > 0,
clf.predict(X_test))
def test_ovr_gridsearch():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovr, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovr_pipeline():
# Test with pipeline of length one
# This test is needed because the multiclass estimators may fail to detect
# the presence of predict_proba or decision_function.
clf = Pipeline([("tree", DecisionTreeClassifier())])
ovr_pipe = OneVsRestClassifier(clf)
ovr_pipe.fit(iris.data, iris.target)
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))
def test_ovr_coef_():
for base_classifier in [SVC(kernel='linear', random_state=0), LinearSVC(random_state=0)]:
# SVC has sparse coef with sparse input data
ovr = OneVsRestClassifier(base_classifier)
for X in [iris.data, sp.csr_matrix(iris.data)]:
# test with dense and sparse coef
ovr.fit(X, iris.target)
shape = ovr.coef_.shape
assert_equal(shape[0], n_classes)
assert_equal(shape[1], iris.data.shape[1])
# don't densify sparse coefficients
assert_equal(sp.issparse(ovr.estimators_[0].coef_), sp.issparse(ovr.coef_))
def test_ovr_coef_exceptions():
# Not fitted exception!
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
# lambda is needed because we don't want coef_ to be evaluated right away
assert_raises(ValueError, lambda x: ovr.coef_, None)
# Doesn't have coef_ exception!
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_raises(AttributeError, lambda x: ovr.coef_, None)
def test_ovo_exceptions():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovo.predict, [])
def test_ovo_fit_on_list():
# Test that OneVsOne fitting works with a list of targets and yields the
# same output as predict from an array
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
prediction_from_array = ovo.fit(iris.data, iris.target).predict(iris.data)
prediction_from_list = ovo.fit(iris.data,
list(iris.target)).predict(iris.data)
assert_array_equal(prediction_from_array, prediction_from_list)
def test_ovo_fit_predict():
# A classifier which implements decision_function.
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
# A classifier which implements predict_proba.
ovo = OneVsOneClassifier(MultinomialNB())
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
def test_ovo_decision_function():
n_samples = iris.data.shape[0]
ovo_clf = OneVsOneClassifier(LinearSVC(random_state=0))
ovo_clf.fit(iris.data, iris.target)
decisions = ovo_clf.decision_function(iris.data)
assert_equal(decisions.shape, (n_samples, n_classes))
assert_array_equal(decisions.argmax(axis=1), ovo_clf.predict(iris.data))
# Compute the votes
votes = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
pred = ovo_clf.estimators_[k].predict(iris.data)
votes[pred == 0, i] += 1
votes[pred == 1, j] += 1
k += 1
# Extract votes and verify
assert_array_equal(votes, np.round(decisions))
for class_idx in range(n_classes):
        # For each sample and each class, there are only 3 possible vote
        # levels because there are only 3 distinct class pairs and thus 3
        # distinct binary classifiers.
# Therefore, sorting predictions based on votes would yield
# mostly tied predictions:
assert_true(set(votes[:, class_idx]).issubset(set([0., 1., 2.])))
# The OVO decision function on the other hand is able to resolve
# most of the ties on this data as it combines both the vote counts
# and the aggregated confidence levels of the binary classifiers
# to compute the aggregate decision function. The iris dataset
# has 150 samples with a couple of duplicates. The OvO decisions
# can resolve most of the ties:
assert_greater(len(np.unique(decisions[:, class_idx])), 146)
def test_ovo_gridsearch():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovo, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovo_ties():
# Test that ties are broken using the decision function,
# not defaulting to the smallest label
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y = np.array([2, 0, 1, 2])
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
ovo_decision = multi_clf.decision_function(X)
# Classifiers are in order 0-1, 0-2, 1-2
# Use decision_function to compute the votes and the normalized
# sum_of_confidences, which is used to disambiguate when there is a tie in
# votes.
votes = np.round(ovo_decision)
normalized_confidences = ovo_decision - votes
# For the first point, there is one vote per class
assert_array_equal(votes[0, :], 1)
# For the rest, there is no tie and the prediction is the argmax
assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
# For the tie, the prediction is the class with the highest score
assert_equal(ovo_prediction[0], normalized_confidences[0].argmax())
def test_ovo_ties2():
# test that ties can not only be won by the first two labels
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y_ref = np.array([2, 0, 1, 2])
# cycle through labels so that each label wins once
for i in range(3):
y = (y_ref + i) % 3
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
assert_equal(ovo_prediction[0], i % 3)
def test_ovo_string_y():
# Test that the OvO doesn't mess up the encoding of string labels
X = np.eye(4)
y = np.array(['a', 'b', 'c', 'd'])
ovo = OneVsOneClassifier(LinearSVC())
ovo.fit(X, y)
assert_array_equal(y, ovo.predict(X))
def test_ecoc_exceptions():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ecoc.predict, [])
def test_ecoc_fit_predict():
# A classifier which implements decision_function.
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
# A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
def test_ecoc_gridsearch():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
random_state=0)
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ecoc, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
@ignore_warnings
def test_deprecated():
base_estimator = DecisionTreeClassifier(random_state=0)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
all_metas = [
(OneVsRestClassifier, fit_ovr, predict_ovr, predict_proba_ovr),
(OneVsOneClassifier, fit_ovo, predict_ovo, None),
(OutputCodeClassifier, fit_ecoc, predict_ecoc, None),
]
for MetaEst, fit_func, predict_func, proba_func in all_metas:
try:
meta_est = MetaEst(base_estimator,
random_state=0).fit(X_train, Y_train)
fitted_return = fit_func(base_estimator, X_train, Y_train,
random_state=0)
except TypeError:
meta_est = MetaEst(base_estimator).fit(X_train, Y_train)
fitted_return = fit_func(base_estimator, X_train, Y_train)
if len(fitted_return) == 2:
estimators_, classes_or_lb = fitted_return
assert_almost_equal(predict_func(estimators_, classes_or_lb,
X_test),
meta_est.predict(X_test))
if proba_func is not None:
assert_almost_equal(proba_func(estimators_, X_test,
is_multilabel=False),
meta_est.predict_proba(X_test))
else:
estimators_, classes_or_lb, codebook = fitted_return
assert_almost_equal(predict_func(estimators_, classes_or_lb,
codebook, X_test),
meta_est.predict(X_test))
| bsd-3-clause |
akionakamura/scikit-learn | sklearn/decomposition/tests/test_truncated_svd.py | 240 | 6055 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=3)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features+1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
tsvd = TruncatedSVD(n_components=52, random_state=42)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
# Assert the 1st component is equal
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that 20 components has higher explained variance than 10
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
| bsd-3-clause |
ceph/autotest | utils/external_packages.py | 2 | 28756 | #!/usr/bin/python
#
# Please keep this code python 2.4 compatible and stand alone.
import logging, os, shutil, sys, tempfile, time, urllib2
import subprocess, re
from autotest_lib.client.common_lib import utils
_READ_SIZE = 64*1024
_MAX_PACKAGE_SIZE = 100*1024*1024
class Error(Exception):
"""Local exception to be raised by code in this file."""
class FetchError(Error):
"""Failed to fetch a package from any of its listed URLs."""
def _checksum_file(full_path):
"""@returns The hex checksum of a file given its pathname."""
inputfile = open(full_path, 'rb')
try:
hex_sum = utils.hash('sha1', inputfile.read()).hexdigest()
finally:
inputfile.close()
return hex_sum
def system(commandline):
"""Same as os.system(commandline) but logs the command first."""
logging.info(commandline)
return os.system(commandline)
def find_top_of_autotest_tree():
"""@returns The full path to the top of the autotest directory tree."""
dirname = os.path.dirname(__file__)
autotest_dir = os.path.abspath(os.path.join(dirname, '..'))
return autotest_dir
class ExternalPackage(object):
"""
Defines an external package with URLs to fetch its sources from and
a build_and_install() method to unpack it, build it and install it
beneath our own autotest/site-packages directory.
Base Class. Subclass this to define packages.
Attributes:
@attribute urls - A tuple of URLs to try fetching the package from.
@attribute local_filename - A local filename to use when saving the
fetched package.
@attribute hex_sum - The hex digest (currently SHA1) of this package
to be used to verify its contents.
      @attribute module_name - The installed python module name to be used
              for a version check.  Defaults to the lower case class name with
the word Package stripped off.
@attribute version - The desired minimum package version.
      @attribute os_requirements - A dictionary mapping a file pathname on
              the OS distribution to a likely name of a package the user
needs to install on their system in order to get this file.
@attribute name - Read only, the printable name of the package.
@attribute subclasses - This class attribute holds a list of all defined
subclasses. It is constructed dynamically using the metaclass.
"""
subclasses = []
urls = ()
local_filename = None
hex_sum = None
module_name = None
version = None
os_requirements = None
class __metaclass__(type):
"""Any time a subclass is defined, add it to our list."""
def __init__(mcs, name, bases, dict):
if name != 'ExternalPackage':
mcs.subclasses.append(mcs)
def __init__(self):
self.verified_package = ''
if not self.module_name:
self.module_name = self.name.lower()
self.installed_version = ''
@property
def name(self):
"""Return the class name with any trailing 'Package' stripped off."""
class_name = self.__class__.__name__
if class_name.endswith('Package'):
return class_name[:-len('Package')]
return class_name
def is_needed(self, unused_install_dir):
"""@returns True if self.module_name needs to be built and installed."""
if not self.module_name or not self.version:
logging.warning('version and module_name required for '
'is_needed() check to work.')
return True
try:
module = __import__(self.module_name)
except ImportError, e:
logging.info("%s isn't present. Will install.", self.module_name)
return True
self.installed_version = self._get_installed_version_from_module(module)
logging.info('imported %s version %s.', self.module_name,
self.installed_version)
return self.version > self.installed_version
def _get_installed_version_from_module(self, module):
"""Ask our module its version string and return it or '' if unknown."""
try:
return module.__version__
except AttributeError:
logging.error('could not get version from %s', module)
return ''
def _build_and_install(self, install_dir):
"""Subclasses MUST provide their own implementation."""
raise NotImplementedError
def _build_and_install_current_dir(self, install_dir):
"""
Subclasses that use _build_and_install_from_package() MUST provide
their own implementation of this method.
"""
raise NotImplementedError
def build_and_install(self, install_dir):
"""
Builds and installs the package. It must have been fetched already.
@param install_dir - The package installation directory. If it does
not exist it will be created.
"""
if not self.verified_package:
raise Error('Must call fetch() first. - %s' % self.name)
self._check_os_requirements()
return self._build_and_install(install_dir)
def _check_os_requirements(self):
if not self.os_requirements:
return
failed = False
for file_name, package_name in self.os_requirements.iteritems():
if not os.path.exists(file_name):
failed = True
logging.error('File %s not found, %s needs it.',
file_name, self.name)
logging.error('Perhaps you need to install something similar '
'to the %s package for OS first.', package_name)
if failed:
raise Error('Missing OS requirements for %s. (see above)' %
self.name)
def _build_and_install_current_dir_setup_py(self, install_dir):
"""For use as a _build_and_install_current_dir implementation."""
egg_path = self._build_egg_using_setup_py(setup_py='setup.py')
if not egg_path:
return False
return self._install_from_egg(install_dir, egg_path)
def _build_and_install_current_dir_setupegg_py(self, install_dir):
"""For use as a _build_and_install_current_dir implementation."""
egg_path = self._build_egg_using_setup_py(setup_py='setupegg.py')
if not egg_path:
return False
return self._install_from_egg(install_dir, egg_path)
def _build_and_install_current_dir_noegg(self, install_dir):
if not self._build_using_setup_py():
return False
return self._install_using_setup_py_and_rsync(install_dir)
def _build_and_install_from_package(self, install_dir):
"""
This method may be used as a _build_and_install() implementation
for subclasses if they implement _build_and_install_current_dir().
Extracts the .tar.gz file, chdirs into the extracted directory
(which is assumed to match the tar filename) and calls
        _build_and_install_current_dir from there.
        Afterwards, the extracted .tar.gz directory is cleaned up regardless
        of whether the build succeeded.
@returns True on success, False otherwise.
@raises OSError If the expected extraction directory does not exist.
"""
self._extract_compressed_package()
if self.verified_package.endswith('.tar.gz'):
extension = '.tar.gz'
elif self.verified_package.endswith('.tar.bz2'):
extension = '.tar.bz2'
elif self.verified_package.endswith('.zip'):
extension = '.zip'
else:
raise Error('Unexpected package file extension on %s' %
self.verified_package)
os.chdir(os.path.dirname(self.verified_package))
os.chdir(self.local_filename[:-len(extension)])
extracted_dir = os.getcwd()
try:
return self._build_and_install_current_dir(install_dir)
finally:
os.chdir(os.path.join(extracted_dir, '..'))
shutil.rmtree(extracted_dir)
def _extract_compressed_package(self):
"""Extract the fetched compressed .tar or .zip within its directory."""
if not self.verified_package:
raise Error('Package must have been fetched first.')
os.chdir(os.path.dirname(self.verified_package))
if self.verified_package.endswith('gz'):
status = system("tar -xzf '%s'" % self.verified_package)
elif self.verified_package.endswith('bz2'):
status = system("tar -xjf '%s'" % self.verified_package)
elif self.verified_package.endswith('zip'):
status = system("unzip '%s'" % self.verified_package)
else:
raise Error('Unknown compression suffix on %s.' %
self.verified_package)
if status:
            raise Error('extraction failed with %s' % (status,))
def _build_using_setup_py(self, setup_py='setup.py'):
"""
Assuming the cwd is the extracted python package, execute a simple
python setup.py build.
@param setup_py - The name of the setup.py file to execute.
@returns True on success, False otherwise.
"""
if not os.path.exists(setup_py):
raise Error('%s does not exist in %s' % (setup_py, os.getcwd()))
status = system("'%s' %s build" % (sys.executable, setup_py))
if status:
logging.error('%s build failed.' % self.name)
return False
return True
def _build_egg_using_setup_py(self, setup_py='setup.py'):
"""
Assuming the cwd is the extracted python package, execute a simple
python setup.py bdist_egg.
@param setup_py - The name of the setup.py file to execute.
@returns The relative path to the resulting egg file or '' on failure.
"""
if not os.path.exists(setup_py):
raise Error('%s does not exist in %s' % (setup_py, os.getcwd()))
egg_subdir = 'dist'
if os.path.isdir(egg_subdir):
shutil.rmtree(egg_subdir)
status = system("'%s' %s bdist_egg" % (sys.executable, setup_py))
if status:
logging.error('bdist_egg of setuptools failed.')
return ''
# I've never seen a bdist_egg lay multiple .egg files.
for filename in os.listdir(egg_subdir):
if filename.endswith('.egg'):
return os.path.join(egg_subdir, filename)
def _install_from_egg(self, install_dir, egg_path):
"""
Install a module from an egg file by unzipping the necessary parts
into install_dir.
@param install_dir - The installation directory.
@param egg_path - The pathname of the egg file.
"""
status = system("unzip -q -o -d '%s' '%s'" % (install_dir, egg_path))
if status:
logging.error('unzip of %s failed', egg_path)
return False
egg_info = os.path.join(install_dir, 'EGG-INFO')
if os.path.isdir(egg_info):
shutil.rmtree(egg_info)
return True
def _get_temp_dir(self):
return tempfile.mkdtemp(dir='/var/tmp')
def _site_packages_path(self, temp_dir):
# This makes assumptions about what python setup.py install
# does when given a prefix. Is this always correct?
python_xy = 'python%s' % sys.version[:3]
return os.path.join(temp_dir, 'lib', python_xy, 'site-packages')
def _install_using_setup_py_and_rsync(self, install_dir,
setup_py='setup.py',
temp_dir=None):
"""
Assuming the cwd is the extracted python package, execute a simple:
python setup.py install --prefix=BLA
BLA will be a temporary directory that everything installed will
be picked out of and rsynced to the appropriate place under
install_dir afterwards.
Afterwards, it deconstructs the extra lib/pythonX.Y/site-packages/
directory tree that setuptools created and moves all installed
site-packages directly up into install_dir itself.
@param install_dir the directory for the install to happen under.
@param setup_py - The name of the setup.py file to execute.
@returns True on success, False otherwise.
"""
if not os.path.exists(setup_py):
raise Error('%s does not exist in %s' % (setup_py, os.getcwd()))
if temp_dir is None:
temp_dir = self._get_temp_dir()
try:
status = system("'%s' %s install --no-compile --prefix='%s'"
% (sys.executable, setup_py, temp_dir))
if status:
logging.error('%s install failed.' % self.name)
return False
if os.path.isdir(os.path.join(temp_dir, 'lib')):
# NOTE: This ignores anything outside of the lib/ dir that
# was installed.
temp_site_dir = self._site_packages_path(temp_dir)
else:
temp_site_dir = temp_dir
status = system("rsync -r '%s/' '%s/'" %
(temp_site_dir, install_dir))
if status:
logging.error('%s rsync to install_dir failed.' % self.name)
return False
return True
finally:
shutil.rmtree(temp_dir)
def _build_using_make(self, install_dir):
"""Build the current package using configure/make.
@returns True on success, False otherwise.
"""
install_prefix = os.path.join(install_dir, 'usr', 'local')
status = system('./configure --prefix=%s' % install_prefix)
if status:
logging.error('./configure failed for %s', self.name)
return False
status = system('make')
if status:
logging.error('make failed for %s', self.name)
return False
status = system('make check')
if status:
logging.error('make check failed for %s', self.name)
return False
return True
def _install_using_make(self):
"""Install the current package using make install.
Assumes the install path was set up while running ./configure (in
_build_using_make()).
@returns True on success, False otherwise.
"""
status = system('make install')
return status == 0
def fetch(self, dest_dir):
"""
        Fetch the package from one of its URLs and save it in dest_dir.
        If the package already exists in dest_dir and the checksum
        matches, this code will not fetch it again.
Sets the 'verified_package' attribute with the destination pathname.
@param dest_dir - The destination directory to save the local file.
If it does not exist it will be created.
        @returns A boolean indicating whether the package is now in dest_dir.
@raises FetchError - When something unexpected happens.
"""
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
local_path = os.path.join(dest_dir, self.local_filename)
# If the package exists, verify its checksum and be happy if it is good.
if os.path.exists(local_path):
actual_hex_sum = _checksum_file(local_path)
if self.hex_sum == actual_hex_sum:
logging.info('Good checksum for existing %s package.',
self.name)
self.verified_package = local_path
return True
logging.warning('Bad checksum for existing %s package. '
'Re-downloading', self.name)
os.rename(local_path, local_path + '.wrong-checksum')
# Download the package from one of its urls, rejecting any if the
# checksum does not match.
for url in self.urls:
logging.info('Fetching %s', url)
try:
url_file = urllib2.urlopen(url)
except (urllib2.URLError, EnvironmentError):
logging.warning('Could not fetch %s package from %s.',
self.name, url)
continue
data_length = int(url_file.info().get('Content-Length',
_MAX_PACKAGE_SIZE))
if data_length <= 0 or data_length > _MAX_PACKAGE_SIZE:
raise FetchError('%s from %s fails Content-Length %d '
'sanity check.' % (self.name, url,
data_length))
checksum = utils.hash('sha1')
total_read = 0
output = open(local_path, 'wb')
try:
while total_read < data_length:
data = url_file.read(_READ_SIZE)
if not data:
break
output.write(data)
checksum.update(data)
total_read += len(data)
finally:
output.close()
if self.hex_sum != checksum.hexdigest():
logging.warning('Bad checksum for %s fetched from %s.',
self.name, url)
logging.warning('Got %s', checksum.hexdigest())
logging.warning('Expected %s', self.hex_sum)
os.unlink(local_path)
continue
logging.info('Good checksum.')
self.verified_package = local_path
return True
else:
return False
# NOTE: This class definition must come -before- all other ExternalPackage
# classes that need to use this version of setuptools so that it is inserted
# into the ExternalPackage.subclasses list before them.
class SetuptoolsPackage(ExternalPackage):
# For all known setuptools releases a string compare works for the
# version string. Hopefully they never release a 0.10. (Their own
# version comparison code would break if they did.)
version = '0.6c11'
urls = ('http://pypi.python.org/packages/source/s/setuptools/'
'setuptools-%s.tar.gz' % (version,),)
local_filename = 'setuptools-%s.tar.gz' % version
hex_sum = '8d1ad6384d358c547c50c60f1bfdb3362c6c4a7d'
SUDO_SLEEP_DELAY = 15
def _build_and_install(self, install_dir):
"""Install setuptools on the system."""
logging.info('NOTE: setuptools install does not use install_dir.')
return self._build_and_install_from_package(install_dir)
def _build_and_install_current_dir(self, install_dir):
egg_path = self._build_egg_using_setup_py()
if not egg_path:
return False
print '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n'
print 'About to run sudo to install setuptools', self.version
print 'on your system for use by', sys.executable, '\n'
print '!! ^C within', self.SUDO_SLEEP_DELAY, 'seconds to abort.\n'
time.sleep(self.SUDO_SLEEP_DELAY)
# Copy the egg to the local filesystem /var/tmp so that root can
# access it properly (avoid NFS squashroot issues).
temp_dir = self._get_temp_dir()
try:
shutil.copy(egg_path, temp_dir)
egg_name = os.path.split(egg_path)[1]
temp_egg = os.path.join(temp_dir, egg_name)
p = subprocess.Popen(['sudo', '/bin/sh', temp_egg],
stdout=subprocess.PIPE)
regex = re.compile('Copying (.*?) to (.*?)\n')
match = regex.search(p.communicate()[0])
status = p.wait()
if match:
compiled = os.path.join(match.group(2), match.group(1))
os.system("sudo chmod a+r '%s'" % compiled)
finally:
shutil.rmtree(temp_dir)
if status:
logging.error('install of setuptools from egg failed.')
return False
return True
class MySQLdbPackage(ExternalPackage):
module_name = 'MySQLdb'
version = '1.2.2'
urls = ('http://downloads.sourceforge.net/project/mysql-python/'
'mysql-python/%(version)s/MySQL-python-%(version)s.tar.gz'
% dict(version=version),)
local_filename = 'MySQL-python-%s.tar.gz' % version
hex_sum = '945a04773f30091ad81743f9eb0329a3ee3de383'
_build_and_install_current_dir = (
ExternalPackage._build_and_install_current_dir_setup_py)
def _build_and_install(self, install_dir):
if not os.path.exists('/usr/bin/mysql_config'):
logging.error('You need to install /usr/bin/mysql_config')
logging.error('On Ubuntu or Debian based systems use this: '
'sudo apt-get install libmysqlclient15-dev')
return False
return self._build_and_install_from_package(install_dir)
class DjangoPackage(ExternalPackage):
version = '1.1.1'
local_filename = 'Django-%s.tar.gz' % version
urls = ('http://www.djangoproject.com/download/%s/tarball/' % version,)
hex_sum = '441c54f0e90730bf4a55432b64519169b1e6ef20'
_build_and_install = ExternalPackage._build_and_install_from_package
_build_and_install_current_dir = (
ExternalPackage._build_and_install_current_dir_noegg)
def _get_installed_version_from_module(self, module):
try:
return module.get_version().split()[0]
except AttributeError:
return '0.9.6'
class NumpyPackage(ExternalPackage):
version = '1.2.1'
local_filename = 'numpy-%s.tar.gz' % version
urls = ('http://downloads.sourceforge.net/project/numpy/NumPy/%(version)s/'
'numpy-%(version)s.tar.gz' % dict(version=version),)
hex_sum = '1aa706e733aea18eaffa70d93c0105718acb66c5'
_build_and_install = ExternalPackage._build_and_install_from_package
_build_and_install_current_dir = (
ExternalPackage._build_and_install_current_dir_setupegg_py)
# This requires numpy so it must be declared after numpy to guarantee that it
# is already installed.
class MatplotlibPackage(ExternalPackage):
version = '0.98.5.3'
short_version = '0.98.5'
local_filename = 'matplotlib-%s.tar.gz' % version
urls = ('http://downloads.sourceforge.net/project/matplotlib/matplotlib/'
'matplotlib-%s/matplotlib-%s.tar.gz' % (short_version, version),)
hex_sum = '2f6c894cf407192b3b60351bcc6468c0385d47b6'
os_requirements = {'/usr/include/ft2build.h': 'libfreetype6-dev',
'/usr/include/png.h': 'libpng12-dev'}
_build_and_install = ExternalPackage._build_and_install_from_package
_build_and_install_current_dir = (
ExternalPackage._build_and_install_current_dir_setupegg_py)
class AtForkPackage(ExternalPackage):
version = '0.1.2'
local_filename = 'atfork-%s.zip' % version
urls = ('http://python-atfork.googlecode.com/files/' + local_filename,)
hex_sum = '5baa64c73e966b57fa797040585c760c502dc70b'
_build_and_install = ExternalPackage._build_and_install_from_package
_build_and_install_current_dir = (
ExternalPackage._build_and_install_current_dir_noegg)
class ParamikoPackage(ExternalPackage):
version = '1.7.5'
local_filename = 'paramiko-%s.tar.gz' % version
urls = ('http://www.lag.net/paramiko/download/' + local_filename,
'ftp://mirrors.kernel.org/gentoo/distfiles/' + local_filename,)
hex_sum = '592be7a08290070b71da63a8e6f28a803399e5c5'
_build_and_install = ExternalPackage._build_and_install_from_package
def _check_for_pycrypto(self):
# NOTE(gps): Linux distros have better python-crypto packages than we
        # can easily get today via a wget due to the library's age and staleness;
# yet many security and behavior bugs are fixed by patches that distros
# already apply. PyCrypto has a new active maintainer in 2009. Once a
# new release is made (http://pycrypto.org/) we should add an installer.
try:
import Crypto
except ImportError:
logging.error('Please run "sudo apt-get install python-crypto" '
'or your Linux distro\'s equivalent.')
return False
return True
def _build_and_install_current_dir(self, install_dir):
if not self._check_for_pycrypto():
return False
# paramiko 1.7.4 doesn't require building, it is just a module directory
# that we can rsync into place directly.
if not os.path.isdir('paramiko'):
raise Error('no paramiko directory in %s.' % os.getcwd())
status = system("rsync -r 'paramiko' '%s/'" % install_dir)
if status:
logging.error('%s rsync to install_dir failed.' % self.name)
return False
return True
class SimplejsonPackage(ExternalPackage):
version = '2.0.9'
local_filename = 'simplejson-%s.tar.gz' % version
urls = ('http://pypi.python.org/packages/source/s/simplejson/' +
local_filename,)
hex_sum = 'b5b26059adbe677b06c299bed30557fcb0c7df8c'
_build_and_install = ExternalPackage._build_and_install_from_package
_build_and_install_current_dir = (
ExternalPackage._build_and_install_current_dir_setup_py)
class Httplib2Package(ExternalPackage):
version = '0.6.0'
local_filename = 'httplib2-%s.tar.gz' % version
urls = ('http://httplib2.googlecode.com/files/' + local_filename,)
hex_sum = '995344b2704826cc0d61a266e995b328d92445a5'
def _get_installed_version_from_module(self, module):
# httplib2 doesn't contain a proper version
return self.version
_build_and_install = ExternalPackage._build_and_install_from_package
_build_and_install_current_dir = (
ExternalPackage._build_and_install_current_dir_noegg)
class GwtPackage(ExternalPackage):
"""Fetch and extract a local copy of GWT used to build the frontend."""
version = '2.0.3'
local_filename = 'gwt-%s.zip' % version
urls = ('http://google-web-toolkit.googlecode.com/files/' + local_filename,)
hex_sum = '1dabd25a02b9299f6fa84c51c97210a3373a663e'
name = 'gwt'
about_filename = 'about.txt'
module_name = None # Not a Python module.
def is_needed(self, install_dir):
gwt_dir = os.path.join(install_dir, self.name)
about_file = os.path.join(install_dir, self.name, self.about_filename)
if not os.path.exists(gwt_dir) or not os.path.exists(about_file):
logging.info('gwt not installed for autotest')
return True
f = open(about_file, 'r')
version_line = f.readline()
f.close()
match = re.match(r'Google Web Toolkit (.*)', version_line)
if not match:
logging.info('did not find gwt version')
return True
logging.info('found gwt version %s', match.group(1))
return match.group(1) != self.version
def _build_and_install(self, install_dir):
os.chdir(install_dir)
self._extract_compressed_package()
extracted_dir = self.local_filename[:-len('.zip')]
target_dir = os.path.join(install_dir, self.name)
if os.path.exists(target_dir):
shutil.rmtree(target_dir)
os.rename(extracted_dir, target_dir)
return True
# This requires GWT to already be installed, so it must be declared after
# GwtPackage
class GwtIncubatorPackage(ExternalPackage):
version = '20100204-r1747'
local_filename = 'gwt-incubator-%s.jar' % version
symlink_name = 'gwt-incubator.jar'
urls = ('http://google-web-toolkit-incubator.googlecode.com/files/'
+ local_filename,)
hex_sum = '0c9495634f0627d0b4de0d78a50a3aefebf67f8c'
module_name = None # Not a Python module
def is_needed(self, install_dir):
gwt_dir = os.path.join(install_dir, GwtPackage.name)
return not os.path.exists(os.path.join(gwt_dir, self.local_filename))
def _build_and_install(self, install_dir):
dest = os.path.join(install_dir, GwtPackage.name, self.local_filename)
shutil.copyfile(self.verified_package, dest)
symlink_path = os.path.join(
install_dir, GwtPackage.name, self.symlink_name)
if os.path.exists(symlink_path):
os.remove(symlink_path)
os.symlink(dest, symlink_path)
return True
if __name__ == '__main__':
sys.exit(main())
| gpl-2.0 |
ComparativeGenomicsToolkit/Comparative-Annotation-Toolkit | tools/sqlite.py | 2 | 1226 | """
Tools to wrap around the native sqlite package. Necessary due to bugs in how pandas interacts with sqlalchemy.
"""
import sqlite3 as sql
__author__ = "Ian Fiddes"
class ExclusiveSqlConnection(object):
"""Context manager for an exclusive SQL connection"""
def __init__(self, path, timeout=6000):
self.path = path
self.timeout = timeout
def __enter__(self):
self.con = sql.connect(self.path, timeout=self.timeout, isolation_level="EXCLUSIVE")
try:
self.con.execute("BEGIN EXCLUSIVE")
except sql.OperationalError:
raise RuntimeError("Database still locked after {} seconds.".format(self.timeout))
return self.con
def __exit__(self, exception_type, exception_val, trace):
self.con.commit()
self.con.close()
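# A minimal usage sketch (not part of the original module; the database path
# below is hypothetical):
#
#     with ExclusiveSqlConnection('/tmp/example.db', timeout=60) as con:
#         con.execute('CREATE TABLE IF NOT EXISTS t (x INTEGER)')
#         con.execute('INSERT INTO t VALUES (1)')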
def attach_database(con, path, name):
"""
Attaches another database found at path to the name given in the given connection.
"""
con.execute("ATTACH DATABASE '{}' AS {}".format(path, name))
def open_database(path, timeout=6000):
"""opens a database, returning the connection and cursor objects."""
con = sql.connect(path, timeout=timeout)
cur = con.cursor()
return con, cur
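# Example combining the helpers above (a sketch; file paths and the table name
# are hypothetical):
#
#     con, cur = open_database('/tmp/main.db')
#     attach_database(con, '/tmp/annotation.db', 'anno')
#     cur.execute('SELECT count(*) FROM anno.some_table')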
| apache-2.0 |
Cronjaeger/coalescent-simulations | test.py | 1 | 3785 | #test.py
#Copyright (C) 2014 Mathias Christensen Cronjaeger [email protected]
#This program is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 2 of the License, or
#(at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
import time
import recursionEquation as re
import libCoal as lc
from scipy.special import binom
import numpy as np
import matplotlib.pyplot as pl
###count number of terms in recursion:
#n = 60
#array = np.array #for speed -purposes
#prod = np.prod # for speed-purposes
#partitionsMultiset = re.partitionsMultiset
#x = 0
#for n1 in range(1,n+1):
# for p in partitionsMultiset(n,n1):
# x += prod(array(p) + 1) - 1
#print x
#print "the cardinality of the set {(p,p_sub) | p partition of %i, p_sub subpartition of p} is %i"%(n,x)
### Generate plots of distribution of partition-sizes
#nSet = range(45,56)
#nMax = max(nSet)
#x = np.arange(1,nMax+1)
#
#p = np.zeros((len(nSet),nMax))
#
#for i,n in enumerate(nSet):
# for k in range(n):
# p[i,k] = len(re.partitions(n,k+1))
# pl.plot(x,p[i,:],label='n = %i'%(n))
## pl.plot(x,map(np.log,p[i,:]),label='n=%i'%(n))
#pl.legend(loc='upper right')
#pl.title("Distribution of size of partitions of n")
#pl.plot(x , Pi2_normSFS_AVG , color='red' , label=label2)
#pl.legend(loc='upper right')
#n = 15
#alpha = 1.7
#coalType = "xi_beta"
#args = (alpha,)
#
#print "Solving p-recursion for a Xi-beta coalescent with n=%i, alpha=%f..."%(n,round(alpha,3))
#t_start = time.time()
#p,g = re.p_and_g(n,'xi_beta',(alpha,))
#t_stop = time.time()
#print "...done! \ncalculations took %f seconds"%(round(t_stop-t_start,3))
#def jumpProb(part,n,q):
# '''
# calculates
# P(initial jump is to a specific state with block-sizes given
# by "part"); denoted p_lambda in my text.
# "part" is here a partition of n encoded as a multiset,
# i.e. part[i] == #i-blocks of part, and
# sum(i * part[i]) == n
# '''
# m = []
# for l in [j*[i] for i,j in enumerate(part) if j!=0]:
# m.extend(l)
# #do this right!
# return re.multinomial(n,m)*re.fourWay_beta_collisionRate(n,[x for x in m if x>1],args[0]) /q[n]
## return 1*fourWay_beta_collisionRate(n,part,args[0]) /q[n]
# ###JUMP PROB is calculated incorrectly!
## return fourWay_beta_collisionRate(n,[x for x in m if x>1],args[0]) /q[n]
#
#P_mat,q_vec = re.P_and_q(n,'xi_beta',args)
#
#P = np.zeros(n+1)
#for n1 in range(1,n):
# for p in re.partitionsMultiset_constrained(n,n1,4):
# P[n1] += jumpProb(p,n,q_vec)
#
#n1 = 20
#b1 = 7
#
#pList = []
#for i,p in enumerate(re.partitionsMultiset_constrained(n,n1,4)):
# probDist = []
# for s in re.subpartitionsMultiset(p,b1):
# prob = np.prod([binom(p[i],s[0][i]) for i in range(len(s[0]))])/binom(n1,b1)
# probDist += [(s[0],prob)]
# pList.append([(sum([x[1] for x in probDist]),len(probDist),p)]+probDist)
# if sum([x[1] for x in probDist]) != 1.0:
# print sum([x[1] for x in probDist]),len(probDist),p
#
#class testClass(object):
#
# def __init__(self,x):
# self.foo = x
# print "Initialized"
#
#
#def fib(n):
# if n == 0 or n==1:
# return 1
# else:
# return fib(n-1) + fib(n-2)
#
#def listCalc(n):
# l = []
# listBuild(l,n,n,0)
# return l
#
#def listBuild(l,n,N,length):
## print n,N,l
# if length == N-1:
# return l.append(N)
# else:
# l.append(n)
# listBuild(l,n-1,N,length+1) | gpl-2.0 |
arnoldlu/lisa | libs/utils/analysis/cpus_analysis.py | 3 | 6659 | # SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" CPUs Analysis Module """
import matplotlib.pyplot as plt
import pylab as pl
import pandas as pd
from trappy.utils import listify
from analysis_module import AnalysisModule
class CpusAnalysis(AnalysisModule):
"""
Support for CPUs Signals Analysis
:param trace: input Trace object
:type trace: :mod:`libs.utils.Trace`
"""
def __init__(self, trace):
super(CpusAnalysis, self).__init__(trace)
###############################################################################
# DataFrame Getter Methods
###############################################################################
def _dfg_context_switches(self):
"""
Compute number of context switches on each CPU.
:returns: :mod:`pandas.DataFrame`
"""
if not self._trace.hasEvents('sched_switch'):
self._log.warning('Events [sched_switch] not found, context switch '
'computation not possible!')
return None
sched_df = self._dfg_trace_event('sched_switch')
cpus = range(self._platform['cpus_count'])
ctx_sw_df = pd.DataFrame(
[len(sched_df[sched_df['__cpu'] == cpu]) for cpu in cpus],
index=cpus,
columns=['context_switch_cnt']
)
ctx_sw_df.index.name = 'cpu'
return ctx_sw_df
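    # Illustrative layout of the returned frame (values are made up, not real
    # trace data):
    #
    #          context_switch_cnt
    #     cpu
    #     0                  1234
    #     1                   987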
###############################################################################
# Plotting Methods
###############################################################################
def plotCPU(self, cpus=None):
"""
Plot CPU-related signals for both big and LITTLE clusters.
:param cpus: list of CPUs to be plotted
:type cpus: list(int)
"""
if not self._trace.hasEvents('sched_load_avg_cpu'):
self._log.warning('Events [sched_load_avg_cpu] not found, '
'plot DISABLED!')
return
# Filter on specified cpus
if cpus is None:
cpus = sorted(self._platform['clusters']['little'] +
self._platform['clusters']['big'])
cpus = listify(cpus)
# Plot: big CPUs
bcpus = set(cpus) & set(self._platform['clusters']['big'])
if bcpus:
self._plotCPU(bcpus, "big")
# Plot: LITTLE CPUs
lcpus = set(cpus) & set(self._platform['clusters']['little'])
if lcpus:
self._plotCPU(lcpus, "LITTLE")
###############################################################################
# Utility Methods
###############################################################################
def _plotCPU(self, cpus, label=''):
"""
Internal method that generates plots for all input CPUs.
:param cpus: list of CPUs to be plotted
:type cpus: list(int)
"""
        label1 = ''
        label2 = ''
        if label != '':
            label1 = '{} '.format(label)
            label2 = '_{}s'.format(label.lower())
# Plot required CPUs
_, pltaxes = plt.subplots(len(cpus), 1, figsize=(16, 3*(len(cpus))))
idx = 0
for cpu in cpus:
# Reference axes to be used
axes = pltaxes
if len(cpus) > 1:
axes = pltaxes[idx]
# Add CPU utilization
axes.set_title('{0:s}CPU [{1:d}]'.format(label1, cpu))
df = self._dfg_trace_event('sched_load_avg_cpu')
df = df[df.cpu == cpu]
if len(df):
df[['util_avg']].plot(ax=axes, drawstyle='steps-post',
alpha=0.4)
# if self._trace.hasEvents('sched_boost_cpu'):
# df = self._dfg_trace_event('sched_boost_cpu')
# df = df[df.cpu == cpu]
# if len(df):
# df[['usage', 'boosted_usage']].plot(
# ax=axes,
# style=['m-', 'r-'],
# drawstyle='steps-post');
            # Add capacity data if available
if self._trace.hasEvents('cpu_capacity'):
df = self._dfg_trace_event('cpu_capacity')
df = df[df.cpu == cpu]
if len(df):
# data = df[['capacity', 'tip_capacity', 'max_capacity']]
# data.plot(ax=axes, style=['m', 'y', 'r'],
data = df[['capacity', 'tip_capacity']]
data.plot(ax=axes, style=['m', '--y'],
drawstyle='steps-post')
# Add overutilized signal to the plot
self._trace.analysis.status.plotOverutilized(axes)
axes.set_ylim(0, 1100)
axes.set_xlim(self._trace.x_min, self._trace.x_max)
if idx == 0:
axes.annotate("{}CPUs Signals".format(label1),
xy=(0, axes.get_ylim()[1]),
xytext=(-50, 25),
textcoords='offset points', fontsize=16)
# Disable x-axis timestamp for top-most cpus
if len(cpus) > 1 and idx < len(cpus)-1:
axes.set_xticklabels([])
axes.set_xlabel('')
axes.grid(True)
idx += 1
# Save generated plots into datadir
figname = '{}/{}cpus{}.png'.format(self._trace.plots_dir,
self._trace.plots_prefix, label2)
pl.savefig(figname, bbox_inches='tight')
def plotContextSwitch(self):
"""
Plot histogram of context switches on each CPU.
"""
if not self._trace.hasEvents('sched_switch'):
self._log.warning('Events [sched_switch] not found, plot DISABLED!')
return
ctx_sw_df = self._dfg_context_switches()
ax = ctx_sw_df.plot.bar(title="Per-CPU Task Context Switches",
legend=False,
figsize=(16, 8))
ax.grid()
# vim :set tabstop=4 shiftwidth=4 expandtab
| apache-2.0 |
cmcantalupo/geopm | integration/test/check_trace.py | 1 | 4076 | #!/usr/bin/env python
#
# Copyright (c) 2015 - 2021, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""Basic sanity checks of trace files. These methods can be used in
other tests, or this script can be run against a set of trace files
given as input.
"""
import sys
import glob
import unittest
import pandas
import util
def read_meta_data(trace_file):
agent = None
with open(trace_file) as infile:
for line in infile:
if agent is None and line.startswith('#') and 'agent' in line:
                agent = line.split(': ')[-1].strip()
if agent is not None:
break
return agent
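# The parsing above assumes a commented header line of roughly the form
# '# agent: energy_efficient'; the exact wording is inferred from the code,
# not taken from a real trace file.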
def check_sample_rate(trace_file, expected_sample_rate, verbose=False):
"""Check that sample rate is regular and fast.
"""
print(trace_file)
test = unittest.TestCase()
trace_data = pandas.read_csv(trace_file, delimiter='|', comment='#')
tt = trace_data
max_mean = 0.01 # 10 millisecond max sample period
max_nstd = 0.1 # 10% normalized standard deviation (std / mean)
delta_t = tt['TIME'].diff()
if verbose:
sys.stdout.write('sample rates:\n{}\n'.format(delta_t.describe()))
delta_t = delta_t.loc[delta_t != 0]
test.assertGreater(max_mean, delta_t.mean())
test.assertGreater(max_nstd, delta_t.std() / delta_t.mean())
util.assertNear(test, delta_t.mean(), expected_sample_rate)
# find outliers
delta_t_out = delta_t[(delta_t - delta_t.mean()) >= 3*delta_t.std()]
if verbose:
sys.stdout.write('outliers (>3*stdev):\n{}\n'.format(delta_t_out.describe()))
num_samples = len(delta_t)
num_out = len(delta_t_out)
# check that less than 1% of the samples are outliers
test.assertLess(num_out, num_samples * 0.01)
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.stderr.write('Usage: {} <trace file name or glob pattern>\n'.format(sys.argv[0]))
sys.exit(1)
trace_pattern = sys.argv[1]
traces = glob.glob(trace_pattern)
if len(traces) == 0:
sys.stderr.write('No trace files found for pattern {}.\n'.format(trace_pattern))
sys.exit(1)
default_sample_rate = 0.005
for tt in traces:
agent = read_meta_data(tt)
# TODO: check these for all agents, or just make this a CLI
# option? what if different agent traces are in this glob?
if agent in ['energy_efficient', 'frequency_map']:
sample_rate = 0.002
else:
sample_rate = default_sample_rate
check_sample_rate(tt, sample_rate, verbose=True)
| bsd-3-clause |
hrjn/scikit-learn | examples/linear_model/plot_ridge_path.py | 27 | 2129 | """
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
This example also shows the usefulness of applying Ridge regression
to highly ill-conditioned matrices. For such matrices, a slight
change in the target variable can cause huge variances in the
calculated weights. In such cases, it is useful to set a certain
regularization (alpha) to reduce this variation (noise).
When alpha is very large, the regularization effect dominates the
squared loss function and the coefficients tend to zero.
At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares, coefficients
exhibit big oscillations. In practice it is necessary to tune alpha
in such a way that a balance is maintained between both.
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# X is the 10x10 Hilbert matrix
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)
###############################################################################
# Compute paths
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
coefs = []
for a in alphas:
ridge = linear_model.Ridge(alpha=a, fit_intercept=False)
ridge.fit(X, y)
coefs.append(ridge.coef_)
###############################################################################
# Display results
ax = plt.gca()
ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
| bsd-3-clause |
jesuscript/topo-mpi | topo/misc/commandline.py | 1 | 18877 | """
Support functions for parsing command-line arguments and providing
the Topographica command prompt. Typically called from the
'./topographica' script, but can be called directly if using
Topographica files within a separate Python.
$Id$
"""
__version__='$Revision$'
from optparse import OptionParser
import sys, __main__, math, os, re
import topo
from param.parameterized import Parameterized,OptionalSingleton
try:
# By default, use a non-GUI backend for matplotlib.
from matplotlib import rcParams
rcParams['backend']='Agg'
matplotlib_imported=True
except ImportError:
matplotlib_imported=False
ipython = None
try:
from IPython.frontend.terminal.embed import InteractiveShellEmbed as IPShell
from IPython.config.loader import Config
ipython = "0.11"
except ImportError:
try:
# older version?
from IPython.Shell import IPShell
ipython = "0.10"
except ImportError:
print "Note: IPython is not available; using basic interactive Python prompt instead."
# Startup banner
BANNER = """
Welcome to Topographica!
Type help() for interactive help with python, help(topo) for general
information about Topographica, help(commandname) for info on a
specific command, or topo.about() for info on this release, including
licensing information.
"""
class GlobalParams(Parameterized,OptionalSingleton):
"""
A Parameterized class providing script-level parameters.
Script-level parameters can be set from the commandline by passing
via -p, e.g. ./topographica -p retina_density=10
Within scripts, parameters can be declared by using the add()
method.
Example usage in a script:
from topo.misc.commandline import global_params as p
p.add(
retina_density=param.Number(default=24.0,bounds=(0,None),
inclusive_bounds=(False,True),doc=\"""
The nominal_density to use for the retina.\"""))
...
topo.sim['Retina']=sheet.GeneratorSheet(
nominal_density=p.retina_density)
Further information:
'context' is usually set to __main__.__dict__ and is used to find
the value of a parameter as it is add()ed to this object
(i.e. add() has access to values set via the commandline or in
scripts).
Values set via set_in_context() or exec_in_context() (used by -p)
are tracked: warnings are issued for overwritten values, and
unused values can be warned about via check_for_unused_names().
The context is not saved in snapshots, but parameter values are
saved.
"""
context = None
def __new__(cls,*args,**kw):
return OptionalSingleton.__new__(cls,True)
def __init__(self,context=None,**params):
self.context = context or {}
self.unused_names = set()
params['name']="global_params"
super(GlobalParams,self).__init__(**params)
def __getstate__(self):
# context is neither saved nor restored
# (in our current usage, the context of the GlobalParams
# instance will be set to __main__.__dict__ on startup).
state = super(GlobalParams,self).__getstate__()
del state['context']
return state
def set_in_context(self,**params):
"""
Set in self.context all name=val pairs specified in **params,
tracking new names and warning of any replacements.
"""
for name,val in params.items():
if name in self.context:
self.warning("Replacing previous value of '%s' with '%s'"%(name,val))
self.context[name]=val
self.unused_names.add(name)
def exec_in_context(self,arg):
"""
exec arg in self.context, tracking new names and
warning of any replacements.
"""
## contains elaborate scheme to detect what is specified by
        ## -p, and to warn about any replacement
current_ids = dict([(k,id(v)) for k,v in self.context.items()])
exec arg in self.context
for k,v in self.context.items():
if k in self.unused_names and id(v)!=current_ids[k]:
self.warning("Replacing previous value of '%s' with '%s'"%(k,v))
new_names = set(self.context.keys()).difference(set(current_ids.keys()))
for k in new_names:
self.unused_names.add(k)
def check_for_unused_names(self):
"""Warn about any unused names."""
for s in self.unused_names:
self.warning("'%s' is unused."%s)
        # warns for a param that was specified with -c (but also if the name gets defined
        # in __main__, e.g. by default_density=global_params.default_density in a script file)
## for name in self.params():
## if name in self.context:
## self.warning("'%s' still exists in global_params.context"%name)
        # detect a duplicate param value that wasn't used (e.g. one specified with -p after the script)
for name,val in self.params().items():
if name in self.context:
if self.context[name]!=self.inspect_value(name):
self.warning("'%s=%s' is unused."%(name,self.context[name]))
def add(self,**kw):
"""
For each parameter_name=parameter_object specified in kw:
* adds the parameter_object to this object's class
* if there is an entry in context that has the same name as the parameter,
sets the value of the parameter in this object to that value, and then removes
the name from context
"""
for p_name,p_obj in kw.items():
self._add_parameter(p_name,p_obj)
if p_name in self.context:
setattr(self,p_name,self.context[p_name])
if p_name in self.unused_names:
# i.e. remove from __main__ if it was a -p option (but not if -c)
del self.context[p_name]
self.unused_names.remove(p_name)
global_params=GlobalParams(context=__main__.__dict__)
##### Command-prompt formatting
#
class IPCommandPromptHandler(object):
"""
Allows control over IPython's dynamic command prompts.
"""
_format = ''
_prompt = ''
@classmethod
def set_format(cls,format):
"""
Set IPython's prompt template to format.
"""
import __main__
IP = __main__.__dict__['__IP']
prompt = getattr(IP.outputcache,cls._prompt)
prompt.p_template = format
prompt.set_p_str()
cls._format = format
@classmethod
def get_format(cls):
"""
Return the current template.
"""
return cls._format
class CommandPrompt(IPCommandPromptHandler):
"""
Control over input prompt.
Several predefined formats are provided, and any of these (or any
arbitrary string) can be used by calling set_format() with their
values.
See the IPython manual for details:
http://ipython.scipy.org/doc/manual/html/config/index.html
Examples:
# Use one of the predefined formats:
CommandPrompt.set_format(CommandPrompt.basic_format)
# Just print the command number:
CommandPrompt.set_format('\# ')
# Print the command number but don't use color:
CommandPrompt.set_format('\N ')
# Print the value of my_var at each prompt:
CommandPrompt.set_format('${my_var}>>> ')
"""
_prompt = 'prompt1'
# Predefined alternatives
basic_format = 'Topographica>>> '
simtime_format = 'topo_t${topo.sim.timestr()}>>> '
simtimecmd_format = 'topo_t${topo.sim.timestr()}_c\\#>>> '
_format = simtimecmd_format
class CommandPrompt2(IPCommandPromptHandler):
"""
Control over continuation prompt.
(See CommandPrompt.)
"""
_prompt = 'prompt2'
basic_format = ' .\\D.: '
_format = basic_format
class OutputPrompt(IPCommandPromptHandler):
"""
Control over output prompt.
(See CommandPrompt.)
"""
_prompt = 'prompt_out'
basic_format = 'Out[\#]:'
_format = basic_format
#####
# Use to define global constants
global_constants = {'pi':math.pi}
# Create the topographica parser.
usage = "usage: topographica ([<option>]:[<filename>])*\n\
where any combination of options and Python script filenames will be\n\
processed in order left to right."
topo_parser = OptionParser(usage=usage)
def sim_name_from_filename(filename):
"""
Set the simulation title from the given filename, if none has been
set already.
"""
if topo.sim.name is None:
topo.sim.name=re.sub('.ty$','',os.path.basename(filename))
def boolean_option_action(option,opt_str,value,parser):
"""Callback function for boolean-valued options that apply to the entire run."""
#print "Processing %s" % (opt_str)
setattr(parser.values,option.dest,True)
def interactive():
os.environ['PYTHONINSPECT']='1'
# CB: note that topographica should stay open if an error occurs
# anywhere after a -i (i.e. in a -c command or script)
def i_action(option,opt_str,value,parser):
"""Callback function for the -i option."""
boolean_option_action(option,opt_str,value,parser)
interactive()
topo_parser.add_option("-i","--interactive",action="callback",callback=i_action,
dest="interactive",default=False,
help="provide an interactive prompt even if stdin does not appear to be a terminal.")
def v_action(option,opt_str,value,parser):
"""Callback function for the -v option."""
import param.parameterized
param.parameterized.min_print_level=param.parameterized.VERBOSE
print "Enabling verbose message output."
topo_parser.add_option("-v","--verbose",action="callback",callback=v_action,dest="verbose",default=False,help="""\
enable verbose messaging output.""")
def d_action(option,opt_str,value,parser):
"""Callback function for the -d option."""
import param.parameterized
param.parameterized.min_print_level=param.parameterized.DEBUG
print "Enabling debugging message output."
topo_parser.add_option("-d","--debug",action="callback",callback=d_action,dest="debug",default=False,help="""\
enable debugging message output (implies --verbose).""")
def l_action(option,opt_str,value,parser):
"""Callback function for the -l option."""
boolean_option_action(option,opt_str,value,parser)
from topo.misc.legacy import install_legacy_support
print "Enabling legacy support."
install_legacy_support()
topo_parser.add_option("-l","--legacy",action="callback",callback=l_action,dest="legacy",default=False,help="""\
launch Topographica with legacy support enabled.""")
def mpi_action(option, opt_str, value, parser):
from topo.misc import pmi
#print "Enabling MPI support through PMI"
pmi.setup()
topo_parser.add_option("--mpi",action="callback",callback=mpi_action,dest="mpi",default=False,help="""\
launch Topographica with MPI support enabled.""")
def gui(start=True):
"""Start the GUI as if -g were supplied in the command used to launch Topographica."""
if matplotlib_imported:
rcParams['backend']='TkAgg'
auto_import_commands()
if start:
import topo.tkgui
topo.tkgui.start()
# Topographica stays open if an error occurs after -g
# (see comment by i_action)
def g_action(option,opt_str,value,parser):
"""Callback function for the -g option."""
boolean_option_action(option,opt_str,value,parser)
interactive()
gui()
topo_parser.add_option("-g","--gui",action="callback",callback=g_action,dest="gui",default=False,help="""\
launch an interactive graphical user interface; \
equivalent to -c 'from topo.misc.commandline import gui ; gui()'. \
Implies -a.""")
topo_parser.add_option("--pdb",action="store_true",dest="pdb",help="""\
Automatically call the pdb debugger after every uncaught \
exception. See IPython documentation for further details.""")
# Keeps track of whether something has been performed, when deciding whether to assume -i
something_executed=False
def c_action(option,opt_str,value,parser):
"""Callback function for the -c option."""
#print "Processing %s '%s'" % (opt_str,value)
exec value in __main__.__dict__
global something_executed
something_executed=True
topo_parser.add_option("-c","--command",action = "callback",callback=c_action,type="string",
default=[],dest="commands",metavar="\"<command>\"",
help="string of arbitrary Python code to be executed in the main namespace.")
def p_action(option,opt_str,value,parser):
"""Callback function for the -p option."""
global_params.exec_in_context(value)
global something_executed
something_executed=True
topo_parser.add_option("-p","--set-parameter",action = "callback",callback=p_action,type="string",
default=[],dest="commands",metavar="\"<command>\"",
help="command specifying value(s) of script-level (global) Parameter(s).")
def auto_import_commands():
"""Import the contents of all files in the topo/command/ directory."""
import re,os
import topo
import __main__
# CEBALERT: this kind of thing (topo.__file__) won't work with
# py2exe and similar tools
topo_path = os.path.join(os.path.split(topo.__file__)[0],"command")
for f in os.listdir(topo_path):
if re.match('^[^_.].*\.py$',f):
modulename = re.sub('\.py$','',f)
exec "from topo.command."+modulename+" import *" in __main__.__dict__
def a_action(option,opt_str,value,parser):
"""Callback function for the -a option."""
auto_import_commands()
topo_parser.add_option("-a","--auto-import-commands",action="callback",callback=a_action,help="""\
import everything from commands/*.py into the main namespace, for convenience; \
equivalent to -c 'from topo.misc.commandline import auto_import_commands ; auto_import_commands()'.""")
def exec_startup_files():
"""
Execute startup files.
Linux/UNIX/OS X: ~/.topographicarc
Windows: %USERPROFILE%\topographica.ini
"""
# From Bilal: On OS X, ~/Library/Preferences/ is the standard path
# for user-defined params. The filename format (corresponding to
# .ini on windows) is org.topographica.plist, where a plist is an
# XML file. But, many shell-based programs follow the Unix
# convention, so we should be fine doing that.
# Linux/UNIX/OS X:
rcpath = os.path.join(os.path.expanduser("~"),'.topographicarc')
# Windows (ini is convention, and can be double clicked to edit):
inipath = os.path.join(os.path.expandvars("$USERPROFILE"),'topographica.ini')
for startup_file in (rcpath,inipath):
if os.path.exists(startup_file):
print "Executing user startup file %s" % (startup_file)
execfile(startup_file,__main__.__dict__)
#####
# CEBALERT: locations we used to use on Windows and OS X. Should
# remove after 0.9.8.
import param
# application data on windows
inipath = os.path.join(os.path.expandvars("$APPDATA"),'Topographica','topographica.ini')
# application support on OS X
configpath = os.path.join(os.path.expanduser("~"),"Library","Application Support",'Topographica','topographica.config')
for startup_file in (configpath,inipath):
if os.path.exists(startup_file):
param.Parameterized().warning("Ignoring %s; location for startup file is %s (UNIX/Linux/Mac OS X) or %s (Windows)."%(startup_file,rcpath,inipath))
#####
### Execute what is specified by the options.
def process_argv(argv):
"""
Process command-line arguments (minus argv[0]!), rearrange and execute.
"""
# Initial preparation
import __main__
for (k,v) in global_constants.items():
exec '%s = %s' % (k,v) in __main__.__dict__
exec_startup_files()
# Repeatedly process options, if any, followed by filenames, if any, until nothing is left
topo_parser.disable_interspersed_args()
args=argv
option=None
global something_executed
while True:
# Process options up until the first filename
(option,args) = topo_parser.parse_args(args,option)
# Handle filename
if args:
filename=args.pop(0)
#print "Executing %s" % (filename)
filedir = os.path.dirname(os.path.abspath(filename))
sys.path.insert(0,filedir) # Allow imports relative to this file's path
sim_name_from_filename(filename) # Default value of topo.sim.name
execfile(filename,__main__.__dict__)
something_executed=True
if not args:
break
global_params.check_for_unused_names()
# If no scripts and no commands were given, pretend -i was given.
if not something_executed: interactive()
if option.gui: topo.guimain.title(topo.sim.name)
## INTERACTIVE SESSION BEGINS HERE (i.e. can't have anything but
## some kind of cleanup code afterwards)
if os.environ.get('PYTHONINSPECT'):
print BANNER
# CBALERT: should probably allow a way for users to pass
# things to IPython? Or at least set up some kind of
# topographica ipython config file. Right now, a topo_parser
# option has to be added for every ipython option we want to
# support (e.g. see --pdb)
if ipython:
if ipython == "0.10":
# Stop IPython namespace hack?
# http://www.nabble.com/__main__-vs-__main__-td14606612.html
__main__.__name__="__mynamespace__"
ipython_args = ['-noconfirm_exit','-nobanner',
'-pi1',CommandPrompt.get_format(),
'-pi2',CommandPrompt2.get_format(),
'-po',OutputPrompt.get_format()]
if option.pdb:
ipython_args.append('-pdb')
ipshell = IPShell(
ipython_args,
user_ns=__main__.__dict__,
)
ipshell.mainloop(sys_exit=1)
elif ipython == "0.11":
config = Config()
config.InteractiveShell.prompt_in1 = CommandPrompt.get_format()
config.InteractiveShell.prompt_in2 = CommandPrompt2.get_format()
config.InteractiveShell.prompt_out = OutputPrompt.get_format()
config.InteractiveShell.confirm_exit = False
ipshell = IPShell(
config=config,
user_ns=__main__.__dict__,
banner1="",
exit_msg="",
)
ipshell()
sys.exit()
| bsd-3-clause |
DangoMelon0701/PyRemote-Sensing | MODIS_AOD modified/get_aod.py | 1 | 6040 | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 26 02:27:52 2018
@author: Gerardo A. Rivera Tello
"""
import os, ujson, gdal, time, datetime
import numpy as np
import pandas as pd
from pyproj import Geod
from scipy import interpolate
#%%
def load_json(json_file):
with open(json_file) as ofile:
data_dict = ujson.load(ofile)
return data_dict
#%%
def get_files(extension):
files_list = [files for files in os.listdir(os.getcwd()) if files.endswith(extension)]
return files_list
#%%
def padwithnan(vector, pad_width, iaxis, kwargs):
vector[:pad_width[0]] = np.nan
vector[-pad_width[1]:] = np.nan
return vector
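# padwithnan is used later as np.pad(data, 2, padwithnan) in main() to frame
# the AOD grid with a 2-cell NaN border before the windowed average in
# calc_grid().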
#%%
def get_sds(hdf):
mod_file = gdal.Open(hdf)
sds = mod_file.GetSubDatasets()
    _sds = [name for name in sds if 'UNKNOWN' in name[0]]
mod_file = None
return _sds
#%%
def interp_nan(grid):
x = np.arange(0, grid.shape[1])
y = np.arange(0, grid.shape[0])
grid = np.ma.masked_invalid(grid)
xx, yy = np.meshgrid(x,y)
x1 = xx[~grid.mask]
y1 = yy[~grid.mask]
new_grid = grid[~grid.mask]
GD1 = interpolate.griddata((x1,y1),new_grid.ravel(),(xx,yy),method='linear')
return GD1
#%%
def load_modis(hdf):
ds_names = get_sds(hdf)
strings = ['Latitude','Longitude', 'Scan_Start_Time (64-bit floating-point)']
latlon_sds = [line[0] for line in ds_names if any(s in line[1] for s in strings)]
lon_hdf = gdal.Open(latlon_sds[0])
_lon = lon_hdf.ReadAsArray()
lon_hdf = None
lat_hdf = gdal.Open(latlon_sds[1])
_lat = lat_hdf.ReadAsArray()
lat_hdf = None
time_hdf = gdal.Open(latlon_sds[2])
_time = time_hdf.ReadAsArray()
return _lat,_lon,_time
#%%
def get_data(sds):
data_hdf = gdal.Open(sds)
_data = data_hdf.ReadAsArray().astype(np.float)
scale = data_hdf.GetRasterBand(1)
scale = scale.GetScale()
meta = data_hdf.GetMetadata()
fill_val = float(meta['_FillValue'])
_data[_data == fill_val] = np.nan
data_hdf = None
return _data*scale, meta
#%%
def calc_grid(data,x,y,dim):
if dim != 1:
subset = data[y-dim:y+dim,x-dim:x+dim]
is_nan = np.count_nonzero(~np.isnan(subset))
if is_nan != 0:
return np.nanmean(subset)
else:
return False
elif ~np.isnan(data[y][x]):
return data[y][x]
else:
        return False  # no valid value at this pixel
#%%
def get_distance(an_lon,an_lat,lon,lat):
wgs84_geod = Geod(ellps="WGS84")
return wgs84_geod.inv(an_lon,an_lat,lon,lat)[2]
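# Example (sketch, coordinates are made up): geodesic distance in metres on
# the WGS84 ellipsoid between two nearby points.
#
#     d = get_distance(-77.03, -12.05, -77.02, -12.04)   # roughly 1.5e3 m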
#%%
def get_time(metadata,time,x,y):
day = metadata['RANGEBEGINNINGDATE']
passing_t = datetime.timedelta(seconds=time[y,x])
fixed_t = datetime.datetime(1993,1,1)
final_t = fixed_t+passing_t
# mtime = metadata['RANGEBEGINNINGTIME'][0:5]
# time_for_pixel = 300.0/(shape[0])
# if "MOD" in sds:
# passing_time = int(time_for_pixel*(shape[0]-(y+1))/60)
# elif "MYD" in sds:
# passing_time = int(time_for_pixel*(y+1)/60)
# mtime = [int(t) for t in mtime.split(":")]
# mtime[1] = mtime[1] + passing_time
# if mtime[1] >= 60:
# mtime[1] -= 60
# mtime[0] += 1
# end_time = ":".join([str(item).zfill(2) for item in mtime])
# else:
# end_time = ":".join([str(item).zfill(2) for item in mtime])
return day, final_t.strftime('%X')
#%%
def main(aeronet_station,template,num):
json_file = get_files('.json')
st_data = load_json(json_file[0])
st_data = st_data[aeronet_station]
anlat = st_data['lat']
anlon = st_data['lon']
# df1 = pd.DataFrame(columns=['Date','Time','Data'])
# df3 = df1.copy()
# df5 = df1.copy()
hdf_files = get_files('.hdf')
with open("{}3x3_MODIS.txt".format(aeronet_station),"w") as file3x3_end, open("{}Gio_MODIS.txt".format(aeronet_station),'w') as fileGio_end:
for hdf in hdf_files:
sds = template.format(hdf,num)
lat,lon, time = load_modis(hdf)
if not (lat.min()<anlat<lat.max()) or not (lon.min()<anlon<lon.max()):
continue
if lat[lat==-999.].size != 0:
lat[lat==-999.]=np.nan
lat=lat[~np.isnan(lat).any(axis=1)]
lon[lon==-999.]=np.nan
lon=lon[~np.isnan(lon).any(axis=1)]
# lat[lat==-999.] = np.nan
# lat = interp_nan(lat)
# lon[lon==-999.] = np.nan
# lon = interp_nan(lon)
dist = get_distance(np.full(lon.shape,anlon),np.full(lat.shape,anlat),lon,lat)
data, meta = get_data(sds)
giov = np.nanmean(data[np.where(dist<=27500)])
y,x = np.unravel_index(dist.argmin(),dist.shape)
day, end_time = get_time(meta,time,x,y)
if ~np.isnan(giov):
fileGio_end.write("{}\t{}\t{}\n".format(day,end_time,giov))
            if dist.min() > 4242:
del data, meta, dist, giov
continue
print 'Done {}\n'.format(hdf)
data = np.pad(data,2,padwithnan)
x += 2
y += 2
v3 = calc_grid(data,x,y,3)
# df1 = df1.append({'Date':day,'Time':end_time,'Data':v1},ignore_index=True)
# df3 = df3.append({'Date':day,'Time':end_time,'Data':v3},ignore_index=True)
# df5 = df5.append({'Date':day,'Time':end_time,'Data':v5},ignore_index=True)
if v3 != False:
file3x3_end.write("{}\t{}\t{}\n".format(day,end_time,v3))
# df1.to_csv('{}1x1_MODIS.txt'.format(aeronet_station),header=None,index=None,sep='\t')
# df3.to_csv('{}3x3_MODIS.txt'.format(aeronet_station),header=None,index=None,sep='\t')
# df5.to_csv('{}5x5_MODIS.txt'.format(aeronet_station),header=None,index=None,sep='\t')
#%%
if __name__ == '__main__':
start_time = time.time()
file_template = 'HDF4_SDS:UNKNOWN:{}:{}'
main('lapaz',file_template,12)
print "--- {} seconds --- \n".format(round(time.time() - start_time,2))
os.system("pause") | mit |
Xeralux/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/data_feeder_test.py | 26 | 13023 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `DataFeeder`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn.learn_io import *
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
# pylint: enable=wildcard-import
class DataFeederTest(test.TestCase):
# pylint: disable=undefined-variable
"""Tests for `DataFeeder`."""
def _wrap_dict(self, data, prepend=''):
return {prepend + '1': data, prepend + '2': data}
def _assert_raises(self, input_data):
with self.assertRaisesRegexp(TypeError, 'annot convert'):
data_feeder.DataFeeder(input_data, None, n_classes=0, batch_size=1)
def _assert_dtype(self, expected_np_dtype, expected_tf_dtype, input_data):
feeder = data_feeder.DataFeeder(input_data, None, n_classes=0, batch_size=1)
if isinstance(input_data, dict):
for k, v in list(feeder.input_dtype.items()):
self.assertEqual(expected_np_dtype, v)
else:
self.assertEqual(expected_np_dtype, feeder.input_dtype)
with ops.Graph().as_default() as g, self.test_session(g):
inp, _ = feeder.input_builder()
if isinstance(inp, dict):
for k, v in list(inp.items()):
self.assertEqual(expected_tf_dtype, v.dtype)
else:
self.assertEqual(expected_tf_dtype, inp.dtype)
def test_input_int8(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int8)
self._assert_dtype(np.int8, dtypes.int8, data)
self._assert_dtype(np.int8, dtypes.int8, self._wrap_dict(data))
def test_input_int16(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int16)
self._assert_dtype(np.int16, dtypes.int16, data)
self._assert_dtype(np.int16, dtypes.int16, self._wrap_dict(data))
def test_input_int32(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int32)
self._assert_dtype(np.int32, dtypes.int32, data)
self._assert_dtype(np.int32, dtypes.int32, self._wrap_dict(data))
def test_input_int64(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int64)
self._assert_dtype(np.int64, dtypes.int64, data)
self._assert_dtype(np.int64, dtypes.int64, self._wrap_dict(data))
def test_input_uint32(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint32)
self._assert_dtype(np.uint32, dtypes.uint32, data)
self._assert_dtype(np.uint32, dtypes.uint32, self._wrap_dict(data))
def test_input_uint64(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint64)
self._assert_dtype(np.uint64, dtypes.uint64, data)
self._assert_dtype(np.uint64, dtypes.uint64, self._wrap_dict(data))
def test_input_uint8(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint8)
self._assert_dtype(np.uint8, dtypes.uint8, data)
self._assert_dtype(np.uint8, dtypes.uint8, self._wrap_dict(data))
def test_input_uint16(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint16)
self._assert_dtype(np.uint16, dtypes.uint16, data)
self._assert_dtype(np.uint16, dtypes.uint16, self._wrap_dict(data))
def test_input_float16(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.float16)
self._assert_dtype(np.float16, dtypes.float16, data)
self._assert_dtype(np.float16, dtypes.float16, self._wrap_dict(data))
def test_input_float32(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.float32)
self._assert_dtype(np.float32, dtypes.float32, data)
self._assert_dtype(np.float32, dtypes.float32, self._wrap_dict(data))
def test_input_float64(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.float64)
self._assert_dtype(np.float64, dtypes.float64, data)
self._assert_dtype(np.float64, dtypes.float64, self._wrap_dict(data))
def test_input_bool(self):
data = np.array([[False for _ in xrange(2)] for _ in xrange(2)])
self._assert_dtype(np.bool, dtypes.bool, data)
self._assert_dtype(np.bool, dtypes.bool, self._wrap_dict(data))
def test_input_string(self):
input_data = np.array([['str%d' % i for i in xrange(2)] for _ in xrange(2)])
self._assert_dtype(input_data.dtype, dtypes.string, input_data)
self._assert_dtype(input_data.dtype, dtypes.string,
self._wrap_dict(input_data))
def _assertAllClose(self, src, dest, src_key_of=None, src_prop=None):
def func(x):
val = getattr(x, src_prop) if src_prop else x
return val if src_key_of is None else src_key_of[val]
if isinstance(src, dict):
for k in list(src.keys()):
self.assertAllClose(func(src[k]), dest)
else:
self.assertAllClose(func(src), dest)
def test_unsupervised(self):
def func(feeder):
with self.test_session():
inp, _ = feeder.input_builder()
feed_dict_fn = feeder.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[1, 2]], feed_dict, 'name')
data = np.matrix([[1, 2], [2, 3], [3, 4]])
func(data_feeder.DataFeeder(data, None, n_classes=0, batch_size=1))
func(
data_feeder.DataFeeder(
self._wrap_dict(data), None, n_classes=0, batch_size=1))
def test_data_feeder_regression(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self._assertAllClose(out, [2, 1], feed_dict, 'name')
x = np.matrix([[1, 2], [3, 4]])
y = np.array([1, 2])
func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=3))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=3))
def test_epoch(self):
def func(feeder):
with self.test_session():
feeder.input_builder()
epoch = feeder.make_epoch_variable()
feed_dict_fn = feeder.get_feed_dict_fn()
# First input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Second input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Third input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Back to the first input again, so new epoch.
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [1])
data = np.matrix([[1, 2], [2, 3], [3, 4]])
labels = np.array([0, 0, 1])
func(data_feeder.DataFeeder(data, labels, n_classes=0, batch_size=1))
func(
data_feeder.DataFeeder(
self._wrap_dict(data, 'in'),
self._wrap_dict(labels, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=1))
def test_data_feeder_multioutput_regression(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self._assertAllClose(out, [[3, 4], [1, 2]], feed_dict, 'name')
x = np.matrix([[1, 2], [3, 4]])
y = np.array([[1, 2], [3, 4]])
func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=2))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=2))
def test_data_feeder_multioutput_classification(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self._assertAllClose(
out, [[[0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1]],
[[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 1, 0, 0]]], feed_dict,
'name')
x = np.matrix([[1, 2], [3, 4]])
y = np.array([[0, 1, 2], [2, 3, 4]])
func(data_feeder.DataFeeder(x, y, n_classes=5, batch_size=2))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(5, 'out'),
batch_size=2))
def test_streaming_data_feeder(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[[1, 2]], [[3, 4]]], feed_dict, 'name')
self._assertAllClose(out, [[[1], [2]], [[2], [2]]], feed_dict, 'name')
def x_iter(wrap_dict=False):
yield np.array([[1, 2]]) if not wrap_dict else self._wrap_dict(
np.array([[1, 2]]), 'in')
yield np.array([[3, 4]]) if not wrap_dict else self._wrap_dict(
np.array([[3, 4]]), 'in')
def y_iter(wrap_dict=False):
yield np.array([[1], [2]]) if not wrap_dict else self._wrap_dict(
np.array([[1], [2]]), 'out')
yield np.array([[2], [2]]) if not wrap_dict else self._wrap_dict(
np.array([[2], [2]]), 'out')
func(
data_feeder.StreamingDataFeeder(
x_iter(), y_iter(), n_classes=0, batch_size=2))
func(
data_feeder.StreamingDataFeeder(
x_iter(True),
y_iter(True),
n_classes=self._wrap_dict(0, 'out'),
batch_size=2))
# Test non-full batches.
func(
data_feeder.StreamingDataFeeder(
x_iter(), y_iter(), n_classes=0, batch_size=10))
func(
data_feeder.StreamingDataFeeder(
x_iter(True),
y_iter(True),
n_classes=self._wrap_dict(0, 'out'),
batch_size=10))
def test_dask_data_feeder(self):
if HAS_PANDAS and HAS_DASK:
x = pd.DataFrame(
dict(
a=np.array([.1, .3, .4, .6, .2, .1, .6]),
b=np.array([.7, .8, .1, .2, .5, .3, .9])))
x = dd.from_pandas(x, npartitions=2)
y = pd.DataFrame(dict(labels=np.array([1, 0, 2, 1, 0, 1, 2])))
y = dd.from_pandas(y, npartitions=2)
# TODO(ipolosukhin): Remove or restore this.
# x = extract_dask_data(x)
# y = extract_dask_labels(y)
df = data_feeder.DaskDataFeeder(x, y, n_classes=2, batch_size=2)
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[inp.name], [[0.40000001, 0.1],
[0.60000002, 0.2]])
self.assertAllClose(feed_dict[out.name], [[0., 0., 1.], [0., 1., 0.]])
def test_hdf5_data_feeder(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self.assertAllClose(out, [2, 1], feed_dict, 'name')
try:
import h5py # pylint: disable=g-import-not-at-top
x = np.matrix([[1, 2], [3, 4]])
y = np.array([1, 2])
h5f = h5py.File('test_hdf5.h5', 'w')
h5f.create_dataset('x', data=x)
h5f.create_dataset('y', data=y)
h5f.close()
h5f = h5py.File('test_hdf5.h5', 'r')
x = h5f['x']
y = h5f['y']
func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=3))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=3))
except ImportError:
print("Skipped test for hdf5 since it's not installed.")
class SetupPredictDataFeederTest(DataFeederTest):
"""Tests for `DataFeeder.setup_predict_data_feeder`."""
def test_iterable_data(self):
# pylint: disable=undefined-variable
def func(df):
self._assertAllClose(six.next(df), [[1, 2], [3, 4]])
self._assertAllClose(six.next(df), [[5, 6]])
data = [[1, 2], [3, 4], [5, 6]]
x = iter(data)
x_dict = iter([self._wrap_dict(v) for v in iter(data)])
func(data_feeder.setup_predict_data_feeder(x, batch_size=2))
func(data_feeder.setup_predict_data_feeder(x_dict, batch_size=2))
if __name__ == '__main__':
test.main()
| apache-2.0 |
hmendozap/auto-sklearn | autosklearn/pipeline/components/regression/gaussian_process.py | 1 | 2828 | import numpy as np
from HPOlibConfigSpace.configuration_space import ConfigurationSpace
from HPOlibConfigSpace.hyperparameters import UniformFloatHyperparameter
from autosklearn.pipeline.components.base import AutoSklearnRegressionAlgorithm
from autosklearn.pipeline.constants import *
class GaussianProcess(AutoSklearnRegressionAlgorithm):
def __init__(self, nugget, thetaL, thetaU, normalize=False, copy_X=False,
random_state=None):
self.nugget = float(nugget)
self.thetaL = float(thetaL)
self.thetaU = float(thetaU)
self.normalize = normalize
self.copy_X = copy_X
        # random_state is accepted for API compatibility but is not used here
self.random_state = random_state
self.estimator = None
self.scaler = None
def fit(self, X, Y):
import sklearn.gaussian_process
import sklearn.preprocessing
        # Instantiate a Gaussian Process model
self.estimator = sklearn.gaussian_process.GaussianProcess(
corr='squared_exponential',
theta0=np.ones(X.shape[1]) * 1e-1,
thetaL=np.ones(X.shape[1]) * self.thetaL,
thetaU=np.ones(X.shape[1]) * self.thetaU,
nugget=self.nugget,
optimizer='Welch',
random_state=self.random_state)
self.scaler = sklearn.preprocessing.StandardScaler(copy=True)
self.scaler.fit(Y)
Y_scaled = self.scaler.transform(Y)
self.estimator.fit(X, Y_scaled)
return self
def predict(self, X):
if self.estimator is None:
raise NotImplementedError
if self.scaler is None:
raise NotImplementedError
Y_pred = self.estimator.predict(X, batch_size=512)
return self.scaler.inverse_transform(Y_pred)
@staticmethod
def get_properties(dataset_properties=None):
return {'shortname': 'GP',
'name': 'Gaussian Process',
'handles_regression': True,
'handles_classification': False,
'handles_multiclass': False,
'handles_multilabel': False,
'is_deterministic': True,
'input': (DENSE, UNSIGNED_DATA),
'output': (PREDICTIONS,)}
@staticmethod
def get_hyperparameter_search_space(dataset_properties=None):
nugget = UniformFloatHyperparameter(
name="nugget", lower=0.0001, upper=10, default=0.1, log=True)
thetaL = UniformFloatHyperparameter(
name="thetaL", lower=1e-6, upper=1e-3, default=1e-4, log=True)
thetaU = UniformFloatHyperparameter(
name="thetaU", lower=0.2, upper=10, default=1.0, log=True)
cs = ConfigurationSpace()
cs.add_hyperparameter(nugget)
cs.add_hyperparameter(thetaL)
cs.add_hyperparameter(thetaU)
return cs
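# Hedged usage sketch (hypothetical helper, not part of the upstream
# component): it relies only on the static methods defined above, returning
# the component's metadata and its default hyperparameter search space.
def _example_gaussian_process_component():
    properties = GaussianProcess.get_properties()
    search_space = GaussianProcess.get_hyperparameter_search_space()
    return properties, search_space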
| bsd-3-clause |
bzero/statsmodels | statsmodels/tsa/vector_ar/util.py | 24 | 6383 | """
Miscellaneous utility code for VAR estimation
"""
from statsmodels.compat.python import range, string_types, asbytes
import numpy as np
import scipy.stats as stats
import scipy.linalg as L
import scipy.linalg.decomp as decomp
import statsmodels.tsa.tsatools as tsa
from scipy.linalg import cholesky
#-------------------------------------------------------------------------------
# Auxiliary functions for estimation
def get_var_endog(y, lags, trend='c', has_constant='skip'):
"""
Make predictor matrix for VAR(p) process
Z := (Z_0, ..., Z_T).T (T x Kp)
Z_t = [1 y_t y_{t-1} ... y_{t - p + 1}] (Kp x 1)
Ref: Lutkepohl p.70 (transposed)
has_constant can be 'raise', 'add', or 'skip'. See add_constant.
"""
nobs = len(y)
# Ravel C order, need to put in descending order
Z = np.array([y[t-lags : t][::-1].ravel() for t in range(lags, nobs)])
# Add constant, trend, etc.
if trend != 'nc':
Z = tsa.add_trend(Z, prepend=True, trend=trend,
has_constant=has_constant)
return Z
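def _example_get_var_endog():
    # Illustrative sketch (hypothetical helper, not in the upstream module):
    # for a bivariate series with three observations and one lag, the
    # predictor matrix prepends a constant column to the lagged observations,
    # giving [[1., 1., 2.], [1., 3., 4.]].
    y = np.array([[1., 2.], [3., 4.], [5., 6.]])
    return get_var_endog(y, lags=1, trend='c')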
def get_trendorder(trend='c'):
# Handle constant, etc.
if trend == 'c':
trendorder = 1
elif trend == 'nc':
trendorder = 0
elif trend == 'ct':
trendorder = 2
elif trend == 'ctt':
trendorder = 3
return trendorder
def make_lag_names(names, lag_order, trendorder=1):
"""
Produce list of lag-variable names. Constant / trends go at the beginning
Examples
--------
>>> make_lag_names(['foo', 'bar'], 2, 1)
['const', 'L1.foo', 'L1.bar', 'L2.foo', 'L2.bar']
"""
lag_names = []
if isinstance(names, string_types):
names = [names]
# take care of lagged endogenous names
for i in range(1, lag_order + 1):
for name in names:
if not isinstance(name, string_types):
name = str(name) # will need consistent unicode handling
lag_names.append('L'+str(i)+'.'+name)
# handle the constant name
if trendorder != 0:
lag_names.insert(0, 'const')
if trendorder > 1:
lag_names.insert(0, 'trend')
if trendorder > 2:
lag_names.insert(0, 'trend**2')
return lag_names
def comp_matrix(coefs):
"""
    Return the companion matrix for the VAR(1) representation of a VAR(p) process
(companion form)
A = [A_1 A_2 ... A_p-1 A_p
I_K 0 0 0
0 I_K ... 0 0
0 ... I_K 0]
"""
p, k, k2 = coefs.shape
assert(k == k2)
kp = k * p
result = np.zeros((kp, kp))
result[:k] = np.concatenate(coefs, axis=1)
# Set I_K matrices
if p > 1:
result[np.arange(k, kp), np.arange(kp-k)] = 1
return result
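def _example_comp_matrix():
    # Illustrative sketch (hypothetical helper, not in the upstream module):
    # two 2x2 coefficient matrices stack into a 4x4 companion matrix
    # [[0.5, 0.1, 0.2, 0. ],
    #  [0. , 0.4, 0.1, 0.3],
    #  [1. , 0. , 0. , 0. ],
    #  [0. , 1. , 0. , 0. ]].
    coefs = np.array([[[0.5, 0.1], [0.0, 0.4]],   # A_1
                      [[0.2, 0.0], [0.1, 0.3]]])  # A_2
    return comp_matrix(coefs)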
#-------------------------------------------------------------------------------
# Miscellaneous stuff
def parse_lutkepohl_data(path): # pragma: no cover
"""
Parse data files from Lutkepohl (2005) book
Source for data files: www.jmulti.de
"""
from collections import deque
from datetime import datetime
import pandas
import pandas.core.datetools as dt
import re
regex = re.compile(asbytes('<(.*) (\w)([\d]+)>.*'))
lines = deque(open(path, 'rb'))
to_skip = 0
while asbytes('*/') not in lines.popleft():
#while '*/' not in lines.popleft():
to_skip += 1
while True:
to_skip += 1
line = lines.popleft()
m = regex.match(line)
if m:
year, freq, start_point = m.groups()
break
data = np.genfromtxt(path, names=True, skip_header=to_skip+1)
n = len(data)
# generate the corresponding date range (using pandas for now)
start_point = int(start_point)
year = int(year)
offsets = {
asbytes('Q') : dt.BQuarterEnd(),
asbytes('M') : dt.BMonthEnd(),
asbytes('A') : dt.BYearEnd()
}
# create an instance
offset = offsets[freq]
inc = offset * (start_point - 1)
start_date = offset.rollforward(datetime(year, 1, 1)) + inc
offset = offsets[freq]
from pandas import DatetimeIndex # pylint: disable=E0611
date_range = DatetimeIndex(start=start_date, freq=offset, periods=n)
return data, date_range
def get_logdet(m):
from statsmodels.tools.linalg import logdet_symm
return logdet_symm(m)
get_logdet = np.deprecate(get_logdet,
"statsmodels.tsa.vector_ar.util.get_logdet",
"statsmodels.tools.linalg.logdet_symm",
"get_logdet is deprecated and will be removed in "
"0.8.0")
def norm_signif_level(alpha=0.05):
return stats.norm.ppf(1 - alpha / 2)
def acf_to_acorr(acf):
diag = np.diag(acf[0])
# numpy broadcasting sufficient
return acf / np.sqrt(np.outer(diag, diag))
def varsim(coefs, intercept, sig_u, steps=100, initvalues=None, seed=None):
"""
Simulate simple VAR(p) process with known coefficients, intercept, white
noise covariance, etc.
"""
if seed is not None:
np.random.seed(seed=seed)
from numpy.random import multivariate_normal as rmvnorm
p, k, k = coefs.shape
ugen = rmvnorm(np.zeros(len(sig_u)), sig_u, steps)
result = np.zeros((steps, k))
result[p:] = intercept + ugen[p:]
# add in AR terms
for t in range(p, steps):
ygen = result[t]
for j in range(p):
ygen += np.dot(coefs[j], result[t-j-1])
return result
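def _example_varsim():
    # Illustrative sketch (hypothetical helper, not in the upstream module):
    # simulate 50 steps of a stable bivariate VAR(1) with unit noise
    # covariance; the fixed seed makes the draw reproducible.
    coefs = np.array([[[0.5, 0.1], [0.0, 0.4]]])
    intercept = np.array([1.0, 2.0])
    sig_u = np.eye(2)
    return varsim(coefs, intercept, sig_u, steps=50, seed=0)  # shape (50, 2)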
def get_index(lst, name):
try:
result = lst.index(name)
except Exception:
if not isinstance(name, int):
raise
result = name
return result
#method used repeatedly in Sims-Zha error bands
def eigval_decomp(sym_array):
"""
Returns
-------
W: array of eigenvectors
eigva: list of eigenvalues
    k: index of the largest eigenvalue
"""
#check if symmetric, do not include shock period
eigva, W = decomp.eig(sym_array, left=True, right=False)
k = np.argmax(eigva)
return W, eigva, k
def vech(A):
"""
Simple vech operator
Returns
-------
vechvec: vector of all elements on and below diagonal
"""
    length = A.shape[1]
    vechvec = []
    for i in range(length):
        b = i
        while b < length:
            vechvec.append(A[b, i])
            b = b + 1
    vechvec = np.asarray(vechvec)
return vechvec
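def _example_vech():
    # Illustrative sketch (hypothetical helper, not in the upstream module):
    # vech stacks the on- and below-diagonal elements column by column, so
    # [[1., 2.], [3., 4.]] yields [1., 3., 4.].
    A = np.array([[1., 2.], [3., 4.]])
    return vech(A)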
| bsd-3-clause |
abhisg/scikit-learn | sklearn/linear_model/tests/test_sag.py | 93 | 25649 | # Authors: Danny Sullivan <[email protected]>
# Tom Dupre la Tour <[email protected]>
#
# Licence: BSD 3 clause
import math
import numpy as np
import scipy.sparse as sp
from sklearn.linear_model.sag import get_auto_step_size
from sklearn.linear_model.sag_fast import get_max_squared_sum
from sklearn.linear_model import LogisticRegression, Ridge
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils import compute_class_weight
from sklearn.preprocessing import LabelEncoder
from sklearn.datasets import make_blobs
from sklearn.base import clone
# this is used for sag classification
def log_dloss(p, y):
z = p * y
# approximately equal and saves the computation of the log
if z > 18.0:
return math.exp(-z) * -y
if z < -18.0:
return -y
return -y / (math.exp(z) + 1.0)
def log_loss(p, y):
return np.mean(np.log(1. + np.exp(-y * p)))
# this is used for sag regression
def squared_dloss(p, y):
return p - y
def squared_loss(p, y):
return np.mean(0.5 * (p - y) * (p - y))
# function for measuring the log loss
def get_pobj(w, alpha, myX, myy, loss):
w = w.ravel()
pred = np.dot(myX, w)
p = loss(pred, myy)
p += alpha * w.dot(w) / 2.
return p
def sag(X, y, step_size, alpha, n_iter=1, dloss=None, sparse=False,
sample_weight=None, fit_intercept=True):
n_samples, n_features = X.shape[0], X.shape[1]
weights = np.zeros(X.shape[1])
sum_gradient = np.zeros(X.shape[1])
gradient_memory = np.zeros((n_samples, n_features))
intercept = 0.0
intercept_sum_gradient = 0.0
intercept_gradient_memory = np.zeros(n_samples)
rng = np.random.RandomState(77)
decay = 1.0
seen = set()
# sparse data has a fixed decay of .01
if sparse:
decay = .01
for epoch in range(n_iter):
for k in range(n_samples):
idx = int(rng.rand(1) * n_samples)
# idx = k
entry = X[idx]
seen.add(idx)
p = np.dot(entry, weights) + intercept
gradient = dloss(p, y[idx])
if sample_weight is not None:
gradient *= sample_weight[idx]
update = entry * gradient + alpha * weights
sum_gradient += update - gradient_memory[idx]
gradient_memory[idx] = update
if fit_intercept:
intercept_sum_gradient += (gradient -
intercept_gradient_memory[idx])
intercept_gradient_memory[idx] = gradient
intercept -= (step_size * intercept_sum_gradient
/ len(seen) * decay)
weights -= step_size * sum_gradient / len(seen)
return weights, intercept
def sag_sparse(X, y, step_size, alpha, n_iter=1,
dloss=None, sample_weight=None, sparse=False,
fit_intercept=True):
if step_size * alpha == 1.:
raise ZeroDivisionError("Sparse sag does not handle the case "
"step_size * alpha == 1")
n_samples, n_features = X.shape[0], X.shape[1]
weights = np.zeros(n_features)
sum_gradient = np.zeros(n_features)
last_updated = np.zeros(n_features, dtype=np.int)
gradient_memory = np.zeros(n_samples)
rng = np.random.RandomState(77)
intercept = 0.0
intercept_sum_gradient = 0.0
wscale = 1.0
decay = 1.0
seen = set()
c_sum = np.zeros(n_iter * n_samples)
# sparse data has a fixed decay of .01
if sparse:
decay = .01
counter = 0
for epoch in range(n_iter):
for k in range(n_samples):
# idx = k
idx = int(rng.rand(1) * n_samples)
entry = X[idx]
seen.add(idx)
if counter >= 1:
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter - 1] * sum_gradient[j]
else:
weights[j] -= ((c_sum[counter - 1] -
c_sum[last_updated[j] - 1]) *
sum_gradient[j])
last_updated[j] = counter
p = (wscale * np.dot(entry, weights)) + intercept
gradient = dloss(p, y[idx])
if sample_weight is not None:
gradient *= sample_weight[idx]
update = entry * gradient
sum_gradient += update - (gradient_memory[idx] * entry)
if fit_intercept:
intercept_sum_gradient += gradient - gradient_memory[idx]
intercept -= (step_size * intercept_sum_gradient
/ len(seen) * decay)
gradient_memory[idx] = gradient
wscale *= (1.0 - alpha * step_size)
if counter == 0:
c_sum[0] = step_size / (wscale * len(seen))
else:
c_sum[counter] = (c_sum[counter - 1] +
step_size / (wscale * len(seen)))
if counter >= 1 and wscale < 1e-9:
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter] * sum_gradient[j]
else:
weights[j] -= ((c_sum[counter] -
c_sum[last_updated[j] - 1]) *
sum_gradient[j])
last_updated[j] = counter + 1
c_sum[counter] = 0
weights *= wscale
wscale = 1.0
counter += 1
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter - 1] * sum_gradient[j]
else:
weights[j] -= ((c_sum[counter - 1] -
c_sum[last_updated[j] - 1]) *
sum_gradient[j])
weights *= wscale
return weights, intercept
def get_step_size(X, alpha, fit_intercept, classification=True):
if classification:
return (4.0 / (np.max(np.sum(X * X, axis=1))
+ fit_intercept + 4.0 * alpha))
else:
return 1.0 / (np.max(np.sum(X * X, axis=1)) + fit_intercept + alpha)
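def _example_get_step_size():
    # Hedged worked example (hypothetical helper, not part of the upstream
    # tests): the largest row of X has squared norm 3**2 + 4**2 = 25, so with
    # alpha=1 and an intercept the classification step size is
    # 4 / (25 + 1 + 4 * 1) = 2 / 15.
    X = np.array([[3., 4.], [0., 1.]])
    step = get_step_size(X, alpha=1.0, fit_intercept=True, classification=True)
    assert abs(step - 2. / 15.) < 1e-12
    return step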
@ignore_warnings
def test_classifier_matching():
n_samples = 20
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0,
cluster_std=0.1)
y[y == 0] = -1
alpha = 1.1
n_iter = 80
fit_intercept = True
step_size = get_step_size(X, alpha, fit_intercept)
clf = LogisticRegression(solver="sag", fit_intercept=fit_intercept,
tol=1e-11, C=1. / alpha / n_samples,
max_iter=n_iter, random_state=10)
clf.fit(X, y)
weights, intercept = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
fit_intercept=fit_intercept)
weights2, intercept2 = sag(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
fit_intercept=fit_intercept)
weights = np.atleast_2d(weights)
intercept = np.atleast_1d(intercept)
weights2 = np.atleast_2d(weights2)
intercept2 = np.atleast_1d(intercept2)
assert_array_almost_equal(weights, clf.coef_, decimal=10)
assert_array_almost_equal(intercept, clf.intercept_, decimal=10)
assert_array_almost_equal(weights2, clf.coef_, decimal=10)
assert_array_almost_equal(intercept2, clf.intercept_, decimal=10)
@ignore_warnings
def test_regressor_matching():
n_samples = 10
n_features = 5
rng = np.random.RandomState(10)
X = rng.normal(size=(n_samples, n_features))
true_w = rng.normal(size=n_features)
y = X.dot(true_w)
alpha = 1.
n_iter = 100
fit_intercept = True
step_size = get_step_size(X, alpha, fit_intercept, classification=False)
clf = Ridge(fit_intercept=fit_intercept, tol=.00000000001, solver='sag',
alpha=alpha * n_samples, max_iter=n_iter)
clf.fit(X, y)
weights1, intercept1 = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=squared_dloss,
fit_intercept=fit_intercept)
weights2, intercept2 = sag(X, y, step_size, alpha, n_iter=n_iter,
dloss=squared_dloss,
fit_intercept=fit_intercept)
assert_array_almost_equal(weights1, clf.coef_, decimal=10)
assert_array_almost_equal(intercept1, clf.intercept_, decimal=10)
assert_array_almost_equal(weights2, clf.coef_, decimal=10)
assert_array_almost_equal(intercept2, clf.intercept_, decimal=10)
@ignore_warnings
def test_sag_pobj_matches_logistic_regression():
"""tests if the sag pobj matches log reg"""
n_samples = 100
alpha = 1.0
max_iter = 20
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0,
cluster_std=0.1)
clf1 = LogisticRegression(solver='sag', fit_intercept=False, tol=.0000001,
C=1. / alpha / n_samples, max_iter=max_iter,
random_state=10)
clf2 = clone(clf1)
clf3 = LogisticRegression(fit_intercept=False, tol=.0000001,
C=1. / alpha / n_samples, max_iter=max_iter,
random_state=10)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
clf3.fit(X, y)
pobj1 = get_pobj(clf1.coef_, alpha, X, y, log_loss)
pobj2 = get_pobj(clf2.coef_, alpha, X, y, log_loss)
pobj3 = get_pobj(clf3.coef_, alpha, X, y, log_loss)
assert_array_almost_equal(pobj1, pobj2, decimal=4)
assert_array_almost_equal(pobj2, pobj3, decimal=4)
assert_array_almost_equal(pobj3, pobj1, decimal=4)
@ignore_warnings
def test_sag_pobj_matches_ridge_regression():
"""tests if the sag pobj matches ridge reg"""
n_samples = 100
n_features = 10
alpha = 1.0
n_iter = 100
fit_intercept = False
rng = np.random.RandomState(10)
X = rng.normal(size=(n_samples, n_features))
true_w = rng.normal(size=n_features)
y = X.dot(true_w)
clf1 = Ridge(fit_intercept=fit_intercept, tol=.00000000001, solver='sag',
alpha=alpha, max_iter=n_iter, random_state=42)
clf2 = clone(clf1)
clf3 = Ridge(fit_intercept=fit_intercept, tol=.00001, solver='lsqr',
alpha=alpha, max_iter=n_iter, random_state=42)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
clf3.fit(X, y)
pobj1 = get_pobj(clf1.coef_, alpha, X, y, squared_loss)
pobj2 = get_pobj(clf2.coef_, alpha, X, y, squared_loss)
pobj3 = get_pobj(clf3.coef_, alpha, X, y, squared_loss)
assert_array_almost_equal(pobj1, pobj2, decimal=4)
assert_array_almost_equal(pobj1, pobj3, decimal=4)
assert_array_almost_equal(pobj3, pobj2, decimal=4)
@ignore_warnings
def test_sag_regressor_computed_correctly():
"""tests if the sag regressor is computed correctly"""
alpha = .1
n_features = 10
n_samples = 40
max_iter = 50
tol = .000001
fit_intercept = True
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
y = np.dot(X, w) + 2.
step_size = get_step_size(X, alpha, fit_intercept, classification=False)
clf1 = Ridge(fit_intercept=fit_intercept, tol=tol, solver='sag',
alpha=alpha * n_samples, max_iter=max_iter)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
spweights1, spintercept1 = sag_sparse(X, y, step_size, alpha,
n_iter=max_iter,
dloss=squared_dloss,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y, step_size, alpha,
n_iter=max_iter,
dloss=squared_dloss, sparse=True,
fit_intercept=fit_intercept)
assert_array_almost_equal(clf1.coef_.ravel(),
spweights1.ravel(),
decimal=3)
assert_almost_equal(clf1.intercept_, spintercept1, decimal=1)
# TODO: uncomment when sparse Ridge with intercept will be fixed (#4710)
#assert_array_almost_equal(clf2.coef_.ravel(),
# spweights2.ravel(),
# decimal=3)
    #assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
@ignore_warnings
def test_get_auto_step_size():
X = np.array([[1, 2, 3], [2, 3, 4], [2, 3, 2]], dtype=np.float64)
alpha = 1.2
fit_intercept = False
# sum the squares of the second sample because that's the largest
max_squared_sum = 4 + 9 + 16
max_squared_sum_ = get_max_squared_sum(X)
assert_almost_equal(max_squared_sum, max_squared_sum_, decimal=4)
for fit_intercept in (True, False):
step_size_sqr = 1.0 / (max_squared_sum + alpha + int(fit_intercept))
step_size_log = 4.0 / (max_squared_sum + 4.0 * alpha +
int(fit_intercept))
step_size_sqr_ = get_auto_step_size(max_squared_sum_, alpha, "squared",
fit_intercept)
step_size_log_ = get_auto_step_size(max_squared_sum_, alpha, "log",
fit_intercept)
assert_almost_equal(step_size_sqr, step_size_sqr_, decimal=4)
assert_almost_equal(step_size_log, step_size_log_, decimal=4)
msg = 'Unknown loss function for SAG solver, got wrong instead of'
assert_raise_message(ValueError, msg, get_auto_step_size,
max_squared_sum_, alpha, "wrong", fit_intercept)
def test_get_max_squared_sum():
n_samples = 100
n_features = 10
rng = np.random.RandomState(42)
X = rng.randn(n_samples, n_features).astype(np.float64)
mask = rng.randn(n_samples, n_features)
X[mask > 0] = 0.
X_csr = sp.csr_matrix(X)
X[0, 3] = 0.
X_csr[0, 3] = 0.
sum_X = get_max_squared_sum(X)
sum_X_csr = get_max_squared_sum(X_csr)
assert_almost_equal(sum_X, sum_X_csr)
@ignore_warnings
def test_sag_regressor():
"""tests if the sag regressor performs well"""
xmin, xmax = -5, 5
n_samples = 20
tol = .001
max_iter = 20
alpha = 0.1
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf1 = Ridge(tol=tol, solver='sag', max_iter=max_iter,
alpha=alpha * n_samples)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
score1 = clf1.score(X, y)
score2 = clf2.score(X, y)
assert_greater(score1, 0.99)
assert_greater(score2, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf1 = Ridge(tol=tol, solver='sag', max_iter=max_iter,
alpha=alpha * n_samples)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
score1 = clf1.score(X, y)
    score2 = clf2.score(X, y)
assert_greater(score1, 0.5)
assert_greater(score2, 0.5)
@ignore_warnings
def test_sag_classifier_computed_correctly():
"""tests if the binary classifier is computed correctly"""
alpha = .1
n_samples = 50
n_iter = 50
tol = .00001
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
y_tmp = np.ones(n_samples)
y_tmp[y != classes[1]] = -1
y = y_tmp
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=n_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
spweights, spintercept = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y, step_size, alpha,
n_iter=n_iter,
dloss=log_dloss, sparse=True,
fit_intercept=fit_intercept)
assert_array_almost_equal(clf1.coef_.ravel(),
spweights.ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_, spintercept, decimal=1)
assert_array_almost_equal(clf2.coef_.ravel(),
spweights2.ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
@ignore_warnings
def test_sag_multiclass_computed_correctly():
"""tests if the multiclass classifier is computed correctly"""
alpha = .1
n_samples = 20
tol = .00001
max_iter = 40
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=3, random_state=0,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=max_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
coef1 = []
intercept1 = []
coef2 = []
intercept2 = []
for cl in classes:
y_encoded = np.ones(n_samples)
y_encoded[y != cl] = -1
spweights1, spintercept1 = sag_sparse(X, y_encoded, step_size, alpha,
dloss=log_dloss, n_iter=max_iter,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y_encoded, step_size, alpha,
dloss=log_dloss, n_iter=max_iter,
sparse=True,
fit_intercept=fit_intercept)
coef1.append(spweights1)
intercept1.append(spintercept1)
coef2.append(spweights2)
intercept2.append(spintercept2)
coef1 = np.vstack(coef1)
intercept1 = np.array(intercept1)
coef2 = np.vstack(coef2)
intercept2 = np.array(intercept2)
for i, cl in enumerate(classes):
assert_array_almost_equal(clf1.coef_[i].ravel(),
coef1[i].ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_[i], intercept1[i], decimal=1)
assert_array_almost_equal(clf2.coef_[i].ravel(),
coef2[i].ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_[i], intercept2[i], decimal=1)
@ignore_warnings
def test_classifier_results():
"""tests if classifier results match target"""
alpha = .1
n_features = 20
n_samples = 10
tol = .01
max_iter = 200
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
y = np.dot(X, w)
y = np.sign(y)
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=max_iter, tol=tol, random_state=77)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
pred1 = clf1.predict(X)
pred2 = clf2.predict(X)
assert_almost_equal(pred1, y, decimal=12)
assert_almost_equal(pred2, y, decimal=12)
@ignore_warnings
def test_binary_classifier_class_weight():
"""tests binary classifier with classweights for each class"""
alpha = .1
n_samples = 50
n_iter = 20
tol = .00001
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=10,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
y_tmp = np.ones(n_samples)
y_tmp[y != classes[1]] = -1
y = y_tmp
class_weight = {1: .45, -1: .55}
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=n_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept,
class_weight=class_weight)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
le = LabelEncoder()
class_weight_ = compute_class_weight(class_weight, np.unique(y), y)
sample_weight = class_weight_[le.fit_transform(y)]
spweights, spintercept = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
sample_weight=sample_weight,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y, step_size, alpha,
n_iter=n_iter,
dloss=log_dloss, sparse=True,
sample_weight=sample_weight,
fit_intercept=fit_intercept)
assert_array_almost_equal(clf1.coef_.ravel(),
spweights.ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_, spintercept, decimal=1)
assert_array_almost_equal(clf2.coef_.ravel(),
spweights2.ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
@ignore_warnings
def test_multiclass_classifier_class_weight():
"""tests multiclass with classweights for each class"""
alpha = .1
n_samples = 20
tol = .00001
max_iter = 50
class_weight = {0: .45, 1: .55, 2: .75}
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=3, random_state=0,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=max_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept,
class_weight=class_weight)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
le = LabelEncoder()
class_weight_ = compute_class_weight(class_weight, np.unique(y), y)
sample_weight = class_weight_[le.fit_transform(y)]
coef1 = []
intercept1 = []
coef2 = []
intercept2 = []
for cl in classes:
y_encoded = np.ones(n_samples)
y_encoded[y != cl] = -1
spweights1, spintercept1 = sag_sparse(X, y_encoded, step_size, alpha,
n_iter=max_iter, dloss=log_dloss,
sample_weight=sample_weight)
spweights2, spintercept2 = sag_sparse(X, y_encoded, step_size, alpha,
n_iter=max_iter, dloss=log_dloss,
sample_weight=sample_weight,
sparse=True)
coef1.append(spweights1)
intercept1.append(spintercept1)
coef2.append(spweights2)
intercept2.append(spintercept2)
coef1 = np.vstack(coef1)
intercept1 = np.array(intercept1)
coef2 = np.vstack(coef2)
intercept2 = np.array(intercept2)
for i, cl in enumerate(classes):
assert_array_almost_equal(clf1.coef_[i].ravel(),
coef1[i].ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_[i], intercept1[i], decimal=1)
assert_array_almost_equal(clf2.coef_[i].ravel(),
coef2[i].ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_[i], intercept2[i], decimal=1)
def test_classifier_single_class():
"""tests if ValueError is thrown with only one class"""
X = [[1, 2], [3, 4]]
y = [1, 1]
assert_raise_message(ValueError,
"This solver needs samples of at least 2 classes "
"in the data",
LogisticRegression(solver='sag').fit,
X, y)
def test_step_size_alpha_error():
X = [[0, 0], [0, 0]]
y = [1, -1]
fit_intercept = False
alpha = 1.
msg = ("Current sag implementation does not handle the case"
" step_size * alpha_scaled == 1")
clf1 = LogisticRegression(solver='sag', C=1. / alpha,
fit_intercept=fit_intercept)
assert_raise_message(ZeroDivisionError, msg, clf1.fit, X, y)
clf2 = Ridge(fit_intercept=fit_intercept, solver='sag', alpha=alpha)
assert_raise_message(ZeroDivisionError, msg, clf2.fit, X, y)
| bsd-3-clause |
netsamir/dotfiles | files/vim/bundle/YouCompleteMe/third_party/ycmd/third_party/python-future/tests/test_future/test_futurize.py | 7 | 39993 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import pprint
import tempfile
from subprocess import Popen, PIPE
import os
from libfuturize.fixer_util import is_shebang_comment, is_encoding_comment
from lib2to3.fixer_util import FromImport
from lib2to3.pytree import Leaf, Node
from lib2to3.pygram import token
from future.tests.base import (CodeHandler, unittest, skip26, reformat_code,
order_future_lines, expectedFailurePY26)
from future.utils import PY2
class TestLibFuturize(unittest.TestCase):
def setUp(self):
# For tests that need a text file:
_, self.textfilename = tempfile.mkstemp(text=True)
super(TestLibFuturize, self).setUp()
def tearDown(self):
os.unlink(self.textfilename)
def test_correct_exit_status(self):
"""
Issue #119: futurize and pasteurize were not exiting with the correct
status code. This is because the status code returned from
libfuturize.main.main() etc. was a ``newint``, which sys.exit() always
translates into 1!
"""
from libfuturize.main import main
retcode = main([self.textfilename])
self.assertTrue(isinstance(retcode, int)) # i.e. Py2 builtin int
def test_is_shebang_comment(self):
"""
        Tests whether the fixer_util.is_shebang_comment() function is working.
"""
shebang_comments = [u'#!/usr/bin/env python\n'
u"#!/usr/bin/python2\n",
u"#! /usr/bin/python3\n",
]
not_shebang_comments = [u"# I saw a giant python\n",
u"# I have never seen a python2\n",
]
for comment in shebang_comments:
node = FromImport(u'math', [Leaf(token.NAME, u'cos', prefix=" ")])
node.prefix = comment
self.assertTrue(is_shebang_comment(node))
for comment in not_shebang_comments:
node = FromImport(u'math', [Leaf(token.NAME, u'cos', prefix=" ")])
node.prefix = comment
self.assertFalse(is_shebang_comment(node))
def test_is_encoding_comment(self):
"""
Tests whether the fixer_util.is_encoding_comment() function is working.
"""
encoding_comments = [u"# coding: utf-8",
u"# encoding: utf-8",
u"# -*- coding: latin-1 -*-",
u"# vim: set fileencoding=iso-8859-15 :",
]
not_encoding_comments = [u"# We use the file encoding utf-8",
u"coding = 'utf-8'",
u"encoding = 'utf-8'",
]
for comment in encoding_comments:
node = FromImport(u'math', [Leaf(token.NAME, u'cos', prefix=" ")])
node.prefix = comment
self.assertTrue(is_encoding_comment(node))
for comment in not_encoding_comments:
node = FromImport(u'math', [Leaf(token.NAME, u'cos', prefix=" ")])
node.prefix = comment
self.assertFalse(is_encoding_comment(node))
class TestFuturizeSimple(CodeHandler):
"""
This class contains snippets of Python 2 code (invalid Python 3) and
tests for whether they can be passed to ``futurize`` and immediately
run under both Python 2 again and Python 3.
"""
def test_encoding_comments_kept_at_top(self):
"""
Issues #10 and #97: If there is a source encoding comment line
(PEP 263), is it kept at the top of a module by ``futurize``?
"""
before = """
# coding=utf-8
print 'Hello'
"""
after = """
# coding=utf-8
from __future__ import print_function
print('Hello')
"""
self.convert_check(before, after)
before = """
#!/usr/bin/env python
# -*- coding: latin-1 -*-"
print 'Hello'
"""
after = """
#!/usr/bin/env python
# -*- coding: latin-1 -*-"
from __future__ import print_function
print('Hello')
"""
self.convert_check(before, after)
def test_shebang_blank_with_future_division_import(self):
"""
Issue #43: Is shebang line preserved as the first
line by futurize when followed by a blank line?
"""
before = """
#!/usr/bin/env python
import math
1 / 5
"""
after = """
#!/usr/bin/env python
from __future__ import division
from past.utils import old_div
import math
old_div(1, 5)
"""
self.convert_check(before, after)
def test_shebang_blank_with_print_import(self):
before = """
#!/usr/bin/env python
import math
print 'Hello'
"""
after = """
#!/usr/bin/env python
from __future__ import print_function
import math
print('Hello')
"""
self.convert_check(before, after)
def test_shebang_comment(self):
"""
Issue #43: Is shebang line preserved as the first
line by futurize when followed by a comment?
"""
before = """
#!/usr/bin/env python
# some comments
# and more comments
import math
print 'Hello!'
"""
after = """
#!/usr/bin/env python
# some comments
# and more comments
from __future__ import print_function
import math
print('Hello!')
"""
self.convert_check(before, after)
def test_shebang_docstring(self):
"""
Issue #43: Is shebang line preserved as the first
line by futurize when followed by a docstring?
"""
before = '''
#!/usr/bin/env python
"""
a doc string
"""
import math
print 'Hello!'
'''
after = '''
#!/usr/bin/env python
"""
a doc string
"""
from __future__ import print_function
import math
print('Hello!')
'''
self.convert_check(before, after)
def test_oldstyle_classes(self):
"""
Stage 2 should convert old-style to new-style classes. This makes
the new-style class explicit and reduces the gap between the
behaviour (e.g. method resolution order) on Py2 and Py3. It also
allows us to provide ``newobject`` (see
test_oldstyle_classes_iterator).
"""
before = """
class Blah:
pass
"""
after = """
from builtins import object
class Blah(object):
pass
"""
self.convert_check(before, after, ignore_imports=False)
def test_oldstyle_classes_iterator(self):
"""
An old-style class used as an iterator should be converted
properly. This requires ``futurize`` to do both steps (adding
inheritance from object and adding the newobject import) in the
right order. Any next() method should also be renamed to __next__.
"""
before = """
class Upper:
def __init__(self, iterable):
self._iter = iter(iterable)
def next(self):
return next(self._iter).upper()
def __iter__(self):
return self
assert list(Upper('hello')) == list('HELLO')
"""
after = """
from builtins import next
from builtins import object
class Upper(object):
def __init__(self, iterable):
self._iter = iter(iterable)
def __next__(self):
return next(self._iter).upper()
def __iter__(self):
return self
assert list(Upper('hello')) == list('HELLO')
"""
self.convert_check(before, after, ignore_imports=False)
# Try it again with this convention: class Upper():
before2 = """
class Upper():
def __init__(self, iterable):
self._iter = iter(iterable)
def next(self):
return next(self._iter).upper()
def __iter__(self):
return self
assert list(Upper('hello')) == list('HELLO')
"""
self.convert_check(before2, after)
@unittest.expectedFailure
def test_problematic_string(self):
""" This string generates a SyntaxError on Python 3 unless it has
an r prefix.
"""
before = r"""
s = 'The folder is "C:\Users"'.
"""
after = r"""
s = r'The folder is "C:\Users"'.
"""
self.convert_check(before, after)
@unittest.skip('--tobytes feature removed for now ...')
def test_tobytes(self):
"""
The --tobytes option converts all UNADORNED string literals 'abcd' to b'abcd'.
It does apply to multi-line strings but doesn't apply if it's a raw
        string, because ur'abcd' is a SyntaxError on Python 3 and rb'abcd' is a
        SyntaxError on Python 2.
"""
before = r"""
s0 = '1234'
s1 = '''5678
'''
s2 = "9abc"
# Unchanged:
s3 = r'1234'
s4 = R"defg"
s5 = u'hijk'
s6 = u"lmno"
s7 = b'lmno'
s8 = b"pqrs"
"""
after = r"""
s0 = b'1234'
s1 = b'''5678
'''
s2 = b"9abc"
# Unchanged:
s3 = r'1234'
s4 = R"defg"
s5 = u'hijk'
s6 = u"lmno"
s7 = b'lmno'
s8 = b"pqrs"
"""
self.convert_check(before, after, tobytes=True)
def test_cmp(self):
before = """
assert cmp(1, 2) == -1
assert cmp(2, 1) == 1
"""
after = """
from past.builtins import cmp
assert cmp(1, 2) == -1
assert cmp(2, 1) == 1
"""
self.convert_check(before, after, stages=(1, 2), ignore_imports=False)
def test_execfile(self):
before = """
with open('mytempfile.py', 'w') as f:
f.write('x = 1')
execfile('mytempfile.py')
x += 1
assert x == 2
"""
after = """
from past.builtins import execfile
with open('mytempfile.py', 'w') as f:
f.write('x = 1')
execfile('mytempfile.py')
x += 1
assert x == 2
"""
self.convert_check(before, after, stages=(1, 2), ignore_imports=False)
@unittest.expectedFailure
def test_izip(self):
before = """
from itertools import izip
for (a, b) in izip([1, 3, 5], [2, 4, 6]):
pass
"""
after = """
from builtins import zip
for (a, b) in zip([1, 3, 5], [2, 4, 6]):
pass
"""
self.convert_check(before, after, stages=(1, 2), ignore_imports=False)
def test_UserList(self):
before = """
from UserList import UserList
a = UserList([1, 3, 5])
assert len(a) == 3
"""
after = """
from collections import UserList
a = UserList([1, 3, 5])
assert len(a) == 3
"""
self.convert_check(before, after, stages=(1, 2), ignore_imports=True)
@unittest.expectedFailure
def test_no_unneeded_list_calls(self):
"""
TODO: get this working
"""
code = """
for (a, b) in zip(range(3), range(3, 6)):
pass
"""
self.unchanged(code)
@expectedFailurePY26
def test_import_builtins(self):
before = """
a = raw_input()
b = open(a, b, c)
c = filter(a, b)
d = map(a, b)
e = isinstance(a, str)
f = bytes(a, encoding='utf-8')
for g in xrange(10**10):
pass
h = reduce(lambda x, y: x+y, [1, 2, 3, 4, 5])
super(MyClass, self)
"""
after = """
from builtins import bytes
from builtins import filter
from builtins import input
from builtins import map
from builtins import range
from functools import reduce
a = input()
b = open(a, b, c)
c = list(filter(a, b))
d = list(map(a, b))
e = isinstance(a, str)
f = bytes(a, encoding='utf-8')
for g in range(10**10):
pass
h = reduce(lambda x, y: x+y, [1, 2, 3, 4, 5])
super(MyClass, self)
"""
self.convert_check(before, after, ignore_imports=False, run=False)
def test_xrange(self):
"""
The ``from builtins import range`` line was being added to the
bottom of the file as of v0.11.4, but only using Py2.7's lib2to3.
(Py3.3's lib2to3 seems to work.)
"""
before = """
for i in xrange(10):
pass
"""
after = """
from builtins import range
for i in range(10):
pass
"""
self.convert_check(before, after, ignore_imports=False)
def test_source_coding_utf8(self):
"""
Tests to ensure that the source coding line is not corrupted or
removed. It must be left as the first line in the file (including
before any __future__ imports). Also tests whether the unicode
characters in this encoding are parsed correctly and left alone.
"""
code = """
# -*- coding: utf-8 -*-
icons = [u"◐", u"◓", u"◑", u"◒"]
"""
def test_exception_syntax(self):
"""
Test of whether futurize handles the old-style exception syntax
"""
before = """
try:
pass
except IOError, e:
val = e.errno
"""
after = """
try:
pass
except IOError as e:
val = e.errno
"""
self.convert_check(before, after)
def test_super(self):
"""
This tests whether futurize keeps the old two-argument super() calls the
same as before. It should, because this still works in Py3.
"""
code = '''
class VerboseList(list):
def append(self, item):
print('Adding an item')
super(VerboseList, self).append(item)
'''
self.unchanged(code)
@unittest.expectedFailure
def test_file(self):
"""
file() as a synonym for open() is obsolete and invalid on Python 3.
"""
before = '''
f = file(self.textfilename)
data = f.read()
f.close()
'''
after = '''
f = open(__file__)
data = f.read()
f.close()
'''
self.convert_check(before, after)
def test_apply(self):
before = '''
def addup(*x):
return sum(x)
assert apply(addup, (10,20)) == 30
'''
after = """
def addup(*x):
return sum(x)
assert addup(*(10,20)) == 30
"""
self.convert_check(before, after)
@unittest.skip('not implemented yet')
def test_download_pypi_package_and_test(self):
URL = 'http://pypi.python.org/pypi/{0}/json'
import requests
package = 'future'
r = requests.get(URL.format(package))
pprint.pprint(r.json())
download_url = r.json()['urls'][0]['url']
filename = r.json()['urls'][0]['filename']
# r2 = requests.get(download_url)
# with open('/tmp/' + filename, 'w') as tarball:
# tarball.write(r2.content)
@expectedFailurePY26
def test_raw_input(self):
"""
Passes in a string to the waiting input() after futurize
conversion.
The code is the first snippet from these docs:
http://docs.python.org/2/library/2to3.html
"""
before = """
from io import BytesIO
def greet(name):
print "Hello, {0}!".format(name)
print "What's your name?"
import sys
oldstdin = sys.stdin
sys.stdin = BytesIO(b'Ed\\n')
name = raw_input()
greet(name.decode())
sys.stdin = oldstdin
assert name == b'Ed'
"""
desired = """
from io import BytesIO
def greet(name):
print("Hello, {0}!".format(name))
print("What's your name?")
import sys
oldstdin = sys.stdin
sys.stdin = BytesIO(b'Ed\\n')
name = input()
greet(name.decode())
sys.stdin = oldstdin
assert name == b'Ed'
"""
self.convert_check(before, desired, run=False)
for interpreter in self.interpreters:
p1 = Popen([interpreter, self.tempdir + 'mytestscript.py'],
stdout=PIPE, stdin=PIPE, stderr=PIPE)
(stdout, stderr) = p1.communicate(b'Ed')
self.assertEqual(stderr, b'')
self.assertEqual(stdout, b"What's your name?\nHello, Ed!\n")
def test_literal_prefixes_are_not_stripped(self):
"""
Tests to ensure that the u'' and b'' prefixes on unicode strings and
byte strings are not removed by the futurize script. Removing the
prefixes on Py3.3+ is unnecessary and loses some information -- namely,
that the strings have explicitly been marked as unicode or bytes,
rather than just e.g. a guess by some automated tool about what they
are.
"""
code = '''
s = u'unicode string'
b = b'byte string'
'''
self.unchanged(code)
def test_division(self):
before = """
x = 1 / 2
"""
after = """
from past.utils import old_div
x = old_div(1, 2)
"""
self.convert_check(before, after, stages=[1, 2])
def test_already_future_division(self):
code = """
from __future__ import division
x = 1 / 2
assert x == 0.5
y = 3. / 2.
assert y == 1.5
"""
self.unchanged(code)
class TestFuturizeRenamedStdlib(CodeHandler):
@unittest.skip('Infinite loop?')
def test_renamed_modules(self):
before = """
import ConfigParser
import copy_reg
import cPickle
import cStringIO
"""
after = """
import configparser
import copyreg
import pickle
import io
"""
self.convert_check(before, after)
@unittest.skip('Not working yet ...')
def test_urllib_refactor(self):
# Code like this using urllib is refactored by futurize --stage2 to use
# the new Py3 module names, but ``future`` doesn't support urllib yet.
before = """
import urllib
URL = 'http://pypi.python.org/pypi/future/json'
package = 'future'
r = urllib.urlopen(URL.format(package))
data = r.read()
"""
after = """
from future import standard_library
standard_library.install_aliases()
import urllib.request
URL = 'http://pypi.python.org/pypi/future/json'
package = 'future'
r = urllib.request.urlopen(URL.format(package))
data = r.read()
"""
self.convert_check(before, after)
@unittest.skip('Infinite loop?')
def test_renamed_copy_reg_and_cPickle_modules(self):
"""
Example from docs.python.org/2/library/copy_reg.html
"""
before = """
import copy_reg
import copy
import cPickle
class C(object):
def __init__(self, a):
self.a = a
def pickle_c(c):
print('pickling a C instance...')
return C, (c.a,)
copy_reg.pickle(C, pickle_c)
c = C(1)
d = copy.copy(c)
p = cPickle.dumps(c)
"""
after = """
import copyreg
import copy
import pickle
class C(object):
def __init__(self, a):
self.a = a
def pickle_c(c):
print('pickling a C instance...')
return C, (c.a,)
copyreg.pickle(C, pickle_c)
c = C(1)
d = copy.copy(c)
p = pickle.dumps(c)
"""
self.convert_check(before, after)
@unittest.expectedFailure
def test_Py2_StringIO_module(self):
"""
This requires that the argument to io.StringIO be made a
unicode string explicitly if we're not using unicode_literals:
Ideally, there would be a fixer for this. For now:
TODO: add the Py3 equivalent for this to the docs. Also add back
a test for the unicode_literals case.
"""
before = """
import cStringIO
import StringIO
s1 = cStringIO.StringIO('my string')
s2 = StringIO.StringIO('my other string')
assert isinstance(s1, cStringIO.InputType)
"""
# There is no io.InputType in Python 3. futurize should change this to
# something like this. But note that the input to io.StringIO
# must be a unicode string on both Py2 and Py3.
after = """
import io
import io
s1 = io.StringIO(u'my string')
s2 = io.StringIO(u'my other string')
assert isinstance(s1, io.StringIO)
"""
self.convert_check(before, after)
class TestFuturizeStage1(CodeHandler):
"""
Tests "stage 1": safe optimizations: modernizing Python 2 code so that it
uses print functions, new-style exception syntax, etc.
The behaviour should not change and this should introduce no dependency on
the ``future`` package. It produces more modern Python 2-only code. The
goal is to reduce the size of the real porting patch-set by performing
the uncontroversial patches first.
"""
def test_apply(self):
"""
apply() should be changed by futurize --stage1
"""
before = '''
def f(a, b):
return a + b
args = (1, 2)
assert apply(f, args) == 3
assert apply(f, ('a', 'b')) == 'ab'
'''
after = '''
def f(a, b):
return a + b
args = (1, 2)
assert f(*args) == 3
assert f(*('a', 'b')) == 'ab'
'''
self.convert_check(before, after, stages=[1])
def test_next_1(self):
"""
Custom next methods should not be converted to __next__ in stage1, but
any obj.next() calls should be converted to next(obj).
"""
before = """
class Upper:
def __init__(self, iterable):
self._iter = iter(iterable)
def next(self): # note the Py2 interface
return next(self._iter).upper()
def __iter__(self):
return self
itr = Upper('hello')
assert itr.next() == 'H'
assert next(itr) == 'E'
assert list(itr) == list('LLO')
"""
after = """
class Upper:
def __init__(self, iterable):
self._iter = iter(iterable)
def next(self): # note the Py2 interface
return next(self._iter).upper()
def __iter__(self):
return self
itr = Upper('hello')
assert next(itr) == 'H'
assert next(itr) == 'E'
assert list(itr) == list('LLO')
"""
self.convert_check(before, after, stages=[1], run=PY2)
@unittest.expectedFailure
def test_next_2(self):
"""
This version of the above doesn't currently work: the self._iter.next() call in
line 5 isn't converted to next(self._iter).
"""
before = """
class Upper:
def __init__(self, iterable):
self._iter = iter(iterable)
def next(self): # note the Py2 interface
return self._iter.next().upper()
def __iter__(self):
return self
itr = Upper('hello')
assert itr.next() == 'H'
assert next(itr) == 'E'
assert list(itr) == list('LLO')
"""
after = """
class Upper(object):
def __init__(self, iterable):
self._iter = iter(iterable)
def next(self): # note the Py2 interface
return next(self._iter).upper()
def __iter__(self):
return self
itr = Upper('hello')
assert next(itr) == 'H'
assert next(itr) == 'E'
assert list(itr) == list('LLO')
"""
self.convert_check(before, after, stages=[1], run=PY2)
def test_xrange(self):
"""
xrange should not be changed by futurize --stage1
"""
code = '''
for i in xrange(10):
pass
'''
self.unchanged(code, stages=[1], run=PY2)
@unittest.expectedFailure
def test_absolute_import_changes(self):
"""
Implicit relative imports should be converted to absolute or explicit
relative imports correctly.
Issue #16 (with porting bokeh/bbmodel.py)
"""
with open(self.tempdir + 'specialmodels.py', 'w') as f:
f.write('pass')
before = """
import specialmodels.pandasmodel
specialmodels.pandasmodel.blah()
"""
after = """
from __future__ import absolute_import
from .specialmodels import pandasmodel
pandasmodel.blah()
"""
self.convert_check(before, after, stages=[1])
def test_safe_futurize_imports(self):
"""
The standard library module names should not be changed until stage 2
"""
before = """
import ConfigParser
import HTMLParser
from itertools import ifilterfalse
ConfigParser.ConfigParser
HTMLParser.HTMLParser
assert list(ifilterfalse(lambda x: x % 2, [2, 4])) == [2, 4]
"""
self.unchanged(before, stages=[1], run=PY2)
def test_print(self):
before = """
print 'Hello'
"""
after = """
print('Hello')
"""
self.convert_check(before, after, stages=[1])
before = """
import sys
print >> sys.stderr, 'Hello', 'world'
"""
after = """
import sys
print('Hello', 'world', file=sys.stderr)
"""
self.convert_check(before, after, stages=[1])
def test_print_already_function(self):
"""
Running futurize --stage1 should not add a second set of parentheses
"""
before = """
print('Hello')
"""
self.unchanged(before, stages=[1])
@unittest.expectedFailure
def test_print_already_function_complex(self):
"""
        Running futurize --stage1 does add a second set of parentheses
in this case. This is because the underlying lib2to3 has two distinct
grammars -- with a print statement and with a print function -- and,
when going forwards (2 to both), futurize assumes print is a statement,
which raises a ParseError.
"""
before = """
import sys
print('Hello', 'world', file=sys.stderr)
"""
self.unchanged(before, stages=[1])
def test_exceptions(self):
before = """
try:
raise AttributeError('blah')
except AttributeError, e:
pass
"""
after = """
try:
raise AttributeError('blah')
except AttributeError as e:
pass
"""
self.convert_check(before, after, stages=[1])
@unittest.expectedFailure
def test_string_exceptions(self):
"""
2to3 does not convert string exceptions: see
http://python3porting.com/differences.html.
"""
before = """
try:
raise "old string exception"
except Exception, e:
pass
"""
after = """
try:
raise Exception("old string exception")
except Exception as e:
pass
"""
self.convert_check(before, after, stages=[1])
def test_oldstyle_classes(self):
"""
We don't convert old-style classes to new-style automatically in
stage 1 (but we should in stage 2). So Blah should not inherit
explicitly from object yet.
"""
before = """
class Blah:
pass
"""
self.unchanged(before, stages=[1])
def test_stdlib_modules_not_changed(self):
"""
Standard library module names should not be changed in stage 1
"""
before = """
import ConfigParser
import HTMLParser
import collections
print 'Hello'
try:
raise AttributeError('blah')
except AttributeError, e:
pass
"""
after = """
import ConfigParser
import HTMLParser
import collections
print('Hello')
try:
raise AttributeError('blah')
except AttributeError as e:
pass
"""
self.convert_check(before, after, stages=[1], run=PY2)
def test_octal_literals(self):
before = """
mode = 0644
"""
after = """
mode = 0o644
"""
self.convert_check(before, after)
def test_long_int_literals(self):
before = """
bignumber = 12345678901234567890L
"""
after = """
bignumber = 12345678901234567890
"""
self.convert_check(before, after)
def test___future___import_position(self):
"""
Issue #4: __future__ imports inserted too low in file: SyntaxError
"""
code = """
# Comments here
# and here
__version__=''' $Id$ '''
__doc__="A Sequencer class counts things. It aids numbering and formatting lists."
__all__='Sequencer getSequencer setSequencer'.split()
#
# another comment
#
CONSTANTS = [ 0, 01, 011, 0111, 012, 02, 021, 0211, 02111, 013 ]
_RN_LETTERS = "IVXLCDM"
def my_func(value):
pass
''' Docstring-like comment here '''
"""
self.convert(code)
def test_issue_45(self):
"""
Tests whether running futurize -f libfuturize.fixes.fix_future_standard_library_urllib
on the code below causes a ValueError (issue #45).
"""
code = r"""
from __future__ import print_function
from urllib import urlopen, urlencode
oeis_url = 'http://oeis.org/'
def _fetch(url):
try:
f = urlopen(url)
result = f.read()
f.close()
return result
except IOError as msg:
raise IOError("%s\nError fetching %s." % (msg, url))
"""
self.convert(code)
def test_order_future_lines(self):
"""
Tests the internal order_future_lines() function.
"""
before = '''
# comment here
from __future__ import print_function
from __future__ import absolute_import
# blank line or comment here
from future.utils import with_metaclass
from builtins import zzz
from builtins import aaa
from builtins import blah
# another comment
import something_else
code_here
more_code_here
'''
after = '''
# comment here
from __future__ import absolute_import
from __future__ import print_function
# blank line or comment here
from future.utils import with_metaclass
from builtins import aaa
from builtins import blah
from builtins import zzz
# another comment
import something_else
code_here
more_code_here
'''
self.assertEqual(order_future_lines(reformat_code(before)),
reformat_code(after))
@unittest.expectedFailure
def test_issue_12(self):
"""
Issue #12: This code shouldn't be upset by additional imports.
__future__ imports must appear at the top of modules since about Python
2.5.
"""
code = """
from __future__ import with_statement
f = open('setup.py')
for i in xrange(100):
pass
"""
self.unchanged(code)
@expectedFailurePY26
def test_range_necessary_list_calls(self):
"""
On Py2.6 (only), the xrange_with_import fixer somehow seems to cause
l = range(10)
to be converted to:
l = list(list(range(10)))
with an extra list(...) call.
"""
before = """
l = range(10)
assert isinstance(l, list)
for i in range(3):
print i
for i in xrange(3):
print i
"""
after = """
from __future__ import print_function
from builtins import range
l = list(range(10))
assert isinstance(l, list)
for i in range(3):
print(i)
for i in range(3):
print(i)
"""
self.convert_check(before, after)
def test_basestring(self):
"""
The 2to3 basestring fixer breaks working Py2 code that uses basestring.
This tests whether something sensible is done instead.
"""
before = """
assert isinstance('hello', basestring)
assert isinstance(u'hello', basestring)
assert isinstance(b'hello', basestring)
"""
after = """
from past.builtins import basestring
assert isinstance('hello', basestring)
assert isinstance(u'hello', basestring)
assert isinstance(b'hello', basestring)
"""
self.convert_check(before, after)
def test_safe_division(self):
"""
Tests whether Py2 scripts using old-style division still work
after futurization.
"""
before = """
x = 3 / 2
y = 3. / 2
assert x == 1 and isinstance(x, int)
assert y == 1.5 and isinstance(y, float)
"""
after = """
from __future__ import division
from past.utils import old_div
x = old_div(3, 2)
y = old_div(3., 2)
assert x == 1 and isinstance(x, int)
assert y == 1.5 and isinstance(y, float)
"""
self.convert_check(before, after)
def test_safe_division_overloaded(self):
"""
If division is overloaded, futurize may produce spurious old_div
calls. This test is for whether the code still works on Py2
despite these calls.
"""
before = """
class Path(str):
def __div__(self, other):
return self.__truediv__(other)
def __truediv__(self, other):
return Path(str(self) + '/' + str(other))
path1 = Path('home')
path2 = Path('user')
z = path1 / path2
assert isinstance(z, Path)
assert str(z) == 'home/user'
"""
after = """
from __future__ import division
from past.utils import old_div
class Path(str):
def __div__(self, other):
return self.__truediv__(other)
def __truediv__(self, other):
return Path(str(self) + '/' + str(other))
path1 = Path('home')
path2 = Path('user')
z = old_div(path1, path2)
assert isinstance(z, Path)
assert str(z) == 'home/user'
"""
self.convert_check(before, after)
def test_basestring_issue_156(self):
before = """
x = str(3)
allowed_types = basestring, int
assert isinstance('', allowed_types)
assert isinstance(u'', allowed_types)
assert isinstance(u'foo', basestring)
"""
after = """
from builtins import str
from past.builtins import basestring
x = str(3)
allowed_types = basestring, int
assert isinstance('', allowed_types)
assert isinstance(u'', allowed_types)
assert isinstance(u'foo', basestring)
"""
self.convert_check(before, after)
class TestConservativeFuturize(CodeHandler):
@unittest.expectedFailure
def test_basestring(self):
"""
In conservative mode, futurize would not modify "basestring"
but merely import it from ``past``, and the following code would still
run on both Py2 and Py3.
"""
before = """
assert isinstance('hello', basestring)
assert isinstance(u'hello', basestring)
assert isinstance(b'hello', basestring)
"""
after = """
from past.builtins import basestring
assert isinstance('hello', basestring)
assert isinstance(u'hello', basestring)
assert isinstance(b'hello', basestring)
"""
self.convert_check(before, after, conservative=True)
@unittest.expectedFailure
def test_open(self):
"""
In conservative mode, futurize would not import io.open because
this changes the default return type from bytes to text.
"""
before = """
filename = 'temp_file_open.test'
contents = 'Temporary file contents. Delete me.'
with open(filename, 'w') as f:
f.write(contents)
with open(filename, 'r') as f:
data = f.read()
assert isinstance(data, str)
assert data == contents
"""
after = """
from past.builtins import open, str as oldbytes, unicode
filename = oldbytes(b'temp_file_open.test')
contents = oldbytes(b'Temporary file contents. Delete me.')
with open(filename, oldbytes(b'w')) as f:
f.write(contents)
with open(filename, oldbytes(b'r')) as f:
data = f.read()
assert isinstance(data, oldbytes)
assert data == contents
assert isinstance(oldbytes(b'hello'), basestring)
assert isinstance(unicode(u'hello'), basestring)
assert isinstance(oldbytes(b'hello'), basestring)
"""
self.convert_check(before, after, conservative=True)
class TestFuturizeAllImports(CodeHandler):
"""
Tests "futurize --all-imports".
"""
@expectedFailurePY26
def test_all_imports(self):
before = """
import math
import os
l = range(10)
assert isinstance(l, list)
print 'Hello'
for i in xrange(100):
pass
print('Hello')
"""
after = """
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import range
from builtins import *
import math
import os
l = list(range(10))
assert isinstance(l, list)
print('Hello')
for i in range(100):
pass
print('Hello')
"""
self.convert_check(before, after, all_imports=True)
if __name__ == '__main__':
unittest.main()
| unlicense |
jundongl/PyFeaST | skfeature/example/test_CFS.py | 3 | 1465 | import scipy.io
from sklearn import svm
from sklearn import cross_validation
from sklearn.metrics import accuracy_score
from skfeature.function.statistical_based import CFS
def main():
# load data
mat = scipy.io.loadmat('../data/colon.mat')
X = mat['X'] # data
X = X.astype(float)
y = mat['Y'] # label
y = y[:, 0]
n_samples, n_features = X.shape # number of samples and number of features
# split data into 10 folds
ss = cross_validation.KFold(n_samples, n_folds=10, shuffle=True)
# perform evaluation on classification task
num_fea = 100 # number of selected features
clf = svm.LinearSVC() # linear SVM
correct = 0
for train, test in ss:
# obtain the index of selected features on training set
idx = CFS.cfs(X[train], y[train])
# obtain the dataset on the selected features
selected_features = X[:, idx[0:num_fea]]
# train a classification model with the selected features on the training dataset
clf.fit(selected_features[train], y[train])
# predict the class labels of test data
y_predict = clf.predict(selected_features[test])
# obtain the classification accuracy on the test data
acc = accuracy_score(y[test], y_predict)
correct = correct + acc
# output the average classification accuracy over all 10 folds
print 'Accuracy:', float(correct)/10
if __name__ == '__main__':
main() | gpl-2.0 |
nschloe/matplotlib2tikz | test/refresh_reference_files.py | 1 | 1142 | # -*- coding: utf-8 -*-
#
import argparse
import os
import importlib.util
import matplotlib2tikz as m2t
import matplotlib.pyplot as plt
def _main():
parser = argparse.ArgumentParser(description="Refresh the reference TeX files.")
parser.add_argument("files", nargs="+", help="Files to refresh")
args = parser.parse_args()
this_dir = os.path.dirname(os.path.abspath(__file__))
exclude_list = ["test_rotated_labels.py", "test_deterministic_output.py"]
for filename in args.files:
if filename in exclude_list:
continue
if filename.startswith("test_") and filename.endswith(".py"):
spec = importlib.util.spec_from_file_location("plot", filename)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
module.plot()
code = m2t.get_tikz_code(include_disclaimer=False)
plt.close()
tex_filename = filename[:-3] + "_reference.tex"
with open(os.path.join(this_dir, tex_filename), "w") as f:
f.write(code)
return
if __name__ == "__main__":
_main()
| mit |
maheshakya/scikit-learn | sklearn/utils/__init__.py | 2 | 13294 | """
The :mod:`sklearn.utils` module includes various utilities.
"""
from collections import Sequence
import numpy as np
from scipy.sparse import issparse
import warnings
from .murmurhash import murmurhash3_32
from .validation import (as_float_array,
assert_all_finite, warn_if_not_float,
check_random_state, column_or_1d, check_array,
check_consistent_length, check_X_y, indexable,
check_symmetric)
from .class_weight import compute_class_weight
from ..externals.joblib import cpu_count
__all__ = ["murmurhash3_32", "as_float_array",
"assert_all_finite", "check_array",
"warn_if_not_float",
"check_random_state",
"compute_class_weight",
"column_or_1d", "safe_indexing",
"check_consistent_length", "check_X_y", 'indexable']
class deprecated(object):
"""Decorator to mark a function or class as deprecated.
Issue a warning when the function is called/the class is instantiated and
adds a warning to the docstring.
The optional extra argument will be appended to the deprecation message
and the docstring. Note: to use this with the default value for extra, put
in an empty pair of parentheses:
>>> from sklearn.utils import deprecated
>>> deprecated() # doctest: +ELLIPSIS
<sklearn.utils.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
"""
# Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
def __init__(self, extra=''):
"""
Parameters
----------
extra: string
to be added to the deprecation messages
"""
self.extra = extra
def __call__(self, obj):
if isinstance(obj, type):
return self._decorate_class(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
# FIXME: we should probably reset __new__ for full generality
init = cls.__init__
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return init(*args, **kwargs)
cls.__init__ = wrapped
wrapped.__name__ = '__init__'
wrapped.__doc__ = self._update_doc(init.__doc__)
wrapped.deprecated_original = init
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return fun(*args, **kwargs)
wrapped.__name__ = fun.__name__
wrapped.__dict__ = fun.__dict__
wrapped.__doc__ = self._update_doc(fun.__doc__)
return wrapped
def _update_doc(self, olddoc):
newdoc = "DEPRECATED"
if self.extra:
newdoc = "%s: %s" % (newdoc, self.extra)
if olddoc:
newdoc = "%s\n\n%s" % (newdoc, olddoc)
return newdoc
def safe_mask(X, mask):
"""Return a mask which is safe to use on X.
Parameters
----------
X : {array-like, sparse matrix}
Data on which to apply mask.
mask: array
Mask to be used on X.
Returns
-------
mask
"""
mask = np.asarray(mask)
if np.issubdtype(mask.dtype, np.int):
return mask
if hasattr(X, "toarray"):
ind = np.arange(mask.shape[0])
mask = ind[mask]
return mask
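# --- Editor's illustrative sketch (not part of the original module) ---
# A minimal, hedged example of how safe_mask is meant to be used: for a
# sparse matrix the boolean mask is converted to integer indices, because
# boolean-mask row indexing is not supported uniformly across sparse formats.
# The helper name below is made up and the function is never called, so the
# module's behaviour is unchanged.
def _safe_mask_example():  # pragma: no cover
    from scipy.sparse import csr_matrix
    X_dense = np.array([[1, 2], [3, 4], [5, 6]])
    X_sparse = csr_matrix(X_dense)
    mask = np.array([True, False, True])
    # Dense input: the boolean mask is returned unchanged.
    dense_mask = safe_mask(X_dense, mask)
    # Sparse input: the mask is converted to the integer indices [0, 2].
    sparse_mask = safe_mask(X_sparse, mask)
    return X_dense[dense_mask], X_sparse[sparse_mask]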
def safe_indexing(X, indices):
"""Return items or rows from X using indices.
Allows simple indexing of lists or arrays.
Parameters
----------
X : array-like, sparse-matrix, list.
Data from which to sample rows or items.
indices : array-like, list
Indices according to which X will be subsampled.
"""
if hasattr(X, "iloc"):
# Pandas Dataframes and Series
return X.iloc[indices]
elif hasattr(X, "shape"):
if hasattr(X, 'take') and (hasattr(indices, 'dtype') and
indices.dtype.kind == 'i'):
# This is often substantially faster than X[indices]
return X.take(indices, axis=0)
else:
return X[indices]
else:
return [X[idx] for idx in indices]
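# --- Editor's illustrative sketch (not part of the original module) ---
# A short, hedged example of safe_indexing on two of the input kinds it
# handles: plain lists fall back to a list comprehension, while NumPy arrays
# with integer indices use the faster ``take`` path. The helper is defined
# but never called.
def _safe_indexing_example():  # pragma: no cover
    X_list = ['a', 'b', 'c', 'd']
    X_array = np.arange(8).reshape(4, 2)
    indices = [0, 2]
    subset_list = safe_indexing(X_list, indices)            # ['a', 'c']
    subset_array = safe_indexing(X_array, np.asarray(indices))  # rows 0 and 2
    return subset_list, subset_array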
def resample(*arrays, **options):
"""Resample arrays or sparse matrices in a consistent way
The default strategy implements one step of the bootstrapping
procedure.
Parameters
----------
`*arrays` : sequence of arrays or scipy.sparse matrices with same shape[0]
replace : boolean, True by default
Implements resampling with replacement. If False, this will implement
(sliced) random permutations.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
Returns
-------
Sequence of resampled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = [[1., 0.], [2., 1.], [0., 0.]]
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import resample
>>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
>>> X
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 4 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([0, 1, 0])
>>> resample(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:class:`sklearn.cross_validation.Bootstrap`
:func:`sklearn.utils.shuffle`
"""
random_state = check_random_state(options.pop('random_state', None))
replace = options.pop('replace', True)
max_n_samples = options.pop('n_samples', None)
if options:
raise ValueError("Unexpected kw arguments: %r" % options.keys())
if len(arrays) == 0:
return None
first = arrays[0]
n_samples = first.shape[0] if hasattr(first, 'shape') else len(first)
if max_n_samples is None:
max_n_samples = n_samples
if max_n_samples > n_samples:
raise ValueError("Cannot sample %d out of arrays with dim %d" % (
max_n_samples, n_samples))
check_consistent_length(*arrays)
arrays = [check_array(x, accept_sparse='csr', ensure_2d=False,
allow_nd=True) for x in arrays]
if replace:
indices = random_state.randint(0, n_samples, size=(max_n_samples,))
else:
indices = np.arange(n_samples)
random_state.shuffle(indices)
indices = indices[:max_n_samples]
resampled_arrays = []
for array in arrays:
array = array[indices]
resampled_arrays.append(array)
if len(resampled_arrays) == 1:
# syntactic sugar for the unit argument case
return resampled_arrays[0]
else:
return resampled_arrays
def shuffle(*arrays, **options):
"""Shuffle arrays or sparse matrices in a consistent way
This is a convenience alias to ``resample(*arrays, replace=False)`` to do
random permutations of the collections.
Parameters
----------
`*arrays` : sequence of arrays or scipy.sparse matrices with same shape[0]
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
Returns
-------
Sequence of shuffled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = [[1., 0.], [2., 1.], [0., 0.]]
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import shuffle
>>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
>>> X
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 3 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([2, 1, 0])
>>> shuffle(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.resample`
"""
options['replace'] = False
return resample(*arrays, **options)
def safe_sqr(X, copy=True):
"""Element wise squaring of array-likes and sparse matrices.
Parameters
----------
X : array like, matrix, sparse matrix
Returns
-------
X ** 2 : element wise square
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
if issparse(X):
if copy:
X = X.copy()
X.data **= 2
else:
if copy:
X = X ** 2
else:
X **= 2
return X
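# --- Editor's illustrative sketch (not part of the original module) ---
# Hedged example: safe_sqr squares a dense array and the ``data`` attribute of
# a sparse matrix in the same way; with copy=False the input is modified in
# place. The helper is never called.
def _safe_sqr_example():  # pragma: no cover
    from scipy.sparse import csr_matrix
    X = np.array([[-2.0, 3.0], [0.0, 4.0]])
    dense_sq = safe_sqr(X)               # array([[ 4.,  9.], [ 0., 16.]])
    sparse_sq = safe_sqr(csr_matrix(X))  # same values, stored sparsely
    return dense_sq, sparse_sq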
def gen_batches(n, batch_size):
"""Generator to create slices containing batch_size elements, from 0 to n.
The last slice may contain less than batch_size elements, when batch_size
does not divide n.
Examples
--------
>>> from sklearn.utils import gen_batches
>>> list(gen_batches(7, 3))
[slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
>>> list(gen_batches(6, 3))
[slice(0, 3, None), slice(3, 6, None)]
>>> list(gen_batches(2, 3))
[slice(0, 2, None)]
"""
start = 0
for _ in range(int(n // batch_size)):
end = start + batch_size
yield slice(start, end)
start = end
if start < n:
yield slice(start, n)
def gen_even_slices(n, n_packs, n_samples=None):
"""Generator to create n_packs slices going up to n.
Pass n_samples when the slices are to be used for sparse matrix indexing;
slicing off-the-end raises an exception, while it works for NumPy arrays.
Examples
--------
>>> from sklearn.utils import gen_even_slices
>>> list(gen_even_slices(10, 1))
[slice(0, 10, None)]
>>> list(gen_even_slices(10, 10)) #doctest: +ELLIPSIS
[slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
>>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS
[slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
>>> list(gen_even_slices(10, 3))
[slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
"""
start = 0
for pack_num in range(n_packs):
this_n = n // n_packs
if pack_num < n % n_packs:
this_n += 1
if this_n > 0:
end = start + this_n
if n_samples is not None:
end = min(n_samples, end)
yield slice(start, end, None)
start = end
def _get_n_jobs(n_jobs):
"""Get number of jobs for the computation.
This function reimplements the logic of joblib to determine the actual
number of jobs depending on the cpu count. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is useful
for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
Thus for n_jobs = -2, all CPUs but one are used.
Parameters
----------
n_jobs : int
Number of jobs stated in joblib convention.
Returns
-------
n_jobs : int
The actual number of jobs as positive integer.
Examples
--------
>>> from sklearn.utils import _get_n_jobs
>>> _get_n_jobs(4)
4
>>> jobs = _get_n_jobs(-2)
>>> assert jobs == max(cpu_count() - 1, 1)
>>> _get_n_jobs(0)
Traceback (most recent call last):
...
ValueError: Parameter n_jobs == 0 has no meaning.
"""
if n_jobs < 0:
return max(cpu_count() + 1 + n_jobs, 1)
elif n_jobs == 0:
raise ValueError('Parameter n_jobs == 0 has no meaning.')
else:
return n_jobs
def tosequence(x):
"""Cast iterable x to a Sequence, avoiding a copy if possible."""
if isinstance(x, np.ndarray):
return np.asarray(x)
elif isinstance(x, Sequence):
return x
else:
return list(x)
class ConvergenceWarning(UserWarning):
"""Custom warning to capture convergence problems"""
class DataDimensionalityWarning(UserWarning):
"""Custom warning to notify potential issues with data dimensionality"""
| bsd-3-clause |
anirudhjayaraman/scikit-learn | sklearn/cluster/__init__.py | 364 | 1228 | """
The :mod:`sklearn.cluster` module gathers popular unsupervised clustering
algorithms.
"""
from .spectral import spectral_clustering, SpectralClustering
from .mean_shift_ import (mean_shift, MeanShift,
estimate_bandwidth, get_bin_seeds)
from .affinity_propagation_ import affinity_propagation, AffinityPropagation
from .hierarchical import (ward_tree, AgglomerativeClustering, linkage_tree,
FeatureAgglomeration)
from .k_means_ import k_means, KMeans, MiniBatchKMeans
from .dbscan_ import dbscan, DBSCAN
from .bicluster import SpectralBiclustering, SpectralCoclustering
from .birch import Birch
__all__ = ['AffinityPropagation',
'AgglomerativeClustering',
'Birch',
'DBSCAN',
'KMeans',
'FeatureAgglomeration',
'MeanShift',
'MiniBatchKMeans',
'SpectralClustering',
'affinity_propagation',
'dbscan',
'estimate_bandwidth',
'get_bin_seeds',
'k_means',
'linkage_tree',
'mean_shift',
'spectral_clustering',
'ward_tree',
'SpectralBiclustering',
'SpectralCoclustering']
| bsd-3-clause |
chrisburr/scikit-learn | sklearn/kernel_approximation.py | 258 | 17973 | """
The :mod:`sklearn.kernel_approximation` module implements several
approximate kernel feature maps based on Fourier transforms.
"""
# Author: Andreas Mueller <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import svd
from .base import BaseEstimator
from .base import TransformerMixin
from .utils import check_array, check_random_state, as_float_array
from .utils.extmath import safe_sparse_dot
from .utils.validation import check_is_fitted
from .metrics.pairwise import pairwise_kernels
class RBFSampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of an RBF kernel by Monte Carlo approximation
of its Fourier transform.
It implements a variant of Random Kitchen Sinks.[1]
Read more in the :ref:`User Guide <rbf_kernel_approx>`.
Parameters
----------
gamma : float
Parameter of RBF kernel: exp(-gamma * x^2)
n_components : int
Number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Notes
-----
See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and
Benjamin Recht.
[1] "Weighted Sums of Random Kitchen Sinks: Replacing
minimization with randomization in learning" by A. Rahimi and
Benjamin Recht.
(http://www.eecs.berkeley.edu/~brecht/papers/08.rah.rec.nips.pdf)
"""
def __init__(self, gamma=1., n_components=100, random_state=None):
self.gamma = gamma
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X, accept_sparse='csr')
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
self.random_weights_ = (np.sqrt(2 * self.gamma) * random_state.normal(
size=(n_features, self.n_components)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = check_array(X, accept_sparse='csr')
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
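# --- Editor's illustrative sketch (not part of the original module) ---
# Hedged usage example for RBFSampler: inner products of the transformed
# features approximate the exact RBF kernel, so a linear model on the mapped
# data approximates a kernelized one at lower cost. The helper is never
# called, and the sample sizes below are arbitrary illustrative values.
def _rbf_sampler_example():  # pragma: no cover
    from .metrics.pairwise import rbf_kernel
    rng = np.random.RandomState(0)
    X = rng.rand(50, 5)
    sampler = RBFSampler(gamma=1.0, n_components=500, random_state=0)
    X_mapped = sampler.fit_transform(X)
    # Monte Carlo approximation: X_mapped.dot(X_mapped.T) ~ rbf_kernel(X)
    approx = np.dot(X_mapped, X_mapped.T)
    exact = rbf_kernel(X, gamma=1.0)
    return np.abs(approx - exact).max()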
class SkewedChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of the "skewed chi-squared" kernel by Monte
Carlo approximation of its Fourier transform.
Read more in the :ref:`User Guide <skewed_chi_kernel_approx>`.
Parameters
----------
skewedness : float
"skewedness" parameter of the kernel. Needs to be cross-validated.
n_components : int
number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
References
----------
See "Random Fourier Approximations for Skewed Multiplicative Histogram
Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu.
See also
--------
AdditiveChi2Sampler : A different approach for approximating an additive
variant of the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
"""
def __init__(self, skewedness=1., n_components=100, random_state=None):
self.skewedness = skewedness
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
uniform = random_state.uniform(size=(n_features, self.n_components))
# transform by inverse CDF of sech
self.random_weights_ = (1. / np.pi
* np.log(np.tan(np.pi / 2. * uniform)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = as_float_array(X, copy=True)
X = check_array(X, copy=False)
if (X < 0).any():
raise ValueError("X may not contain entries smaller than zero.")
X += self.skewedness
np.log(X, X)
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
class AdditiveChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximate feature map for additive chi2 kernel.
Samples the Fourier transform of the kernel characteristic
at regular intervals.
Since the kernel that is to be approximated is additive, the components of
the input vectors can be treated separately. Each entry in the original
space is transformed into 2*sample_steps-1 features, where sample_steps is
a parameter of the method. Typical values of sample_steps include 1, 2 and
3.
Optimal choices for the sampling interval for certain data ranges can be
computed (see the reference). The default values should be reasonable.
Read more in the :ref:`User Guide <additive_chi_kernel_approx>`.
Parameters
----------
sample_steps : int, optional
Gives the number of (complex) sampling points.
sample_interval : float, optional
Sampling interval. Must be specified when sample_steps not in {1,2,3}.
Notes
-----
This estimator approximates a slightly different version of the additive
chi squared kernel than ``metric.additive_chi2`` computes.
See also
--------
SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of
the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi
squared kernel.
References
----------
See `"Efficient additive kernels via explicit feature maps"
<http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>`_
A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence,
2011
"""
def __init__(self, sample_steps=2, sample_interval=None):
self.sample_steps = sample_steps
self.sample_interval = sample_interval
def fit(self, X, y=None):
"""Set parameters."""
X = check_array(X, accept_sparse='csr')
if self.sample_interval is None:
# See reference, figure 2 c)
if self.sample_steps == 1:
self.sample_interval_ = 0.8
elif self.sample_steps == 2:
self.sample_interval_ = 0.5
elif self.sample_steps == 3:
self.sample_interval_ = 0.4
else:
raise ValueError("If sample_steps is not in [1, 2, 3],"
" you need to provide sample_interval")
else:
self.sample_interval_ = self.sample_interval
return self
def transform(self, X, y=None):
"""Apply approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Returns
-------
X_new : {array, sparse matrix}, \
shape = (n_samples, n_features * (2*sample_steps - 1))
Whether the return value is an array or sparse matrix depends on
the type of the input X.
"""
msg = ("%(name)s is not fitted. Call fit to set the parameters before"
" calling transform")
check_is_fitted(self, "sample_interval_", msg=msg)
X = check_array(X, accept_sparse='csr')
sparse = sp.issparse(X)
# check if X has negative values. Doesn't play well with np.log.
if ((X.data if sparse else X) < 0).any():
raise ValueError("Entries of X must be non-negative.")
# zeroth component
# 1/cosh = sech
# cosh(0) = 1.0
transf = self._transform_sparse if sparse else self._transform_dense
return transf(X)
def _transform_dense(self, X):
non_zero = (X != 0.0)
X_nz = X[non_zero]
X_step = np.zeros_like(X)
X_step[non_zero] = np.sqrt(X_nz * self.sample_interval_)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X_nz)
step_nz = 2 * X_nz * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.cos(j * log_step_nz)
X_new.append(X_step)
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.sin(j * log_step_nz)
X_new.append(X_step)
return np.hstack(X_new)
def _transform_sparse(self, X):
indices = X.indices.copy()
indptr = X.indptr.copy()
data_step = np.sqrt(X.data * self.sample_interval_)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X.data)
step_nz = 2 * X.data * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
data_step = factor_nz * np.cos(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
data_step = factor_nz * np.sin(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
return sp.hstack(X_new)
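# --- Editor's illustrative sketch (not part of the original module) ---
# Hedged example for AdditiveChi2Sampler: each input feature is expanded into
# 2 * sample_steps - 1 output features (here 2 * 2 - 1 = 3), so 4 input
# features are mapped to 12 output features. The helper is never called.
def _additive_chi2_example():  # pragma: no cover
    rng = np.random.RandomState(0)
    X = rng.rand(10, 4)  # the chi2 map requires non-negative input
    mapped = AdditiveChi2Sampler(sample_steps=2).fit_transform(X)
    return mapped.shape  # expected: (10, 12)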
class Nystroem(BaseEstimator, TransformerMixin):
"""Approximate a kernel map using a subset of the training data.
Constructs an approximate feature map for an arbitrary kernel
using a subset of the data as basis.
Read more in the :ref:`User Guide <nystroem_kernel_approx>`.
Parameters
----------
kernel : string or callable, default="rbf"
Kernel map to be approximated. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
n_components : int
Number of features to construct.
How many data points will be used to construct the mapping.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Attributes
----------
components_ : array, shape (n_components, n_features)
Subset of training points used to construct the feature map.
component_indices_ : array, shape (n_components)
Indices of ``components_`` in the training set.
normalization_ : array, shape (n_components, n_components)
Normalization matrix needed for embedding.
Square root of the kernel matrix on ``components_``.
References
----------
* Williams, C.K.I. and Seeger, M.
"Using the Nystroem method to speed up kernel machines",
Advances in neural information processing systems 2001
* T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou
"Nystroem Method vs Random Fourier Features: A Theoretical and Empirical
Comparison",
Advances in Neural Information Processing Systems 2012
See also
--------
RBFSampler : An approximation to the RBF kernel using random Fourier
features.
sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
"""
def __init__(self, kernel="rbf", gamma=None, coef0=1, degree=3,
kernel_params=None, n_components=100, random_state=None):
self.kernel = kernel
self.gamma = gamma
self.coef0 = coef0
self.degree = degree
self.kernel_params = kernel_params
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit estimator to data.
Samples a subset of training points, computes kernel
on these and computes normalization matrix.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Training data.
"""
X = check_array(X, accept_sparse='csr')
rnd = check_random_state(self.random_state)
n_samples = X.shape[0]
# get basis vectors
if self.n_components > n_samples:
# XXX should we just bail?
n_components = n_samples
warnings.warn("n_components > n_samples. This is not possible.\n"
"n_components was set to n_samples, which results"
" in inefficient evaluation of the full kernel.")
else:
n_components = self.n_components
n_components = min(n_samples, n_components)
inds = rnd.permutation(n_samples)
basis_inds = inds[:n_components]
basis = X[basis_inds]
basis_kernel = pairwise_kernels(basis, metric=self.kernel,
filter_params=True,
**self._get_kernel_params())
# sqrt of kernel matrix on basis vectors
U, S, V = svd(basis_kernel)
S = np.maximum(S, 1e-12)
self.normalization_ = np.dot(U * 1. / np.sqrt(S), V)
self.components_ = basis
self.component_indices_ = inds
return self
def transform(self, X):
"""Apply feature map to X.
Computes an approximate feature map using the kernel
between some training points and X.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Data to transform.
Returns
-------
X_transformed : array, shape=(n_samples, n_components)
Transformed data.
"""
check_is_fitted(self, 'components_')
X = check_array(X, accept_sparse='csr')
kernel_params = self._get_kernel_params()
embedded = pairwise_kernels(X, self.components_,
metric=self.kernel,
filter_params=True,
**kernel_params)
return np.dot(embedded, self.normalization_.T)
def _get_kernel_params(self):
params = self.kernel_params
if params is None:
params = {}
if not callable(self.kernel):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
return params
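# --- Editor's illustrative sketch (not part of the original module) ---
# Hedged example for Nystroem: approximate an RBF kernel map on a subset of
# the training points, then train a linear classifier on the mapped features.
# The gamma, n_components and classifier choices below are arbitrary
# illustrative values, and the helper is never called.
def _nystroem_example():  # pragma: no cover
    from .svm import LinearSVC
    from .datasets import load_digits
    digits = load_digits()
    X = digits.data / 16.  # scale pixel values to [0, 1]
    feature_map = Nystroem(kernel="rbf", gamma=0.2, n_components=300,
                           random_state=0)
    X_mapped = feature_map.fit_transform(X)
    clf = LinearSVC().fit(X_mapped, digits.target)
    return clf.score(X_mapped, digits.target)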
| bsd-3-clause |
imaculate/scikit-learn | examples/neighbors/plot_regression.py | 349 | 1402 | """
============================
Nearest Neighbors regression
============================
Demonstrate the resolution of a regression problem
using a k-Nearest Neighbor and the interpolation of the
target using both barycenter and constant weights.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause (C) INRIA
###############################################################################
# Generate sample data
import numpy as np
import matplotlib.pyplot as plt
from sklearn import neighbors
np.random.seed(0)
X = np.sort(5 * np.random.rand(40, 1), axis=0)
T = np.linspace(0, 5, 500)[:, np.newaxis]
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 1 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
n_neighbors = 5
for i, weights in enumerate(['uniform', 'distance']):
knn = neighbors.KNeighborsRegressor(n_neighbors, weights=weights)
y_ = knn.fit(X, y).predict(T)
plt.subplot(2, 1, i + 1)
plt.scatter(X, y, c='k', label='data')
plt.plot(T, y_, c='g', label='prediction')
plt.axis('tight')
plt.legend()
plt.title("KNeighborsRegressor (k = %i, weights = '%s')" % (n_neighbors,
weights))
plt.show()
| bsd-3-clause |
jayflo/scikit-learn | examples/classification/plot_digits_classification.py | 289 | 2397 | """
================================
Recognizing hand-written digits
================================
An example showing how the scikit-learn can be used to recognize images of
hand-written digits.
This example is commented in the
:ref:`tutorial section of the user manual <introduction>`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics
# The digits dataset
digits = datasets.load_digits()
# The data that we are interested in is made of 8x8 images of digits, let's
# have a look at the first 3 images, stored in the `images` attribute of the
# dataset. If we were working from image files, we could load them using
# pylab.imread. Note that each image must have the same size. For these
# images, we know which digit they represent: it is given in the 'target' of
# the dataset.
images_and_labels = list(zip(digits.images, digits.target))
for index, (image, label) in enumerate(images_and_labels[:4]):
plt.subplot(2, 4, index + 1)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Training: %i' % label)
# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.001)
# We learn the digits on the first half of the digits
classifier.fit(data[:n_samples / 2], digits.target[:n_samples / 2])
# Now predict the value of the digit on the second half:
expected = digits.target[n_samples / 2:]
predicted = classifier.predict(data[n_samples / 2:])
print("Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
images_and_predictions = list(zip(digits.images[n_samples / 2:], predicted))
for index, (image, prediction) in enumerate(images_and_predictions[:4]):
plt.subplot(2, 4, index + 5)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Prediction: %i' % prediction)
plt.show()
| bsd-3-clause |
Achuth17/scikit-learn | sklearn/tests/test_metaestimators.py | 226 | 4954 | """Common tests for metaestimators"""
import functools
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.externals.six import iterkeys
from sklearn.datasets import make_classification
from sklearn.utils.testing import assert_true, assert_false, assert_raises
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.feature_selection import RFE, RFECV
from sklearn.ensemble import BaggingClassifier
class DelegatorData(object):
def __init__(self, name, construct, skip_methods=(),
fit_args=make_classification()):
self.name = name
self.construct = construct
self.fit_args = fit_args
self.skip_methods = skip_methods
DELEGATING_METAESTIMATORS = [
DelegatorData('Pipeline', lambda est: Pipeline([('est', est)])),
DelegatorData('GridSearchCV',
lambda est: GridSearchCV(
est, param_grid={'param': [5]}, cv=2),
skip_methods=['score']),
DelegatorData('RandomizedSearchCV',
lambda est: RandomizedSearchCV(
est, param_distributions={'param': [5]}, cv=2, n_iter=1),
skip_methods=['score']),
DelegatorData('RFE', RFE,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('RFECV', RFECV,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('BaggingClassifier', BaggingClassifier,
skip_methods=['transform', 'inverse_transform', 'score',
'predict_proba', 'predict_log_proba', 'predict'])
]
def test_metaestimator_delegation():
# Ensures specified metaestimators have methods iff subestimator does
def hides(method):
@property
def wrapper(obj):
if obj.hidden_method == method.__name__:
raise AttributeError('%r is hidden' % obj.hidden_method)
return functools.partial(method, obj)
return wrapper
class SubEstimator(BaseEstimator):
def __init__(self, param=1, hidden_method=None):
self.param = param
self.hidden_method = hidden_method
def fit(self, X, y=None, *args, **kwargs):
self.coef_ = np.arange(X.shape[1])
return True
def _check_fit(self):
if not hasattr(self, 'coef_'):
raise RuntimeError('Estimator is not fit')
@hides
def inverse_transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def predict(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_log_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def decision_function(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def score(self, X, *args, **kwargs):
self._check_fit()
return 1.0
methods = [k for k in iterkeys(SubEstimator.__dict__)
if not k.startswith('_') and not k.startswith('fit')]
methods.sort()
for delegator_data in DELEGATING_METAESTIMATORS:
delegate = SubEstimator()
delegator = delegator_data.construct(delegate)
for method in methods:
if method in delegator_data.skip_methods:
continue
assert_true(hasattr(delegate, method))
assert_true(hasattr(delegator, method),
msg="%s does not have method %r when its delegate does"
% (delegator_data.name, method))
# delegation before fit raises an exception
assert_raises(Exception, getattr(delegator, method),
delegator_data.fit_args[0])
delegator.fit(*delegator_data.fit_args)
for method in methods:
if method in delegator_data.skip_methods:
continue
# smoke test delegation
getattr(delegator, method)(delegator_data.fit_args[0])
for method in methods:
if method in delegator_data.skip_methods:
continue
delegate = SubEstimator(hidden_method=method)
delegator = delegator_data.construct(delegate)
assert_false(hasattr(delegate, method))
assert_false(hasattr(delegator, method),
msg="%s has method %r when its delegate does not"
% (delegator_data.name, method))
| bsd-3-clause |
stylianos-kampakis/scikit-learn | sklearn/utils/metaestimators.py | 283 | 2353 | """Utilities for meta-estimators"""
# Author: Joel Nothman
# Andreas Mueller
# Licence: BSD
from operator import attrgetter
from functools import update_wrapper
__all__ = ['if_delegate_has_method']
class _IffHasAttrDescriptor(object):
"""Implements a conditional property using the descriptor protocol.
Using this class to create a decorator will raise an ``AttributeError``
if the ``attribute_name`` is not present on the base object.
This allows ducktyping of the decorated method based on ``attribute_name``.
See https://docs.python.org/3/howto/descriptor.html for an explanation of
descriptors.
"""
def __init__(self, fn, attribute_name):
self.fn = fn
self.get_attribute = attrgetter(attribute_name)
# update the docstring of the descriptor
update_wrapper(self, fn)
def __get__(self, obj, type=None):
# raise an AttributeError if the attribute is not present on the object
if obj is not None:
# delegate only on instances, not the classes.
# this is to allow access to the docstrings.
self.get_attribute(obj)
# lambda, but not partial, allows help() to work with update_wrapper
out = lambda *args, **kwargs: self.fn(obj, *args, **kwargs)
# update the docstring of the returned function
update_wrapper(out, self.fn)
return out
def if_delegate_has_method(delegate):
"""Create a decorator for methods that are delegated to a sub-estimator
This enables ducktyping by hasattr returning True according to the
sub-estimator.
>>> from sklearn.utils.metaestimators import if_delegate_has_method
>>>
>>>
>>> class MetaEst(object):
... def __init__(self, sub_est):
... self.sub_est = sub_est
...
... @if_delegate_has_method(delegate='sub_est')
... def predict(self, X):
... return self.sub_est.predict(X)
...
>>> class HasPredict(object):
... def predict(self, X):
... return X.sum(axis=1)
...
>>> class HasNoPredict(object):
... pass
...
>>> hasattr(MetaEst(HasPredict()), 'predict')
True
>>> hasattr(MetaEst(HasNoPredict()), 'predict')
False
"""
return lambda fn: _IffHasAttrDescriptor(fn, '%s.%s' % (delegate, fn.__name__))
| bsd-3-clause |
ron1818/Singaboat_RobotX2016 | robotx_nav/nodes/task2_toplevel.py | 3 | 4821 | #!/usr/bin/env python
import multiprocessing as mp
import rospy
from visualization_msgs.msg import MarkerArray, Marker
from geometry_msgs.msg import Point, Quaternion
import numpy as np
from sklearn.cluster import KMeans, DBSCAN
from sklearn import svm
from move_base_loiter import Loiter
from move_base_waypoint import MoveTo
from color_totem_planner import ColorTotemPlanner
# import tf
# from math import pi, cos, sin
# from move_base_util import MoveBaseUtil
import time
def loiter_worker(res_q, data_q):
""" go to gps point """
p = mp.current_process()
print p.name, p.pid, 'Starting'
loiter_obj = Loiter("loiter", is_newnode=True, target=None,
radius=2.5, polygon=4, is_ccw=True, is_relative=False)
# spawn the gps coordinate, one time only
while True:
cid, target, radius, is_ccw = data_q.get()
print "from planner", target
if target[2] < -1e6: # unless send a -inf z by waypoint pub: terminating
break
else:
loiter_obj.respawn(target=target, radius=radius, is_ccw=is_ccw)
res_q.put(False) # hold_loiter, id of assignment
print p.name, p.pid, 'Exiting'
def moveto_worker(res_q, data_q):
""" constant heading to pass the gate,
need roi_target_identifier to give/update waypoint """
p = mp.current_process()
print p.name, p.pid, 'Starting'
# get the waypoints, loop wait for updates
moveto_obj = MoveTo("moveto", is_newnode=True, target=None, mode=1, mode_param=1, is_relative=False)
while True:
target = data_q.get()
if target[2] < -1e6: # unless send a -inf z by waypoint pub: terminating
break
else:
moveto_obj.respawn(target)
res_q.put(False)
print p.name, p.pid, 'Exiting'
# not required
# def cancel_goal_worker(conn, repetition):
# """ asynchronously cancel goals"""
# p = mp.current_process()
# print p.name, p.pid, 'Starting'
# while True:
# command = conn.recv()
# print 'child: ', command
# if command == 'cancel': # cancel goal
# print 'doing cancelling'
# force_cancel = ForceCancel(nodename="forcecancel", repetition=repetition)
# conn.send('cancelled')
# elif command == 'exit': # complete
# print "cancel goal complete, exit"
# break
# else: # conn.recv() == 0, idle, wait for command
# pass
# time.sleep()
#
# print p.name, p.pid, 'Exiting'
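# --- Editor's illustrative sketch (not part of the original file) ---
# The queue-based workers above shut down when they receive a "poison pill":
# a target whose z component is -inf. A minimal, hedged version of that
# pattern is sketched below; the names are made up for illustration, the
# helper is never called, and it assumes the fork start method used on Linux.
def _poison_pill_sketch():
    def _worker(q):
        while True:
            target = q.get()
            if target[2] < -1e6:  # poison pill: stop the worker
                break
            # ... handle a normal target here ...
    q = mp.Queue()
    proc = mp.Process(target=_worker, args=(q,))
    proc.start()
    q.put([1.0, 2.0, 0.0])        # normal work item
    q.put([0, 0, -float("inf")])  # ask the worker to exit
    proc.join()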
def planner_worker(loiter_res_q, loiter_data_q, moveto_res_q, moveto_data_q):
""" plan for totems """
p = mp.current_process()
print p.name, p.pid, 'Starting'
planner_obj = ColorTotemPlanner("color_planner")
while True:
if not loiter_res_q.empty(): # get update from loiter
# hold_loiter, visit_id = loiter_res_q.get()
hold_loiter = loiter_res_q.get()
planner_obj.update_loiter(hold_loiter) # update loiter and visit
if not moveto_res_q.empty(): # get update from moveto on success
planner_obj.update_hold_moveto(moveto_res_q.get())
totem_find, loiter_target, moveto_target, allvisited = planner_obj.planner() # try to find onhold loiter target
# print isready
if allvisited: # all visited, kill all worker and exit
poison_pill = [0, 0, -float("inf")]
loiter_data_q.put([None, poison_pill, None, None])
# need an exit target
if moveto_target != []:
moveto_data_q.put(moveto_target)
# finally kill moveto
time.sleep(1)
moveto_data_q.put(poison_pill)
break
elif loiter_target != [] and moveto_target == []: # still have pending loiter points
print "loiter called"
loiter_data_q.put(loiter_target)
elif loiter_target == [] and moveto_target != []: # need to moveto
print "moveto called"
moveto_data_q.put(moveto_target)
time.sleep(1)
print p.name, p.pid, 'Exiting'
if __name__ == "__main__":
moveto_data_q = mp.Queue()
moveto_res_q = mp.Queue()
loiter_data_q = mp.Queue()
loiter_res_q = mp.Queue()
# manager = mp.Manager()
# visited_dict = manager.dict()
# visited_dict = {"red": False, "green": False, "blue": False, "yellow": False}
loiter_mp = mp.Process(name="ltr", target=loiter_worker, args=(loiter_res_q, loiter_data_q,))
moveto_mp = mp.Process(name="mvt", target=moveto_worker, args=(moveto_res_q, moveto_data_q,))
planner_mp = mp.Process(name="pln", target=planner_worker, args=(loiter_res_q, loiter_data_q, moveto_res_q, moveto_data_q,))
loiter_mp.start()
moveto_mp.start()
# cancel_goal_mp.start()
planner_mp.start()
# close
loiter_mp.join()
moveto_mp.join()
planner_mp.join()
| gpl-3.0 |
iismd17/scikit-learn | sklearn/cluster/tests/test_bicluster.py | 226 | 9457 | """Testing for Spectral Biclustering methods"""
import numpy as np
from scipy.sparse import csr_matrix, issparse
from sklearn.grid_search import ParameterGrid
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn.base import BaseEstimator, BiclusterMixin
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.cluster.bicluster import _scale_normalize
from sklearn.cluster.bicluster import _bistochastic_normalize
from sklearn.cluster.bicluster import _log_normalize
from sklearn.metrics import consensus_score
from sklearn.datasets import make_biclusters, make_checkerboard
class MockBiclustering(BaseEstimator, BiclusterMixin):
# Mock object for testing get_submatrix.
def __init__(self):
pass
def get_indices(self, i):
# Overridden to reproduce old get_submatrix test.
return (np.where([True, True, False, False, True])[0],
np.where([False, False, True, True])[0])
def test_get_submatrix():
data = np.arange(20).reshape(5, 4)
model = MockBiclustering()
for X in (data, csr_matrix(data), data.tolist()):
submatrix = model.get_submatrix(0, X)
if issparse(submatrix):
submatrix = submatrix.toarray()
assert_array_equal(submatrix, [[2, 3],
[6, 7],
[18, 19]])
submatrix[:] = -1
if issparse(X):
X = X.toarray()
assert_true(np.all(X != -1))
def _test_shape_indices(model):
# Test get_shape and get_indices on fitted model.
for i in range(model.n_clusters):
m, n = model.get_shape(i)
i_ind, j_ind = model.get_indices(i)
assert_equal(len(i_ind), m)
assert_equal(len(j_ind), n)
def test_spectral_coclustering():
# Test Dhillon's Spectral CoClustering on a simple problem.
param_grid = {'svd_method': ['randomized', 'arpack'],
'n_svd_vecs': [None, 20],
'mini_batch': [False, True],
'init': ['k-means++'],
'n_init': [10],
'n_jobs': [1]}
random_state = 0
S, rows, cols = make_biclusters((30, 30), 3, noise=0.5,
random_state=random_state)
S -= S.min() # needs to be nonnegative before making it sparse
S = np.where(S < 1, 0, S) # threshold some values
for mat in (S, csr_matrix(S)):
for kwargs in ParameterGrid(param_grid):
model = SpectralCoclustering(n_clusters=3,
random_state=random_state,
**kwargs)
model.fit(mat)
assert_equal(model.rows_.shape, (3, 30))
assert_array_equal(model.rows_.sum(axis=0), np.ones(30))
assert_array_equal(model.columns_.sum(axis=0), np.ones(30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def test_spectral_biclustering():
# Test Kluger methods on a checkerboard dataset.
S, rows, cols = make_checkerboard((30, 30), 3, noise=0.5,
random_state=0)
non_default_params = {'method': ['scale', 'log'],
'svd_method': ['arpack'],
'n_svd_vecs': [20],
'mini_batch': [True]}
for mat in (S, csr_matrix(S)):
for param_name, param_values in non_default_params.items():
for param_value in param_values:
model = SpectralBiclustering(
n_clusters=3,
n_init=3,
init='k-means++',
random_state=0,
)
model.set_params(**dict([(param_name, param_value)]))
if issparse(mat) and model.get_params().get('method') == 'log':
# cannot take log of sparse matrix
assert_raises(ValueError, model.fit, mat)
continue
else:
model.fit(mat)
assert_equal(model.rows_.shape, (9, 30))
assert_equal(model.columns_.shape, (9, 30))
assert_array_equal(model.rows_.sum(axis=0),
np.repeat(3, 30))
assert_array_equal(model.columns_.sum(axis=0),
np.repeat(3, 30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def _do_scale_test(scaled):
"""Check that rows sum to one constant, and columns to another."""
row_sum = scaled.sum(axis=1)
col_sum = scaled.sum(axis=0)
if issparse(scaled):
row_sum = np.asarray(row_sum).squeeze()
col_sum = np.asarray(col_sum).squeeze()
assert_array_almost_equal(row_sum, np.tile(row_sum.mean(), 100),
decimal=1)
assert_array_almost_equal(col_sum, np.tile(col_sum.mean(), 100),
decimal=1)
def _do_bistochastic_test(scaled):
"""Check that rows and columns sum to the same constant."""
_do_scale_test(scaled)
assert_almost_equal(scaled.sum(axis=0).mean(),
scaled.sum(axis=1).mean(),
decimal=1)
def test_scale_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled, _, _ = _scale_normalize(mat)
_do_scale_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_bistochastic_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled = _bistochastic_normalize(mat)
_do_bistochastic_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_log_normalize():
# adding any constant to a log-scaled matrix should make it
# bistochastic
generator = np.random.RandomState(0)
mat = generator.rand(100, 100)
scaled = _log_normalize(mat) + 1
_do_bistochastic_test(scaled)
def test_fit_best_piecewise():
model = SpectralBiclustering(random_state=0)
vectors = np.array([[0, 0, 0, 1, 1, 1],
[2, 2, 2, 3, 3, 3],
[0, 1, 2, 3, 4, 5]])
best = model._fit_best_piecewise(vectors, n_best=2, n_clusters=2)
assert_array_equal(best, vectors[:2])
def test_project_and_cluster():
model = SpectralBiclustering(random_state=0)
data = np.array([[1, 1, 1],
[1, 1, 1],
[3, 6, 3],
[3, 6, 3]])
vectors = np.array([[1, 0],
[0, 1],
[0, 0]])
for mat in (data, csr_matrix(data)):
labels = model._project_and_cluster(data, vectors,
n_clusters=2)
assert_array_equal(labels, [0, 0, 1, 1])
def test_perfect_checkerboard():
raise SkipTest("This test is failing on the buildbot, but cannot"
" reproduce. Temporarily disabling it until it can be"
" reproduced and fixed.")
model = SpectralBiclustering(3, svd_method="arpack", random_state=0)
S, rows, cols = make_checkerboard((30, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((40, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((30, 40), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
def test_errors():
data = np.arange(25).reshape((5, 5))
model = SpectralBiclustering(n_clusters=(3, 3, 3))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters='abc')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters=(3, 'abc'))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(svd_method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_best=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=3, n_best=4)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering()
data = np.arange(27).reshape((3, 3, 3))
assert_raises(ValueError, model.fit, data)
| bsd-3-clause |
apache/spark | python/pyspark/sql/tests/test_pandas_udf_typehints.py | 22 | 9603 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import inspect
from typing import Union, Iterator, Tuple
from pyspark.sql.functions import mean, lit
from pyspark.testing.sqlutils import ReusedSQLTestCase, \
have_pandas, have_pyarrow, pandas_requirement_message, \
pyarrow_requirement_message
from pyspark.sql.pandas.typehints import infer_eval_type
from pyspark.sql.pandas.functions import pandas_udf, PandasUDFType
from pyspark.sql import Row
if have_pandas:
import pandas as pd
import numpy as np
from pandas.testing import assert_frame_equal
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message) # type: ignore[arg-type]
class PandasUDFTypeHintsTests(ReusedSQLTestCase):
def test_type_annotation_scalar(self):
def func(col: pd.Series) -> pd.Series:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR)
def func(col: pd.DataFrame, col1: pd.Series) -> pd.DataFrame:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR)
def func(col: pd.DataFrame, *args: pd.Series) -> pd.Series:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR)
def func(col: pd.Series, *args: pd.Series, **kwargs: pd.DataFrame) -> pd.Series:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR)
def func(col: pd.Series, *, col2: pd.DataFrame) -> pd.DataFrame:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR)
def func(col: Union[pd.Series, pd.DataFrame], *, col2: pd.DataFrame) -> pd.Series:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR)
def test_type_annotation_scalar_iter(self):
def func(iter: Iterator[pd.Series]) -> Iterator[pd.Series]:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR_ITER)
def func(iter: Iterator[Tuple[pd.DataFrame, pd.Series]]) -> Iterator[pd.DataFrame]:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR_ITER)
def func(iter: Iterator[Tuple[pd.DataFrame, ...]]) -> Iterator[pd.Series]:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR_ITER)
def func(
iter: Iterator[Tuple[Union[pd.DataFrame, pd.Series], ...]]
) -> Iterator[pd.Series]:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR_ITER)
def test_type_annotation_group_agg(self):
def func(col: pd.Series) -> str:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.GROUPED_AGG)
def func(col: pd.DataFrame, col1: pd.Series) -> int:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.GROUPED_AGG)
def func(col: pd.DataFrame, *args: pd.Series) -> Row:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.GROUPED_AGG)
def func(col: pd.Series, *args: pd.Series, **kwargs: pd.DataFrame) -> str:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.GROUPED_AGG)
def func(col: pd.Series, *, col2: pd.DataFrame) -> float:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.GROUPED_AGG)
def func(col: Union[pd.Series, pd.DataFrame], *, col2: pd.DataFrame) -> float:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.GROUPED_AGG)
def test_type_annotation_negative(self):
def func(col: str) -> pd.Series:
pass
self.assertRaisesRegex(
NotImplementedError,
"Unsupported signature.*str",
infer_eval_type, inspect.signature(func))
def func(col: pd.DataFrame, col1: int) -> pd.DataFrame:
pass
self.assertRaisesRegex(
NotImplementedError,
"Unsupported signature.*int",
infer_eval_type, inspect.signature(func))
def func(col: Union[pd.DataFrame, str], col1: int) -> pd.DataFrame:
pass
self.assertRaisesRegex(
NotImplementedError,
"Unsupported signature.*str",
infer_eval_type, inspect.signature(func))
def func(col: pd.Series) -> Tuple[pd.DataFrame]:
pass
self.assertRaisesRegex(
NotImplementedError,
"Unsupported signature.*Tuple",
infer_eval_type, inspect.signature(func))
def func(col, *args: pd.Series) -> pd.Series:
pass
self.assertRaisesRegex(
ValueError,
"should be specified.*Series",
infer_eval_type, inspect.signature(func))
def func(col: pd.Series, *args: pd.Series, **kwargs: pd.DataFrame):
pass
self.assertRaisesRegex(
ValueError,
"should be specified.*Series",
infer_eval_type, inspect.signature(func))
def func(col: pd.Series, *, col2) -> pd.DataFrame:
pass
self.assertRaisesRegex(
ValueError,
"should be specified.*Series",
infer_eval_type, inspect.signature(func))
def test_scalar_udf_type_hint(self):
df = self.spark.range(10).selectExpr("id", "id as v")
def plus_one(v: Union[pd.Series, pd.DataFrame]) -> pd.Series:
return v + 1
plus_one = pandas_udf("long")(plus_one)
actual = df.select(plus_one(df.v).alias("plus_one"))
expected = df.selectExpr("(v + 1) as plus_one")
assert_frame_equal(expected.toPandas(), actual.toPandas())
def test_scalar_iter_udf_type_hint(self):
df = self.spark.range(10).selectExpr("id", "id as v")
def plus_one(itr: Iterator[pd.Series]) -> Iterator[pd.Series]:
for s in itr:
yield s + 1
plus_one = pandas_udf("long")(plus_one)
actual = df.select(plus_one(df.v).alias("plus_one"))
expected = df.selectExpr("(v + 1) as plus_one")
assert_frame_equal(expected.toPandas(), actual.toPandas())
def test_group_agg_udf_type_hint(self):
df = self.spark.range(10).selectExpr("id", "id as v")
def weighted_mean(v: pd.Series, w: pd.Series) -> float:
return np.average(v, weights=w)
weighted_mean = pandas_udf("double")(weighted_mean)
actual = df.groupby('id').agg(weighted_mean(df.v, lit(1.0))).sort('id')
expected = df.groupby('id').agg(mean(df.v).alias('weighted_mean(v, 1.0)')).sort('id')
assert_frame_equal(expected.toPandas(), actual.toPandas())
def test_ignore_type_hint_in_group_apply_in_pandas(self):
df = self.spark.range(10)
def pandas_plus_one(v: pd.DataFrame) -> pd.DataFrame:
return v + 1
actual = df.groupby('id').applyInPandas(pandas_plus_one, schema=df.schema).sort('id')
expected = df.selectExpr("id + 1 as id")
assert_frame_equal(expected.toPandas(), actual.toPandas())
def test_ignore_type_hint_in_cogroup_apply_in_pandas(self):
df = self.spark.range(10)
def pandas_plus_one(left: pd.DataFrame, right: pd.DataFrame) -> pd.DataFrame:
return left + 1
actual = df.groupby('id').cogroup(
self.spark.range(10).groupby("id")
).applyInPandas(pandas_plus_one, schema=df.schema).sort('id')
expected = df.selectExpr("id + 1 as id")
assert_frame_equal(expected.toPandas(), actual.toPandas())
def test_ignore_type_hint_in_map_in_pandas(self):
df = self.spark.range(10)
def pandas_plus_one(iter: Iterator[pd.DataFrame]) -> Iterator[pd.DataFrame]:
return map(lambda v: v + 1, iter)
actual = df.mapInPandas(pandas_plus_one, schema=df.schema)
expected = df.selectExpr("id + 1 as id")
assert_frame_equal(expected.toPandas(), actual.toPandas())
if __name__ == "__main__":
from pyspark.sql.tests.test_pandas_udf_typehints import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
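# Added illustration (not part of the original test module): the type-hint API
# exercised above is normally used through the pandas_udf decorator, where the
# eval type is inferred from the annotations instead of being passed explicitly:
#
#     @pandas_udf("long")
#     def plus_one(v: pd.Series) -> pd.Series:      # inferred as SCALAR
#         return v + 1
#
#     @pandas_udf("long")
#     def plus_one_iter(it: Iterator[pd.Series]) -> Iterator[pd.Series]:
#         for s in it:                               # inferred as SCALAR_ITER
#             yield s + 1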
| apache-2.0 |
heli522/scikit-learn | examples/calibration/plot_calibration.py | 225 | 4795 | """
======================================
Probability calibration of classifiers
======================================
When performing classification you often want to predict not only
the class label, but also the associated probability. This probability
gives you some kind of confidence on the prediction. However, not all
classifiers provide well-calibrated probabilities: some are over-confident
while others are under-confident. Thus, a separate calibration of the predicted
probabilities is often desirable as a postprocessing step. This example illustrates
two different methods for this calibration and evaluates the quality of the
returned probabilities using Brier's score
(see http://en.wikipedia.org/wiki/Brier_score).
Compared are the estimated probability using a Gaussian naive Bayes classifier
without calibration, with a sigmoid calibration, and with a non-parametric
isotonic calibration. One can observe that only the non-parametric model is able
to provide a probability calibration that returns probabilities close to the
expected 0.5 for most of the samples belonging to the middle cluster with
heterogeneous labels. This results in a significantly improved Brier score.
"""
print(__doc__)
# Author: Mathieu Blondel <[email protected]>
# Alexandre Gramfort <[email protected]>
# Balazs Kegl <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from sklearn.datasets import make_blobs
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import brier_score_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.cross_validation import train_test_split
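# Note (added): sklearn.cross_validation is the pre-0.18 location of this
# helper; in current scikit-learn releases the equivalent import would be
# `from sklearn.model_selection import train_test_split`.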
n_samples = 50000
n_bins = 3 # use 3 bins for calibration_curve as we have 3 clusters here
# Generate 3 blobs with 2 classes where the second blob contains
# half positive samples and half negative samples. Probability in this
# blob is therefore 0.5.
centers = [(-5, -5), (0, 0), (5, 5)]
X, y = make_blobs(n_samples=n_samples, n_features=2, cluster_std=1.0,
centers=centers, shuffle=False, random_state=42)
y[:n_samples // 2] = 0
y[n_samples // 2:] = 1
sample_weight = np.random.RandomState(42).rand(y.shape[0])
# split train, test for calibration
X_train, X_test, y_train, y_test, sw_train, sw_test = \
train_test_split(X, y, sample_weight, test_size=0.9, random_state=42)
# Gaussian Naive-Bayes with no calibration
clf = GaussianNB()
clf.fit(X_train, y_train) # GaussianNB itself does not support sample-weights
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with isotonic calibration
clf_isotonic = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_isotonic.fit(X_train, y_train, sw_train)
prob_pos_isotonic = clf_isotonic.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with sigmoid calibration
clf_sigmoid = CalibratedClassifierCV(clf, cv=2, method='sigmoid')
clf_sigmoid.fit(X_train, y_train, sw_train)
prob_pos_sigmoid = clf_sigmoid.predict_proba(X_test)[:, 1]
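# Added note (illustration): method='sigmoid' fits a Platt-style logistic
# mapping on the classifier outputs, while method='isotonic' fits a
# non-parametric, non-decreasing step function; the isotonic fit is what lets
# the middle cluster be pulled back towards the expected 0.5 probability.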
print("Brier scores: (the smaller the better)")
clf_score = brier_score_loss(y_test, prob_pos_clf, sw_test)
print("No calibration: %1.3f" % clf_score)
clf_isotonic_score = brier_score_loss(y_test, prob_pos_isotonic, sw_test)
print("With isotonic calibration: %1.3f" % clf_isotonic_score)
clf_sigmoid_score = brier_score_loss(y_test, prob_pos_sigmoid, sw_test)
print("With sigmoid calibration: %1.3f" % clf_sigmoid_score)
###############################################################################
# Plot the data and the predicted probabilities
plt.figure()
y_unique = np.unique(y)
colors = cm.rainbow(np.linspace(0.0, 1.0, y_unique.size))
for this_y, color in zip(y_unique, colors):
this_X = X_train[y_train == this_y]
this_sw = sw_train[y_train == this_y]
plt.scatter(this_X[:, 0], this_X[:, 1], s=this_sw * 50, c=color, alpha=0.5,
label="Class %s" % this_y)
plt.legend(loc="best")
plt.title("Data")
plt.figure()
order = np.lexsort((prob_pos_clf, ))
plt.plot(prob_pos_clf[order], 'r', label='No calibration (%1.3f)' % clf_score)
plt.plot(prob_pos_isotonic[order], 'g', linewidth=3,
label='Isotonic calibration (%1.3f)' % clf_isotonic_score)
plt.plot(prob_pos_sigmoid[order], 'b', linewidth=3,
label='Sigmoid calibration (%1.3f)' % clf_sigmoid_score)
plt.plot(np.linspace(0, y_test.size, 51)[1::2],
y_test[order].reshape(25, -1).mean(1),
'k', linewidth=3, label=r'Empirical')
plt.ylim([-0.05, 1.05])
plt.xlabel("Instances sorted according to predicted probability "
"(uncalibrated GNB)")
plt.ylabel("P(y=1)")
plt.legend(loc="upper left")
plt.title("Gaussian naive Bayes probabilities")
plt.show()
| bsd-3-clause |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/pandas/tests/frame/test_quantile.py | 10 | 15893 | # -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
import numpy as np
from pandas import (DataFrame, Series, Timestamp, _np_version_under1p11)
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameQuantile(TestData):
def test_quantile(self):
from numpy import percentile
q = self.tsframe.quantile(0.1, axis=0)
assert q['A'] == percentile(self.tsframe['A'], 10)
tm.assert_index_equal(q.index, self.tsframe.columns)
q = self.tsframe.quantile(0.9, axis=1)
assert (q['2000-01-17'] ==
percentile(self.tsframe.loc['2000-01-17'], 90))
tm.assert_index_equal(q.index, self.tsframe.index)
# test degenerate case
q = DataFrame({'x': [], 'y': []}).quantile(0.1, axis=0)
assert(np.isnan(q['x']) and np.isnan(q['y']))
# non-numeric exclusion
df = DataFrame({'col1': ['A', 'A', 'B', 'B'], 'col2': [1, 2, 3, 4]})
rs = df.quantile(0.5)
xp = df.median().rename(0.5)
assert_series_equal(rs, xp)
# axis
df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3])
result = df.quantile(.5, axis=1)
expected = Series([1.5, 2.5, 3.5], index=[1, 2, 3], name=0.5)
assert_series_equal(result, expected)
result = df.quantile([.5, .75], axis=1)
expected = DataFrame({1: [1.5, 1.75], 2: [2.5, 2.75],
3: [3.5, 3.75]}, index=[0.5, 0.75])
assert_frame_equal(result, expected, check_index_type=True)
# We may want to break API in the future to change this
# so that we exclude non-numeric along the same axis
# See GH #7312
df = DataFrame([[1, 2, 3],
['a', 'b', 4]])
result = df.quantile(.5, axis=1)
expected = Series([3., 4.], index=[0, 1], name=0.5)
assert_series_equal(result, expected)
def test_quantile_axis_mixed(self):
# mixed on axis=1
df = DataFrame({"A": [1, 2, 3],
"B": [2., 3., 4.],
"C": pd.date_range('20130101', periods=3),
"D": ['foo', 'bar', 'baz']})
result = df.quantile(.5, axis=1)
expected = Series([1.5, 2.5, 3.5], name=0.5)
assert_series_equal(result, expected)
# must raise
def f():
df.quantile(.5, axis=1, numeric_only=False)
pytest.raises(TypeError, f)
def test_quantile_axis_parameter(self):
# GH 9543/9544
df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3])
result = df.quantile(.5, axis=0)
expected = Series([2., 3.], index=["A", "B"], name=0.5)
assert_series_equal(result, expected)
expected = df.quantile(.5, axis="index")
assert_series_equal(result, expected)
result = df.quantile(.5, axis=1)
expected = Series([1.5, 2.5, 3.5], index=[1, 2, 3], name=0.5)
assert_series_equal(result, expected)
result = df.quantile(.5, axis="columns")
assert_series_equal(result, expected)
pytest.raises(ValueError, df.quantile, 0.1, axis=-1)
pytest.raises(ValueError, df.quantile, 0.1, axis="column")
def test_quantile_interpolation(self):
# see gh-10174
from numpy import percentile
# interpolation = linear (default case)
q = self.tsframe.quantile(0.1, axis=0, interpolation='linear')
assert q['A'] == percentile(self.tsframe['A'], 10)
q = self.intframe.quantile(0.1)
assert q['A'] == percentile(self.intframe['A'], 10)
# test with and without interpolation keyword
q1 = self.intframe.quantile(0.1)
assert q1['A'] == np.percentile(self.intframe['A'], 10)
tm.assert_series_equal(q, q1)
# interpolation method other than default linear
df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3])
result = df.quantile(.5, axis=1, interpolation='nearest')
expected = Series([1, 2, 3], index=[1, 2, 3], name=0.5)
tm.assert_series_equal(result, expected)
# cross-check interpolation=nearest results in original dtype
exp = np.percentile(np.array([[1, 2, 3], [2, 3, 4]]), .5,
axis=0, interpolation='nearest')
expected = Series(exp, index=[1, 2, 3], name=0.5, dtype='int64')
tm.assert_series_equal(result, expected)
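# Added note (illustration): for a row holding the two values {1, 2} the 0.5
# quantile is 1.5 under the default 'linear' interpolation, whereas 'nearest'
# snaps to one of the existing values (here 1), which is why the expected
# result above keeps the original int64 dtype.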
# float
df = DataFrame({"A": [1., 2., 3.], "B": [2., 3., 4.]}, index=[1, 2, 3])
result = df.quantile(.5, axis=1, interpolation='nearest')
expected = Series([1., 2., 3.], index=[1, 2, 3], name=0.5)
tm.assert_series_equal(result, expected)
exp = np.percentile(np.array([[1., 2., 3.], [2., 3., 4.]]), .5,
axis=0, interpolation='nearest')
expected = Series(exp, index=[1, 2, 3], name=0.5, dtype='float64')
assert_series_equal(result, expected)
# axis
result = df.quantile([.5, .75], axis=1, interpolation='lower')
expected = DataFrame({1: [1., 1.], 2: [2., 2.],
3: [3., 3.]}, index=[0.5, 0.75])
assert_frame_equal(result, expected)
# test degenerate case
df = DataFrame({'x': [], 'y': []})
q = df.quantile(0.1, axis=0, interpolation='higher')
assert(np.isnan(q['x']) and np.isnan(q['y']))
# multi
df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]],
columns=['a', 'b', 'c'])
result = df.quantile([.25, .5], interpolation='midpoint')
# https://github.com/numpy/numpy/issues/7163
if _np_version_under1p11:
expected = DataFrame([[1.5, 1.5, 1.5], [2.5, 2.5, 2.5]],
index=[.25, .5], columns=['a', 'b', 'c'])
else:
expected = DataFrame([[1.5, 1.5, 1.5], [2.0, 2.0, 2.0]],
index=[.25, .5], columns=['a', 'b', 'c'])
assert_frame_equal(result, expected)
def test_quantile_multi(self):
df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]],
columns=['a', 'b', 'c'])
result = df.quantile([.25, .5])
expected = DataFrame([[1.5, 1.5, 1.5], [2., 2., 2.]],
index=[.25, .5], columns=['a', 'b', 'c'])
assert_frame_equal(result, expected)
# axis = 1
result = df.quantile([.25, .5], axis=1)
expected = DataFrame([[1.5, 1.5, 1.5], [2., 2., 2.]],
index=[.25, .5], columns=[0, 1, 2])
# empty
result = DataFrame({'x': [], 'y': []}).quantile([0.1, .9], axis=0)
expected = DataFrame({'x': [np.nan, np.nan], 'y': [np.nan, np.nan]},
index=[.1, .9])
assert_frame_equal(result, expected)
def test_quantile_datetime(self):
df = DataFrame({'a': pd.to_datetime(['2010', '2011']), 'b': [0, 5]})
# exclude datetime
result = df.quantile(.5)
expected = Series([2.5], index=['b'])
# datetime
result = df.quantile(.5, numeric_only=False)
expected = Series([Timestamp('2010-07-02 12:00:00'), 2.5],
index=['a', 'b'],
name=0.5)
assert_series_equal(result, expected)
# datetime w/ multi
result = df.quantile([.5], numeric_only=False)
expected = DataFrame([[Timestamp('2010-07-02 12:00:00'), 2.5]],
index=[.5], columns=['a', 'b'])
assert_frame_equal(result, expected)
# axis = 1
df['c'] = pd.to_datetime(['2011', '2012'])
result = df[['a', 'c']].quantile(.5, axis=1, numeric_only=False)
expected = Series([Timestamp('2010-07-02 12:00:00'),
Timestamp('2011-07-02 12:00:00')],
index=[0, 1],
name=0.5)
assert_series_equal(result, expected)
result = df[['a', 'c']].quantile([.5], axis=1, numeric_only=False)
expected = DataFrame([[Timestamp('2010-07-02 12:00:00'),
Timestamp('2011-07-02 12:00:00')]],
index=[0.5], columns=[0, 1])
assert_frame_equal(result, expected)
# empty when numeric_only=True
# FIXME (gives empty frame in 0.18.1, broken in 0.19.0)
# result = df[['a', 'c']].quantile(.5)
# result = df[['a', 'c']].quantile([.5])
def test_quantile_invalid(self):
msg = 'percentiles should all be in the interval \\[0, 1\\]'
for invalid in [-1, 2, [0.5, -1], [0.5, 2]]:
with tm.assert_raises_regex(ValueError, msg):
self.tsframe.quantile(invalid)
def test_quantile_box(self):
df = DataFrame({'A': [pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03')],
'B': [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timestamp('2011-01-03', tz='US/Eastern')],
'C': [pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Timedelta('3 days')]})
res = df.quantile(0.5, numeric_only=False)
exp = pd.Series([pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timedelta('2 days')],
name=0.5, index=['A', 'B', 'C'])
tm.assert_series_equal(res, exp)
res = df.quantile([0.5], numeric_only=False)
exp = pd.DataFrame([[pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timedelta('2 days')]],
index=[0.5], columns=['A', 'B', 'C'])
tm.assert_frame_equal(res, exp)
# DatetimeBlock may be consolidated and contain NaT in different loc
df = DataFrame({'A': [pd.Timestamp('2011-01-01'),
pd.NaT,
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03')],
'a': [pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.NaT,
pd.Timestamp('2011-01-03')],
'B': [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.NaT,
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timestamp('2011-01-03', tz='US/Eastern')],
'b': [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.NaT,
pd.Timestamp('2011-01-03', tz='US/Eastern')],
'C': [pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Timedelta('3 days'),
pd.NaT],
'c': [pd.NaT,
pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Timedelta('3 days')]},
columns=list('AaBbCc'))
res = df.quantile(0.5, numeric_only=False)
exp = pd.Series([pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timedelta('2 days'),
pd.Timedelta('2 days')],
name=0.5, index=list('AaBbCc'))
tm.assert_series_equal(res, exp)
res = df.quantile([0.5], numeric_only=False)
exp = pd.DataFrame([[pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timedelta('2 days'),
pd.Timedelta('2 days')]],
index=[0.5], columns=list('AaBbCc'))
tm.assert_frame_equal(res, exp)
def test_quantile_nan(self):
# GH 14357 - float block where some cols have missing values
df = DataFrame({'a': np.arange(1, 6.0), 'b': np.arange(1, 6.0)})
df.iloc[-1, 1] = np.nan
res = df.quantile(0.5)
exp = Series([3.0, 2.5], index=['a', 'b'], name=0.5)
tm.assert_series_equal(res, exp)
res = df.quantile([0.5, 0.75])
exp = DataFrame({'a': [3.0, 4.0], 'b': [2.5, 3.25]}, index=[0.5, 0.75])
tm.assert_frame_equal(res, exp)
res = df.quantile(0.5, axis=1)
exp = Series(np.arange(1.0, 6.0), name=0.5)
tm.assert_series_equal(res, exp)
res = df.quantile([0.5, 0.75], axis=1)
exp = DataFrame([np.arange(1.0, 6.0)] * 2, index=[0.5, 0.75])
tm.assert_frame_equal(res, exp)
# full-nan column
df['b'] = np.nan
res = df.quantile(0.5)
exp = Series([3.0, np.nan], index=['a', 'b'], name=0.5)
tm.assert_series_equal(res, exp)
res = df.quantile([0.5, 0.75])
exp = DataFrame({'a': [3.0, 4.0], 'b': [np.nan, np.nan]},
index=[0.5, 0.75])
tm.assert_frame_equal(res, exp)
def test_quantile_nat(self):
# full NaT column
df = DataFrame({'a': [pd.NaT, pd.NaT, pd.NaT]})
res = df.quantile(0.5, numeric_only=False)
exp = Series([pd.NaT], index=['a'], name=0.5)
tm.assert_series_equal(res, exp)
res = df.quantile([0.5], numeric_only=False)
exp = DataFrame({'a': [pd.NaT]}, index=[0.5])
tm.assert_frame_equal(res, exp)
# mixed non-null / full null column
df = DataFrame({'a': [pd.Timestamp('2012-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2012-01-03')],
'b': [pd.NaT, pd.NaT, pd.NaT]})
res = df.quantile(0.5, numeric_only=False)
exp = Series([pd.Timestamp('2012-01-02'), pd.NaT], index=['a', 'b'],
name=0.5)
tm.assert_series_equal(res, exp)
res = df.quantile([0.5], numeric_only=False)
exp = DataFrame([[pd.Timestamp('2012-01-02'), pd.NaT]], index=[0.5],
columns=['a', 'b'])
tm.assert_frame_equal(res, exp)
def test_quantile_empty(self):
# floats
df = DataFrame(columns=['a', 'b'], dtype='float64')
res = df.quantile(0.5)
exp = Series([np.nan, np.nan], index=['a', 'b'], name=0.5)
tm.assert_series_equal(res, exp)
res = df.quantile([0.5])
exp = DataFrame([[np.nan, np.nan]], columns=['a', 'b'], index=[0.5])
tm.assert_frame_equal(res, exp)
# FIXME (gives empty frame in 0.18.1, broken in 0.19.0)
# res = df.quantile(0.5, axis=1)
# res = df.quantile([0.5], axis=1)
# ints
df = DataFrame(columns=['a', 'b'], dtype='int64')
# FIXME (gives empty frame in 0.18.1, broken in 0.19.0)
# res = df.quantile(0.5)
# datetimes
df = DataFrame(columns=['a', 'b'], dtype='datetime64[ns]')
# FIXME (gives NaNs instead of NaT in 0.18.1 or 0.19.0)
# res = df.quantile(0.5, numeric_only=False)
| apache-2.0 |
henridwyer/scikit-learn | examples/ensemble/plot_adaboost_twoclass.py | 347 | 3268 | """
==================
Two-class AdaBoost
==================
This example fits an AdaBoosted decision stump on a non-linearly separable
classification dataset composed of two "Gaussian quantiles" clusters
(see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision
boundary and decision scores. The distributions of decision scores are shown
separately for samples of class A and B. The predicted class label for each
sample is determined by the sign of the decision score. Samples with decision
scores greater than zero are classified as B, and are otherwise classified
as A. The magnitude of a decision score determines the degree of likeness with
the predicted class label. Additionally, a new dataset could be constructed
containing a desired purity of class B, for example, by only selecting samples
with a decision score above some value.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
# Construct dataset
X1, y1 = make_gaussian_quantiles(cov=2.,
n_samples=200, n_features=2,
n_classes=2, random_state=1)
X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5,
n_samples=300, n_features=2,
n_classes=2, random_state=1)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, - y2 + 1))
# Create and fit an AdaBoosted decision tree
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),
algorithm="SAMME",
n_estimators=200)
bdt.fit(X, y)
plot_colors = "br"
plot_step = 0.02
class_names = "AB"
plt.figure(figsize=(10, 5))
# Plot the decision boundaries
plt.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis("tight")
# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1],
c=c, cmap=plt.cm.Paired,
label="Class %s" % n)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='upper right')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Decision Boundary')
# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
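# Illustrative sketch (added, not part of the original example): as noted in the
# module docstring, a higher-purity subset of class B can be built by keeping
# only samples whose decision score exceeds a chosen cut-off (0.5 here is an
# arbitrary, assumed value).
purity_threshold = 0.5
X_high_purity_B = X[twoclass_output > purity_threshold]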
plt.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
plt.hist(twoclass_output[y == i],
bins=10,
range=plot_range,
facecolor=c,
label='Class %s' % n,
alpha=.5)
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc='upper right')
plt.ylabel('Samples')
plt.xlabel('Score')
plt.title('Decision Scores')
plt.tight_layout()
plt.subplots_adjust(wspace=0.35)
plt.show()
| bsd-3-clause |
siutanwong/scikit-learn | sklearn/utils/metaestimators.py | 283 | 2353 | """Utilities for meta-estimators"""
# Author: Joel Nothman
# Andreas Mueller
# Licence: BSD
from operator import attrgetter
from functools import update_wrapper
__all__ = ['if_delegate_has_method']
class _IffHasAttrDescriptor(object):
"""Implements a conditional property using the descriptor protocol.
Using this class to create a decorator will raise an ``AttributeError``
if the ``attribute_name`` is not present on the base object.
This allows ducktyping of the decorated method based on ``attribute_name``.
See https://docs.python.org/3/howto/descriptor.html for an explanation of
descriptors.
"""
def __init__(self, fn, attribute_name):
self.fn = fn
self.get_attribute = attrgetter(attribute_name)
# update the docstring of the descriptor
update_wrapper(self, fn)
def __get__(self, obj, type=None):
# raise an AttributeError if the attribute is not present on the object
if obj is not None:
# delegate only on instances, not the classes.
# this is to allow access to the docstrings.
self.get_attribute(obj)
# lambda, but not partial, allows help() to work with update_wrapper
out = lambda *args, **kwargs: self.fn(obj, *args, **kwargs)
# update the docstring of the returned function
update_wrapper(out, self.fn)
return out
def if_delegate_has_method(delegate):
"""Create a decorator for methods that are delegated to a sub-estimator
This enables ducktyping by hasattr returning True according to the
sub-estimator.
>>> from sklearn.utils.metaestimators import if_delegate_has_method
>>>
>>>
>>> class MetaEst(object):
... def __init__(self, sub_est):
... self.sub_est = sub_est
...
... @if_delegate_has_method(delegate='sub_est')
... def predict(self, X):
... return self.sub_est.predict(X)
...
>>> class HasPredict(object):
... def predict(self, X):
... return X.sum(axis=1)
...
>>> class HasNoPredict(object):
... pass
...
>>> hasattr(MetaEst(HasPredict()), 'predict')
True
>>> hasattr(MetaEst(HasNoPredict()), 'predict')
False
"""
return lambda fn: _IffHasAttrDescriptor(fn, '%s.%s' % (delegate, fn.__name__))
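# Added note (illustration): because _IffHasAttrDescriptor resolves the delegate
# attribute inside __get__, accessing the decorated method on an instance whose
# sub-estimator lacks it raises AttributeError, which is exactly why
# hasattr(MetaEst(HasNoPredict()), 'predict') evaluates to False in the doctest
# above while MetaEst(HasPredict()).predict(X) works as usual.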
| bsd-3-clause |
simonsfoundation/CaImAn | use_cases/granule_cells/prepare_nice_image.py | 2 | 39221 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 24 17:06:17 2016
@author: agiovann
"""
from __future__ import division
from __future__ import print_function
#%%
from builtins import str
from builtins import range
from past.utils import old_div
try:
get_ipython().magic('load_ext autoreload')
get_ipython().magic('autoreload 2')
print((1))
except:
print('Not launched under iPython')
import matplotlib as mpl
mpl.use('TKAgg')
from matplotlib import pyplot as plt
# plt.ion()
import sys
import numpy as np
# sys.path.append('../SPGL1_python_port')
#%
from time import time
from scipy.sparse import coo_matrix
import tifffile
import subprocess
import time as tm
from time import time
import pylab as pl
import psutil
import glob
import os
import scipy
from ipyparallel import Client
import caiman as cm
from caiman.components_evaluation import evaluate_components
from caiman.utils.visualization import plot_contours, view_patches_bar
from caiman.base.rois import extract_binary_masks_blob
from caiman.source_extraction import cnmf as cnmf
# Added import (assumption): later cells call cb.movie, cb.motion_correct_parallel
# and cb.trace from the legacy CalBlitz package, which the original script never
# imports; assuming calblitz is installed, this import makes those cells resolvable.
import calblitz as cb
#%%
# backend='SLURM'
backend = 'local'
if backend == 'SLURM':
n_processes = np.int(os.environ.get('SLURM_NPROCS'))
else:
    # roughly the number of cores on your machine (at least 1 process)
n_processes = np.maximum(np.int(psutil.cpu_count()), 1)
print(('using ' + str(n_processes) + ' processes'))
#%% start cluster for efficient computation
single_thread = False
if single_thread:
dview = None
else:
try:
c.close()
except:
    print('c did not exist, creating one')
print("Stopping cluster to avoid unnecessary use of memory....")
sys.stdout.flush()
if backend == 'SLURM':
try:
cm.stop_server(is_slurm=True)
except:
print('Nothing to stop')
slurm_script = '/mnt/xfs1/home/agiovann/SOFTWARE/Constrained_NMF/SLURM/slurmStart.sh'
cm.start_server(slurm_script=slurm_script)
pdir, profile = os.environ['IPPPDIR'], os.environ['IPPPROFILE']
c = Client(ipython_dir=pdir, profile=profile)
else:
cm.stop_server()
cm.start_server()
c = Client()
print(('Using ' + str(len(c)) + ' processes'))
dview = c[:len(c)]
#%%
os.chdir('/mnt/ceph/users/agiovann/ImagingData/eyeblink/b38/20160706154257')
fls = []
for root, dirs, files in os.walk("."):
for file in files:
if file.endswith("1.npz"):
print((os.path.join(root, file)))
fls.append(os.path.join(root, file))
fls.sort()
for fl in fls:
print(fl)
with np.load(fl) as ld:
print((list(ld.keys())))
tmpls = ld['template']
lq, hq = np.percentile(tmpls, [5, 95])
pl.imshow(tmpls, cmap='gray', vmin=lq, vmax=hq)
pl.pause(.001)
pl.cla()
#%%
all_movs = []
for f in fls:
with np.load(f) as fl:
print(f)
# pl.subplot(1,2,1)
# pl.imshow(fl['template'],cmap=pl.cm.gray)
# pl.subplot(1,2,2)
all_movs.append(fl['template'][np.newaxis, :, :])
# pl.plot(fl['shifts'])
# pl.pause(.001)
all_movs = cb.movie(np.concatenate(all_movs, axis=0), fr=30)
all_movs, shifts, _, _ = all_movs.motion_correct(20, 20, template=None)
all_movs[30:80].play(backend='opencv', gain=5., fr=10)
all_movs = all_movs[30:80]
fls = fls[30:80]
final_template = np.median(all_movs, 0)
#%%
new_fls = []
for fl in fls:
new_fls.append(fl[:-3] + 'tif')
#%%
file_res = cb.motion_correct_parallel(new_fls, fr=6, template=final_template, margins_out=0,
max_shift_w=25, max_shift_h=25, dview=c[:], apply_smooth=True, save_hdf5=False, remove_blanks=False)
#%%
xy_shifts = []
for fl in new_fls:
if os.path.exists(fl[:-3] + 'npz'):
print((fl[:-3] + 'npz'))
with np.load(fl[:-3] + 'npz') as ld:
xy_shifts.append(ld['shifts'])
else:
raise Exception('*********************** ERROR, FILE NOT EXISTING!!!')
#%%
resize_facts = (1, 1, .2)
name_new = cm.save_memmap_each(
new_fls, dview=c[:], base_name=None, resize_fact=resize_facts, remove_init=0, xy_shifts=xy_shifts)
#%%
fname_new = cm.save_memmap_join(
name_new, base_name='TOTAL_', n_chunks=6, dview=c[:])
#%%
m = cm.load('TOTAL__d1_512_d2_512_d3_1_order_C_frames_2300_.mmap', fr=6)
#%%
tmp = np.median(m, 0)
#%%
Cn = m.local_correlations(eight_neighbours=True, swap_dim=False)
pl.imshow(Cn, cmap='gray')
#%%
lq, hq = np.percentile(tmp, [10, 98])
pl.imshow(tmp, cmap='gray', vmin=lq, vmax=hq)
#%%
pl.imshow(tmp[10:160, 120:450], cmap='gray', vmin=lq, vmax=hq)
#%%
m1 = m[:, 10:160, 120:450]
m1.save('MOV_EXAMPLE_20160706154257.tif')
#%%
name_new = cm.save_memmap_each(
['MOV_EXAMPLE_20160706154257.tif'], dview=c[:], base_name=None)
#%%
n_chunks = 6 # increase this number if you have memory issues at this point
fname_new = cm.save_memmap_join(
name_new, base_name='MOV_EXAMPLE_20160706154257__', n_chunks=6, dview=dview)
#%%
Yr, dims, T = cm.load_memmap(
'MOV_EXAMPLE_20160706154257___d1_150_d2_330_d3_1_order_C_frames_2300_.mmap')
d1, d2 = dims
images = np.reshape(Yr.T, [T] + list(dims), order='F')
Y = np.reshape(Yr, dims + (T,), order='F')
#%%
Cn = cm.local_correlations(Y[:, :, :3000], swap_dim=True)
pl.imshow(Cn, cmap='gray')
#%%
rf = 10 # half-size of the patches in pixels. rf=25, patches are 50x50
stride = 4 # amount of overlap between the patches in pixels
K = 4 # number of neurons expected per patch
gSig = [5, 5] # expected half size of neurons
merge_thresh = 0.8 # merging threshold, max correlation allowed
p = 2 # order of the autoregressive system
memory_fact = 1 # unitless number accounting for how much memory should be used. You will need to try different values to see which one works; the default is OK for a 16 GB system
save_results = False
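# Added note (illustration): with rf=10 each patch is roughly (2*rf) x (2*rf) =
# 20x20 pixels, neighbouring patches overlap by stride=4 pixels, and K=4
# components are sought per patch, so the raw component count before merging
# scales roughly with the number of patches times K.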
#%% RUN ALGORITHM ON PATCHES
cnm = cnmf.CNMF(n_processes, k=K, gSig=gSig, merge_thresh=0.8, p=0, dview=dview, Ain=None,
rf=rf, stride=stride, memory_fact=memory_fact,
method_init='greedy_roi', alpha_snmf=10e2)
cnm = cnm.fit(images)
A_tot = cnm.A
C_tot = cnm.C
YrA_tot = cnm.YrA
b_tot = cnm.b
f_tot = cnm.f
sn_tot = cnm.sn
print(('Number of components:' + str(A_tot.shape[-1])))
#%%
final_frate = 2 # approximate final frame rate (after any downsampling)
tB = np.minimum(-2, np.floor(-5. / 30 * final_frate))
tA = np.maximum(5, np.ceil(25. / 30 * final_frate))
Npeaks = 10
traces = C_tot + YrA_tot
# traces_a=traces-scipy.ndimage.percentile_filter(traces,8,size=[1,np.shape(traces)[-1]/5])
# traces_b=np.diff(traces,axis=1)
fitness_raw, fitness_delta, erfc_raw, erfc_delta, r_values, significant_samples = evaluate_components(
Y, traces, A_tot, C_tot, b_tot, f_tot, remove_baseline=True, N=5, robust_std=False, Athresh=0.1, Npeaks=Npeaks, tB=tB, tA=tA, thresh_C=0.3)
idx_components_r = np.where(r_values >= .4)[0]
idx_components_raw = np.where(fitness_raw < -20)[0]
idx_components_delta = np.where(fitness_delta < -10)[0]
idx_components = np.union1d(idx_components_r, idx_components_raw)
idx_components = np.union1d(idx_components, idx_components_delta)
idx_components_bad = np.setdiff1d(list(range(len(traces))), idx_components)
print(('Keeping ' + str(len(idx_components)) +
' and discarding ' + str(len(idx_components_bad))))
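# Added note (illustration): r_values measure how well each spatial footprint
# correlates with the raw movie, while fitness_raw / fitness_delta score the
# statistical significance of the temporal transients (more negative = more
# significant); the union above keeps any component passing at least one test.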
#%%
pl.figure()
crd = plot_contours(A_tot.tocsc()[:, idx_components], Cn, thr=0.9)
#%%
A_tot = A_tot.tocsc()[:, idx_components]
C_tot = C_tot[idx_components]
#%%
cnm = cnmf.CNMF(n_processes, k=A_tot.shape[-1], gSig=gSig, merge_thresh=merge_thresh, p=p, dview=dview, Ain=A_tot, Cin=C_tot,
f_in=f_tot, rf=None, stride=None)
cnm = cnm.fit(images)
#%%
A, C, b, f, YrA, sn = cnm.A, cnm.C, cnm.b, cnm.f, cnm.YrA, cnm.sn
#%%
final_frate = 1
tB = np.minimum(-2, np.floor(-5. / 30 * final_frate))
tA = np.maximum(5, np.ceil(25. / 30 * final_frate))
Npeaks = 10
traces = C + YrA
# traces_a=traces-scipy.ndimage.percentile_filter(traces,8,size=[1,np.shape(traces)[-1]/5])
# traces_b=np.diff(traces,axis=1)
fitness_raw, fitness_delta, erfc_raw, erfc_delta, r_values, significant_samples = \
evaluate_components(Y, traces, A, C, b, f, remove_baseline=True,
N=5, robust_std=False, Athresh=0.1, Npeaks=Npeaks, tB=tB, tA=tA, thresh_C=0.3)
idx_components_r = np.where(r_values >= .5)[0]
idx_components_raw = np.where(fitness_raw < -50)[0]
idx_components_delta = np.where(fitness_delta < -30)[0]
min_radius = gSig[0] - 2
# masks_ws, idx_blobs, idx_non_blobs = extract_binary_masks_blob(
# A.tocsc(), min_radius, dims, num_std_threshold=1,
# minCircularity=0.5, minInertiaRatio=0.2, minConvexity=.7)
#% LOOK FOR BLOB LIKE STRUCTURES!
masks_ws, is_blob, is_non_blob = cm.base.rois.extract_binary_masks_blob_parallel(A.tocsc(), min_radius, dims, num_std_threshold=1,
minCircularity=0.1, minInertiaRatio=0.1, minConvexity=.1, dview=dview)
idx_blobs = np.where(is_blob)[0]
idx_non_blobs = np.where(is_non_blob)[0]
idx_components = np.union1d(idx_components_r, idx_components_raw)
idx_components = np.union1d(idx_components, idx_components_delta)
idx_blobs = np.intersect1d(idx_components, idx_blobs)
idx_components_bad = np.setdiff1d(list(range(len(traces))), idx_components)
print(' ***** ')
print((len(traces)))
print((len(idx_components)))
print((len(idx_blobs)))
#%%
save_results = False
if save_results:
np.savez('results_analysis.npz', Cn=Cn, A=A.todense(), C=C, b=b, f=f, YrA=YrA, sn=sn,
d1=d1, d2=d2, idx_components=idx_components, idx_components_bad=idx_components_bad)
scipy.io.savemat('results_analysis.mat', {'Cn': Cn, 'A': A.toarray(), 'C': C, 'b': b, 'f': f, 'YrA': YrA,
'sn': sn, 'd1': d1, 'd2': d2, 'idx_components': idx_components, 'idx_components_blobs': idx_blobs})
np.savez('results_blobs.npz', spatial_comps=A.tocsc().toarray().reshape(dims + (-1,), order='F').transpose(
[2, 0, 1]), masks=masks_ws, idx_components=idx_components, idx_blobs=idx_blobs, idx_components_bad=idx_components_bad)
#%% visualize components
# pl.figure();
pl.subplot(1, 3, 1)
crd = plot_contours(A.tocsc()[:, idx_components], Cn, thr=0.9)
pl.subplot(1, 3, 2)
crd = plot_contours(A.tocsc()[:, idx_blobs], Cn, thr=0.9)
pl.subplot(1, 3, 3)
crd = plot_contours(A.tocsc()[:, idx_components_bad], Cn, thr=0.9)
#%%
#idx_very_nice=[2, 19, 23, 27,32,43,45,49,51,94,100]
# idx_very_nice=np.array(idx_very_nice)[np.array([3,4,8,10])]
# idx_very_nice=idx_blobs[idx_very_nice]
idx_very_nice = idx_blobs
view_patches_bar(Yr, scipy.sparse.coo_matrix(A.tocsc()[:, idx_very_nice]), C[
idx_very_nice, :], b, f, dims[0], dims[1], YrA=YrA[idx_very_nice, :], img=Cn)
#%%
new_m = cm.movie(np.reshape(A.tocsc()[
:, idx_blobs] * C[idx_blobs] + b.dot(f), dims + (-1,), order='F').transpose([2, 0, 1]))
new_m.play(fr=30, backend='opencv', gain=7., magnification=3.)
#%%
new_m = cm.movie(np.reshape(A.tocsc()[:, idx_blobs] * C[idx_blobs] +
b * np.median(f), dims + (-1,), order='F').transpose([2, 0, 1]))
new_m.play(fr=30, backend='opencv', gain=7., magnification=3.)
#%%
new_m = cm.movie(np.reshape(A.tocsc()[
:, idx_blobs] * C[idx_blobs], dims + (-1,), order='F').transpose([2, 0, 1]))
new_m.play(fr=30, backend='opencv', gain=30., magnification=3.)
#%%
# idx_to_show=[0,1,5,8,14,17,18,23,24,25,26,28,29,31,32,33,34,36,43,45,47,51,53,54,57,60,61,62,63,64,65,66,67,71,72,74,75,78,79,80,81,91,95,96,97,99,102]
#cm.view_patches_bar(Yr,scipy.sparse.coo_matrix(A.tocsc()[:,sure_in_idx[idx_to_show]]),C[sure_in_idx[idx_to_show],:],b,f, dims[0],dims[1], YrA=YrA[sure_in_idx[idx_to_show],:],img=np.mean(Y,-1))
#%%
# idx_to_show=[0,1,5,8,14,17,18,23,24,25,26,28,29,31,32,33,34,36,43,45,47,51,53,54,57,60,61,62,63,64,65,66,67,71,72,74,75,78,79,80,81,91,95,96,97,99,102]
# idx_to_show=np.array(idx_to_show)[[2,19,23,26,34]]
#%%
import numpy as np
import caiman as cm
import scipy
with np.load('results_analysis.npz') as ld:
locals().update(ld)
A = scipy.sparse.coo_matrix(A)
with np.load('results_blobs.npz') as ld:
locals().update(ld)
m = cm.load(
'MOV_EXAMPLE_20160706154257___d1_150_d2_330_d3_1_order_C_frames_2300_.mmap')
Yr, dims, T = cm.load_memmap(
'MOV_EXAMPLE_20160706154257___d1_150_d2_330_d3_1_order_C_frames_2300_.mmap')
d1, d2 = dims
images = np.reshape(Yr.T, [T] + list(dims), order='F')
Y = np.reshape(Yr, dims + (T,), order='F')
#%%
ylimit = 100
pl.figure()
pl.subplot(3, 1, 1)
pl.imshow(np.mean(Y, -1), cmap='gray', vmin=10, vmax=60)
pl.ylim([0, ylimit])
pl.axis('off')
pl.subplot(3, 1, 2)
pl.imshow(np.mean(Y, -1), cmap='gray', vmin=10, vmax=60)
msk = np.reshape(A.tocsc()[:, sure_in_idx[7]].sum(-1), dims, order='F')
msk[msk < 0.01] = np.nan
pl.imshow(msk, cmap='Greens', alpha=.3)
msk = np.reshape(
A.tocsc()[:, sure_in_idx[idx_to_show]].sum(-1), dims, order='F')
msk[msk < 0.01] = np.nan
pl.ylim([0, ylimit])
pl.imshow(msk, cmap='hot', alpha=.3)
pl.axis('off')
pl.subplot(3, 1, 3)
pl.imshow(np.reshape(
A.tocsc()[:, sure_in_idx[idx_to_show]].mean(-1), dims, order='F'), cmap='hot')
pl.ylim([0, ylimit])
pl.axis('off')
font = {'family': 'Myriad Pro',
'weight': 'regular',
'size': 30}
pl.rc('font', **font)
#%
pl.figure()
counter = 0
for iid in sure_in_idx[np.hstack([idx_to_show, 7])]:
counter += 1
pl.subplot(7, 7, counter)
mmsk = np.reshape(A.tocsc()[:, iid].todense(), dims, order='F')
cx, cy = scipy.ndimage.measurements.center_of_mass(np.array(mmsk))
cx = np.int(cx)
cy = np.int(cy)
print((cx, cy))
pl.imshow(mmsk[np.maximum(cx - 15, 0):cx + 15,
np.maximum(cy - 15, 0):cy + 15], cmap='gray')
pl.ylim([0, 30])
pl.axis('off')
pl.title(np.hstack([idx_to_show, 7])[counter - 1])
font = {'family': 'Myriad Pro',
'weight': 'regular',
'size': 30}
pl.rc('font', **font)
#%
pl.figure()
m = np.array(Yr)
bckg_1 = b.dot(f)
nA = (A.power(2)).sum(0)
m = m - bckg_1
Y_r_sig = A.T.dot(m)
Y_r_sig = scipy.sparse.linalg.spsolve(
scipy.sparse.spdiags(np.sqrt(nA), 0, nA.size, nA.size), Y_r_sig)
Y_r_bl = A.T.dot(bckg_1)
Y_r_bl = scipy.sparse.linalg.spsolve(
scipy.sparse.spdiags(np.sqrt(nA), 0, nA.size, nA.size), Y_r_bl)
Y_r_bl = cm.mode_robust(Y_r_bl, 1)
trs = old_div(Y_r_sig, Y_r_bl[:, np.newaxis])
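# Added note (illustration): trs holds each component's signal divided by a
# robust estimate of its baseline fluorescence, i.e. an F/F0-style trace, which
# is what gets plotted for the selected components below.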
cb.trace(trs[np.hstack([sure_in_idx[idx_to_show], 7])].T, fr=6).plot()
# pl.figure()
# cb.trace(trs[sure_in_idx[7]].T,fr=6).plot()
# %%
# done '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b37/20160627105123/'
# errors: '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b37/20160623161504/',
# base_folders=[
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b37/20160627154015/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b37/20160624105838/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b37/20160625132042/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b37/20160626175708/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b37/20160627110747/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b37/20160628100247/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b37/20160705103903/',
#
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b37/20160628162522/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b37/20160629123648/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b37/20160630120544/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b37/20160701113525/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b37/20160702152950/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b37/20160703173620/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b37/20160704130454/',
# ]
# error: '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b35/20160711104450/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b35/20160712105933/',
# base_folders=[
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b35/20160710134627/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b35/20160710193544/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b35/20160711164154/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b35/20160711212316/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b35/20160712101950/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b35/20160712173043/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b35/20160713100916/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b35/20160713171246/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b35/20160714094320/',
# '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b35/20160714143248/'
# ]
# for base_folder in base_folders:
# img_descr=cb.utils.get_image_description_SI(glob(base_folder+'2016*.tif')[0])[0]
# f_rate=img_descr['scanimage.SI.hRoiManager.scanFrameRate']
# print f_rate
# #%%
# fls=glob(os.path.join(base_folder,'2016*.tif'))
# fls.sort()
# print fls
# # verufy they are ordered
# #%%
# triggers_img,trigger_names_img=gc.extract_triggers(fls,read_dictionaries=False)
# np.savez(base_folder+'all_triggers.npz',triggers=triggers_img,trigger_names=trigger_names_img)
# #%% get information from eyelid traces
# t_start=time()
# camera_file=glob(os.path.join(base_folder,'*_cam2.h5'))
# assert len(camera_file)==1, 'there are none or two camera files'
# res_bt=gc.get_behavior_traces(camera_file[0],t0=0,t1=8.0,freq=60,ISI=.25,draw_rois=False,plot_traces=False,mov_filt_1d=True,window_lp=5)
# t_end=time()-t_start
# print t_end
# #%%
# np.savez(base_folder+'behavioral_traces.npz',**res_bt)
# #%%
# with np.load(base_folder+'behavioral_traces.npz') as ld:
# res_bt=dict(**ld)
# #%%
# pl.close()
# tm=res_bt['time']
# f_rate_bh=1/np.median(np.diff(tm))
# ISI=res_bt['trial_info'][0][3]-res_bt['trial_info'][0][2]
# eye_traces=np.array(res_bt['eyelid'])
# idx_CS_US=res_bt['idx_CS_US']
# idx_US=res_bt['idx_US']
# idx_CS=res_bt['idx_CS']
#
# idx_ALL=np.sort(np.hstack([idx_CS_US,idx_US,idx_CS]))
# eye_traces,amplitudes_at_US, trig_CRs=gc.process_eyelid_traces(eye_traces,tm,idx_CS_US,idx_US,idx_CS,thresh_CR=.15,time_CR_on=-.1,time_US_on=.05)
#
# idxCSUSCR = trig_CRs['idxCSUSCR']
# idxCSUSNOCR = trig_CRs['idxCSUSNOCR']
# idxCSCR = trig_CRs['idxCSCR']
# idxCSNOCR = trig_CRs['idxCSNOCR']
# idxNOCR = trig_CRs['idxNOCR']
# idxCR = trig_CRs['idxCR']
# idxUS = trig_CRs['idxUS']
# idxCSCSUS=np.concatenate([idx_CS,idx_CS_US])
#
#
# pl.plot(tm,np.mean(eye_traces[idxCSUSCR],0))
# pl.plot(tm,np.mean(eye_traces[idxCSUSNOCR],0))
# pl.plot(tm,np.mean(eye_traces[idxCSCR],0))
# pl.plot(tm,np.mean(eye_traces[idxCSNOCR],0))
# pl.plot(tm,np.mean(eye_traces[idx_US],0))
# pl.legend(['idxCSUSCR','idxCSUSNOCR','idxCSCR','idxCSNOCR','idxUS'])
# pl.xlabel('time to US (s)')
# pl.ylabel('eyelid closure')
# plt.axvspan(-ISI,ISI, color='g', alpha=0.2, lw=0)
# plt.axvspan(0,0.03, color='r', alpha=0.2, lw=0)
#
# pl.xlim([-.5,1])
# pl.savefig(base_folder+'behavioral_traces.pdf')
# #%%
# #pl.close()
# #bins=np.arange(0,1,.01)
# #pl.hist(amplitudes_at_US[idxCR],bins=bins)
# #pl.hist(amplitudes_at_US[idxNOCR],bins=bins)
# #pl.savefig(base_folder+'hist_behav.pdf')
#
#
# #%%
# pl.close()
# f_results= glob(base_folder+'*results_analysis.npz')
# f_results.sort()
# for rs in f_results:
# print rs
# #%% load results and put them in lists
# A_s,C_s,YrA_s, Cn_s, b_s, f_s, shape = gc.load_results(f_results)
# B_s, lab_imgs, cm_s = gc.threshold_components(A_s,shape, min_size=5,max_size=50,max_perc=.5)
# #%%
# if not batch_mode:
# for i,A_ in enumerate(B_s):
# sizes=np.array(A_.sum(0)).squeeze()
# pl.subplot(2,3,i+1)
# pl.imshow(np.reshape(A_.sum(1),shape,order='F'),cmap='gray',vmax=.5)
# #%% compute mask distances
# if len(B_s)>1:
# max_dist=30
# D_s=gc.distance_masks(B_s,cm_s,max_dist)
# np.savez(base_folder+'distance_masks.npz',D_s=D_s)
# #%%
# if not batch_mode:
# for ii,D in enumerate(D_s):
# pl.subplot(3,3,ii+1)
# pl.imshow(D,interpolation='None')
#
# #%% find matches
# matches,costs = gc.find_matches(D_s, print_assignment=False)
# #%%
# neurons=gc.link_neurons(matches,costs,max_cost=0.6,min_FOV_present=None)
# else:
# neurons=[np.arange(B_s[0].shape[-1])]
# #%%
# np.savez(base_folder+'neurons_matching.npz',matches=matches,costs=costs,neurons=neurons,D_s=D_s)
# #%%
# re_load = False
# if re_load:
# import calblitz as cb
# from calblitz.granule_cells import utils_granule as gc
# from glob import glob
# import numpy as np
# import os
# import scipy
# import pylab as pl
# import ca_source_extraction as cse
#
# if is_blob:
# with np.load(base_folder+'distance_masks.npz') as ld:
# D_s=ld['D_s']
# with np.load(base_folder+'neurons_matching.npz') as ld:
# locals().update(ld)
#
#
#
# with np.load(base_folder+'all_triggers.npz') as at:
# triggers_img=at['triggers']
# trigger_names_img=at['trigger_names']
#
# with np.load(base_folder+'behavioral_traces.npz') as ld:
# res_bt = dict(**ld)
# tm=res_bt['time']
# f_rate_bh=1/np.median(np.diff(tm))
# ISI=res_bt['trial_info'][0][3]-res_bt['trial_info'][0][2]
# eye_traces=np.array(res_bt['eyelid'])
# idx_CS_US=res_bt['idx_CS_US']
# idx_US=res_bt['idx_US']
# idx_CS=res_bt['idx_CS']
#
# idx_ALL=np.sort(np.hstack([idx_CS_US,idx_US,idx_CS]))
# eye_traces,amplitudes_at_US, trig_CRs=gc.process_eyelid_traces(eye_traces,tm,idx_CS_US,idx_US,idx_CS,thresh_CR=.15,time_CR_on=-.1,time_US_on=.05)
#
# idxCSUSCR = trig_CRs['idxCSUSCR']
# idxCSUSNOCR = trig_CRs['idxCSUSNOCR']
# idxCSCR = trig_CRs['idxCSCR']
# idxCSNOCR = trig_CRs['idxCSNOCR']
# idxNOCR = trig_CRs['idxNOCR']
# idxCR = trig_CRs['idxCR']
# idxUS = trig_CRs['idxUS']
# idxCSCSUS=np.concatenate([idx_CS,idx_CS_US])
#
#
# f_results= glob(base_folder+'*results_analysis.npz')
# f_results.sort()
# for rs in f_results:
# print rs
# print '*****'
# A_s,C_s,YrA_s, Cn_s, b_s, f_s, shape = gc.load_results(f_results)
# if is_blob:
# remove_unconnected_components=True
# else:
# remove_unconnected_components=False
#
# neurons=[]
# for xx in A_s:
# neurons.append(np.arange(A_s[0].shape[-1]))
#
# B_s, lab_imgs, cm_s = gc. threshold_components(A_s,shape, min_size=5,max_size=50,max_perc=.5,remove_unconnected_components=remove_unconnected_components)
# #%%
#
# row_cols=np.ceil(np.sqrt(len(A_s)))
# for idx,B in enumerate(A_s):
# pl.subplot(row_cols,row_cols,idx+1)
# pl.imshow(np.reshape(B[:,neurons[idx]].sum(1),shape,order='F'))
# pl.savefig(base_folder+'neuron_matches.pdf')
#
# #%%
# if not batch_mode:
# num_neurons=neurons[0].size
# for neuro in range(num_neurons):
# for idx,B in enumerate(A_s):
# pl.subplot(row_cols,row_cols,idx+1)
# pl.imshow(np.reshape(B[:,neurons[idx][neuro]].sum(1),shape,order='F'))
# pl.pause(.01)
# for idx,B in enumerate(A_s):
# pl.subplot(row_cols,row_cols,idx+1)
# pl.cla()
#
# #%%
# if 0:
# idx=0
# for row, column in zip(matches[idx][0],matches[idx][1]):
# value = D_s[idx][row,column]
# if value < .5:
# pl.cla()
# pl.imshow(np.reshape(B_s[idx][:,row].todense(),(512,512),order='F'),cmap='gray',interpolation='None')
# pl.imshow(np.reshape(B_s[idx+1][:,column].todense(),(512,512),order='F'),alpha=.5,cmap='hot',interpolation='None')
# if B_s[idx][:,row].T.dot(B_s[idx+1][:,column]).todense() == 0:
# print 'Flaw'
# pl.pause(.3)
#
# #%%
# tmpl_name=glob(base_folder+'*template_total.npz')[0]
# print tmpl_name
# with np.load(tmpl_name) as ld:
# mov_names_each=ld['movie_names']
#
#
# traces=[]
# traces_BL=[]
# traces_DFF=[]
# all_chunk_sizes=[]
#
# for idx, mov_names in enumerate(mov_names_each):
# idx=0
# A=A_s[idx][:,neurons[idx]]
# # C=C_s[idx][neurons[idx]]
# # YrA=YrA_s[idx][neurons[idx]]
# b=b_s[idx]
# f=f_s[idx]
# chunk_sizes=[]
# for mv in mov_names:
# base_name=os.path.splitext(os.path.split(mv)[-1])[0]
# with np.load(base_folder+base_name+'.npz') as ld:
# TT=len(ld['shifts'])
# chunk_sizes.append(TT)
#
#
# all_chunk_sizes.append(chunk_sizes)
#
# traces_,traces_DFF_,traces_BL_ = gc.generate_linked_traces(mov_names,chunk_sizes,A,b,f)
# traces=traces+traces_
# traces_DFF=traces_DFF+traces_DFF_
# traces_BL=traces_BL+traces_BL_
#
# #%%
# import pickle
# with open(base_folder+'traces.pk','w') as f:
# pickle.dump(dict(traces=traces,traces_BL=traces_BL,traces_DFF=traces_DFF),f)
#
# #%%
# if not batch_mode:
# with open(base_folder+'traces.pk','r') as f:
# locals().update(pickle.load(f) )
# #%%
# chunk_sizes=[]
# for idx,mvs in enumerate(mov_names_each):
# print idx
# for mv in mvs:
# base_name=os.path.splitext(os.path.split(mv)[-1])[0]
# with np.load(os.path.join(base_folder,base_name+'.npz')) as ld:
# TT=len(ld['shifts'])
# chunk_sizes.append(TT)
#
#
# min_chunk=np.min(chunk_sizes)
# max_chunk=np.max(chunk_sizes)
# num_chunks=np.sum(chunk_sizes)
# #%%
# import copy
# Ftraces=copy.deepcopy(traces_DFF[:])
#
# #%%
#
# #%%
# interpolate=False
# CS_ALONE=0
# US_ALONE= 1
# CS_US=2
#
# samples_before=np.int(2.8*f_rate)
# samples_after=np.int(7.3*f_rate)-samples_before
#
#
# if interpolate:
# Ftraces_mat=np.zeros([len(chunk_sizes),len(traces[0]),max_chunk])
# abs_frames=np.arange(max_chunk)
# else:
# Ftraces_mat=np.zeros([len(chunk_sizes),len(traces[0]),samples_after+samples_before])
#
# crs=idxCR
# nocrs=idxNOCR
# uss=idxUS
#
# triggers_img=np.array(triggers_img)
#
# idx_trig_CS=triggers_img[:][:,0]
# idx_trig_US=triggers_img[:][:,1]
# trial_type=triggers_img[:][:,2]
# length=triggers_img[:][:,-1]
# ISI=np.int(np.nanmedian(idx_trig_US)-np.nanmedian(idx_trig_CS))
#
# for idx,fr in enumerate(chunk_sizes):
#
# print idx
#
# if interpolate:
#
# if fr!=max_chunk:
#
# f1=scipy.interpolate.interp1d(np.arange(fr) , Ftraces[idx] ,axis=1, bounds_error=False, kind='linear')
# Ftraces_mat[idx]=np.array(f1(abs_frames))
#
# else:
#
# Ftraces_mat[idx]=Ftraces[idx][:,trigs_US-samples_before]
#
#
# else:
#
# if trial_type[idx] == CS_ALONE:
# Ftraces_mat[idx]=Ftraces[idx][:,np.int(idx_trig_CS[idx]+ISI-samples_before):np.int(idx_trig_CS[idx]+ISI+samples_after)]
# else:
# Ftraces_mat[idx]=Ftraces[idx][:,np.int(idx_trig_US[idx]-samples_before):np.int(idx_trig_US[idx]+samples_after)]
#
# #%%
# wheel_traces, movement_at_CS, trigs_mov = gc.process_wheel_traces(np.array(res_bt['wheel']),tm,thresh_MOV_iqr=1000,time_CS_on=-.25,time_US_on=0)
# print trigs_mov
# mn_idx_CS_US=np.intersect1d(idx_CS_US,trigs_mov['idxNO_MOV'])
# nm_idx_US=np.intersect1d(idx_US,trigs_mov['idxNO_MOV'])
# nm_idx_CS=np.intersect1d(idx_CS,trigs_mov['idxNO_MOV'])
# nm_idxCSUSCR = np.intersect1d(idxCSUSCR,trigs_mov['idxNO_MOV'])
# nm_idxCSUSNOCR = np.intersect1d(idxCSUSNOCR,trigs_mov['idxNO_MOV'])
# nm_idxCSCR = np.intersect1d(idxCSCR,trigs_mov['idxNO_MOV'])
# nm_idxCSNOCR = np.intersect1d(idxCSNOCR,trigs_mov['idxNO_MOV'])
# nm_idxNOCR = np.intersect1d(idxNOCR,trigs_mov['idxNO_MOV'])
# nm_idxCR = np.intersect1d(idxCR,trigs_mov['idxNO_MOV'])
# nm_idxUS = np.intersect1d(idxUS,trigs_mov['idxNO_MOV'])
# nm_idxCSCSUS=np.intersect1d(idxCSCSUS,trigs_mov['idxNO_MOV'])
# #%%
# threshold_responsiveness=0.1
# ftraces=Ftraces_mat.copy()
# ftraces=ftraces-np.median(ftraces[:,:,:samples_before-ISI],axis=(2))[:,:,np.newaxis]
# amplitudes_responses=np.mean(ftraces[:,:,samples_before+ISI-1:samples_before+ISI+1],-1)
# cell_responsiveness=np.median(amplitudes_responses[nm_idxCSCSUS],axis=0)
# fraction_responsive=len(np.where(cell_responsiveness>threshold_responsiveness)[0])*1./np.shape(ftraces)[1]
# print fraction_responsive
# ftraces=ftraces[:,cell_responsiveness>threshold_responsiveness,:]
# amplitudes_responses=np.mean(ftraces[:,:,samples_before+ISI-1:samples_before+ISI+1],-1)
# #%%
# np.savez('ftraces.npz',ftraces=ftraces,samples_before=samples_before,samples_after=samples_after,ISI=ISI)
#
#
# #%%pl.close()
# pl.close()
# t=np.arange(-samples_before,samples_after)/f_rate
# pl.plot(t,np.median(ftraces[nm_idxCR],axis=(0,1)),'-*')
# pl.plot(t,np.median(ftraces[nm_idxNOCR],axis=(0,1)),'-d')
# pl.plot(t,np.median(ftraces[nm_idxUS],axis=(0,1)),'-o')
# plt.axvspan((-ISI)/f_rate, 0, color='g', alpha=0.2, lw=0)
# plt.axvspan(0, 0.03, color='r', alpha=0.5, lw=0)
# pl.xlabel('Time to US (s)')
# pl.ylabel('DF/F')
# pl.xlim([-.5, 1])
# pl.legend(['CR+','CR-','US'])
# pl.savefig(base_folder+'eyelid_resp_by_trial.pdf')
#
# #%%
# if not batch_mode:
# pl.close()
# for cell in range(ftraces.shape[1]):
# # pl.cla()
# pl.subplot(11,10,cell+1)
# print cell
# tr_cr=np.median(ftraces[crs,cell,:],axis=(0))
# tr_nocr=np.median(ftraces[nocrs,cell,:],axis=(0))
# tr_us=np.median(ftraces[uss,cell,:],axis=(0))
# pl.imshow(ftraces[np.concatenate([uss,nocrs,crs]),cell,:],aspect='auto',vmin=0,vmax=1)
# pl.xlim([samples_before-10,samples_before+10])
# pl.axis('off')
# # pl.plot(tr_cr,'b')
# # pl.plot(tr_nocr,'g')
# # pl.plot(tr_us,'r')
# # pl.legend(['CR+','CR-','US'])
# # pl.pause(1)
# #%%
# import pandas
#
# bins=np.arange(-.1,.3,.05)
# n_bins=6
# dfs=[];
# dfs_random=[];
# x_name='ampl_eye'
# y_name='ampl_fl'
# for resps in amplitudes_responses.T:
# idx_order=np.arange(len(idxCSCSUS))
# dfs.append(pandas.DataFrame(
# {y_name: resps[idxCSCSUS[idx_order]],
# x_name: amplitudes_at_US[idxCSCSUS]}))
#
# idx_order=np.random.permutation(idx_order)
# dfs_random.append(pandas.DataFrame(
# {y_name: resps[idxCSCSUS[idx_order]],
# x_name: amplitudes_at_US[idxCSCSUS]}))
#
#
# r_s=[]
# r_ss=[]
#
# for df,dfr in zip(dfs,dfs_random): # random scramble
#
# if bins is None:
# [_,bins]=np.histogram(dfr.ampl_eye,n_bins)
# groups = dfr.groupby(np.digitize(dfr.ampl_eye, bins))
# grouped_mean = groups.mean()
# grouped_sem = groups.sem()
# (r,p_val)=scipy.stats.pearsonr(grouped_mean.ampl_eye,grouped_mean.ampl_fl)
# # r=np.corrcoef(grouped_mean.ampl_eye,grouped_mean.ampl_fl)[0,1]
#
# r_ss.append(r)
#
# if bins is None:
# [_,bins]=np.histogram(df.ampl_eye,n_bins)
#
# groups = df.groupby(np.digitize(df.ampl_eye, bins))
# grouped_mean = groups.mean()
# grouped_sem= groups.sem()
# (r,p_val)=scipy.stats.pearsonr(grouped_mean.ampl_eye,grouped_mean.ampl_fl)
# # r=np.corrcoef(grouped_mean.ampl_eye,grouped_mean.ampl_fl)[0,1]
# r_s.append(r)
# if r_s[-1]>.86:
# pl.subplot(1,2,1)
# print 'found'
# pl.errorbar(grouped_mean.ampl_eye,grouped_mean.ampl_fl,grouped_sem.ampl_fl.as_matrix(),grouped_sem.ampl_eye.as_matrix(),fmt='.')
# pl.scatter(grouped_mean.ampl_eye,grouped_mean.ampl_fl,s=groups.apply(len).values*3)#
# pl.xlabel(x_name)
# pl.ylabel(y_name)
#
# mu_scr=np.mean(r_ss)
#
# std_scr=np.std(r_ss)
# [a,b]=np.histogram(r_s,20)
#
# pl.subplot(1,2,2)
# pl.plot(b[1:],scipy.signal.savgol_filter(a,3,1))
# plt.axvspan(mu_scr-std_scr, mu_scr+std_scr, color='r', alpha=0.2, lw=0)
# pl.xlabel('correlation coefficients')
# pl.ylabel('bin counts')
# pl.savefig(base_folder+'correlations.pdf')
#
#
#
# #%%
# if not batch_mode:
# r_s=[]
# for resps in amplitudes_responses.T:
# r=np.corrcoef(amplitudes_at_US[idxCSCSUS],resps[idxCSCSUS])[0,1]
# # if r>.25:
# # pl.scatter(amplitudes_at_US[idxCSCSUS],resps[idxCSCSUS])
# # bins=np.arange(-.3,1.5,.2)
# # a,b=np.histogram(resps,bins)
# # new_dat=[]
# # for bb in a:
# #
# r_s.append(r)
# pl.xlabel('Amplitudes CR')
# pl.ylabel('Amplitudes GC responses')
#
# pl.hist(r_s)
#
# %%
###
# base_name='20160518133747_'
# cam1=base_name+'cam1.h5'
# cam2=base_name+'cam2.h5'
# meta_inf=base_name+'data.h5'
###
# mtot=[]
# eye_traces=[]
# tims=[]
# trial_info=[]
###
# with h5py.File(cam2) as f:
###
# with h5py.File(meta_inf) as dt:
###
# rois=np.asarray(dt['roi'],np.float32)
###
### trials = f.keys()
# trials.sort(key=lambda(x): np.int(x.replace('trial_','')))
### trials_idx=[np.int(x.replace('trial_',''))-1 for x in trials]
###
###
###
###
# for tr,idx_tr in zip(trials,trials_idx):
###
# print tr
###
# trial=f[tr]
###
# mov=np.asarray(trial['mov'])
###
# if 0:
###
# pl.imshow(np.mean(mov,0))
# pts=pl.ginput(-1)
### pts = np.asarray(pts, dtype=np.int32)
### data = np.zeros(np.shape(mov)[1:], dtype=np.int32)
# if CV_VERSION == 2:
# lt = cv2.CV_AA
# elif CV_VERSION == 3:
### lt = cv2.LINE_AA
### cv2.fillConvexPoly(data, pts, (1,1,1), lineType=lt)
# rois[0]=data
# eye_trace=np.mean(mov*rois[0],axis=(1,2))
# mov_trace=np.mean((np.diff(np.asarray(mov,dtype=np.float32),axis=0)**2)*rois[1],axis=(1,2))
# mov=np.transpose(mov,[0,2,1])
###
# mov=mov[:,:,::-1]
###
# if mov.shape[0]>0:
# ts=np.array(trial['ts'])
# if np.size(ts)>0:
# print (ts[-1,0]-ts[0,0])
# new_ts=np.linspace(0,ts[-1,0]-ts[0,0],np.shape(mov)[0])
###
# print 1/np.mean(np.diff(new_ts))
# tims.append(new_ts)
###
# mov=cb.movie(mov*rois[0][::-1].T,fr=1/np.mean(np.diff(new_ts)))
# x_max,y_max=np.max(np.nonzero(np.max(mov,0)),1)
# x_min,y_min=np.min(np.nonzero(np.max(mov,0)),1)
# mov=mov[:,x_min:x_max,y_min:y_max]
### mov=np.mean(mov, axis=(1,2))
###
# if mov.ndim == 3:
# window_hp=(177,1,1)
# window_lp=(7,1,1)
# bl=signal.medfilt(mov,window_hp)
# mov=signal.medfilt(mov-bl,window_lp)
###
# else:
# window_hp=201
# window_lp=3
# bl=signal.medfilt(mov,window_hp)
# bl=cm.mode_robust(mov)
# mov=signal.medfilt(mov-bl,window_lp)
###
###
# if mov.ndim == 3:
### eye_traces.append(np.mean(mov, axis=(1,2)))
# else:
# eye_traces.append(mov)
###
# mtot.append(mov)
# trial_info.append(dt['trials'][idx_tr,:])
# cb.movie(mov,fr=1/np.mean(np.diff(new_ts)))
##
# %%
# %%
# sub_trig_img=downsample_triggers(triggers_img.copy(),fraction_downsample=.3)
# %%
# if num_frames_movie != triggers[-1,-1]:
## raise Exception('Triggers values do not match!')
##
# %%
# fnames=[]
# sub_trig_names=trigger_names[39:95].copy()
# sub_trig=triggers[39:95].copy().T
# for a,b in zip(sub_trig_names,sub_trig):
# fnames.append(a+'.hdf5')
###
# fraction_downsample=.333333333333333333333; # useful to downsample the movie across time. fraction_downsample=.1 measn downsampling by a factor of 10
# sub_trig[:2]=np.round(sub_trig[:2]*fraction_downsample)
# sub_trig[-1]=np.floor(sub_trig[-1]*fraction_downsample)
# sub_trig[-1]=np.cumsum(sub_trig[-1])
# fname_new=cm.save_memmap(fnames,base_name='Yr',resize_fact=(1,1,fraction_downsample),remove_init=0,idx_xy=(slice(90,-10,None),slice(30,-120,None)))
# %%
# m=cb.load(fname_new,fr=30*fraction_downsample)
# T,d1,d2=np.shape(m)
# %%
# if T != sub_trig[-1,-1]:
### raise Exception('Triggers values do not match!')
# %% how to take triggered aligned movie
# wvf=mmm.take(trg)
# %%
# newm=m.take(trg,axis=0)
# newm=newm.mean(axis=1)
# %%
# (newm-np.mean(newm,0)).play(backend='opencv',fr=3,gain=2.,magnification=1,do_loop=True)
# %%v
# Yr,d1,d2,T=cm.load_memmap(fname_new)
# d,T=np.shape(Yr)
# Y=np.reshape(Yr,(d1,d2,T),order='F') # 3D version of the movie
##
# %%
##
# pl.plot(np.nanmedian(np.array(eye_traces).T,1))
##
# %%
##mov = np.concatenate(mtot,axis=0)
# m1=cb.movie(mov,fr=1/np.mean(np.diff(new_ts)))
# x_max,y_max=np.max(np.nonzero(np.max(m,0)),1)
# x_min,y_min=np.min(np.nonzero(np.max(m,0)),1)
# m1=m[:,x_min:x_max,y_min:y_max]
# %% filters
##b, a = signal.butter(8, [.05, .5] ,'bandpass')
# pl.plot(np.mean(m1,(1,2))-80)
# pl.plot(signal.lfilter(b,a,np.mean(m1,(1,2))),linewidth=2)
# %%
# m1.play(backend='opencv',gain=1.,fr=f_rate,magnification=3)
# %% NMF
##comps, tim,_=cb.behavior.extract_components(np.maximum(0,m1-np.min(m1,0)),n_components=4,init='nndsvd',l1_ratio=1,alpha=0,max_iter=200,verbose=True)
# pl.plot(np.squeeze(np.array(tim)).T)
# %% ICA
##from sklearn.decomposition import FastICA
# fica=FastICA(n_components=3,whiten=True,max_iter=200,tol=1e-6)
# X=fica.fit_transform(np.reshape(m1,(m1.shape[0],m1.shape[1]*m1.shape[2]),order='F').T,)
# pl.plot(X)
# %%
# for count,c in enumerate(comps):
# pl.subplot(2,3,count+1)
# pl.imshow(c)
##
# %%
# md=cm.mode_robust(m1,0)
# mm1=m1*(m1<md)
# rob_std=np.sum(mm1**2,0)/np.sum(mm1>0,0)
# rob_std[np.isnan(rob_std)]=0
# mm2=m1*(m1>(md+rob_std))
# %%
##
##dt = h5py.File('20160423165229_data.h5')
# sync for software
# np.array(dt['sync'])
# dt['sync'].attrs['keys']
# dt['trials']
# dt['trials'].attrs
# dt['trials'].attrs['keys']
# you need to apply the sync from dt['sync'] here, like,
##us_time_cam1=np.asarray(dt['trials'])[:,3] - np.array(dt['sync'])[1]
# main is used as the true time stamp, and you can adjust the value with respect to main sync value
# np.array(dt['sync']) # these are the values read on a unique clock from the three threads
# %%
##from skimage.external import tifffile
##
# tf=tifffile.TiffFile('20160423165229_00001_00001.tif')
# imd=tf.pages[0].tags['image_description'].value
# for pag in tf.pages:
# imd=pag.tags['image_description'].value
# i2cd=si_parse(imd)['I2CData']
## print (i2cd)
# %%
# with h5py.File('20160705103903_cam2.h5') as f1:
# for k in f1.keys()[:1]:
### m = np.array(f1[k]['mov'])
###
###
# pl.imshow(np.mean(m,0),cmap='gray')
# %%
# with h5py.File('20160705103903_data.h5') as f1:
# print f1.keys()
### rois= np.array(f1['roi'])
# %%
# with h5py.File('20160705103903_cam2.h5') as f1:
# for k in f1.keys()[:1]:
### m = np.array(f1[k]['mov'])
###
###
# pl.imshow(np.mean(m,0),cmap='gray')
# pl.imshow(rois[0],alpha=.3)
# pl.imshow(rois[1],alpha=.3)
###
| gpl-2.0 |
ryfeus/lambda-packs | LightGBM_sklearn_scipy_numpy/source/sklearn/mixture/tests/test_dpgmm.py | 84 | 7866 | # Important note for the deprecation cleaning of 0.20 :
# All the function and classes of this file have been deprecated in 0.18.
# When you remove this file please also remove the related files
# - 'sklearn/mixture/dpgmm.py'
# - 'sklearn/mixture/gmm.py'
# - 'sklearn/mixture/test_gmm.py'
import unittest
import sys
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less, assert_equal
from sklearn.utils.testing import assert_warns_message, ignore_warnings
from sklearn.mixture.tests.test_gmm import GMMTester
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.mixture.dpgmm import digamma, gammaln
from sklearn.mixture.dpgmm import wishart_log_det, wishart_logz
np.seterr(all='warn')
@ignore_warnings(category=DeprecationWarning)
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
@ignore_warnings(category=DeprecationWarning)
def test_verbose_boolean():
# checks that the output for the verbose output is the same
# for the flag values '1' and 'True'
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm_bool = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=True)
dpgmm_int = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
# generate output with the boolean flag
dpgmm_bool.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
bool_output = verbose_output.readline()
# generate output with the int flag
dpgmm_int.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
int_output = verbose_output.readline()
assert_equal(bool_output, int_output)
finally:
sys.stdout = old_stdout
@ignore_warnings(category=DeprecationWarning)
def test_verbose_first_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
@ignore_warnings(category=DeprecationWarning)
def test_verbose_second_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
@ignore_warnings(category=DeprecationWarning)
def test_digamma():
assert_warns_message(DeprecationWarning, "The function digamma is"
" deprecated in 0.18 and will be removed in 0.20. "
"Use scipy.special.digamma instead.", digamma, 3)
@ignore_warnings(category=DeprecationWarning)
def test_gammaln():
assert_warns_message(DeprecationWarning, "The function gammaln"
" is deprecated in 0.18 and will be removed"
" in 0.20. Use scipy.special.gammaln instead.",
gammaln, 3)
@ignore_warnings(category=DeprecationWarning)
def test_log_normalize():
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
result = assert_warns_message(DeprecationWarning, "The function "
"log_normalize is deprecated in 0.18 and"
" will be removed in 0.20.",
log_normalize, a)
assert np.allclose(v, result, rtol=0.01)
@ignore_warnings(category=DeprecationWarning)
def test_wishart_log_det():
a = np.array([0.1, 0.8, 0.01, 0.09])
b = np.array([0.2, 0.7, 0.05, 0.1])
assert_warns_message(DeprecationWarning, "The function "
"wishart_log_det is deprecated in 0.18 and"
" will be removed in 0.20.",
wishart_log_det, a, b, 2, 4)
@ignore_warnings(category=DeprecationWarning)
def test_wishart_logz():
assert_warns_message(DeprecationWarning, "The function "
"wishart_logz is deprecated in 0.18 and "
"will be removed in 0.20.", wishart_logz,
3, np.identity(3), 1, 3)
@ignore_warnings(category=DeprecationWarning)
def test_DPGMM_deprecation():
assert_warns_message(
DeprecationWarning, "The `DPGMM` class is not working correctly and "
"it's better to use `sklearn.mixture.BayesianGaussianMixture` class "
"with parameter `weight_concentration_prior_type='dirichlet_process'` "
"instead. DPGMM is deprecated in 0.18 and will be removed in 0.20.",
DPGMM)
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
def test_VBGMM_deprecation():
assert_warns_message(
DeprecationWarning, "The `VBGMM` class is not working correctly and "
"it's better to use `sklearn.mixture.BayesianGaussianMixture` class "
"with parameter `weight_concentration_prior_type="
"'dirichlet_distribution'` instead. VBGMM is deprecated "
"in 0.18 and will be removed in 0.20.", VBGMM)
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
def test_vbgmm_no_modify_alpha():
alpha = 2.
n_components = 3
X, y = make_blobs(random_state=1)
vbgmm = VBGMM(n_components=n_components, alpha=alpha, n_iter=1)
assert_equal(vbgmm.alpha, alpha)
assert_equal(vbgmm.fit(X).alpha_, float(alpha) / n_components)
| mit |
tobikausk/nest-simulator | pynest/examples/sinusoidal_gamma_generator.py | 3 | 11701 | # -*- coding: utf-8 -*-
#
# sinusoidal_gamma_generator.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
#
'''
Sinusoidal gamma generator example
----------------------------------
This script demonstrates the use of the `sinusoidal_gamma_generator`
and its different parameters and modes. The source code of the model
can be found in models/sinusoidal_gamma_generator.h.
The script is structured into two parts, each of which generates its
own figure. In Part 1A, two generators are created with different
orders of the underlying gamma process, and their resulting PST
(peristimulus time) and ISI (inter-spike interval) histograms are
plotted. Part 1B illustrates the effect of the
``individual_spike_trains`` switch. In Part 2, the effects of
different settings for rate, phase and frequency are demonstrated.
'''
'''
First, we import all necessary modules for simulation, analysis and
plotting.
'''
import nest
import matplotlib.pyplot as plt
import numpy as np
nest.ResetKernel() # in case we run the script multiple times from iPython
'''
We first create a figure for the plot and set the resolution of NEST.
'''
plt.figure()
nest.SetKernelStatus({'resolution': 0.01})
'''
Then we create two instances of the `sinusoidal_gamma_generator`
with two different orders of the underlying gamma process using
`Create`. Moreover, we create devices to record firing rates
(`multimeter`) and spikes (`spike_detector`) and connect them to the
generators using `Connect`.
'''
g = nest.Create('sinusoidal_gamma_generator', n=2,
params=[{'rate': 10000.0, 'amplitude': 5000.0,
'frequency': 10.0, 'phase': 0.0, 'order': 2.0},
{'rate': 10000.0, 'amplitude': 5000.0,
'frequency': 10.0, 'phase': 0.0, 'order': 10.0}])
m = nest.Create('multimeter', n=2, params={'interval': 0.1, 'withgid': False,
'record_from': ['rate']})
s = nest.Create('spike_detector', n=2, params={'withgid': False})
nest.Connect(m, g, 'one_to_one')
nest.Connect(g, s, 'one_to_one')
nest.Simulate(200)
'''
After simulating, the spikes are extracted from the
`spike_detector` using `GetStatus` and plots are created with panels
for the PST and ISI histograms.
'''
colors = ['b', 'g']
for j in range(2):
ev = nest.GetStatus([m[j]])[0]['events']
t = ev['times']
r = ev['rate']
sp = nest.GetStatus([s[j]])[0]['events']['times']
plt.subplot(221)
h, e = np.histogram(sp, bins=np.arange(0., 201., 5.))
plt.plot(t, r, color=colors[j])
plt.step(e[:-1], h * 1000 / 5., color=colors[j], where='post')
plt.title('PST histogram and firing rates')
plt.ylabel('Spikes per second')
plt.subplot(223)
plt.hist(np.diff(sp), bins=np.arange(0., 0.505, 0.01),
histtype='step', color=colors[j])
plt.title('ISI histogram')
'''
The kernel is reset and the number of threads set to 4.
'''
nest.ResetKernel()
nest.SetKernelStatus({'local_num_threads': 4})
'''
First, a `sinusoidal_gamma_generator` with
`individual_spike_trains` set to ``True`` is created and connected to
20 parrot neurons whose spikes are recorded by a spike detector. After
simulating, a raster plot of the spikes is created.
'''
g = nest.Create('sinusoidal_gamma_generator',
params={'rate': 100.0, 'amplitude': 50.0,
'frequency': 10.0, 'phase': 0.0, 'order': 3.,
'individual_spike_trains': True})
p = nest.Create('parrot_neuron', 20)
s = nest.Create('spike_detector')
nest.Connect(g, p)
nest.Connect(p, s)
nest.Simulate(200)
ev = nest.GetStatus(s)[0]['events']
plt.subplot(222)
plt.plot(ev['times'], ev['senders'] - min(ev['senders']), 'o')
plt.ylim([-0.5, 19.5])
plt.yticks([])
plt.title('Individual spike trains for each target')
'''
The kernel is reset again and the whole procedure is repeated for
a `sinusoidal_gamma_generator` with `individual_spike_trains` set to ``False``.
The plot shows that in this case, all neurons receive the same spike train from
the `sinusoidal_gamma_generator`.
'''
nest.ResetKernel()
nest.SetKernelStatus({'local_num_threads': 4})
g = nest.Create('sinusoidal_gamma_generator',
params={'rate': 100.0, 'amplitude': 50.0,
'frequency': 10.0, 'phase': 0.0, 'order': 3.,
'individual_spike_trains': False})
p = nest.Create('parrot_neuron', 20)
s = nest.Create('spike_detector')
nest.Connect(g, p)
nest.Connect(p, s)
nest.Simulate(200)
ev = nest.GetStatus(s)[0]['events']
plt.subplot(224)
plt.plot(ev['times'], ev['senders'] - min(ev['senders']), 'o')
plt.ylim([-0.5, 19.5])
plt.yticks([])
plt.title('One spike train for all targets')
'''
In part 2, multiple generators are created with different settings
for rate, phase and frequency. First, we define an auxiliary function
which simulates ``n`` generators for ``t`` ms. After ``t/2``, the
parameter dictionary of the generators is changed from ``initial`` to
``after``.
'''
def step(t, n, initial, after, seed=1, dt=0.05):
"""Simulates for n generators for t ms. Step at t/2."""
nest.ResetKernel()
nest.SetStatus([0], [{"resolution": dt}])
nest.SetStatus([0], [{"grng_seed": 256 * seed + 1}])
nest.SetStatus([0], [{"rng_seeds": [256 * seed + 2]}])
g = nest.Create('sinusoidal_gamma_generator', n, params=initial)
sd = nest.Create('spike_detector')
nest.Connect(g, sd)
nest.Simulate(t / 2)
nest.SetStatus(g, after)
nest.Simulate(t / 2)
return nest.GetStatus(sd, 'events')[0]
'''
This function serves to plot a histogram of the emitted spikes.
'''
def plot_hist(spikes):
plt.hist(spikes['times'],
bins=np.arange(0., max(spikes['times']) + 1.5, 1.),
histtype='step')
t = 1000
n = 1000
dt = 1.0
steps = int(t / dt)
offset = t / 1000. * 2 * np.pi
'''
We create a figure with a 2x3 grid.
'''
grid = (2, 3)
fig = plt.figure(figsize=(15, 10))
'''
Simulate a `sinusoidal_gamma_generator` with default parameter
values, i.e. ac=0 and the DC value being changed from 20 to 50 after
``t/2`` and plot the number of spikes per second over time.
'''
plt.subplot(grid[0], grid[1], 1)
spikes = step(t, n,
{'rate': 20.0},
{'rate': 50.0, },
seed=123, dt=dt)
plot_hist(spikes)
exp = np.ones(steps)
exp[:int(steps / 2)] *= 20
exp[int(steps / 2):] *= 50
plt.plot(exp, 'r')
plt.title('DC rate: 20 -> 50')
plt.ylabel('Spikes per second')
'''
Simulate a `sinusoidal_gamma_generator` with the DC value being
changed from 80 to 40 after ``t/2`` and plot the number of spikes per
second over time.
'''
plt.subplot(grid[0], grid[1], 2)
spikes = step(t, n,
{'order': 6.0, 'rate': 80.0, 'amplitude': 0.,
'frequency': 0., 'phase': 0.},
{'order': 6.0, 'rate': 40.0, 'amplitude': 0.,
'frequency': 0., 'phase': 0.},
seed=123, dt=dt)
plot_hist(spikes)
exp = np.ones(steps)
exp[:int(steps / 2)] *= 80
exp[int(steps / 2):] *= 40
plt.plot(exp, 'r')
plt.title('DC rate: 80 -> 40')
'''
Simulate a `sinusoidal_gamma_generator` with the AC value being
changed from 40 to 20 after ``t/2`` and plot the number of spikes per
second over time.
'''
plt.subplot(grid[0], grid[1], 3)
spikes = step(t, n,
{'order': 3.0, 'rate': 40.0, 'amplitude': 40.,
'frequency': 10., 'phase': 0.},
{'order': 3.0, 'rate': 40.0, 'amplitude': 20.,
'frequency': 10., 'phase': 0.},
seed=123, dt=dt)
plot_hist(spikes)
exp = np.zeros(int(steps))
exp[:int(steps / 2)] = (40. +
40. * np.sin(np.arange(0, t / 1000. * np.pi * 10,
t / 1000. * np.pi * 10. /
(steps / 2))))
exp[int(steps / 2):] = (40. + 20. * np.sin(np.arange(0, t / 1000. * np.pi * 10,
t / 1000. * np.pi * 10. /
(steps / 2)) + offset))
plt.plot(exp, 'r')
plt.title('Rate Modulation: 40 -> 20')
'''
Simulate a `sinusoidal_gamma_generator` with a non-zero AC value
and the DC value being changed from 80 to 40 after ``t/2`` and plot
the number of spikes per second over time.
'''
plt.subplot(grid[0], grid[1], 4)
spikes = step(t, n,
{'order': 6.0, 'rate': 20.0, 'amplitude': 20.,
'frequency': 10., 'phase': 0.},
{'order': 6.0, 'rate': 50.0, 'amplitude': 50.,
'frequency': 10., 'phase': 0.},
seed=123, dt=dt)
plot_hist(spikes)
exp = np.zeros(int(steps))
exp[:int(steps / 2)] = (20. + 20. * np.sin(np.arange(0, t / 1000. * np.pi * 10,
t / 1000. * np.pi * 10. /
(steps / 2))))
exp[int(steps / 2):] = (50. + 50. * np.sin(np.arange(0, t / 1000. * np.pi * 10,
t / 1000. * np.pi * 10. /
(steps / 2)) + offset))
plt.plot(exp, 'r')
plt.title('DC Rate and Rate Modulation: 20 -> 50')
plt.ylabel('Spikes per second')
plt.xlabel('Time [ms]')
'''
Simulate a `sinusoidal_gamma_generator` with the AC value being
changed from 0 to 40 after ``t/2`` and plot the number of spikes per
second over time.
'''
plt.subplot(grid[0], grid[1], 5)
spikes = step(t, n,
{'rate': 40.0, },
{'amplitude': 40.0, 'frequency': 20.},
seed=123, dt=1.)
plot_hist(spikes)
exp = np.zeros(int(steps))
exp[:int(steps / 2)] = 40. * np.ones(int(steps / 2))
exp[int(steps / 2):] = (40. + 40. * np.sin(np.arange(0, t / 1000. * np.pi * 20,
t / 1000. * np.pi * 20. /
(steps / 2))))
plt.plot(exp, 'r')
plt.title('Rate Modulation: 0 -> 40')
plt.xlabel('Time [ms]')
'''
Simulate a `sinusoidal_gamma_generator` with a phase shift at
``t/2`` and plot the number of spikes per second over time.
'''
# Phase shift
plt.subplot(grid[0], grid[1], 6)
spikes = step(t, n,
{'order': 6.0, 'rate': 60.0, 'amplitude': 60.,
'frequency': 10., 'phase': 0.},
{'order': 6.0, 'rate': 60.0, 'amplitude': 60.,
'frequency': 10., 'phase': 180.},
seed=123, dt=1.)
plot_hist(spikes)
exp = np.zeros(int(steps))
exp[:int(steps / 2)] = (60. + 60. * np.sin(np.arange(0, t / 1000. * np.pi * 10,
t / 1000. * np.pi * 10. /
(steps / 2))))
exp[int(steps / 2):] = (60. + 60. * np.sin(np.arange(0, t / 1000. * np.pi * 10,
t / 1000. * np.pi * 10. /
(steps / 2)) +
offset + np.pi))
plt.plot(exp, 'r')
plt.title('Modulation Phase: 0 -> Pi')
plt.xlabel('Time [ms]')
| gpl-2.0 |
zekearneodo/ephys-tools | ephysScripts/analysis/response_functions.py | 1 | 10469 | from __future__ import division
__author__ = 'zeke'
import numpy as np
from scipy.stats import ks_2samp
import itertools
import matplotlib.pyplot as plt
from data_handling.data_load import get_warping_parameters
from data_handling.basic_plot import decim, col_binned, plot_raster, make_psth
import stimulus as st
# compare one raster against another one with many more trials
def raster_compare(stimulus_sa, baseline_sa, bootstrap=False):
"""
stimulus_sa: numpy array of shape (n_bins, n_trials)
baseline_sa: numpy array of shape (n_bins, n_sniffs)
    returns:
ps: vector of p(x_bl > x_stim)
baseline_boot: array of psths simulated picking n_trials from baseline dist
ks_p: vector of p-values of two sample ks test
ks_stat: vector of statistic value for the ks test
"""
def draw_and_mean(spike_array, n_bs, n_trials):
# draw= np.empty_like(bl_sa)
draw_indexes = np.random.randint(0, spike_array.shape[1]-1, (n_bs, n_trials))
psths = np.empty((n_bs, spike_array.shape[0]))
for i in xrange(n_bs):
draw_is = draw_indexes[i]
draw = spike_array[:, draw_is]
psths[i, :] = draw.mean(axis=1)
return psths
"""
spike arrays are each numpy.array of shape (n_bins, n_trials) (for baseline it's actually n_sniffs instead of trials)
spike arrays here are normalized (Hz).
"""
assert(stimulus_sa.shape[0] == baseline_sa.shape[0])
response_mean = stimulus_sa.mean(axis=1)
ntr = stimulus_sa.shape[1] # number of trials in stimulus.
ks_p = np.empty_like(response_mean)
ks_stat = np.empty_like(response_mean)
for j in xrange(len(response_mean)):
r = response_mean[j]
ks_stat[j], ks_p[j] = ks_2samp(stimulus_sa[j, :], baseline_sa[j, :])
if bootstrap:
baseline_boot = draw_and_mean(baseline_sa, 100000, ntr)
ps = np.empty_like(response_mean)
for j in xrange(len(response_mean)):
r = response_mean[j]
base = baseline_boot[:, j]
p = np.sum(base >= r)/len(base)
ps[j] = p
ps = np.asarray(ps)
else:
ps = None
baseline_boot = None
return ps, baseline_boot, ks_p, ks_stat
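# Illustrative call (hypothetical, already-binned spike arrays in Hz that share the
# same bin edges); ks_p then holds one KS p-value per bin:
#   ps, boot, ks_p, ks_stat = raster_compare(stim_binned, base_binned, bootstrap=True)
#   significant_bins = np.where(ks_p < 0.05)[0]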
# find the onset of a divergence of the two series of bins with a p-value lower than p
def find_onset(response, bin_size=10, t_post=0, p=0.05, warped=False, debug=False):
"""
:param response: response object (with baseline) or cellResponse object
:param bin_size: size of the bin for comparison (int)
:param t_post: time after onset of stimulus (or sniff) for the search
:param p: p-value for accepting the hypothesis (ks test)
:param warped: whether to warp the data or not
:return: onset: first bin of significant difference (np.nan if no significant diff found in range)
supra: whether the deviation is above or below baseline
"""
if isinstance(response, st.CellResponse):
t_post = response.inh_len + response.exh_len if t_post == 0 else t_post
rst_sa, bl_sa = response.make_raster(t_pre=0, t_post=t_post, warped=warped)
rst_sa = col_binned(rst_sa, bin_size).transpose()
bl_sa = col_binned(bl_sa, bin_size).transpose()
else:
if t_post == 0:
all_sniffs = np.sort(response.baseline.sniff_data, order=['inh_len', 't_0'])
if warped:
inh_len, exh_len = get_warping_parameters(all_sniffs, means=False)
else:
inh_len, exh_len = get_warping_parameters(all_sniffs, means=True)
t_post = inh_len + exh_len
rst_sa = col_binned(response.make_raster(t_pre=0, t_post=t_post, warped=warped), bin_size).transpose()
bl_sa = col_binned(response.baseline.make_raster(t_pre=0, t_post=t_post, warped=warped), bin_size).transpose()
# get the statistics
_, _, ks, kst = raster_compare(rst_sa, bl_sa, bootstrap=False)
# debugging
if debug:
t_pre = 0
rst = response.make_raster(warped=warped, t_pre=0, t_post=t_post)
bl=response.baseline.make_raster(t_pre=0, t_post=t_post, warped=warped)
events = bl.shape[0]
t_stamps = bl.shape[1]
t=np.arange(t_stamps)
t_dec = decim(t, bin_size)
#plot_raster(bl, t0=t_pre, t2=t_post, bin_size=bin_size)
#plot_raster(rst, t0=t_pre, t2=t_post, bin_size=bin_size)
plt.plot(t_dec, rst_sa.mean(axis=1))
plt.plot(t_dec, bl_sa.mean(axis=1))
#plt.plot(t_dec, psths[t_post//bin_size, :])
response_mean = rst_sa.mean(axis=1)
#plt.plot(t_dec, psths.mean(axis=0))
plt.figure()
plt.plot(t_dec, ks)
#plt.plot(t_dec, ps)
#find the first significant difference
onset = next(itertools.ifilter(lambda i: ks[i] < p, range(len(ks))), None)
if onset is not None:
is_supra = rst_sa.mean(axis=1)[onset] > bl_sa.mean(axis=1)[onset]
else:
onset = np.nan
is_supra = None
return onset, is_supra, ks
# find the onset of a divergence of the two series of bins with a p-value lower than p using a two-step approach:
# fist find roughly the onset using find_onset (binned data, ks test)
# then do a fine search within that segment using the bootstrap procedure
def find_detailed_onset(response, bin_size=10, precision=1, p_ks=0.05, p_bs=0.001, warped=False, t_post=0):
#get the bin onset using the KS test and a large bin_size
"""
:param response: Response object (with baseline) or CellResponse object
:param bin_size: size of the bin for comparison (int)
:param precision: size of the bin for the second step (bootstrap test)
:param p_ks: p-value for rejecting null hypothesis in first step
:param p_bs: p-value for rejecting null hypothesis in second step
:param warped: whether to warp the data or not
:param t_post: time after onset of stimulus (or sniff) for the search
:return:
"""
onset, is_supra, ks = find_onset(response, bin_size=bin_size, p=p_ks, warped=warped, t_post=t_post)
if onset is np.nan:
return onset, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan
#now get a more detailed one using the bootstrap over a two-bin window
t1 = int(round(onset-1)*bin_size)
t1 = max(0, t1)
t2 = t1 + 2*bin_size
if isinstance(response, st.CellResponse):
rst, bl = response.make_raster(t_pre=0, t_post = t2 + bin_size, warped=warped)
rst = rst[:, t1:t2 + bin_size]
bl = bl[:, t1:t2 + bin_size]
else:
rst = response.make_raster(t_pre=0, t_post=t2 + bin_size, warped=warped)[:, t1:t2 + bin_size]
bl = response.baseline.make_raster(t_pre=0, t_post=t2 + bin_size, warped=warped)[:, t1:t2 + bin_size]
rst_sa = col_binned(rst, precision).transpose()
bl_sa = col_binned(bl, precision).transpose()
ps, baseline_boot, ks_p, ks_stat = raster_compare(rst_sa, bl_sa, bootstrap=True)
#find the first significant difference
det_onset = next(itertools.ifilter(lambda i: ps[i] < p_bs, range(len(ps))), None)
if det_onset is not None:
final_onset = det_onset + t1
t_onset = det_onset
else:
final_onset = onset * bin_size
t_onset = bin_size
#compute the value of the baseline and the response
t_on = max(0, t_onset-bin_size//2)
bl_value = bl_sa.mean(axis=1)[t_on: t_on + bin_size].sum()/(bin_size*0.001)
onset_value = rst_sa.mean(axis=1)[t_on: t_on + bin_size].sum()/(bin_size*0.001)
#if warped, return the value in sniff value
#if warped:
#warped_onset = warp_time(response, final_onset)
#final_onset = warped_onset
#bl_value = bl_sa
#onset_value = rst_sa
return final_onset, is_supra, ps, baseline_boot, ks_p, ks_stat, bl_value, onset_value
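# Illustrative call (hypothetical Response/CellResponse object `resp`); the first two
# outputs are the refined onset (in ms, or warped samples) and whether it lies above baseline:
#   onset, supra, ps, boot, ks_p, ks_stat, bl_val, on_val = find_detailed_onset(
#       resp, bin_size=10, precision=1, warped=True)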
# count the spikes relative to baseline for both parts of the sniff cycle
def count_spikes(response):
if isinstance(response, st.CellResponse):
inh_len = response.inh_len
exh_len = response.exh_len
t_post = inh_len + exh_len
rst, bl = response.make_raster(t_pre=0, t_post = t_post, warped=True)
rst_sp = rst.mean(axis=0)
bl_sp = bl.mean(axis=0)
else:
all_sniffs = np.sort(response.baseline.sniff_data, order=['inh_len', 't_0'])
inh_len, exh_len = get_warping_parameters(all_sniffs, means=False)
t_post = inh_len + exh_len
rst_sp = response.make_raster(t_pre=0, t_post=t_post, warped=True).mean(axis=0)
bl_sp = response.baseline.make_raster(t_pre=0, t_post=t_post, warped=True).mean(axis=0)
extra = rst_sp - bl_sp
inh_spikes = extra[0: inh_len].sum()/(inh_len*0.001)
    exh_spikes = extra[inh_len: t_post].sum()/(exh_len*0.001)  # normalize the exhalation phase by its own duration
return inh_spikes, exh_spikes
# check whether a sniff fals within the statistics, or gives the acceptable boundaries
def is_good_sniff(one_sniff, sniff_stats):
if one_sniff is None:
inh_max = sniff_stats[-1]['inh_median'] + 1.5*sniff_stats[-1]['inh_sd']
exh_max = sniff_stats[-1]['exh_median'] + 1.5*sniff_stats[-1]['exh_sd']
return int(round(inh_max)), int(round(exh_max))
inh_min = sniff_stats[-1]['inh_median'] - 1.5*sniff_stats[-1]['inh_sd']
inh_max = sniff_stats[-1]['inh_median'] + 1.5*sniff_stats[-1]['inh_sd']
is_good = True
if one_sniff['inh_len'] < inh_min or one_sniff['inh_len'] > inh_max:
is_good = False
else:
exh_min = sniff_stats[-1]['exh_median'] - 1.5*sniff_stats[-1]['exh_sd']
exh_max = sniff_stats[-1]['exh_median'] + 1.5*sniff_stats[-1]['exh_sd']
if one_sniff['exh_len'] < exh_min or one_sniff['exh_len'] > exh_max:
            is_good = False  # an out-of-range exhalation also disqualifies the sniff
return is_good
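# Illustrative use (hypothetical sniff record and stats array): passing None returns the
# acceptance bounds, while passing a sniff record returns a boolean verdict:
#   inh_max, exh_max = is_good_sniff(None, sniff_stats)
#   keep = is_good_sniff(one_sniff, sniff_stats)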
def unwarp_time(response, t, inh_len=None, exh_len=None):
if inh_len is None or exh_len is None:
all_sniffs = np.sort(response.baseline.sniff_data, order=['inh_len', 't_0'])
inh_len, exh_len = get_warping_parameters(all_sniffs, means=False)
if t <= 0.5:
t_unwarped = t * inh_len
else:
t_unwarped = inh_len + (t - 0.5)*exh_len
return t_unwarped
def warp_time(response, t):
all_sniffs = np.sort(response.baseline.sniff_data, order=['inh_len', 't_0'])
inh_len, exh_len = get_warping_parameters(all_sniffs, means=False)
if t <= inh_len:
t_warped = t/inh_len
else:
t_warped = 0.5 + (t-inh_len)/exh_len
    return t_warped
| gpl-2.0 |
mmilutinovic1313/zipline-with-algorithms | tests/test_rolling_panel.py | 20 | 7005 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from collections import deque
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from zipline.utils.data import MutableIndexRollingPanel, RollingPanel
from zipline.finance.trading import with_environment
class TestRollingPanel(unittest.TestCase):
@with_environment()
def test_alignment(self, env):
items = ('a', 'b')
sids = (1, 2)
dts = env.market_minute_window(
env.open_and_closes.market_open[0], 4,
).values
rp = RollingPanel(2, items, sids, initial_dates=dts[1:-1])
frame = pd.DataFrame(
data=np.arange(4).reshape((2, 2)),
columns=sids,
index=items,
)
nan_arr = np.empty((2, 6))
nan_arr.fill(np.nan)
rp.add_frame(dts[-1], frame)
cur = rp.get_current()
data = np.array((((np.nan, np.nan),
(0, 1)),
((np.nan, np.nan),
(2, 3))),
float)
expected = pd.Panel(
data,
major_axis=dts[2:],
minor_axis=sids,
items=items,
)
expected.major_axis = expected.major_axis.tz_localize('utc')
tm.assert_panel_equal(
cur,
expected,
)
rp.extend_back(dts[:-2])
cur = rp.get_current()
data = np.array((((np.nan, np.nan),
(np.nan, np.nan),
(np.nan, np.nan),
(0, 1)),
((np.nan, np.nan),
(np.nan, np.nan),
(np.nan, np.nan),
(2, 3))),
float)
expected = pd.Panel(
data,
major_axis=dts,
minor_axis=sids,
items=items,
)
expected.major_axis = expected.major_axis.tz_localize('utc')
tm.assert_panel_equal(
cur,
expected,
)
@with_environment()
def test_get_current_multiple_call_same_tick(self, env):
"""
        In the old get_current, each call would copy the data, so changing
        the returned object had no side effects.
To keep the same api, make sure that the raw option returns a copy too.
"""
def data_id(values):
return values.__array_interface__['data']
items = ('a', 'b')
sids = (1, 2)
dts = env.market_minute_window(
env.open_and_closes.market_open[0], 4,
).values
rp = RollingPanel(2, items, sids, initial_dates=dts[1:-1])
frame = pd.DataFrame(
data=np.arange(4).reshape((2, 2)),
columns=sids,
index=items,
)
nan_arr = np.empty((2, 6))
nan_arr.fill(np.nan)
rp.add_frame(dts[-1], frame)
        # each get_current call makes a copy
cur = rp.get_current()
cur2 = rp.get_current()
assert data_id(cur.values) != data_id(cur2.values)
        # make sure raw follows the same logic
raw = rp.get_current(raw=True)
raw2 = rp.get_current(raw=True)
assert data_id(raw) != data_id(raw2)
class TestMutableIndexRollingPanel(unittest.TestCase):
def test_basics(self, window=10):
items = ['bar', 'baz', 'foo']
minor = ['A', 'B', 'C', 'D']
rp = MutableIndexRollingPanel(window, items, minor, cap_multiple=2)
dates = pd.date_range('2000-01-01', periods=30, tz='utc')
major_deque = deque(maxlen=window)
frames = {}
for i, date in enumerate(dates):
frame = pd.DataFrame(np.random.randn(3, 4), index=items,
columns=minor)
rp.add_frame(date, frame)
frames[date] = frame
major_deque.append(date)
result = rp.get_current()
expected = pd.Panel(frames, items=list(major_deque),
major_axis=items, minor_axis=minor)
tm.assert_panel_equal(result, expected.swapaxes(0, 1))
def test_adding_and_dropping_items(self, n_items=5, n_minor=10, window=10,
periods=30):
np.random.seed(123)
items = deque(range(n_items))
minor = deque(range(n_minor))
expected_items = deque(range(n_items))
expected_minor = deque(range(n_minor))
first_non_existant = max(n_items, n_minor) + 1
# We want to add new columns with random order
add_items = np.arange(first_non_existant, first_non_existant + periods)
np.random.shuffle(add_items)
rp = MutableIndexRollingPanel(window, items, minor, cap_multiple=2)
dates = pd.date_range('2000-01-01', periods=periods, tz='utc')
frames = {}
expected_frames = deque(maxlen=window)
expected_dates = deque()
for i, (date, add_item) in enumerate(zip(dates, add_items)):
frame = pd.DataFrame(np.random.randn(n_items, n_minor),
index=items, columns=minor)
if i >= window:
# Old labels and dates should start to get dropped at every
# call
del frames[expected_dates.popleft()]
expected_minor.popleft()
expected_items.popleft()
expected_frames.append(frame)
expected_dates.append(date)
rp.add_frame(date, frame)
frames[date] = frame
result = rp.get_current()
np.testing.assert_array_equal(sorted(result.minor_axis.values),
sorted(expected_minor))
np.testing.assert_array_equal(sorted(result.items.values),
sorted(expected_items))
tm.assert_frame_equal(frame.T,
result.ix[frame.index, -1, frame.columns])
expected_result = pd.Panel(frames).swapaxes(0, 1)
tm.assert_panel_equal(expected_result,
result)
# Insert new items
minor.popleft()
minor.append(add_item)
items.popleft()
items.append(add_item)
expected_minor.append(add_item)
expected_items.append(add_item)
| apache-2.0 |
martindurant/misc | matplotlibwidget.py | 1 | 2494 | # -*- coding: utf-8 -*-
"""
Controller for matplotlib widget, with or without toolbar, for use in any
Qt application, and insertable using Designer, by widget promotion.
"""
try:
from PyQt4 import QtGui
except ImportError:
from PySide import QtGui
from matplotlib.backends.backend_qt4agg import (FigureCanvasQTAgg as FigureCanvas,
NavigationToolbar2QTAgg as NavigationToolbar)
from matplotlib.figure import Figure
class MplCanvas(FigureCanvas):
def __init__(self):
self.fig = Figure()
FigureCanvas.__init__(self, self.fig)
FigureCanvas.setSizePolicy(self, QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
class MatplotlibWidget(QtGui.QWidget):
"""Widget within a Qt application that can show figures created in the
normal way by matplotlib.
"""
def __init__(self, parent = None, toolbar=True):
"""
        Running with toolbar=True gives you the typical control bar,
which will interact with the plot.
"""
QtGui.QWidget.__init__(self, parent)
self.parent = parent
self.vbl = QtGui.QVBoxLayout()
self.setLayout(self.vbl)
self.clear(toolbar)
def draw(self):
self.canvas.draw()
def clear(self, toolbar=True):
"""Clear the figure and reinvoke the canvas, so that any mpl_event
watchers are removed.
"""
try:
self.figure.clear()
self.vbl.takeAt(0)
self.vbl.takeAt(0)
del self.canvas
del self.figure
except: pass
self.canvas = MplCanvas()
self.figure = self.canvas.fig
if toolbar:
self.mpl_toolbar = NavigationToolbar(self.canvas, self.parent)
self.vbl.addWidget(self.mpl_toolbar)
self.vbl.addWidget(self.canvas)
def widget_window(parent=None):
"""Make a window with a widget and navigation bar, as if
    you had called pylab.figure(), but guaranteed to use Qt and
    to be interactive"""
display = QtGui.QWidget(parent)
display.setWindowTitle('Figure')
display.box = QtGui.QVBoxLayout(display)
display.setLayout(display.box)
fig = Figure()
display.fig=fig
canvas = FigureCanvas(fig)
canvas.setParent(display)
display.canvas=canvas
canvas.toolbar = NavigationToolbar(canvas,display)
display.box.addWidget(canvas)
display.box.addWidget(canvas.toolbar)
display.show()
return display,fig
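# Minimal usage sketch (not part of the original module): it assumes a working
# PyQt4/PySide installation and simply embeds the widget in a standalone window.
if __name__ == '__main__':
    import sys
    app = QtGui.QApplication(sys.argv)
    widget = MatplotlibWidget(toolbar=True)
    ax = widget.figure.add_subplot(111)
    ax.plot([0, 1, 2], [0, 1, 4])  # any ordinary matplotlib call works on widget.figure
    widget.draw()
    widget.show()
    sys.exit(app.exec_())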
| mit |
gfyoung/pandas | pandas/tests/resample/test_timedelta.py | 2 | 6186 | from datetime import timedelta
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Series
import pandas._testing as tm
from pandas.core.indexes.timedeltas import timedelta_range
def test_asfreq_bug():
df = DataFrame(data=[1, 3], index=[timedelta(), timedelta(minutes=3)])
result = df.resample("1T").asfreq()
expected = DataFrame(
data=[1, np.nan, np.nan, 3],
index=timedelta_range("0 day", periods=4, freq="1T"),
)
tm.assert_frame_equal(result, expected)
def test_resample_with_nat():
# GH 13223
index = pd.to_timedelta(["0s", pd.NaT, "2s"])
result = DataFrame({"value": [2, 3, 5]}, index).resample("1s").mean()
expected = DataFrame(
{"value": [2.5, np.nan, 5.0]},
index=timedelta_range("0 day", periods=3, freq="1S"),
)
tm.assert_frame_equal(result, expected)
def test_resample_as_freq_with_subperiod():
# GH 13022
index = timedelta_range("00:00:00", "00:10:00", freq="5T")
df = DataFrame(data={"value": [1, 5, 10]}, index=index)
result = df.resample("2T").asfreq()
expected_data = {"value": [1, np.nan, np.nan, np.nan, np.nan, 10]}
expected = DataFrame(
data=expected_data, index=timedelta_range("00:00:00", "00:10:00", freq="2T")
)
tm.assert_frame_equal(result, expected)
def test_resample_with_timedeltas():
expected = DataFrame({"A": np.arange(1480)})
expected = expected.groupby(expected.index // 30).sum()
expected.index = pd.timedelta_range("0 days", freq="30T", periods=50)
df = DataFrame(
{"A": np.arange(1480)}, index=pd.to_timedelta(np.arange(1480), unit="T")
)
result = df.resample("30T").sum()
tm.assert_frame_equal(result, expected)
s = df["A"]
result = s.resample("30T").sum()
tm.assert_series_equal(result, expected["A"])
def test_resample_single_period_timedelta():
s = Series(list(range(5)), index=pd.timedelta_range("1 day", freq="s", periods=5))
result = s.resample("2s").sum()
expected = Series(
[1, 5, 4], index=pd.timedelta_range("1 day", freq="2s", periods=3)
)
tm.assert_series_equal(result, expected)
def test_resample_timedelta_idempotency():
# GH 12072
index = pd.timedelta_range("0", periods=9, freq="10L")
series = Series(range(9), index=index)
result = series.resample("10L").mean()
expected = series
tm.assert_series_equal(result, expected)
def test_resample_offset_with_timedeltaindex():
# GH 10530 & 31809
rng = timedelta_range(start="0s", periods=25, freq="s")
ts = Series(np.random.randn(len(rng)), index=rng)
with_base = ts.resample("2s", offset="5s").mean()
without_base = ts.resample("2s").mean()
exp_without_base = timedelta_range(start="0s", end="25s", freq="2s")
exp_with_base = timedelta_range(start="5s", end="29s", freq="2s")
tm.assert_index_equal(without_base.index, exp_without_base)
tm.assert_index_equal(with_base.index, exp_with_base)
def test_resample_categorical_data_with_timedeltaindex():
# GH #12169
df = DataFrame({"Group_obj": "A"}, index=pd.to_timedelta(list(range(20)), unit="s"))
df["Group"] = df["Group_obj"].astype("category")
result = df.resample("10s").agg(lambda x: (x.value_counts().index[0]))
expected = DataFrame(
{"Group_obj": ["A", "A"], "Group": ["A", "A"]},
index=pd.TimedeltaIndex([0, 10], unit="s", freq="10s"),
)
expected = expected.reindex(["Group_obj", "Group"], axis=1)
expected["Group"] = expected["Group_obj"]
tm.assert_frame_equal(result, expected)
def test_resample_timedelta_values():
# GH 13119
# check that timedelta dtype is preserved when NaT values are
# introduced by the resampling
times = timedelta_range("1 day", "6 day", freq="4D")
df = DataFrame({"time": times}, index=times)
times2 = timedelta_range("1 day", "6 day", freq="2D")
exp = Series(times2, index=times2, name="time")
exp.iloc[1] = pd.NaT
res = df.resample("2D").first()["time"]
tm.assert_series_equal(res, exp)
res = df["time"].resample("2D").first()
tm.assert_series_equal(res, exp)
@pytest.mark.parametrize(
"start, end, freq, resample_freq",
[
("8H", "21h59min50s", "10S", "3H"), # GH 30353 example
("3H", "22H", "1H", "5H"),
("527D", "5006D", "3D", "10D"),
("1D", "10D", "1D", "2D"), # GH 13022 example
# tests that worked before GH 33498:
("8H", "21h59min50s", "10S", "2H"),
("0H", "21h59min50s", "10S", "3H"),
("10D", "85D", "D", "2D"),
],
)
def test_resample_timedelta_edge_case(start, end, freq, resample_freq):
# GH 33498
# check that the timedelta bins does not contains an extra bin
idx = pd.timedelta_range(start=start, end=end, freq=freq)
s = Series(np.arange(len(idx)), index=idx)
result = s.resample(resample_freq).min()
expected_index = pd.timedelta_range(freq=resample_freq, start=start, end=end)
tm.assert_index_equal(result.index, expected_index)
assert result.index.freq == expected_index.freq
assert not np.isnan(result[-1])
def test_resample_with_timedelta_yields_no_empty_groups():
# GH 10603
df = DataFrame(
np.random.normal(size=(10000, 4)),
index=pd.timedelta_range(start="0s", periods=10000, freq="3906250n"),
)
result = df.loc["1s":, :].resample("3s").apply(lambda x: len(x))
expected = DataFrame(
[[768.0] * 4] * 12 + [[528.0] * 4],
index=pd.timedelta_range(start="1s", periods=13, freq="3s"),
)
tm.assert_frame_equal(result, expected)
def test_resample_quantile_timedelta():
# GH: 29485
df = DataFrame(
{"value": pd.to_timedelta(np.arange(4), unit="s")},
index=pd.date_range("20200101", periods=4, tz="UTC"),
)
result = df.resample("2D").quantile(0.99)
expected = DataFrame(
{
"value": [
pd.Timedelta("0 days 00:00:00.990000"),
pd.Timedelta("0 days 00:00:02.990000"),
]
},
index=pd.date_range("20200101", periods=2, tz="UTC", freq="2D"),
)
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
yashchandak/GNN | Sample_Run/Idea1/Eval_linear.py | 1 | 2480 | from __future__ import print_function
from sklearn.multiclass import OneVsRestClassifier
from sklearn.linear_model import LogisticRegression
import scipy
import numpy
import Eval_Calculate_Performance as perf
from Eval_Data import Data
from Eval_Config import Config
import Eval_utils as utils
def get_dense(inp, size):
dense = numpy.zeros((len(inp),size))
for i in range(len(inp)):
dense[i][inp[i]] = 1
return dense
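# For example (illustrative), get_dense([[0, 2], [1]], 3) yields the one-hot rows
# [[1, 0, 1], [0, 1, 0]].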
def evaluate(cfg):
data = Data(cfg)
all_results = {}
for train_percent in cfg.training_percents:
all_results[train_percent] = {}
for shuf in range(cfg.num_shuffles):
data.set_training_validation(('train',shuf, int(train_percent*100)), ('valid',shuf, int(train_percent*100)))
X_train, Y_train = data.get_training_sparse()
X_test, Y_test = data.get_validation_sparse()
Y_train_dense = get_dense(Y_train, cfg.label_len)
Y_test_dense = get_dense(Y_test, cfg.label_len)
clf = OneVsRestClassifier(LogisticRegression())
clf.fit(X_train, scipy.sparse.coo_matrix(Y_train_dense))
best_th = 0
if cfg.threshold:
best_val, i = -1, 0.1
while(i<0.3):
preds = clf.predict_proba(X_train)
                    val = perf.evaluate(preds, Y_train_dense, threshold=i)[3] #3 = micro-f1, 4=macro-f1
if val > best_val:
best_th = i
best_val = val
i += 0.1
print("best th: ", best_th)
preds = clf.predict_proba(X_test)
results = perf.evaluate(preds, Y_test_dense, best_th)
all_results[train_percent][shuf] = results
for train_percent in sorted(all_results.keys()):
print ('Train percent:', train_percent)
micro, macro = [], []
#print numpy.mean(all_results[train_percent])
x = all_results[train_percent]
for v in x.values():
micro.append(v[3])
macro.append(v[4])
print (x.values())
print ("Micro: ",numpy.mean(micro), " Macro: ",numpy.mean(macro))
print ('-------------------')
utils.write_results(cfg, all_results)
if __name__ == "__main__":
con = Config()
evaluate(con)
| mit |
laosiaudi/tensorflow | tensorflow/contrib/metrics/python/kernel_tests/histogram_ops_test.py | 12 | 9744 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for histogram_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.metrics.python.ops import histogram_ops
class Strict1dCumsumTest(tf.test.TestCase):
"""Test this private function."""
def test_empty_tensor_returns_empty(self):
with self.test_session():
tensor = tf.constant([])
result = histogram_ops._strict_1d_cumsum(tensor, 0)
expected = tf.constant([])
np.testing.assert_array_equal(expected.eval(), result.eval())
def test_length_1_tensor_works(self):
with self.test_session():
tensor = tf.constant([3], dtype=tf.float32)
result = histogram_ops._strict_1d_cumsum(tensor, 1)
expected = tf.constant([3], dtype=tf.float32)
np.testing.assert_array_equal(expected.eval(), result.eval())
def test_length_3_tensor_works(self):
with self.test_session():
tensor = tf.constant([1, 2, 3], dtype=tf.float32)
result = histogram_ops._strict_1d_cumsum(tensor, 3)
expected = tf.constant([1, 3, 6], dtype=tf.float32)
np.testing.assert_array_equal(expected.eval(), result.eval())
class AUCUsingHistogramTest(tf.test.TestCase):
def setUp(self):
self.rng = np.random.RandomState(0)
def test_empty_labels_and_scores_gives_nan_auc(self):
with self.test_session():
labels = tf.constant([], shape=[0], dtype=tf.bool)
scores = tf.constant([], shape=[0], dtype=tf.float32)
score_range = [0, 1.]
auc, update_op = tf.contrib.metrics.auc_using_histogram(labels, scores,
score_range)
tf.local_variables_initializer().run()
update_op.run()
self.assertTrue(np.isnan(auc.eval()))
def test_perfect_scores_gives_auc_1(self):
self._check_auc(nbins=100,
desired_auc=1.0,
score_range=[0, 1.],
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=1)
def test_terrible_scores_gives_auc_0(self):
self._check_auc(nbins=100,
desired_auc=0.0,
score_range=[0, 1.],
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=1)
def test_many_common_conditions(self):
for nbins in [50]:
for desired_auc in [0.3, 0.5, 0.8]:
for score_range in [[-1, 1], [-10, 0]]:
for frac_true in [0.3, 0.8]:
# Tests pass with atol = 0.03. Moved up to 0.05 to avoid flakes.
self._check_auc(nbins=nbins,
desired_auc=desired_auc,
score_range=score_range,
num_records=100,
frac_true=frac_true,
atol=0.05,
num_updates=50)
def test_large_class_imbalance_still_ok(self):
# With probability frac_true ** num_records, each batch contains only True
# records. In this case, ~ 95%.
# Tests pass with atol = 0.02. Increased to 0.05 to avoid flakes.
self._check_auc(nbins=100,
desired_auc=0.8,
score_range=[-1, 1.],
num_records=10,
frac_true=0.995,
atol=0.05,
num_updates=1000)
def test_super_accuracy_with_many_bins_and_records(self):
# Test passes with atol = 0.0005. Increased atol to avoid flakes.
self._check_auc(nbins=1000,
desired_auc=0.75,
score_range=[0, 1.],
num_records=1000,
frac_true=0.5,
atol=0.005,
num_updates=100)
def _check_auc(self,
nbins=100,
desired_auc=0.75,
score_range=None,
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=10):
"""Check auc accuracy against synthetic data.
Args:
nbins: nbins arg from contrib.metrics.auc_using_histogram.
desired_auc: Number in [0, 1]. The desired auc for synthetic data.
score_range: 2-tuple, (low, high), giving the range of the resultant
scores. Defaults to [0, 1.].
num_records: Positive integer. The number of records to return.
frac_true: Number in (0, 1). Expected fraction of resultant labels that
will be True. This is just in expectation...more or less may actually
be True.
atol: Absolute tolerance for final AUC estimate.
num_updates: Update internal histograms this many times, each with a new
batch of synthetic data, before computing final AUC.
Raises:
AssertionError: If resultant AUC is not within atol of theoretical AUC
from synthetic data.
"""
    score_range = score_range or [0, 1.]  # fall back to the documented default range
with self.test_session():
labels = tf.placeholder(tf.bool, shape=[num_records])
scores = tf.placeholder(tf.float32, shape=[num_records])
auc, update_op = tf.contrib.metrics.auc_using_histogram(labels,
scores,
score_range,
nbins=nbins)
tf.local_variables_initializer().run()
# Updates, then extract auc.
for _ in range(num_updates):
labels_a, scores_a = synthetic_data(desired_auc, score_range,
num_records, self.rng, frac_true)
update_op.run(feed_dict={labels: labels_a, scores: scores_a})
labels_a, scores_a = synthetic_data(desired_auc, score_range, num_records,
self.rng, frac_true)
# Fetch current auc, and verify that fetching again doesn't change it.
auc_eval = auc.eval()
self.assertAlmostEqual(auc_eval, auc.eval(), places=5)
msg = ('nbins: %s, desired_auc: %s, score_range: %s, '
'num_records: %s, frac_true: %s, num_updates: %s') % (nbins,
desired_auc,
score_range,
num_records,
frac_true,
num_updates)
np.testing.assert_allclose(desired_auc, auc_eval, atol=atol, err_msg=msg)
def synthetic_data(desired_auc, score_range, num_records, rng, frac_true):
"""Create synthetic boolean_labels and scores with adjustable auc.
Args:
desired_auc: Number in [0, 1], the theoretical AUC of resultant data.
score_range: 2-tuple, (low, high), giving the range of the resultant scores
num_records: Positive integer. The number of records to return.
rng: Initialized np.random.RandomState random number generator
frac_true: Number in (0, 1). Expected fraction of resultant labels that
will be True. This is just in expectation...more or less may actually be
True.
Returns:
boolean_labels: np.array, dtype=bool.
scores: np.array, dtype=np.float32
"""
# We prove here why the method (below) for computing AUC works. Of course we
# also checked this against sklearn.metrics.roc_auc_curve.
#
# First do this for score_range = [0, 1], then rescale.
# WLOG assume AUC >= 0.5, otherwise we will solve for AUC >= 0.5 then swap
# the labels.
# So for AUC in [0, 1] we create False and True labels
# and corresponding scores drawn from:
# F ~ U[0, 1], T ~ U[x, 1]
# We have,
# AUC
# = P[T > F]
# = P[T > F | F < x] P[F < x] + P[T > F | F > x] P[F > x]
# = (1 * x) + (0.5 * (1 - x)).
# Inverting, we have:
# x = 2 * AUC - 1, when AUC >= 0.5.
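  # Worked example: for desired_auc = 0.75 the formula gives x = 2 * 0.75 - 1 = 0.5,
  # and indeed AUC = 1 * x + 0.5 * (1 - x) = 0.5 + 0.25 = 0.75.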
assert 0 <= desired_auc <= 1
assert 0 < frac_true < 1
if desired_auc < 0.5:
flip_labels = True
desired_auc = 1 - desired_auc
frac_true = 1 - frac_true
else:
flip_labels = False
x = 2 * desired_auc - 1
labels = rng.binomial(1, frac_true, size=num_records).astype(bool)
num_true = labels.sum()
num_false = num_records - labels.sum()
# Draw F ~ U[0, 1], and T ~ U[x, 1]
false_scores = rng.rand(num_false)
true_scores = x + rng.rand(num_true) * (1 - x)
# Reshape [0, 1] to score_range.
def reshape(scores):
return score_range[0] + scores * (score_range[1] - score_range[0])
false_scores = reshape(false_scores)
true_scores = reshape(true_scores)
# Place into one array corresponding with the labels.
scores = np.nan * np.ones(num_records, dtype=np.float32)
scores[labels] = true_scores
scores[~labels] = false_scores
if flip_labels:
labels = ~labels
return labels, scores
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
djgagne/scikit-learn | sklearn/utils/validation.py | 6 | 24553 | """Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from inspect import getargspec
FLOAT_DTYPES = (np.float64, np.float32, np.float16)
class DataConversionWarning(UserWarning):
"""A warning on implicit data conversions happening in the code"""
pass
warnings.simplefilter("always", DataConversionWarning)
class NonBLASDotWarning(UserWarning):
"""A warning on implicit dispatch to numpy.dot"""
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
"""
# Silenced by default to reduce verbosity. Turn on at runtime for
# performance profiling.
warnings.simplefilter('ignore', NonBLASDotWarning)
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
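    # (e.g. a float16 array of large finite values can overflow to an infinite sum,
    # hence the explicit np.isfinite(X).all() re-check before raising)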
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def assert_all_finite(X):
"""Throw a ValueError if X contains NaN or infinity.
Input MUST be an np.ndarray instance or a scipy.sparse matrix."""
_assert_all_finite(X.data if sp.issparse(X) else X)
def as_float_array(X, copy=True, force_all_finite=True):
"""Converts an array-like to an array of floats
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
copy : bool, optional
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
XT : {array, sparse matrix}
An array of type np.float
"""
if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
and not sp.issparse(X)):
return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
copy=copy, force_all_finite=force_all_finite,
ensure_2d=False)
elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X
else:
return X.astype(np.float32 if X.dtype == np.int32 else np.float64)
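# Illustrative behaviour for plain ndarrays: integer input is upcast to a float dtype
# (int32 becomes float32, other integer dtypes become float64), while float32/float64
# input keeps its dtype and is copied only when copy=True.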
def _is_arraylike(x):
"""Returns whether the input is array-like"""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit'):
# Don't get num_samples from an ensembles length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
def _shape_repr(shape):
"""Return a platform independent reprensentation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def check_consistent_length(*arrays):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
*arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
"""
uniques = np.unique([_num_samples(X) for X in arrays if X is not None])
if len(uniques) > 1:
raise ValueError("Found arrays with inconsistent numbers of samples: "
"%s" % str(uniques))
def indexable(*iterables):
"""Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
    non-iterable objects to arrays.
Parameters
----------
*iterables : lists, dataframes, arrays, sparse matrices
List of objects to ensure sliceability.
"""
result = []
for X in iterables:
if sp.issparse(X):
result.append(X.tocsr())
elif hasattr(X, "__getitem__") or hasattr(X, "iloc"):
result.append(X)
elif X is None:
result.append(X)
else:
result.append(np.array(X))
check_consistent_length(*result)
return result
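# A minimal usage sketch of `indexable` above (hypothetical `_demo_*` helper,
# not called anywhere): arrays and lists come back sliceable and None passes
# through untouched, so the result can be fed to cross-validation splitters.
def _demo_indexable():
    X, y, groups = indexable(np.arange(4), [0, 1, 0, 1], None)
    assert groups is None and X[2] == 2 and y[3] == 1
    return X, y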
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, copy,
force_all_finite):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
matrix input will raise an error. If the input is sparse but not in
the allowed format, it will be converted to the first listed format.
    dtype : string, type or None (default=None)
Data type of result. If None, the dtype of the input is preserved.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
"""
if accept_sparse in [None, False]:
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
if dtype is None:
dtype = spmatrix.dtype
changed_format = False
if (isinstance(accept_sparse, (list, tuple))
and spmatrix.format not in accept_sparse):
# create new with correct sparse
spmatrix = spmatrix.asformat(accept_sparse[0])
changed_format = True
if dtype != spmatrix.dtype:
# convert dtype
spmatrix = spmatrix.astype(dtype)
elif copy and not changed_format:
# force copy
spmatrix = spmatrix.copy()
if force_all_finite:
if not hasattr(spmatrix, "data"):
warnings.warn("Can't check %s sparse matrix for nan or inf."
% spmatrix.format)
else:
_assert_all_finite(spmatrix.data)
return spmatrix
def check_array(array, accept_sparse=None, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, ensure_min_samples=1, ensure_min_features=1,
warn_on_dtype=False, estimator=None):
"""Input validation on an array, list, sparse matrix or similar.
    By default, the input is converted to an at least 2D numpy array.
If the dtype of the array is object, attempt converting to float,
raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
ensure_min_samples : int (default=1)
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
"""
if isinstance(accept_sparse, str):
accept_sparse = [accept_sparse]
# store whether originally we wanted numeric dtype
dtype_numeric = dtype == "numeric"
dtype_orig = getattr(array, "dtype", None)
if not hasattr(dtype_orig, 'kind'):
# not a data type (e.g. a column named dtype in a pandas DataFrame)
dtype_orig = None
if dtype_numeric:
if dtype_orig is not None and dtype_orig.kind == "O":
# if input is object, convert to float.
dtype = np.float64
else:
dtype = None
if isinstance(dtype, (list, tuple)):
if dtype_orig is not None and dtype_orig in dtype:
# no dtype conversion required
dtype = None
else:
# dtype conversion required. Let's select the first element of the
# list of accepted types.
dtype = dtype[0]
if sp.issparse(array):
array = _ensure_sparse_format(array, accept_sparse, dtype, copy,
force_all_finite)
else:
array = np.array(array, dtype=dtype, order=order, copy=copy)
if ensure_2d:
if array.ndim == 1:
warnings.warn(
"Passing 1d arrays as data is deprecated in 0.17 and will"
"raise ValueError in 0.19. Reshape your data either using "
"X.reshape(-1, 1) if your data has a single feature or "
"X.reshape(1, -1) if it contains a single sample.",
DeprecationWarning)
array = np.atleast_2d(array)
# To ensure that array flags are maintained
array = np.array(array, dtype=dtype, order=order, copy=copy)
        # make sure we actually converted to numeric:
if dtype_numeric and array.dtype.kind == "O":
array = array.astype(np.float64)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. Expected <= 2" %
array.ndim)
if force_all_finite:
_assert_all_finite(array)
shape_repr = _shape_repr(array.shape)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError("Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required."
% (n_samples, shape_repr, ensure_min_samples))
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError("Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required."
% (n_features, shape_repr, ensure_min_features))
if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig:
msg = ("Data with input dtype %s was converted to %s"
% (dtype_orig, array.dtype))
if estimator is not None:
if not isinstance(estimator, six.string_types):
estimator = estimator.__class__.__name__
msg += " by %s" % estimator
warnings.warn(msg, DataConversionWarning)
return array
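# A minimal usage sketch of `check_array` above (hypothetical `_demo_*` helper,
# not called anywhere). A nested list of floats is converted to a 2D ndarray;
# non-finite values would raise because force_all_finite defaults to True.
def _demo_check_array():
    X = check_array([[1., 2.], [3., 4.]])
    assert X.shape == (2, 2) and X.dtype == np.float64
    return X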
def check_X_y(X, y, accept_sparse=None, dtype="numeric", order=None, copy=False,
force_all_finite=True, ensure_2d=True, allow_nd=False,
multi_output=False, ensure_min_samples=1,
ensure_min_features=1, y_numeric=False,
warn_on_dtype=False, estimator=None):
"""Input validation for standard estimators.
Checks X and y for consistent length, enforces X 2d and y 1d.
Standard input checks are only applied to y. For multi-label y,
set multi_output=True to allow 2d and sparse y.
If the dtype of X is object, attempt converting to float,
raising on failure.
Parameters
----------
X : nd-array, list or sparse matrix
Input data.
y : nd-array, list or sparse matrix
Labels.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
multi_output : boolean (default=False)
Whether to allow 2-d y (array or sparse matrix). If false, y will be
validated as a vector.
ensure_min_samples : int (default=1)
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when X has effectively 2 dimensions or
is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
this check.
y_numeric : boolean (default=False)
Whether to ensure that y has a numeric type. If dtype of y is object,
it is converted to float64. Should only be used for regression
algorithms.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
"""
X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite,
ensure_2d, allow_nd, ensure_min_samples,
ensure_min_features, warn_on_dtype, estimator)
if multi_output:
y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False,
dtype=None)
else:
y = column_or_1d(y, warn=True)
_assert_all_finite(y)
if y_numeric and y.dtype.kind == 'O':
y = y.astype(np.float64)
check_consistent_length(X, y)
return X, y
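# A minimal usage sketch of `check_X_y` above (hypothetical `_demo_*` helper,
# not called anywhere): X is forced to 2D, y is raveled to 1D, and both are
# checked for consistent length.
def _demo_check_X_y():
    X, y = check_X_y([[0., 1.], [1., 0.], [2., 2.]], [0, 1, 0])
    assert X.shape == (3, 2) and y.shape == (3,)
    return X, y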
def column_or_1d(y, warn=False):
""" Ravel column or 1d numpy array, else raises an error
Parameters
----------
y : array-like
warn : boolean, default False
To control display of warnings.
Returns
-------
y : array
"""
shape = np.shape(y)
if len(shape) == 1:
return np.ravel(y)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
return np.ravel(y)
raise ValueError("bad input shape {0}".format(shape))
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
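# A minimal usage sketch of `check_random_state` above (hypothetical `_demo_*`
# helper, not called anywhere): an int seed yields a fresh RandomState, an
# existing RandomState is passed through, and None maps to the global one.
def _demo_check_random_state():
    rng = check_random_state(0)
    assert check_random_state(rng) is rng
    assert check_random_state(None) is np.random.mtrand._rand
    return rng.rand(3)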
def has_fit_parameter(estimator, parameter):
"""Checks whether the estimator's fit method supports the given parameter.
Examples
--------
>>> from sklearn.svm import SVC
>>> has_fit_parameter(SVC(), "sample_weight")
True
"""
return parameter in getargspec(estimator.fit)[0]
def check_symmetric(array, tol=1E-10, raise_warning=True,
raise_exception=False):
"""Make sure that array is 2D, square and symmetric.
If the array is not symmetric, then a symmetrized version is returned.
Optionally, a warning or exception is raised if the matrix is not
symmetric.
Parameters
----------
array : nd-array or sparse matrix
Input object to check / convert. Must be two-dimensional and square,
otherwise a ValueError will be raised.
tol : float
Absolute tolerance for equivalence of arrays. Default = 1E-10.
raise_warning : boolean (default=True)
If True then raise a warning if conversion is required.
raise_exception : boolean (default=False)
If True then raise an exception if array is not symmetric.
Returns
-------
array_sym : ndarray or sparse matrix
Symmetrized version of the input array, i.e. the average of array
and array.transpose(). If sparse, then duplicate entries are first
summed and zeros are eliminated.
"""
if (array.ndim != 2) or (array.shape[0] != array.shape[1]):
raise ValueError("array must be 2-dimensional and square. "
"shape = {0}".format(array.shape))
if sp.issparse(array):
diff = array - array.T
# only csr, csc, and coo have `data` attribute
if diff.format not in ['csr', 'csc', 'coo']:
diff = diff.tocsr()
symmetric = np.all(abs(diff.data) < tol)
else:
symmetric = np.allclose(array, array.T, atol=tol)
if not symmetric:
if raise_exception:
raise ValueError("Array must be symmetric")
if raise_warning:
warnings.warn("Array is not symmetric, and will be converted "
"to symmetric by average with its transpose.")
if sp.issparse(array):
conversion = 'to' + array.format
array = getattr(0.5 * (array + array.T), conversion)()
else:
array = 0.5 * (array + array.T)
return array
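# A minimal usage sketch of `check_symmetric` above (hypothetical `_demo_*`
# helper, not called anywhere): a non-symmetric matrix is averaged with its
# transpose; the warning it emits is silenced here to keep the demo quiet.
def _demo_check_symmetric():
    A = np.array([[0., 2.], [1., 0.]])
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        A_sym = check_symmetric(A)
    assert np.allclose(A_sym, A_sym.T)
    return A_sym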
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
"""Perform is_fitted validation for estimator.
Checks if the estimator is fitted by verifying the presence of
"all_or_any" of the passed attributes and raises a NotFittedError with the
given message.
Parameters
----------
estimator : estimator instance.
estimator instance for which the check is performed.
attributes : attribute name(s) given as string or a list/tuple of strings
Eg. : ["coef_", "estimator_", ...], "coef_"
msg : string
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this method."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default all
Specify whether all or any of the given attributes must exist.
"""
if msg is None:
msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this method.")
if not hasattr(estimator, 'fit'):
raise TypeError("%s is not an estimator instance." % (estimator))
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
if not all_or_any([hasattr(estimator, attr) for attr in attributes]):
raise NotFittedError(msg % {'name': type(estimator).__name__})
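# A minimal usage sketch of `check_is_fitted` above. The toy estimator and the
# `_demo_*` helper name are hypothetical and nothing calls them; the check
# raises NotFittedError until the expected attribute appears after fit().
def _demo_check_is_fitted():
    class _ToyEstimator(object):
        def fit(self, X, y=None):
            self.is_fitted_ = True
            return self
    est = _ToyEstimator()
    try:
        check_is_fitted(est, 'is_fitted_')   # attribute missing before fit
    except NotFittedError:
        pass
    check_is_fitted(est.fit(None), 'is_fitted_')  # passes silently after fit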
def check_non_negative(X, whom):
"""
Check if there is any negative value in an array.
Parameters
----------
X : array-like or sparse matrix
Input data.
whom : string
Who passed X to this function.
"""
X = X.data if sp.issparse(X) else X
if (X < 0).any():
raise ValueError("Negative values in data passed to %s" % whom)
| bsd-3-clause |
jereze/scikit-learn | examples/bicluster/plot_spectral_coclustering.py | 276 | 1736 | """
==============================================
A demo of the Spectral Co-Clustering algorithm
==============================================
This example demonstrates how to generate a dataset and bicluster it
using the Spectral Co-Clustering algorithm.
The dataset is generated using the ``make_biclusters`` function, which
creates a matrix of small values and implants biclusters with large
values. The rows and columns are then shuffled and passed to the
Spectral Co-Clustering algorithm. Rearranging the shuffled matrix to
make biclusters contiguous shows how accurately the algorithm found
the biclusters.
"""
print(__doc__)
# Author: Kemal Eren <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_biclusters
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.metrics import consensus_score
data, rows, columns = make_biclusters(
shape=(300, 300), n_clusters=5, noise=5,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralCoclustering(n_clusters=5, random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.3f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.show()
| bsd-3-clause |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/sklearn/__check_build/__init__.py | 13 | 1679 | """ Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
# Raise a comprehensible error and list the contents of the
# directory to help debugging on the mailing list.
local_dir = os.path.split(__file__)[0]
msg = STANDARD_MSG
if local_dir == "sklearn/__check_build":
# Picking up the local install: this will work only if the
# install is an 'inplace build'
msg = INPLACE_MSG
dir_content = list()
for i, filename in enumerate(os.listdir(local_dir)):
if ((i + 1) % 3):
dir_content.append(filename.ljust(26))
else:
dir_content.append(filename + '\n')
raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(dir_content).strip(), msg))
try:
from ._check_build import check_build # noqa
except ImportError as e:
raise_build_error(e)
| mit |
ndingwall/scikit-learn | sklearn/neighbors/_nca.py | 10 | 20696 | # coding: utf-8
"""
Neighborhood Component Analysis
"""
# Authors: William de Vazelhes <[email protected]>
# John Chiotellis <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from warnings import warn
import numpy as np
import sys
import time
import numbers
from scipy.optimize import minimize
from ..utils.extmath import softmax
from ..metrics import pairwise_distances
from ..base import BaseEstimator, TransformerMixin
from ..preprocessing import LabelEncoder
from ..decomposition import PCA
from ..utils.multiclass import check_classification_targets
from ..utils.random import check_random_state
from ..utils.validation import check_is_fitted, check_array, check_scalar
from ..utils.validation import _deprecate_positional_args
from ..exceptions import ConvergenceWarning
class NeighborhoodComponentsAnalysis(TransformerMixin, BaseEstimator):
"""Neighborhood Components Analysis
Neighborhood Component Analysis (NCA) is a machine learning algorithm for
metric learning. It learns a linear transformation in a supervised fashion
to improve the classification accuracy of a stochastic nearest neighbors
rule in the transformed space.
Read more in the :ref:`User Guide <nca>`.
Parameters
----------
n_components : int, default=None
Preferred dimensionality of the projected space.
If None it will be set to ``n_features``.
init : {'auto', 'pca', 'lda', 'identity', 'random'} or ndarray of shape \
(n_features_a, n_features_b), default='auto'
Initialization of the linear transformation. Possible options are
'auto', 'pca', 'lda', 'identity', 'random', and a numpy array of shape
(n_features_a, n_features_b).
'auto'
Depending on ``n_components``, the most reasonable initialization
will be chosen. If ``n_components <= n_classes`` we use 'lda', as
it uses labels information. If not, but
``n_components < min(n_features, n_samples)``, we use 'pca', as
it projects data in meaningful directions (those of higher
variance). Otherwise, we just use 'identity'.
'pca'
``n_components`` principal components of the inputs passed
to :meth:`fit` will be used to initialize the transformation.
(See :class:`~sklearn.decomposition.PCA`)
'lda'
``min(n_components, n_classes)`` most discriminative
components of the inputs passed to :meth:`fit` will be used to
initialize the transformation. (If ``n_components > n_classes``,
the rest of the components will be zero.) (See
:class:`~sklearn.discriminant_analysis.LinearDiscriminantAnalysis`)
'identity'
If ``n_components`` is strictly smaller than the
dimensionality of the inputs passed to :meth:`fit`, the identity
matrix will be truncated to the first ``n_components`` rows.
'random'
The initial transformation will be a random array of shape
`(n_components, n_features)`. Each value is sampled from the
standard normal distribution.
numpy array
n_features_b must match the dimensionality of the inputs passed to
:meth:`fit` and n_features_a must be less than or equal to that.
If ``n_components`` is not None, n_features_a must match it.
warm_start : bool, default=False
If True and :meth:`fit` has been called before, the solution of the
previous call to :meth:`fit` is used as the initial linear
transformation (``n_components`` and ``init`` will be ignored).
max_iter : int, default=50
Maximum number of iterations in the optimization.
tol : float, default=1e-5
Convergence tolerance for the optimization.
callback : callable, default=None
If not None, this function is called after every iteration of the
optimizer, taking as arguments the current solution (flattened
transformation matrix) and the number of iterations. This might be
useful in case one wants to examine or store the transformation
found after each iteration.
verbose : int, default=0
If 0, no progress messages will be printed.
If 1, progress messages will be printed to stdout.
If > 1, progress messages will be printed and the ``disp``
parameter of :func:`scipy.optimize.minimize` will be set to
``verbose - 2``.
random_state : int or numpy.RandomState, default=None
A pseudo random number generator object or a seed for it if int. If
``init='random'``, ``random_state`` is used to initialize the random
transformation. If ``init='pca'``, ``random_state`` is passed as an
argument to PCA when initializing the transformation. Pass an int
for reproducible results across multiple function calls.
See :term: `Glossary <random_state>`.
Attributes
----------
components_ : ndarray of shape (n_components, n_features)
The linear transformation learned during fitting.
n_iter_ : int
Counts the number of iterations performed by the optimizer.
random_state_ : numpy.RandomState
Pseudo random number generator object used during initialization.
Examples
--------
>>> from sklearn.neighbors import NeighborhoodComponentsAnalysis
>>> from sklearn.neighbors import KNeighborsClassifier
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import train_test_split
>>> X, y = load_iris(return_X_y=True)
>>> X_train, X_test, y_train, y_test = train_test_split(X, y,
... stratify=y, test_size=0.7, random_state=42)
>>> nca = NeighborhoodComponentsAnalysis(random_state=42)
>>> nca.fit(X_train, y_train)
NeighborhoodComponentsAnalysis(...)
>>> knn = KNeighborsClassifier(n_neighbors=3)
>>> knn.fit(X_train, y_train)
KNeighborsClassifier(...)
>>> print(knn.score(X_test, y_test))
0.933333...
>>> knn.fit(nca.transform(X_train), y_train)
KNeighborsClassifier(...)
>>> print(knn.score(nca.transform(X_test), y_test))
0.961904...
References
----------
.. [1] J. Goldberger, G. Hinton, S. Roweis, R. Salakhutdinov.
"Neighbourhood Components Analysis". Advances in Neural Information
Processing Systems. 17, 513-520, 2005.
http://www.cs.nyu.edu/~roweis/papers/ncanips.pdf
.. [2] Wikipedia entry on Neighborhood Components Analysis
https://en.wikipedia.org/wiki/Neighbourhood_components_analysis
"""
@_deprecate_positional_args
def __init__(self, n_components=None, *, init='auto', warm_start=False,
max_iter=50, tol=1e-5, callback=None, verbose=0,
random_state=None):
self.n_components = n_components
self.init = init
self.warm_start = warm_start
self.max_iter = max_iter
self.tol = tol
self.callback = callback
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The training samples.
y : array-like of shape (n_samples,)
The corresponding training labels.
Returns
-------
self : object
returns a trained NeighborhoodComponentsAnalysis model.
"""
# Verify inputs X and y and NCA parameters, and transform a copy if
# needed
X, y, init = self._validate_params(X, y)
# Initialize the random generator
self.random_state_ = check_random_state(self.random_state)
# Measure the total training time
t_train = time.time()
# Compute a mask that stays fixed during optimization:
same_class_mask = y[:, np.newaxis] == y[np.newaxis, :]
# (n_samples, n_samples)
# Initialize the transformation
transformation = self._initialize(X, y, init)
# Create a dictionary of parameters to be passed to the optimizer
disp = self.verbose - 2 if self.verbose > 1 else -1
optimizer_params = {'method': 'L-BFGS-B',
'fun': self._loss_grad_lbfgs,
'args': (X, same_class_mask, -1.0),
'jac': True,
'x0': transformation,
'tol': self.tol,
'options': dict(maxiter=self.max_iter, disp=disp),
'callback': self._callback
}
# Call the optimizer
self.n_iter_ = 0
opt_result = minimize(**optimizer_params)
# Reshape the solution found by the optimizer
self.components_ = opt_result.x.reshape(-1, X.shape[1])
# Stop timer
t_train = time.time() - t_train
if self.verbose:
cls_name = self.__class__.__name__
# Warn the user if the algorithm did not converge
if not opt_result.success:
warn('[{}] NCA did not converge: {}'.format(
cls_name, opt_result.message),
ConvergenceWarning)
print('[{}] Training took {:8.2f}s.'.format(cls_name, t_train))
return self
def transform(self, X):
"""Applies the learned transformation to the given data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data samples.
Returns
-------
X_embedded: ndarray of shape (n_samples, n_components)
The data samples transformed.
Raises
------
NotFittedError
If :meth:`fit` has not been called before.
"""
check_is_fitted(self)
X = check_array(X)
return np.dot(X, self.components_.T)
def _validate_params(self, X, y):
"""Validate parameters as soon as :meth:`fit` is called.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The training samples.
y : array-like of shape (n_samples,)
The corresponding training labels.
Returns
-------
X : ndarray of shape (n_samples, n_features)
The validated training samples.
y : ndarray of shape (n_samples,)
The validated training labels, encoded to be integers in
the range(0, n_classes).
init : str or ndarray of shape (n_features_a, n_features_b)
The validated initialization of the linear transformation.
Raises
-------
TypeError
If a parameter is not an instance of the desired type.
ValueError
If a parameter's value violates its legal value range or if the
combination of two or more given parameters is incompatible.
"""
# Validate the inputs X and y, and converts y to numerical classes.
X, y = self._validate_data(X, y, ensure_min_samples=2)
check_classification_targets(y)
y = LabelEncoder().fit_transform(y)
# Check the preferred dimensionality of the projected space
if self.n_components is not None:
check_scalar(
self.n_components, 'n_components', numbers.Integral, min_val=1)
if self.n_components > X.shape[1]:
raise ValueError('The preferred dimensionality of the '
'projected space `n_components` ({}) cannot '
'be greater than the given data '
'dimensionality ({})!'
.format(self.n_components, X.shape[1]))
# If warm_start is enabled, check that the inputs are consistent
check_scalar(self.warm_start, 'warm_start', bool)
if self.warm_start and hasattr(self, 'components_'):
if self.components_.shape[1] != X.shape[1]:
raise ValueError('The new inputs dimensionality ({}) does not '
'match the input dimensionality of the '
'previously learned transformation ({}).'
.format(X.shape[1],
self.components_.shape[1]))
check_scalar(self.max_iter, 'max_iter', numbers.Integral, min_val=1)
check_scalar(self.tol, 'tol', numbers.Real, min_val=0.)
check_scalar(self.verbose, 'verbose', numbers.Integral, min_val=0)
if self.callback is not None:
if not callable(self.callback):
raise ValueError('`callback` is not callable.')
# Check how the linear transformation should be initialized
init = self.init
if isinstance(init, np.ndarray):
init = check_array(init)
# Assert that init.shape[1] = X.shape[1]
if init.shape[1] != X.shape[1]:
raise ValueError(
'The input dimensionality ({}) of the given '
'linear transformation `init` must match the '
'dimensionality of the given inputs `X` ({}).'
.format(init.shape[1], X.shape[1]))
# Assert that init.shape[0] <= init.shape[1]
if init.shape[0] > init.shape[1]:
raise ValueError(
'The output dimensionality ({}) of the given '
'linear transformation `init` cannot be '
'greater than its input dimensionality ({}).'
.format(init.shape[0], init.shape[1]))
if self.n_components is not None:
# Assert that self.n_components = init.shape[0]
if self.n_components != init.shape[0]:
raise ValueError('The preferred dimensionality of the '
'projected space `n_components` ({}) does'
' not match the output dimensionality of '
'the given linear transformation '
'`init` ({})!'
.format(self.n_components,
init.shape[0]))
elif init in ['auto', 'pca', 'lda', 'identity', 'random']:
pass
else:
raise ValueError(
"`init` must be 'auto', 'pca', 'lda', 'identity', 'random' "
"or a numpy array of shape (n_components, n_features).")
return X, y, init
def _initialize(self, X, y, init):
"""Initialize the transformation.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The training samples.
y : array-like of shape (n_samples,)
The training labels.
init : str or ndarray of shape (n_features_a, n_features_b)
The validated initialization of the linear transformation.
Returns
-------
transformation : ndarray of shape (n_components, n_features)
The initialized linear transformation.
"""
transformation = init
if self.warm_start and hasattr(self, 'components_'):
transformation = self.components_
elif isinstance(init, np.ndarray):
pass
else:
n_samples, n_features = X.shape
n_components = self.n_components or n_features
if init == 'auto':
n_classes = len(np.unique(y))
if n_components <= min(n_features, n_classes - 1):
init = 'lda'
elif n_components < min(n_features, n_samples):
init = 'pca'
else:
init = 'identity'
if init == 'identity':
transformation = np.eye(n_components, X.shape[1])
elif init == 'random':
transformation = self.random_state_.randn(n_components,
X.shape[1])
elif init in {'pca', 'lda'}:
init_time = time.time()
if init == 'pca':
pca = PCA(n_components=n_components,
random_state=self.random_state_)
if self.verbose:
print('Finding principal components... ', end='')
sys.stdout.flush()
pca.fit(X)
transformation = pca.components_
elif init == 'lda':
from ..discriminant_analysis import (
LinearDiscriminantAnalysis)
lda = LinearDiscriminantAnalysis(n_components=n_components)
if self.verbose:
print('Finding most discriminative components... ',
end='')
sys.stdout.flush()
lda.fit(X, y)
transformation = lda.scalings_.T[:n_components]
if self.verbose:
print('done in {:5.2f}s'.format(time.time() - init_time))
return transformation
def _callback(self, transformation):
"""Called after each iteration of the optimizer.
Parameters
----------
transformation : ndarray of shape (n_components * n_features,)
The solution computed by the optimizer in this iteration.
"""
if self.callback is not None:
self.callback(transformation, self.n_iter_)
self.n_iter_ += 1
def _loss_grad_lbfgs(self, transformation, X, same_class_mask, sign=1.0):
"""Compute the loss and the loss gradient w.r.t. ``transformation``.
Parameters
----------
transformation : ndarray of shape (n_components * n_features,)
The raveled linear transformation on which to compute loss and
evaluate gradient.
X : ndarray of shape (n_samples, n_features)
The training samples.
same_class_mask : ndarray of shape (n_samples, n_samples)
A mask where ``mask[i, j] == 1`` if ``X[i]`` and ``X[j]`` belong
to the same class, and ``0`` otherwise.
Returns
-------
loss : float
The loss computed for the given transformation.
gradient : ndarray of shape (n_components * n_features,)
The new (flattened) gradient of the loss.
"""
if self.n_iter_ == 0:
self.n_iter_ += 1
if self.verbose:
header_fields = ['Iteration', 'Objective Value', 'Time(s)']
header_fmt = '{:>10} {:>20} {:>10}'
header = header_fmt.format(*header_fields)
cls_name = self.__class__.__name__
print('[{}]'.format(cls_name))
print('[{}] {}\n[{}] {}'.format(cls_name, header,
cls_name, '-' * len(header)))
t_funcall = time.time()
transformation = transformation.reshape(-1, X.shape[1])
X_embedded = np.dot(X, transformation.T) # (n_samples, n_components)
# Compute softmax distances
p_ij = pairwise_distances(X_embedded, squared=True)
np.fill_diagonal(p_ij, np.inf)
p_ij = softmax(-p_ij) # (n_samples, n_samples)
# Compute loss
masked_p_ij = p_ij * same_class_mask
p = np.sum(masked_p_ij, axis=1, keepdims=True) # (n_samples, 1)
loss = np.sum(p)
# Compute gradient of loss w.r.t. `transform`
weighted_p_ij = masked_p_ij - p_ij * p
weighted_p_ij_sym = weighted_p_ij + weighted_p_ij.T
np.fill_diagonal(weighted_p_ij_sym, -weighted_p_ij.sum(axis=0))
gradient = 2 * X_embedded.T.dot(weighted_p_ij_sym).dot(X)
# time complexity of the gradient: O(n_components x n_samples x (
# n_samples + n_features))
if self.verbose:
t_funcall = time.time() - t_funcall
values_fmt = '[{}] {:>10} {:>20.6e} {:>10.2f}'
print(values_fmt.format(self.__class__.__name__, self.n_iter_,
loss, t_funcall))
sys.stdout.flush()
return sign * loss, sign * gradient.ravel()
def _more_tags(self):
return {'requires_y': True}
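# A minimal numpy-only sketch of the stochastic neighbour probabilities used in
# `_loss_grad_lbfgs` above: a softmax over negative squared distances in the
# embedded space, with self-assignments excluded. The `_demo_*` helper name and
# sample data are hypothetical; nothing calls this function, and it reuses the
# module-level imports (np, pairwise_distances, softmax).
def _demo_nca_softmax_probabilities():
    rng = np.random.RandomState(0)
    X_embedded = rng.randn(5, 2)              # stand-in for np.dot(X, L.T)
    sq_dists = pairwise_distances(X_embedded, squared=True)
    np.fill_diagonal(sq_dists, np.inf)        # a point is never its own neighbour
    p_ij = softmax(-sq_dists)                 # each row sums to one
    return p_ij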
| bsd-3-clause |
shiguol/tushare | tushare/stock/reference.py | 27 | 25190 | # -*- coding:utf-8 -*-
"""
Investment reference data interface
Created on 2015/03/21
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
from __future__ import division
from tushare.stock import cons as ct
from tushare.stock import ref_vars as rv
from tushare.util import dateu as dt
import pandas as pd
import time
import lxml.html
from lxml import etree
import re
import json
from pandas.compat import StringIO
from tushare.util import dateu as du
from tushare.util.netbase import Client
try:
from urllib.request import urlopen, Request
except ImportError:
from urllib2 import urlopen, Request
def profit_data(year=2014, top=25,
retry_count=3, pause=0.001):
"""
    Fetch dividend and distribution plan data
    Parameters
    --------
    year: year of the distribution, e.g. 2014
    top: number of records to return; defaults to the 25 most recently announced
    retry_count : int, default 3
                 number of times to retry when network problems occur
    pause : float, default 0.001
                seconds to pause between repeated requests, to avoid problems
                caused by requesting too frequently
    Returns
    -------
    DataFrame
    code: stock code
    name: stock name
    year: distribution year
    report_date: announcement date
    divi: cash dividend (per 10 shares)
    shares: bonus and converted shares (per 10 shares)
"""
if top <= 25:
df, pages = _dist_cotent(year, 0, retry_count, pause)
return df.head(top)
elif top == 'all':
ct._write_head()
df, pages = _dist_cotent(year, 0, retry_count, pause)
for idx in range(1,int(pages)):
df = df.append(_dist_cotent(year, idx, retry_count,
pause), ignore_index=True)
return df
else:
if isinstance(top, int):
ct._write_head()
allPages = top/25+1 if top%25>0 else top/25
df, pages = _dist_cotent(year, 0, retry_count, pause)
if int(allPages) < int(pages):
pages = allPages
for idx in range(1, int(pages)):
df = df.append(_dist_cotent(year, idx, retry_count,
pause), ignore_index=True)
return df.head(top)
else:
print(ct.TOP_PARAS_MSG)
def _fun_divi(x):
if ct.PY3:
reg = re.compile(r'分红(.*?)元', re.UNICODE)
res = reg.findall(x)
return 0 if len(res)<1 else float(res[0])
else:
if isinstance(x, unicode):
s1 = unicode('分红','utf-8')
s2 = unicode('元','utf-8')
reg = re.compile(r'%s(.*?)%s'%(s1, s2), re.UNICODE)
res = reg.findall(x)
return 0 if len(res)<1 else float(res[0])
else:
return 0
def _fun_into(x):
if ct.PY3:
reg1 = re.compile(r'转增(.*?)股', re.UNICODE)
reg2 = re.compile(r'送股(.*?)股', re.UNICODE)
res1 = reg1.findall(x)
res2 = reg2.findall(x)
res1 = 0 if len(res1)<1 else float(res1[0])
res2 = 0 if len(res2)<1 else float(res2[0])
return res1 + res2
else:
if isinstance(x, unicode):
s1 = unicode('转增','utf-8')
s2 = unicode('送股','utf-8')
s3 = unicode('股','utf-8')
reg1 = re.compile(r'%s(.*?)%s'%(s1, s3), re.UNICODE)
reg2 = re.compile(r'%s(.*?)%s'%(s2, s3), re.UNICODE)
res1 = reg1.findall(x)
res2 = reg2.findall(x)
res1 = 0 if len(res1)<1 else float(res1[0])
res2 = 0 if len(res2)<1 else float(res2[0])
return res1 + res2
else:
return 0
def _dist_cotent(year, pageNo, retry_count, pause):
for _ in range(retry_count):
time.sleep(pause)
try:
if pageNo > 0:
ct._write_console()
html = lxml.html.parse(rv.DP_163_URL%(ct.P_TYPE['http'], ct.DOMAINS['163'],
ct.PAGES['163dp'], year, pageNo))
res = html.xpath('//div[@class=\"fn_rp_list\"]/table')
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
df = pd.read_html(sarr, skiprows=[0])[0]
df = df.drop(df.columns[0], axis=1)
df.columns = rv.DP_163_COLS
df['divi'] = df['plan'].map(_fun_divi)
df['shares'] = df['plan'].map(_fun_into)
df = df.drop('plan', axis=1)
df['code'] = df['code'].astype(object)
df['code'] = df['code'].map(lambda x : str(x).zfill(6))
pages = []
if pageNo == 0:
page = html.xpath('//div[@class=\"mod_pages\"]/a')
if len(page)>1:
asr = page[len(page)-2]
pages = asr.xpath('text()')
except Exception as e:
print(e)
else:
if pageNo == 0:
return df, pages[0] if len(pages)>0 else 0
else:
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def forecast_data(year, quarter):
"""
    Fetch earnings forecast data
    Parameters
    --------
    year: int, year, e.g. 2014
    quarter: int, quarter; only 1, 2, 3 or 4 are accepted
       Note: the data is scraped from the website page by page, so the speed
       depends on your current network connection
    Return
    --------
    DataFrame
    code: stock code
    name: stock name
    type: type of forecast change (e.g. expected increase, expected loss)
    report_date: release date
    pre_eps: EPS for the same period last year
    range: range of the forecast change
"""
if ct._check_input(year, quarter) is True:
ct._write_head()
data = _get_forecast_data(year, quarter, 1, pd.DataFrame())
df = pd.DataFrame(data, columns=ct.FORECAST_COLS)
df['code'] = df['code'].map(lambda x: str(x).zfill(6))
return df
def _get_forecast_data(year, quarter, pageNo, dataArr):
ct._write_console()
try:
html = lxml.html.parse(ct.FORECAST_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'],
ct.PAGES['fd'], year, quarter, pageNo,
ct.PAGE_NUM[1]))
res = html.xpath("//table[@class=\"list_table\"]/tr")
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
sarr = sarr.replace('--', '0')
sarr = '<table>%s</table>'%sarr
df = pd.read_html(sarr)[0]
df = df.drop([4, 5, 8], axis=1)
df.columns = ct.FORECAST_COLS
dataArr = dataArr.append(df, ignore_index=True)
nextPage = html.xpath('//div[@class=\"pages\"]/a[last()]/@onclick')
if len(nextPage)>0:
pageNo = re.findall(r'\d+',nextPage[0])[0]
return _get_forecast_data(year, quarter, pageNo, dataArr)
else:
return dataArr
except Exception as e:
print(e)
def xsg_data(year=None, month=None,
retry_count=3, pause=0.001):
"""
    Fetch data on restricted shares coming off lock-up
    Parameters
    --------
    year: year, defaults to the current year
    month: unlock month, defaults to the current month
    retry_count : int, default 3
                 number of times to retry when network problems occur
    pause : float, default 0.001
                seconds to pause between repeated requests, to avoid problems
                caused by requesting too frequently
    Return
    ------
    DataFrame
    code: stock code
    name: stock name
    date: unlock date
    count: number of shares unlocked (10,000 shares)
    ratio: proportion of total shares outstanding
"""
year = dt.get_year() if year is None else year
month = dt.get_month() if month is None else month
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(rv.XSG_URL%(ct.P_TYPE['http'], ct.DOMAINS['em'],
ct.PAGES['emxsg'], year, month))
lines = urlopen(request, timeout = 10).read()
lines = lines.decode('utf-8') if ct.PY3 else lines
except Exception as e:
print(e)
else:
da = lines[3:len(lines)-3]
list = []
for row in da.split('","'):
list.append([data for data in row.split(',')])
df = pd.DataFrame(list)
df = df[[1, 3, 4, 5, 6]]
for col in [5, 6]:
df[col] = df[col].astype(float)
df[5] = df[5]/10000
df[6] = df[6]*100
df[5] = df[5].map(ct.FORMAT)
df[6] = df[6].map(ct.FORMAT)
df.columns = rv.XSG_COLS
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def fund_holdings(year, quarter,
retry_count=3, pause=0.001):
"""
    Fetch fund shareholding data
    Parameters
    --------
    year: year, e.g. 2014
    quarter: quarter (only 1, 2, 3 or 4 are accepted)
    retry_count : int, default 3
                 number of times to retry when network problems occur
    pause : float, default 0.001
                seconds to pause between repeated requests, to avoid problems
                caused by requesting too frequently
    Return
    ------
    DataFrame
    code: stock code
    name: stock name
    date: report date
    nums: number of funds holding the stock
    nlast: change versus the previous period (increase or decrease)
    count: shares held by funds (10,000 shares)
    clast: change versus the previous period
    amount: market value of fund holdings
    ratio: proportion of the free float
"""
start,end = rv.QUARTS_DIC[str(quarter)]
if quarter == 1:
start = start % str(year-1)
end = end%year
else:
start, end = start%year, end%year
ct._write_head()
df, pages = _holding_cotent(start, end, 0, retry_count, pause)
for idx in range(1, pages):
df = df.append(_holding_cotent(start, end, idx, retry_count, pause),
ignore_index=True)
return df
def _holding_cotent(start, end, pageNo, retry_count, pause):
for _ in range(retry_count):
time.sleep(pause)
if pageNo>0:
ct._write_console()
try:
request = Request(rv.FUND_HOLDS_URL%(ct.P_TYPE['http'], ct.DOMAINS['163'],
ct.PAGES['163fh'], ct.PAGES['163fh'],
pageNo, start, end, _random(5)))
lines = urlopen(request, timeout = 10).read()
lines = lines.decode('utf-8') if ct.PY3 else lines
lines = lines.replace('--', '0')
lines = json.loads(lines)
data = lines['list']
df = pd.DataFrame(data)
df = df.drop(['CODE', 'ESYMBOL', 'EXCHANGE', 'NAME', 'RN', 'SHANGQIGUSHU',
'SHANGQISHIZHI', 'SHANGQISHULIANG'], axis=1)
for col in ['GUSHU', 'GUSHUBIJIAO', 'SHIZHI', 'SCSTC27']:
df[col] = df[col].astype(float)
df['SCSTC27'] = df['SCSTC27']*100
df['GUSHU'] = df['GUSHU']/10000
df['GUSHUBIJIAO'] = df['GUSHUBIJIAO']/10000
df['SHIZHI'] = df['SHIZHI']/10000
df['GUSHU'] = df['GUSHU'].map(ct.FORMAT)
df['GUSHUBIJIAO'] = df['GUSHUBIJIAO'].map(ct.FORMAT)
df['SHIZHI'] = df['SHIZHI'].map(ct.FORMAT)
df['SCSTC27'] = df['SCSTC27'].map(ct.FORMAT)
df.columns = rv.FUND_HOLDS_COLS
df = df[['code', 'name', 'date', 'nums', 'nlast', 'count',
'clast', 'amount', 'ratio']]
except Exception as e:
print(e)
else:
if pageNo == 0:
return df, int(lines['pagecount'])
else:
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def new_stocks(retry_count=3, pause=0.001):
"""
    Fetch IPO (new stock listing) data
    Parameters
    --------
    retry_count : int, default 3
                 number of times to retry when network problems occur
    pause : float, default 0.001
                seconds to pause between repeated requests, to avoid problems
                caused by requesting too frequently
    Return
    ------
    DataFrame
    code: stock code
    name: stock name
    ipo_date: online offering date
    issue_date: listing date
    amount: shares issued (10,000 shares)
    markets: shares offered online (10,000 shares)
    price: issue price (CNY)
    pe: issue price-to-earnings ratio
    limit: individual subscription cap (10,000 shares)
    funds: capital raised (100 million CNY)
    ballot: online lottery winning rate (%)
"""
data = pd.DataFrame()
ct._write_head()
df = _newstocks(data, 1, retry_count,
pause)
return df
def _newstocks(data, pageNo, retry_count, pause):
for _ in range(retry_count):
time.sleep(pause)
ct._write_console()
try:
html = lxml.html.parse(rv.NEW_STOCKS_URL%(ct.P_TYPE['http'],ct.DOMAINS['vsf'],
ct.PAGES['newstock'], pageNo))
res = html.xpath('//table[@id=\"NewStockTable\"]/tr')
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
sarr = sarr.replace('<font color="red">*</font>', '')
sarr = '<table>%s</table>'%sarr
df = pd.read_html(StringIO(sarr), skiprows=[0, 1])[0]
df = df.drop([df.columns[idx] for idx in [1, 12, 13, 14]], axis=1)
df.columns = rv.NEW_STOCKS_COLS
df['code'] = df['code'].map(lambda x : str(x).zfill(6))
res = html.xpath('//table[@class=\"table2\"]/tr[1]/td[1]/a/text()')
tag = '下一页' if ct.PY3 else unicode('下一页', 'utf-8')
hasNext = True if tag in res else False
data = data.append(df, ignore_index=True)
pageNo += 1
if hasNext:
data = _newstocks(data, pageNo, retry_count, pause)
except Exception as ex:
print(ex)
else:
return data
def sh_margins(start=None, end=None, retry_count=3, pause=0.001):
"""
    Fetch the Shanghai market margin trading (financing and securities lending) summary
    Parameters
    --------
    start:string
                  start date, format YYYY-MM-DD; if empty, this day last year is used
    end:string
                  end date, format YYYY-MM-DD; if empty, the current date is used
    retry_count : int, default 3
                 number of times to retry when network problems occur
    pause : float, default 0.001
                seconds to pause between repeated requests, to avoid problems
                caused by requesting too frequently
    Return
    ------
    DataFrame
    opDate: margin trading date
    rzye: financing balance for the day (CNY)
    rzmre: financing purchases for the day (CNY)
    rqyl: securities lending balance for the day (shares)
    rqylje: value of the securities lending balance for the day (CNY)
    rqmcl: securities sold short during the day (shares)
    rzrqjyzl: total margin balance for the day (CNY)
"""
start = du.today_last_year() if start is None else start
end = du.today() if end is None else end
if du.diff_day(start, end) < 0:
return None
start, end = start.replace('-', ''), end.replace('-', '')
data = pd.DataFrame()
ct._write_head()
df = _sh_hz(data, start=start, end=end,
retry_count=retry_count,
pause=pause)
return df
def _sh_hz(data, start=None, end=None,
pageNo='', beginPage='',
endPage='',
retry_count=3, pause=0.001):
for _ in range(retry_count):
time.sleep(pause)
ct._write_console()
try:
tail = rv.MAR_SH_HZ_TAIL_URL%(pageNo,
beginPage, endPage)
if pageNo == '':
pageNo = 6
tail = ''
else:
pageNo += 5
beginPage = pageNo
endPage = pageNo + 4
url = rv.MAR_SH_HZ_URL%(ct.P_TYPE['http'], ct.DOMAINS['sseq'],
ct.PAGES['qmd'], _random(5),
start, end, tail,
_random())
ref = rv.MAR_SH_HZ_REF_URL%(ct.P_TYPE['http'], ct.DOMAINS['sse'])
clt = Client(url, ref=ref, cookie=rv.MAR_SH_COOKIESTR)
lines = clt.gvalue()
lines = lines.decode('utf-8') if ct.PY3 else lines
lines = lines[19:-1]
lines = json.loads(lines)
pagecount = int(lines['pageHelp'].get('pageCount'))
datapage = int(pagecount/5+1 if pagecount%5>0 else pagecount/5)
df = pd.DataFrame(lines['result'], columns=rv.MAR_SH_HZ_COLS)
df['opDate'] = df['opDate'].map(lambda x: '%s-%s-%s'%(x[0:4], x[4:6], x[6:8]))
data = data.append(df, ignore_index=True)
if beginPage < datapage*5:
data = _sh_hz(data, start=start, end=end, pageNo=pageNo,
beginPage=beginPage, endPage=endPage,
retry_count=retry_count, pause=pause)
except Exception as e:
print(e)
else:
return data
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def sh_margin_details(date='', symbol='',
start='', end='',
retry_count=3, pause=0.001):
"""
    Fetch the Shanghai market margin trading detail list
    Parameters
    --------
    date:string
                date of the detail data, format YYYY-MM-DD; defaults to ''
    symbol:string
                security code, 6 digits, e.g. 600848; defaults to ''
    start:string
                  start date, format YYYY-MM-DD; defaults to ''
    end:string
                  end date, format YYYY-MM-DD; defaults to ''
    retry_count : int, default 3
                 number of times to retry when network problems occur
    pause : float, default 0.001
                seconds to pause between repeated requests, to avoid problems
                caused by requesting too frequently
    Return
    ------
    DataFrame
    opDate: margin trading date
    stockCode: code of the underlying security
    securityAbbr: abbreviated name of the underlying security
    rzye: financing balance for the day (CNY)
    rzmre: financing purchases for the day (CNY)
    rzche: financing repayments for the day (CNY)
    rqyl: securities lending balance for the day (shares)
    rqmcl: securities sold short during the day (shares)
    rqchl: securities lending repayments for the day (shares)
"""
date = date if date == '' else date.replace('-', '')
start = start if start == '' else start.replace('-', '')
end = end if end == '' else end.replace('-', '')
if (start != '') & (end != ''):
date = ''
data = pd.DataFrame()
ct._write_head()
df = _sh_mx(data, date=date, start=start,
end=end, symbol=symbol,
retry_count=retry_count,
pause=pause)
return df
def _sh_mx(data, date='', start='', end='',
symbol='',
pageNo='', beginPage='',
endPage='',
retry_count=3, pause=0.001):
for _ in range(retry_count):
time.sleep(pause)
ct._write_console()
try:
tail = '&pageHelp.pageNo=%s&pageHelp.beginPage=%s&pageHelp.endPage=%s'%(pageNo,
beginPage, endPage)
if pageNo == '':
pageNo = 6
tail = ''
else:
pageNo += 5
beginPage = pageNo
endPage = pageNo + 4
ref = rv.MAR_SH_HZ_REF_URL%(ct.P_TYPE['http'], ct.DOMAINS['sse'])
clt = Client(rv.MAR_SH_MX_URL%(ct.P_TYPE['http'], ct.DOMAINS['sseq'],
ct.PAGES['qmd'], _random(5), date,
symbol, start, end, tail,
_random()), ref=ref, cookie=rv.MAR_SH_COOKIESTR)
lines = clt.gvalue()
lines = lines.decode('utf-8') if ct.PY3 else lines
lines = lines[19:-1]
lines = json.loads(lines)
pagecount = int(lines['pageHelp'].get('pageCount'))
datapage = int(pagecount/5+1 if pagecount%5>0 else pagecount/5)
if pagecount == 0:
return data
if pageNo == 6:
ct._write_tips(lines['pageHelp'].get('total'))
df = pd.DataFrame(lines['result'], columns=rv.MAR_SH_MX_COLS)
df['opDate'] = df['opDate'].map(lambda x: '%s-%s-%s'%(x[0:4], x[4:6], x[6:8]))
data = data.append(df, ignore_index=True)
if beginPage < datapage*5:
data = _sh_mx(data, start=start, end=end, pageNo=pageNo,
beginPage=beginPage, endPage=endPage,
retry_count=retry_count, pause=pause)
except Exception as e:
print(e)
else:
return data
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def sz_margins(start=None, end=None, retry_count=3, pause=0.001):
"""
    Fetch the Shenzhen market margin trading (financing and securities lending) summary
    Parameters
    --------
    start:string
                  start date, format YYYY-MM-DD; defaults to this day one week ago
    end:string
                  end date, format YYYY-MM-DD; defaults to today
    retry_count : int, default 3
                 number of times to retry when network problems occur
    pause : float, default 0.001
                seconds to pause between repeated requests, to avoid problems
                caused by requesting too frequently
    Return
    ------
    DataFrame
    opDate: margin trading date (index)
    rzmre: financing purchases (CNY)
    rzye: financing balance (CNY)
    rqmcl: securities sold short (shares)
    rqyl: securities lending balance (shares)
    rqye: value of the securities lending balance (CNY)
    rzrqye: total margin balance (CNY)
"""
data = pd.DataFrame()
if start is None and end is None:
end = du.today()
start = du.day_last_week()
if start is None or end is None:
ct._write_msg(rv.MAR_SZ_HZ_MSG2)
return None
try:
date_range = pd.date_range(start=start, end=end, freq='B')
if len(date_range)>261:
ct._write_msg(rv.MAR_SZ_HZ_MSG)
else:
ct._write_head()
for date in date_range:
data = data.append(_sz_hz(str(date.date()), retry_count, pause) )
except:
ct._write_msg(ct.DATA_INPUT_ERROR_MSG)
else:
return data
def _sz_hz(date='', retry_count=3, pause=0.001):
for _ in range(retry_count):
time.sleep(pause)
ct._write_console()
try:
request = Request(rv.MAR_SZ_HZ_URL%(ct.P_TYPE['http'], ct.DOMAINS['szse'],
ct.PAGES['szsefc'], date))
lines = urlopen(request, timeout = 10).read()
if len(lines) <= 200:
return pd.DataFrame()
df = pd.read_html(lines, skiprows=[0])[0]
df.columns = rv.MAR_SZ_HZ_COLS
df['opDate'] = date
except Exception as e:
print(e)
else:
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def sz_margin_details(date='', retry_count=3, pause=0.001):
"""
    Fetch the Shenzhen market margin trading detail list
    Parameters
    --------
    date:string
                date of the detail data, format YYYY-MM-DD; defaults to ''
    retry_count : int, default 3
                 number of times to retry when network problems occur
    pause : float, default 0.001
                seconds to pause between repeated requests, to avoid problems
                caused by requesting too frequently
    Return
    ------
    DataFrame
    opDate: margin trading date
    stockCode: code of the underlying security
    securityAbbr: abbreviated name of the underlying security
    rzmre: financing purchases (CNY)
    rzye: financing balance (CNY)
    rqmcl: securities sold short (shares)
    rqyl: securities lending balance (shares)
    rqye: value of the securities lending balance (CNY)
    rzrqye: total margin balance (CNY)
"""
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(rv.MAR_SZ_MX_URL%(ct.P_TYPE['http'], ct.DOMAINS['szse'],
ct.PAGES['szsefc'], date))
lines = urlopen(request, timeout = 10).read()
if len(lines) <= 200:
return pd.DataFrame()
df = pd.read_html(lines, skiprows=[0])[0]
df.columns = rv.MAR_SZ_MX_COLS
df['stockCode'] = df['stockCode'].map(lambda x:str(x).zfill(6))
df['opDate'] = date
except Exception as e:
print(e)
else:
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def _random(n=13):
from random import randint
start = 10**(n-1)
end = (10**n)-1
return str(randint(start, end))
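# A hedged usage sketch of the public functions defined above. Every call hits
# a remote web service, so this hypothetical `_demo_*` helper is never invoked
# automatically; run it manually with a working network connection, and treat
# the argument values as examples only.
def _demo_usage():
    dividends = profit_data(year=2014, top=25)        # latest 25 dividend plans
    forecasts = forecast_data(2014, 1)                # earnings forecasts, 2014 Q1
    unlocks = xsg_data(year=2015, month=3)            # restricted-share unlocks
    margins = sh_margins(start='2015-01-01', end='2015-04-19')
    return dividends, forecasts, unlocks, margins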
| bsd-3-clause |
HolgerPeters/scikit-learn | examples/ensemble/plot_forest_importances.py | 168 | 1793 | """
=========================================
Feature importances with forests of trees
=========================================
This example shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-trees variability.
As expected, the plot suggests that 3 features are informative, while the
remaining are not.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
n_classes=2,
random_state=0,
shuffle=False)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(X.shape[1]):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(X.shape[1]), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(X.shape[1]), indices)
plt.xlim([-1, X.shape[1]])
plt.show()
| bsd-3-clause |
xavierwu/scikit-learn | examples/gaussian_process/plot_gp_regression.py | 253 | 4054 | #!/usr/bin/python
# -*- coding: utf-8 -*-
r"""
=========================================================
Gaussian Processes regression: basic introductory example
=========================================================
A simple one-dimensional regression exercise computed in two different ways:
1. A noise-free case with a cubic correlation model
2. A noisy case with a squared Euclidean correlation model
In both cases, the model parameters are estimated using the maximum
likelihood principle.
The figures illustrate the interpolating property of the Gaussian Process
model as well as its probabilistic nature in the form of a pointwise 95%
confidence interval.
Note that the parameter ``nugget`` is applied as a Tikhonov regularization
of the assumed covariance between the training points. In the special case
of the squared Euclidean correlation model, nugget is mathematically equivalent
to a normalized variance: That is
.. math::
\mathrm{nugget}_i = \left[\frac{\sigma_i}{y_i}\right]^2
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Jake Vanderplas <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
# Observations
y = f(X).ravel()
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='cubic', theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.plot(X, y, 'r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
#----------------------------------------------------------------------
# now the noisy case
X = np.linspace(0.1, 9.9, 20)
X = np.atleast_2d(X).T
# Observations and noise
y = f(X).ravel()
dy = 0.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='squared_exponential', theta0=1e-1,
thetaL=1e-3, thetaU=1,
nugget=(dy / y) ** 2,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
pl.show()
| bsd-3-clause |
lthurlow/Network-Grapher | proj/external/networkx-1.7/examples/graph/atlas.py | 20 | 2637 | #!/usr/bin/env python
"""
Atlas of all graphs of 6 nodes or less.
"""
__author__ = """Aric Hagberg ([email protected])"""
# Copyright (C) 2004 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
#from networkx import *
#from networkx.generators.atlas import *
from networkx.algorithms.isomorphism.isomorph import graph_could_be_isomorphic as isomorphic
import random
def atlas6():
    """ Return the atlas of all connected graphs of 6 nodes or less.
        Attempt to check for isomorphisms and remove.
    """
    Atlas=nx.graph_atlas_g()[0:208] # 208
    # remove isolated nodes, only connected graphs are left
    U=nx.Graph() # graph for union of all graphs in atlas
    for G in Atlas:
        zerodegree=[n for n in G if G.degree(n)==0]
        for n in zerodegree:
            G.remove_node(n)
        U=nx.disjoint_union(U,G)
    # list of graphs of all connected components
    C=nx.connected_component_subgraphs(U)
    UU=nx.Graph()
    # do quick isomorphic-like check, not a true isomorphism checker
    nlist=[] # list of nonisomorphic graphs
    for G in C:
        # check against all nonisomorphic graphs so far
        if not iso(G,nlist):
            nlist.append(G)
            UU=nx.disjoint_union(UU,G) # union the nonisomorphic graphs
    return UU

def iso(G1, glist):
    """Quick and dirty nonisomorphism checker used to check isomorphisms."""
    for G2 in glist:
        if isomorphic(G1,G2):
            return True
    return False
if __name__ == '__main__':
    import networkx as nx
    G=atlas6()
    print("graph has %d nodes with %d edges"\
          %(nx.number_of_nodes(G),nx.number_of_edges(G)))
    print(nx.number_connected_components(G),"connected components")
    try:
        from networkx import graphviz_layout
    except ImportError:
        raise ImportError("This example needs Graphviz and either PyGraphviz or Pydot")
    import matplotlib.pyplot as plt
    plt.figure(1,figsize=(8,8))
    # layout graphs with positions using graphviz neato
    pos=nx.graphviz_layout(G,prog="neato")
    # color nodes the same in each connected subgraph
    C=nx.connected_component_subgraphs(G)
    for g in C:
        c=[random.random()]*nx.number_of_nodes(g) # random color...
        nx.draw(g,
                pos,
                node_size=40,
                node_color=c,
                vmin=0.0,
                vmax=1.0,
                with_labels=False
                )
    plt.savefig("atlas.png",dpi=75)
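# --------------------------------------------------------------------------
# Editor's note: this example targets NetworkX 1.x. On NetworkX >= 2.4 several
# of the helpers used above were removed or relocated; an untested sketch of
# the usual replacements (an assumption about the 2.x API, not part of the
# original example) would be:
#
#     components = (G.subgraph(c).copy() for c in nx.connected_components(G))
#     pos = nx.nx_agraph.graphviz_layout(G, prog="neato")   # requires PyGraphviz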
| mit |
ClimbsRocks/auto_ml | tests/advanced_tests/regressors.py | 1 | 4686 | import datetime
import os
import random
import sys
sys.path = [os.path.abspath(os.path.dirname(__file__))] + sys.path
from auto_ml import Predictor
from auto_ml.utils_models import load_ml_model
import dill
from nose.tools import assert_equal, assert_not_equal, with_setup
import numpy as np
from sklearn.model_selection import train_test_split
import utils_testing as utils
def optimize_final_model_regression(model_name=None):
    np.random.seed(0)
    df_boston_train, df_boston_test = utils.get_boston_regression_dataset()
    # We just want to make sure these run, not necessarily make sure that they're super accurate (which takes more time, and is dataset dependent)
    df_boston_train = df_boston_train.sample(frac=0.5)
    column_descriptions = {
        'MEDV': 'output'
        , 'CHAS': 'categorical'
    }
    ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)
    ml_predictor.train(df_boston_train, optimize_final_model=True, model_names=model_name)
    test_score = ml_predictor.score(df_boston_test, df_boston_test.MEDV)
    print('test_score')
    print(test_score)
    # the random seed gets a score of -3.21 on python 3.5
    # There's a ton of noise here, due to small sample sizes
    lower_bound = -3.4
    if model_name == 'DeepLearningRegressor':
        lower_bound = -24
    if model_name == 'LGBMRegressor':
        lower_bound = -16
    if model_name == 'GradientBoostingRegressor':
        lower_bound = -5.1
    if model_name == 'CatBoostRegressor':
        lower_bound = -4.5
    if model_name == 'XGBRegressor':
        lower_bound = -4.8
    assert lower_bound < test_score < -2.75
def getting_single_predictions_regression(model_name=None):
    np.random.seed(0)
    df_boston_train, df_boston_test = utils.get_boston_regression_dataset()
    column_descriptions = {
        'MEDV': 'output'
        , 'CHAS': 'categorical'
    }
    ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)
    ml_predictor.train(df_boston_train, model_names=model_name)
    file_name = ml_predictor.save(str(random.random()))
    saved_ml_pipeline = load_ml_model(file_name)
    os.remove(file_name)
    try:
        keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5'
        os.remove(keras_file_name)
    except:
        pass
    df_boston_test_dictionaries = df_boston_test.to_dict('records')
    # 1. make sure the accuracy is the same
    predictions = []
    for row in df_boston_test_dictionaries:
        predictions.append(saved_ml_pipeline.predict(row))
    print('predictions')
    print(predictions)
    print('predictions[0]')
    print(predictions[0])
    print('type(predictions)')
    print(type(predictions))
    first_score = utils.calculate_rmse(df_boston_test.MEDV, predictions)
    print('first_score')
    print(first_score)
    # Make sure our score is good, but not unreasonably good
    lower_bound = -2.9
    if model_name == 'DeepLearningRegressor':
        lower_bound = -7.8
    if model_name == 'LGBMRegressor':
        lower_bound = -4.95
    if model_name == 'XGBRegressor':
        lower_bound = -3.4
    if model_name == 'CatBoostRegressor':
        lower_bound = -3.7
    assert lower_bound < first_score < -2.7
    # 2. make sure the speed is reasonable (do it a few extra times)
    data_length = len(df_boston_test_dictionaries)
    start_time = datetime.datetime.now()
    for idx in range(1000):
        row_num = idx % data_length
        saved_ml_pipeline.predict(df_boston_test_dictionaries[row_num])
    end_time = datetime.datetime.now()
    duration = end_time - start_time
    print('duration.total_seconds()')
    print(duration.total_seconds())
    # It's very difficult to set a benchmark for speed that will work across all machines.
    # On my 2013 bottom of the line 15" MacBook Pro, this runs in about 0.8 seconds for 1000 predictions
    # That's about 1 millisecond per prediction
    # Assuming we might be running on a test box that's pretty weak, multiply by 3
    # Also make sure we're not running unreasonably quickly
    assert 0.1 < duration.total_seconds() / 1.0 < 60
    # 3. make sure we're not modifying the dictionaries (the score is the same after running a few experiments as it is the first time)
    predictions = []
    for row in df_boston_test_dictionaries:
        predictions.append(saved_ml_pipeline.predict(row))
    second_score = utils.calculate_rmse(df_boston_test.MEDV, predictions)
    print('second_score')
    print(second_score)
    # Make sure our score is good, but not unreasonably good
    assert lower_bound < second_score < -2.7
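# --------------------------------------------------------------------------
# Editor's sketch (not part of the original test module): these helpers take
# an optional model name and are normally driven by a test runner. A minimal
# standalone driver is shown below; the choice of 'GradientBoostingRegressor'
# is an assumption for illustration only.
if __name__ == '__main__':
    optimize_final_model_regression(model_name='GradientBoostingRegressor')
    getting_single_predictions_regression(model_name='GradientBoostingRegressor')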
| mit |