repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
strongh/GPy | GPy/plotting/matplot_dep/maps.py | 4 | 5703 |
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
try:
import pylab as pb
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
#from matplotlib import cm
try:
__IPYTHON__
pb.ion()
except NameError:
pass
except:
pass
import re
def plot(shape_records,facecolor='w',edgecolor='k',linewidths=.5, ax=None,xlims=None,ylims=None):
"""
Plot the geometry of a shapefile
:param shape_records: geometry and attributes list
:type shape_records: ShapeRecord object (output of a shapeRecords() method)
:param facecolor: color to be used to fill in polygons
:param edgecolor: color to be used for lines
:param ax: axes to plot on.
:type ax: axes handle
"""
#Axes handle
if ax is None:
fig = pb.figure()
ax = fig.add_subplot(111)
#Iterate over shape_records
for srec in shape_records:
points = np.vstack(srec.shape.points)
sparts = srec.shape.parts
par = list(sparts) + [points.shape[0]]
polygs = []
for pj in xrange(len(sparts)):
polygs.append(Polygon(points[par[pj]:par[pj+1]]))
ax.add_collection(PatchCollection(polygs,facecolor=facecolor,edgecolor=edgecolor, linewidths=linewidths))
#Plot limits
_box = np.vstack([srec.shape.bbox for srec in shape_records])
minx,miny = np.min(_box[:,:2],0)
maxx,maxy = np.max(_box[:,2:],0)
if xlims is not None:
minx,maxx = xlims
if ylims is not None:
miny,maxy = ylims
ax.set_xlim(minx,maxx)
ax.set_ylim(miny,maxy)
def string_match(sf,regex,field=2):
"""
    Return the geometry and attributes of the shapefile records whose fields match a given regular expression
    :param sf: shapefile
    :type sf: shapefile object
    :param regex: regular expression to match
    :type regex: string
    :param field: field number to be matched against the regex
    :type field: integer
"""
index = []
shape_records = []
for rec in enumerate(sf.shapeRecords()):
m = re.search(regex,rec[1].record[field])
if m is not None:
index.append(rec[0])
shape_records.append(rec[1])
return index,shape_records
def bbox_match(sf,bbox,inside_only=True):
"""
    Return the geometry and attributes of the shapefile records that lie within (or intersect) a bounding box
    :param sf: shapefile
    :type sf: shapefile object
    :param bbox: bounding box
    :type bbox: list of floats [x_min,y_min,x_max,y_max]
    :param inside_only: True to return only the records that lie entirely within the bbox; False to return any record that intersects the bbox
    :type inside_only: Boolean
"""
A,B,C,D = bbox
index = []
shape_records = []
for rec in enumerate(sf.shapeRecords()):
a,b,c,d = rec[1].shape.bbox
if inside_only:
if A <= a and B <= b and C >= c and D >= d:
index.append(rec[0])
shape_records.append(rec[1])
else:
cond1 = A <= a and B <= b and C >= a and D >= b
cond2 = A <= c and B <= d and C >= c and D >= d
cond3 = A <= a and D >= d and C >= a and B <= d
cond4 = A <= c and D >= b and C >= c and B <= b
cond5 = a <= C and b <= B and d >= D
cond6 = c <= A and b <= B and d >= D
cond7 = d <= B and a <= A and c >= C
cond8 = b <= D and a <= A and c >= C
if cond1 or cond2 or cond3 or cond4 or cond5 or cond6 or cond7 or cond8:
index.append(rec[0])
shape_records.append(rec[1])
return index,shape_records
def plot_bbox(sf,bbox,inside_only=True):
"""
    Plot the geometry of the shapefile records that lie within (or intersect) a bbox
    :param sf: shapefile
    :type sf: shapefile object
    :param bbox: bounding box
    :type bbox: list of floats [x_min,y_min,x_max,y_max]
    :param inside_only: True to plot only the records that lie entirely within the bbox; False to plot any record that intersects the bbox
    :type inside_only: Boolean
"""
index,shape_records = bbox_match(sf,bbox,inside_only)
A,B,C,D = bbox
plot(shape_records,xlims=[bbox[0],bbox[2]],ylims=[bbox[1],bbox[3]])
def plot_string_match(sf,regex,field,**kwargs):
"""
    Plot the geometry of the shapefile records whose fields match a given regular expression
    :param sf: shapefile
    :type sf: shapefile object
    :param regex: regular expression to match
    :type regex: string
    :param field: field number to be matched against the regex
    :type field: integer
"""
index,shape_records = string_match(sf,regex,field)
plot(shape_records,**kwargs)
def new_shape_string(sf,name,regex,field=2,type=None):
import shapefile
if type is None:
type = shapefile.POINT
newshp = shapefile.Writer(shapeType = sf.shapeType)
newshp.autoBalance = 1
index,shape_records = string_match(sf,regex,field)
_fi = [sf.fields[j] for j in index]
for f in _fi:
newshp.field(name=f[0],fieldType=f[1],size=f[2],decimal=f[3])
_shre = shape_records
for sr in _shre:
_points = []
_parts = []
for point in sr.shape.points:
_points.append(point)
_parts.append(_points)
newshp.line(parts=_parts)
newshp.records.append(sr.record)
print len(sr.record)
newshp.save(name)
print index
def apply_bbox(sf,ax):
"""
Use bbox as xlim and ylim in ax
"""
limits = sf.bbox
xlim = limits[0],limits[2]
ylim = limits[1],limits[3]
ax.set_xlim(xlim)
ax.set_ylim(ylim)
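# Minimal usage sketch (illustrative only; assumes the pyshp package and a
# hypothetical file "world.shp" -- neither is part of the original module):
#
# import shapefile
# sf = shapefile.Reader("world.shp")
# plot_string_match(sf, "United", 2, facecolor='0.9', edgecolor='k')  # regex match on field 2
# plot_bbox(sf, [-20., 35., 40., 70.], inside_only=True)              # records inside a lon/lat box
# pb.show()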
| bsd-3-clause |
victorbergelin/scikit-learn | examples/svm/plot_separating_hyperplane.py | 294 | 1273 |
"""
=========================================
SVM: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a Support Vector Machine classifier with
linear kernel.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
# plot the line, the points, and the nearest vectors to the plane
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=80, facecolors='none')
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
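# Illustrative extra check (not in the original example): the dashed lines above
# are the margin hyperplanes w.x + b = +/-1, so their perpendicular separation
# equals 2 / ||w||.
margin_width = 2.0 / np.linalg.norm(clf.coef_[0])
print("geometric margin width: %.3f" % margin_width)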
| bsd-3-clause |
zooniverse/aggregation | experimental/penguins/clusterAnalysis/anomalyDetection.py | 2 | 4732 |
#!/usr/bin/env python
__author__ = 'greghines'
import numpy as np
import matplotlib.pyplot as plt
import pymongo
import cPickle as pickle
import os
import math
import sys
import urllib
import matplotlib.cbook as cbook
if os.path.exists("/home/ggdhines"):
sys.path.append("/home/ggdhines/PycharmProjects/reduction/experimental/clusteringAlg")
else:
sys.path.append("/home/greg/github/reduction/experimental/clusteringAlg")
from clusterCompare import cluster_compare
if os.path.exists("/home/ggdhines"):
base_directory = "/home/ggdhines"
else:
base_directory = "/home/greg"
penguins = pickle.load(open(base_directory+"/Databases/penguins_vote__.pickle","rb"))
#does this cluster have a corresponding cluster in the gold standard data?
#ie. does this cluster represent an actual penguin?
# #user penguins for first image - with 5 images
# print len(penguins[5][0])
# #user data
# print penguins[5][0][0]
# #gold standard data
# #print penguins[5][0][1]
#
# #users who annotated the first "penguin" in the first image
# print penguins[5][0][0][0][1]
# #and their corresponding points
# print penguins[5][0][0][0][0]
#have as a list not a tuple since we need the index
client = pymongo.MongoClient()
db = client['penguin_2014-10-22']
subject_collection = db["penguin_subjects"]
location_count = {}
for subject in subject_collection.find({"classification_count":20}):
zooniverse_id = subject["zooniverse_id"]
path = subject["metadata"]["path"]
slash_index = path.find("_")
location = path[:slash_index]
if subject["metadata"]["counters"]["animals_present"] > 10:
if not(location in location_count):
location_count[location] = 1
print location
print subject["location"]
else:
location_count[location] += 1
for location in sorted(location_count.keys()):
print location + " -- " + str(location_count[location])
assert False
#print gold_standard
#RESET
max_users = 20
image_index = 0
for image_index in range(len(penguins[20])):
#first - create a list of ALL users - so we can figure out who has annotated a "penguin" or hasn't
user_set = []
cluster_dict = {}
#image = penguins[max_users][image_index]
penguin_clusters = penguins[max_users][image_index][1]
zooniverse_id = penguins[max_users][image_index][0]
lowest_cluster = float("inf")
highest_cluster = -float('inf')
for penguin_index in range(len(penguin_clusters)):
users = penguin_clusters[penguin_index][1]
cluster = penguin_clusters[penguin_index][0]
center_x = np.mean(zip(*cluster)[0])
center_y = np.mean(zip(*cluster)[1])
lowest_cluster = min(lowest_cluster,center_y)
highest_cluster = max(highest_cluster,center_y)
cluster_dict[(center_x,center_y)] = users
    mid_point = (lowest_cluster+highest_cluster)/2.
subject = subject_collection.find_one({"zooniverse_id":zooniverse_id})
url = subject["location"]["standard"]
object_id= str(subject["_id"])
image_path = base_directory+"/Databases/penguins/images/"+object_id+".JPG"
if not(os.path.isfile(image_path)):
urllib.urlretrieve(url, image_path)
image_file = cbook.get_sample_data(base_directory + "/Databases/penguins/images/"+object_id+".JPG")
image = plt.imread(image_file)
fig, ax = plt.subplots()
im = ax.imshow(image)
error_found = False
#start with the bottom half
cluster_list = cluster_dict.keys()
relations = []
for i in range(len(cluster_list)-1):
c_1 = cluster_list[i]
for j in range(i+1,len(cluster_list)):
c_2 = cluster_list[j]
users_1 = cluster_dict[c_1]
users_2 = cluster_dict[c_2]
dist = math.sqrt((c_1[0]-c_2[0])**2+(c_1[1]-c_2[1])**2)
overlap = len([u for u in users_1 if (u in users_2)])
relations.append((dist,overlap,(i,j)))
relations.sort(key = lambda x:x[0])
user_relations = zip(*relations)[1]
cluster_tuples = zip(*relations)[2]
try:
closest_single_connection = user_relations.index(1)
if closest_single_connection > 0:
print "no error"
continue
print relations[0:10]
#we have an error
for ii in range(min(len(user_relations),1)):
if user_relations[ii] == 1:
print ii
i,j = cluster_tuples[ii]
c_1 = cluster_list[i]
c_2 = cluster_list[j]
#X,Y = zip(*cluster_list)
#plt.plot(X,Y,'o')
X,Y = zip(*(c_1,c_2))
plt.plot(X,Y,'-',color="blue")
plt.show()
except ValueError:
print "**"
| apache-2.0 |
adobe-research/video-lecture-summaries | Scripts/linebreak.py | 1 | 20955 |
'''
Created on Nov 14, 2014
@author: hijungshin
'''
from visualobjects import VisualObject
from kalman import KalmanFilter
import math
import util
import sys
import matplotlib.pyplot as plt
import numpy as np
import cv2
from lecture import Lecture
from writehtml import WriteHtml
import os
import processframe as pf
import overlap
class LineBreaker:
def __init__(self, lec, list_of_objs, objdir="temp", debug=False):
self.lec = lec
self.list_of_objs = list_of_objs
self.line_objs = []
self.numobjs = len(list_of_objs)
n = len(list_of_objs)
self.linecost = [[0 for x in range(n)] for x in range(n)]
self.badness = [[0 for x in range(n)] for x in range(n)]
self.tpenalty = [0 for x in range(n)]
self.xpenalty = [0 for x in range(n)]
self.ypenalty = [0 for x in range(n)]
self.totalcost = [float("inf") for x in range(n)]
self.bestid = [-1 for x in range(n)]
self.cuts = [0 for x in range(n)]
self.start_obj_idx = []
self.end_obj_idx = []
self.debug = debug
self.objdir = objdir
self.linedir = self.objdir + "/lines"
def initial_obj_cost(self):
return -1e7
def dynamic_lines_version1(self, optsec):
n = self.numobjs#n = len(self.list_of_objs)
video = self.lec.video
list_of_objs = self.list_of_objs
# compute cost of lines
#linecost = [[0 for x in range(n)] for x in range(n)]
for i in range(0, n):
"""linecost[i][i]: single object line """
curobj = list_of_objs[i]
if (curobj.start_fid == 0 and curobj.end_fid == 0):
self.linecost[i][i] = self.initial_obj_cost()
else:
self.badness[i][i] = line_badness(list_of_objs[i:i+1], optsec, video.fps)
penalty = self.cut_penalty(i, i+1)
self.linecost[i][i] = self.badness[i][i] + penalty
for j in range(i+1, n):
self.badness[i][j] = line_badness(list_of_objs[i:j+1], optsec, video.fps)
penalty = self.cut_penalty(j, j+1)
self.linecost[i][j] = self.badness[i][j] + penalty
""" compute minimum cost line break """
for i in range(0, n):
self.totalcost[i] = float("inf")
""" c[i] = min (c[j-1] + lc[j,i]) for all j<=i"""
for j in range(0, i+1):
if j == 0:
cost = self.linecost[j][i]
else:
cost = self.totalcost[j-1] + self.linecost[j][i]
if (cost < self.totalcost[i]):
self.totalcost[i] = min(self.totalcost[i], cost)
                    self.cuts[i] = j # objects j..i form the last line of the best break ending at i
self.line_objs = []
self.line_objs = self.getcutlines(n-1)
self.line_objs.reverse()
self.start_obj_idx.reverse()
self.end_obj_idx.reverse()
return self.line_objs, self.start_obj_idx, self.end_obj_idx
def dynamic_lines(self):
self.compute_costs_v3()
self.compute_cuts_v3()
return self.get_opt_lines()
def greedy_lines(self):
self.compute_greedy_cuts()
lines = self.cutlines_nonlinear(self.numobjs)
lineobjs = []
for line in lines:
lineobj = VisualObject.group(line, self.linedir)
lineobjs.append(lineobj)
return lineobjs, [], []
def get_opt_lines(self):
n = self.numobjs
self.line_objs = []
self.line_objs = self.getcutlines(n-1)
self.line_objs.reverse()
self.start_obj_idx.reverse()
self.end_obj_idx.reverse()
VisualObject.write_to_file(self.linedir + "/obj_info.txt", self.line_objs)
return self.line_objs, self.start_obj_idx, self.end_obj_idx
def compute_cuts(self):
n = self.numobjs
""" compute minimum cost line break """
for i in range(0, n):
self.totalcost[i] = float("inf")
""" c[i] = min (c[j-1] + lc[j,i]) for all j<=i"""
for j in range(0, i+1):
if j == 0:
cost = self.linecost[j][i]
else:
cost = self.totalcost[j-1] + self.linecost[j][i]
if (cost < self.totalcost[i]):
self.totalcost[i] = cost
                    self.cuts[i] = j # objects j..i form the last line of the best break ending at i
# print 'self.totalcost[',i,']=', self.totalcost[i]
print self.totalcost
def compute_greedy_cuts(self):
n = self.numobjs
self.totalcost[0] = linecost_v3(self.list_of_objs[0:1])
self.cuts[0] = 0
self.bestid[0] = 0
for i in range(1, n):
newobj = self.list_of_objs[i]
mincost = float("inf")
j = i -1
cj = self.totalcost[j]
minaddcost = float("inf")
prevlines = []
prevlines = self.cutlines_nonlinear(i-1)
bestline = 0
for idx in range(0, len(prevlines)):
line = prevlines[idx]
add = addcost_v3(line, newobj)
# if (len(prevlines) == 5):
# print 'i', i, 'prev line idx', idx, 'of', len(prevlines)
# print 'addcost', add
# tempobj = VisualObject.group(line, "temp")
# util.showimages([tempobj.img, newobj.img])
if (add < minaddcost):
bestline = idx
minaddcost = min(add, minaddcost)
newlinecost = linecost_v3(self.list_of_objs[i:i+1])
# print 'newlinecost', newlinecost
# util.showimages([newobj.img])
if (newlinecost < minaddcost):
minaddcost = newlinecost
bestline = len(prevlines)
jcost = cj + minaddcost
if (jcost < mincost):
mincost = jcost
self.totalcost[i] = mincost
self.cuts[i] = j
self.bestid[i] = bestline
print 'totalcost', self.totalcost
print 'cuts', self.cuts
print 'bestid', self.bestid
def compute_costs_v1(self, optsec):
n = self.numobjs#n = len(self.list_of_objs)
video = self.lec.video
list_of_objs = self.list_of_objs
# compute cost of lines
#linecost = [[0 for x in range(n)] for x in range(n)]
for i in range(0, n):
"""linecost[i][i]: single object line """
curobj = list_of_objs[i]
if (curobj.start_fid == 0 and curobj.end_fid == 0):
self.linecost[i][i] = self.initial_obj_cost()
else:
self.badness[i][i] = line_badness(list_of_objs[i:i+1], optsec, video.fps)
penalty = self.cut_penalty(i, i+1)
self.linecost[i][i] = self.badness[i][i] + penalty
for j in range(i+1, n):
self.badness[i][j] = line_badness(list_of_objs[i:j+1], optsec, video.fps)
penalty = self.cut_penalty(j, j+1)
self.linecost[i][j] = self.badness[i][j] + penalty
def compute_costs_v2(self):
n = self.numobjs
list_of_objs = self.list_of_objs
for i in range(0, n):
for j in range(i, n):
self.linecost[i][j] = -1.0* VisualObject.compactness(list_of_objs[i:j+1])
# temp = VisualObject.group(list_of_objs[i:j+1], "temp")
# print 'self.linecost', self.linecost[i][j]
# util.showimages([temp.img])
# print self.linecost[i]
# util.showimages(self.list_of_objs[0].img)
def compute_costs_v3(self):
n = self.numobjs
list_of_objs = self.list_of_objs
for i in range(0, n):
self.linecost[i][i] = 0
for j in range(i+1, n):
self.linecost[i][j] = self.linecost[i][j-1] + addcost_v3(list_of_objs[i:j], list_of_objs[j])
# if (i == 5):
# temp = VisualObject.group(list_of_objs[i:j+1], "temp")
# print 'self.linecost', self.linecost[i][j]
# util.showimages([temp.img])
# print self.linecost[i]
# util.showimages(self.list_of_objs[0].img)
def compute_costs(self):
n = self.numobjs
video = self.lec.video
list_of_objs = self.list_of_objs
# compute cost of lines
#linecost = [[0 for x in range(n)] for x in range(n)]
for i in range(0, n):
"""linecost[i][i]: single object line """
curobj = list_of_objs[i]
if (curobj.start_fid == 0 and curobj.end_fid == 0):
self.badness[i][i] = -100
self.linecost[i][i] = self.badness[i][i]
for j in range(i+1, n):
nextobj = list_of_objs[j:j+1][0]
self.badness[i][j] = self.linecost[i][j-1] + self.addcost_v2(list_of_objs[i:j], nextobj)
self.linecost[i][j] = self.badness[i][j]
def addcost(self, list_of_objs, newobj):
if newobj is None:
return 0
penalty = 0
cx = (newobj.tlx + newobj.brx)/2.0
cy = (newobj.tly + newobj.bry)/2.0
z1minx, z1miny, z1maxx, z1maxy = VisualObject.bbox(list_of_objs)
z1width = z1maxx - z1minx + 1
xpad = 30
ypad = min(max(30, 2500.0/z1width), 50)
z1minx -= xpad
z1miny -= ypad
z1maxx += xpad
z1maxy += ypad
if z1miny <= cy and cy <= z1maxy:
additem = -100
if newobj.brx >= z1minx and newobj.tlx <= z1maxx: # in bbox
penalty = 0
elif newobj.brx < z1minx: # inline left:
penalty = z1minx - newobj.brx + xpad
elif newobj.tlx > z1maxx: #inline right:
penalty = newobj.tlx - z1maxx + xpad
else:
print 'linebreak.addcost ERROR: inline, not in bbox, neither left nor right'
if penalty > 100:
penalty += 500
additem = 0
else:
xpenalty = abs(z1maxx - newobj.tlx)
if cy > z1maxy:
ypenalty = cy-z1maxy + ypad
elif cy < z1miny:
ypenalty = z1miny - cy + ypad
else:
ypenalty = 0
print 'linebreak.addcost ERROR: should never get here'
penalty = xpenalty + ypenalty
additem = 2000
# if list_of_objs[0].start_fid == 2760:
# print 'penalty', penalty, 'additem', additem, '=', penalty+additem
# templine = VisualObject.group(list_of_objs,"temp")
# util.showimages([templine.img, newobj.img], "line and newobj")
return additem + penalty
def addcost_v2(self, list_of_objs, newobj):
if newobj is None:
return 0
cx = (newobj.tlx + newobj.brx)/2.0
cy = (newobj.tly + newobj.bry)/2.0
z1minx, z1miny, z1maxx, z1maxy = VisualObject.bbox(list_of_objs)
xpenalty = 0
ypenalty = 0
if z1miny <= cy and cy <= z1maxy: # definitely in-line
additem = -100
if newobj.brx >= z1minx and newobj.tlx <= z1maxx: # in bbox
penalty = 0
elif newobj.brx < z1minx: # inline left:
penalty = z1minx - newobj.brx
elif newobj.tlx > z1maxx: #inline right:
penalty = newobj.tlx - z1maxx
else:
print 'linebreak.addcost ERROR: inline, not in bbox, neither left nor right'
else:
xpenalty = abs(z1maxx - newobj.tlx)
if cy > z1maxy:
ypenalty = cy-z1maxy
elif cy < z1miny:
ypenalty = z1miny - cy
else:
ypenalty = 0
print 'linebreak.addcost ERROR: should never get here'
penalty = xpenalty + ypenalty
if (newobj.tlx > z1maxx and xpenalty > 100):
additem = 2000
if (newobj.brx < z1minx and xpenalty > 100):
additem = 2000
if (xpenalty > 100 and ypenalty > 20):
additem = 2000
else:
additem = 0
# additem = -100
# if xpenalty > 100 and ypenalty > 100:
#
# if list_of_objs[0].start_fid == 10460:
# print 'xpenalty', xpenalty, 'ypenalty', ypenalty, 'additem', additem, '=', penalty+additem
# templine = VisualObject.group(list_of_objs,"temp")
# util.showimages([templine.img, newobj.img], "line and newobj")
return additem + penalty
def cutlines_nonlinear(self, n):
segments_unique = np.unique(self.bestid[0:n+1])
n_segments = len(segments_unique)
lines = [[] for i in range(0, n_segments)]
for i in range(0, len(self.bestid[0:n+1])):
# print 'i', i, 'of', (self.numobjs)
idx = self.bestid[i]
obj = self.list_of_objs[i]
lines[idx].append(obj)
return lines
def getcutlines(self, n, line_dir=None):
if line_dir is None:
linedir = self.linedir
else:
linedir = line_dir
if not os.path.exists(linedir):
os.makedirs(linedir)
line = VisualObject.group(self.list_of_objs[self.cuts[n]:n+1], linedir)
self.start_obj_idx.append(self.cuts[n])
self.end_obj_idx.append(n)
self.line_objs.append(line)
if self.cuts[n] == 0:
return self.line_objs
else:
return self.getcutlines(self.cuts[n]-1)
def cut_penalty(self, i, j):
if (i < len(self.list_of_objs)):
obj_i = self.list_of_objs[i]
else:
obj_i = None
if (j < len(self.list_of_objs)):
obj_j = self.list_of_objs[j]
else:
obj_j = None
if obj_i is None or obj_j is None:
return 0
pt = 0
xgap = VisualObject.xgap_distance(obj_i, obj_j)
if (xgap < 0):
px = 2.0 * xgap
else:
px = - 1.0 * xgap
ygap = VisualObject.ygap_distance(obj_i, obj_j)
py = -1.0* VisualObject.ygap_distance(obj_i, obj_j)
penalty = pt + 0.5*px + py
self.xpenalty[i] = px
self.ypenalty[i] = py
self.tpenalty[i] = pt
return penalty
def write_to_html(self, html, objdir, list_of_objs=None):
if (list_of_objs is None):
list_of_objs = self.line_objs
stc_idx = 0
nfig = 1
obj_idx = 0
for obj_idx in range(0, len(list_of_objs)):
obj = list_of_objs[obj_idx]
t = self.lec.video.fid2ms(obj.end_fid)
paragraph = []
# while(self.lec.list_of_stcs[stc_idx][-1].endt < t):
# # write sentence
# paragraph = paragraph + self.lec.list_of_stcs[stc_idx]
# stc_idx += 1
# if (stc_idx >= len(self.lec.list_of_stcs)):
# break
html.paragraph_list_of_words(paragraph)
html.figure(list_of_objs[obj_idx].imgpath, "Merged Figure %i" % nfig)
if (self.debug):
# html.figure(self.line_objs[obj_idx].imgpath, "Original Figure %i" % nfig)
start_id = self.start_obj_idx[obj_idx]
end_id = self.end_obj_idx[obj_idx]
i = str(start_id)
j = str(end_id)
# html.image(objdir + "/" + self.list_of_objs[start_id].imgpath, classstring="debug")
# html.image(objdir + "/" + self.list_of_objs[end_id].imgpath, classstring="debug")
html.writestring("<p class=\"debug\">")
html.writestring("objects " + i + " - " + j + "<br>")
html.writestring("line badness["+i+"]["+j+"] = " + str(self.badness[start_id][end_id]) + "<br>")
html.writestring("t penalty = " + str(self.tpenalty[end_id]) +"<br>" )
html.writestring("x penalty = " + str(self.xpenalty[end_id]) +"<br>" )
html.writestring("y penalty = " + str(self.ypenalty[end_id]) +"<br>" )
html.writestring("</p>")
nfig += 1
def is_jump(lineobjs, obj):
return False
def yctr_distance(list_of_objs, i, j):
"""distance between y coordinates of the center of object i and object j"""
obj_i = list_of_objs[i]
obj_j = list_of_objs[j]
i_ctr = (obj_i.tly + obj_i.bry)/2.0
j_ctr = (obj_j.tly + obj_j.bry)/2.0
return abs(i_ctr - j_ctr)
def time(lineobjs, obj, fps):
start_fid = lineobjs[0].start_fid
end_fid = obj.end_fid
nframes = end_fid - start_fid
sec = nframes/fps
return sec
def num_words():
return -1
def visual_content():
return -1
def is_cut(lineobjs, obj, fps, min_gap, maxt, max_words, max_visual):
    if time(lineobjs, obj, fps) > maxt:
return True
if num_words() > max_words:
return True
if visual_content() > max_visual:
return True
if is_jump(lineobjs, obj):
return True
return False
def inline_y(ymin, ymax, curobj):
minvar = (ymax - ymin)/5.0
maxvar = (ymax - ymin)/5.0
if (curobj.bry >= ymin - minvar):
if (curobj.tly <= ymax + maxvar):
return True
return False
return False
def getcutlines(cuts, list_of_objs, n, lineobjs):
line = list_of_objs[cuts[n]:n+1]
lineobjs.append(line)
if cuts[n] == 0:
return lineobjs
else:
return getcutlines(cuts, list_of_objs, cuts[n]-1, lineobjs)
def linecost_v3(list_of_objs):
linecost = 0
for i in range(0, len(list_of_objs)):
linecost += addcost_v3(list_of_objs[0:i], list_of_objs[i])
return linecost
def addcost_v3(list_of_objs, newobj):
if (len(list_of_objs) == 0):
return 50
if newobj is None:
return 0
cx = (newobj.tlx + newobj.brx) / 2.0
cy = (newobj.tly + newobj.bry) / 2.0
minx, miny, maxx, maxy = VisualObject.bbox(list_of_objs)
if miny <= cy and cy <= maxy:
if minx <= newobj.brx and newobj.tlx <= maxx:
xcost = 0
ycost = 0
print 'in box'
return -100
elif newobj.brx < minx: # inline-left
xcost = minx - newobj.brx
ycost = abs((maxy + miny) / 2.0 - cy)
print 'inline left', 'xcost', xcost, 'ycost', ycost
elif newobj.tlx > maxx: # inline-right
xcost = newobj.tlx - maxx
ycost = abs((maxy + miny) / 2.0 - cy)
print 'inline right', 'xcost', xcost, 'ycost', ycost
else:
print 'linebreak.addcost Error: inline, not in box, neither left nor right'
return xcost + ycost*0.25 - 50
else: # above or below
if newobj.tly > maxy: # above
ycost = newobj.tly - maxy
elif newobj.bry < miny: # below
ycost = miny - newobj.bry
else:
ycost = 0 # negative overlap
ycost = -(min(newobj.bry, maxy) - max(newobj.tly, miny))
xcost = min(abs(maxx - newobj.brx), abs(maxx-newobj.tlx))
print 'above or below', 'xcost', xcost, 'ycost', ycost
return xcost + ycost
def line_badness(list_of_objs, optsec, fps):
fgpixcount = 0
for obj in list_of_objs:
fgmask = pf.fgmask(obj.img)
fgpixcount += np.count_nonzero(fgmask)
# print 'fgpixcount = ', np.count_nonzero(fgmask)
# util.showimages([obj.img, fgmask], "object and mask")
if fgpixcount > 2000:
return 0
else:
return (2000 - fgpixcount)
if __name__ == "__main__":
videopath = sys.argv[1]
scriptpath = sys.argv[2]
objdir = sys.argv[3]
panoramapath = sys.argv[4]
panorama = cv2.imread(panoramapath)
lec = Lecture(videopath, None)
print lec.video.fps
img_objs = VisualObject.objs_from_file(lec.video, objdir)
breaker = LineBreaker(lec, img_objs, objdir, debug=False)
line_objs, start_obj_idx, end_obj_idx = breaker.greedy_lines()
html = WriteHtml(objdir + "/dynamic_linebreak_test_v5.html", title="Test line break v5", stylesheet="../Mainpage/summaries.css")
html.opendiv(idstring="summary-container")
breaker.write_to_html(html, objdir, list_of_objs=line_objs)
html.closediv()
html.closehtml()
| bsd-2-clause |
abhishekkrthakur/scikit-learn | examples/cluster/plot_cluster_comparison.py | 12 | 4718 |
"""
=========================================================
Comparing different clustering algorithms on toy datasets
=========================================================
This example aims at showing characteristics of different
clustering algorithms on datasets that are "interesting"
but still in 2D. The last dataset is an example of a 'null'
situation for clustering: the data is homogeneous, and
there is no good clustering.
While these examples give some intuition about the algorithms,
this intuition might not apply to very high dimensional data.
The results could be improved by tweaking the parameters for
each clustering strategy, for instance setting the number of
clusters for the methods that need this parameter
specified. Note that affinity propagation has a tendency to
create many clusters. Thus in this example its two parameters
(damping and per-point preference) were set to mitigate this
behavior.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster, datasets
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
np.random.seed(0)
# Generate datasets. We choose the size big enough to see the scalability
# of the algorithms, but not too big to avoid too long running times
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,
noise=.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = np.random.rand(n_samples, 2), None
colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
colors = np.hstack([colors] * 20)
clustering_names = [
'MiniBatchKMeans', 'AffinityPropagation', 'MeanShift',
'SpectralClustering', 'Ward', 'AgglomerativeClustering',
'DBSCAN', 'Birch'
]
plt.figure(figsize=(len(clustering_names) * 2 + 3, 9.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
hspace=.01)
plot_num = 1
datasets = [noisy_circles, noisy_moons, blobs, no_structure]
for i_dataset, dataset in enumerate(datasets):
X, y = dataset
# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)
# estimate bandwidth for mean shift
bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(X, n_neighbors=10)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
# create clustering estimators
ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
two_means = cluster.MiniBatchKMeans(n_clusters=2)
ward = cluster.AgglomerativeClustering(n_clusters=2, linkage='ward',
connectivity=connectivity)
spectral = cluster.SpectralClustering(n_clusters=2,
eigen_solver='arpack',
affinity="nearest_neighbors")
dbscan = cluster.DBSCAN(eps=.2)
affinity_propagation = cluster.AffinityPropagation(damping=.9,
preference=-200)
average_linkage = cluster.AgglomerativeClustering(linkage="average",
affinity="cityblock", n_clusters=2,
connectivity=connectivity)
birch = cluster.Birch(n_clusters=2)
clustering_algorithms = [
two_means, affinity_propagation, ms, spectral, ward, average_linkage,
dbscan, birch
]
for name, algorithm in zip(clustering_names, clustering_algorithms):
# predict cluster memberships
t0 = time.time()
algorithm.fit(X)
t1 = time.time()
if hasattr(algorithm, 'labels_'):
y_pred = algorithm.labels_.astype(np.int)
else:
y_pred = algorithm.predict(X)
# plot
plt.subplot(4, len(clustering_algorithms), plot_num)
if i_dataset == 0:
plt.title(name, size=18)
plt.scatter(X[:, 0], X[:, 1], color=colors[y_pred].tolist(), s=10)
if hasattr(algorithm, 'cluster_centers_'):
centers = algorithm.cluster_centers_
center_colors = colors[:len(centers)]
plt.scatter(centers[:, 0], centers[:, 1], s=100, c=center_colors)
plt.xlim(-2, 2)
plt.ylim(-2, 2)
plt.xticks(())
plt.yticks(())
plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
transform=plt.gca().transAxes, size=15,
horizontalalignment='right')
plot_num += 1
plt.show()
| bsd-3-clause |
mohanprasath/Course-Work | data_analysis/uh_data_analysis_with_python/hy-data-analysis-with-python-spring-2020/part05-e04_cyclists_per_day/src/cyclists_per_day.py | 1 | 1863 |
#!/usr/bin/env python3
import os
import pandas as pd
import matplotlib.pyplot as plt
def cyclists(f):
df = pd.read_csv(f, sep = ";")
df = df.dropna(axis = 0, how = "all")
return df.dropna(axis = 1, how = "all")
def split_date(df):
df = df.Päivämäärä.str.split(expand = True)
colnames = ["Weekday", "Day", "Month", "Year", "Hour"]
df.columns = colnames
old_week = ["ma", "ti", "ke", "to", "pe", "la", "su"]
week = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
for i in range(len(week)):
df.Weekday = df.Weekday.str.replace(old_week[i], week[i])
months = ["tammi", "helmi", "maalis", "huhti", "touko", "kesä", "heinä", "elo", "syys", "loka", "marras", "joulu"]
i = 1
for i in range(0, len(months)):
df.Month = df.Month.replace(months[i], i+1)
df.Month = pd.to_numeric(df.Month.map(int), downcast = "integer")
df.Hour = df.Hour.str.extract(r"([0-9]*)", expand = False)
df.Hour = df.Hour.map(int)
df.Day = df.Day.astype("int")
df.Year = df.Year.map(int)
df.Weekday = df.Weekday.astype("object")
df = df.astype({"Weekday": object, "Day": int, "Month": int, "Year": int, "Hour": int})
return df
def split_date_continues(f):
df = cyclists(f)
df2 = split_date(df)
df = df.drop("Päivämäärä", axis = 1)
return pd.concat([df2,df], axis = 1)
def cyclists_per_day():
f = os.path.dirname(os.path.realpath(__file__)) + "/Helsingin_pyorailijamaarat.csv"
df = split_date_continues(f)
df = df.drop(["Hour","Weekday"], axis = 1)
groups = df.groupby(["Year", "Month", "Day"])
return groups.sum()
def main():
df = cyclists_per_day()
daily_counts = df.sum(axis = 1)
aug_2017 = daily_counts[1308:1339]
print(aug_2017)
plt.plot(range(1,32), aug_2017.values)
plt.show()
if __name__ == "__main__":
main()
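# Illustrative alternative (assumes the CSV actually covers August 2017): the
# positional slice daily_counts[1308:1339] above can also be written as a
# label-based selection on the (Year, Month, Day) MultiIndex:
#
# aug_2017 = cyclists_per_day().sum(axis=1).loc[(2017, 8)]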
| gpl-3.0 |
jimsrc/seatos | mixed/src/mcflag2/bonitos.py | 3 | 4226 |
import os
from pylab import *
from numpy import *
import matplotlib.patches as patches
import matplotlib.transforms as transforms
import console_colors as ccl
#------------------------------
Nsh = 63#47 #100 # check by eye
nbins = 50 # (check by eye) bins per unit of normalized time
MCwant = '2' # '2', '2.2H'
WangFlag = 'NaN'
CorrShift = True
#dTdays = 0.1 # check by eye
if CorrShift==True:
prexShift = 'wShiftCorr'
else:
prexShift = 'woShiftCorr'
#------------------------------
# varname, range-for-plot, label, N-mcs, N-sheaths
VARstf = []
VARstf += [['B', [5., 19.], 'B [nT]', 63, 57]]
VARstf += [['V', [380., 600.], 'Vsw [km/s]', 59, 57]]
VARstf += [['rmsBoB', [0.015, 0.14], 'rms($\\vec B) / B$ [1]', 63, 57]]
VARstf += [['beta', [0.1, 10.], '$\\beta$ [1]', 52, 50]]
VARstf += [['Pcc', [3., 19.], 'proton density [#/cc]', 52, 50]]
VARstf += [['Temp', [1e4, 3e5], 'Temp [K]', 53, 50]]
VARstf += [['AlphaRatio', [0.02, 0.09], 'alpha ratio [1]', 45, 19]]
nvars = len(VARstf)
dir_figs = '../plots/%s/MCflag%s/bonitos' % (prexShift, MCwant)
try:
os.system('mkdir -p %s' % dir_figs)
except:
    print ccl.On+ " ---> Already exists: %s" % dir_figs + ccl.W
print ccl.On+" generating figures in: %s"%dir_figs + ccl.W
fgap=0.2 # tolerated gap fraction chosen for plotting
#------------------------------
for i in range(nvars):
varname = VARstf[i][0]
ylims = VARstf[i][1]
ylabel = VARstf[i][2]
Nmc = VARstf[i][3]
Nsh = VARstf[i][4]
fname_sh = '../../../sheaths/ascii/MCflag%s/%s/MCflag%s_2before.4after_Wang%s_fgap%1.1f_%s.txt' % (MCwant, prexShift, MCwant, WangFlag, fgap, varname)
fname_mc = '../../../mcs/ascii/MCflag%s/%s/MCflag%s_2before.4after_Wang%s_fgap%1.1f_%s.txt' % (MCwant, prexShift, MCwant, WangFlag, fgap, varname)
varsh = loadtxt(fname_sh, unpack=True)
varmc = loadtxt(fname_mc, unpack=True)
cond_sh = varsh[0]<1.0
cond_mc = varmc[0]>0.0
#------ sheath
t_sh = varsh[0][cond_sh]
var_med_sh = varsh[1][cond_sh]
var_avr_sh = varsh[2][cond_sh]
var_std_sh = varsh[3][cond_sh]
var_n_sh = varsh[4][cond_sh]
#------ mc
t_mc = varmc[0][cond_mc]*3. + 1.0
var_med_mc = varmc[1][cond_mc]
var_avr_mc = varmc[2][cond_mc]
var_std_mc = varmc[3][cond_mc]
var_n_mc = varmc[4][cond_mc]
#---------------------------------------------------
fig = figure(1, figsize=(6, 3))
ax = fig.add_subplot(111)
ax.plot(t_sh, var_avr_sh, '-o', alpha=.7, c='black', markeredgecolor='none', label='average', markersize=5)
ax.plot(t_mc, var_avr_mc, '-o', alpha=.7, c='black', markeredgecolor='none', markersize=5)
    # error bands in the sheath
inf = var_avr_sh-var_std_sh/sqrt(var_n_sh)
sup = var_avr_sh+var_std_sh/sqrt(var_n_sh)
ax.fill_between(t_sh, inf, sup, facecolor='gray', alpha=0.5)
    # error bands in the MC
inf = var_avr_mc - var_std_mc/sqrt(var_n_mc)
sup = var_avr_mc + var_std_mc/sqrt(var_n_mc)
ax.fill_between(t_mc, inf, sup, facecolor='gray', alpha=0.5)
    # shade the sheath window
trans = transforms.blended_transform_factory(
ax.transData, ax.transAxes)
rect1 = patches.Rectangle((0., 0.), width=1.0, height=1,
transform=trans, color='orange',
alpha=0.3)
ax.add_patch(rect1)
    # shade the MC window
rect1 = patches.Rectangle((1., 0.), width=3.0, height=1,
transform=trans, color='blue',
alpha=0.2)
ax.add_patch(rect1)
ax.plot(t_sh, var_med_sh, '-*', markersize=5 ,alpha=.8, c='red', markeredgecolor='none', label='median')
ax.plot(t_mc, var_med_mc, '-*', markersize=5 ,alpha=.8, c='red', markeredgecolor='none')
ax.grid()
ax.set_ylim(ylims);
ax.set_xlim(-2., 7.)
ax.legend(loc='upper right')
if ((varname=='beta') | (varname=='AlphaRatio')) | (varname=='rmsBoB'):
ax.set_xlabel('mixed time scale [1]')
else:
ax.set_xlabel('.')
ax.set_ylabel(ylabel)
TITLE = '# of MCs: %d \n# of sheaths: %d' % (Nmc, Nsh)
ax.set_title(TITLE)
if varname=='beta':
ax.set_yscale('log')
#show()
fname_fig = '%s/MCflag%s_2before.4after_Wang%s_fgap%1.1f_%s' % (dir_figs, MCwant, WangFlag, fgap, varname)
savefig('%s.png'%fname_fig, dpi=200, format='png', bbox_inches='tight')
savefig('%s.pdf'%fname_fig, dpi=200, format='pdf', bbox_inches='tight')
    #savefig('%s.eps'%fname_fig, dpi=200, format='eps', bbox_inches='tight') # comes out ugly :(
close()
| mit |
Saxafras/Spacetime | spacetime/Local_Measures/Transducers.py | 1 | 8221 |
from __future__ import division
import numpy as np
from matplotlib import pyplot as plt
from itertools import product
def transducer_18():
'''
Zero-wildcard transducer. Domain in ECA rule 18.
Returns
-------
Tuple: (transducer, start_state, synch_time)
transducer: mapping of current machine state and string input to next machine state and output
start_state: starting state of the transducer
synch_time: number of transient steps needed for transducer to synchronize with input data
'''
transducer = { ('a', 0):('a', 0) , ('a', 1):('b', 0) , ('b', 1):('b', 1) , ('b', 0):('c', 0) , ('c', 0):('b', 0) , ('c',1):('b',0)}
start_state = 'a'
synch_time = 0
return (transducer, start_state, synch_time)
def transducer_54():
'''
Domain transducer for ECA rule 54. 8 state, period 2 domain.
Returns
-------
Tuple: (transducer, start_state, synch_time)
transducer: mapping of current machine state and string input to next machine state and output
start_state: starting state of the transducer
synch_time: number of transient steps needed for transducer to synchronize with input data.
'''
transducer = { ('ta', 0):('tb', 0) , ('ta', 1):('tc', 0) , ('tb', 0):('td', 0) , ('tb', 1):('te', 0) , ('tc', 0):('tf', 0) , ('tc',1):('tg',0) , ('td', 0):('d',0) , ('td', 1):('a',0) , ('te', 0):('b',0) , ('te',1):('g',0) , ('tf', 0):('c', 0) , ('tf', 1):('f',0) , ('tg', 0):('h', 0) , ('tg', 1):('e', 0) , ('a', 0):('b', 0) , ('a', 1):('g', 1) , ('b', 0):('c', 0) , ('b', 1):('f', 2) , ('c', 0):('d', 0) , ('c', 1):('a', 3) , ('d', 0):('d', 4) , ('d', 1):('a', 0) , ('e', 0):('c', 5) , ('e', 1):('f', 0) , ('f', 0):('b', 6) , ('f', 1):('g', 0) , ('g' , 0):('e', 7) , ('g', 1):('h', 0) , ('h', 0):('e', 0) , ('h', 1):('h', 8)}
start_state = 'ta'
synch_time = 3
return (transducer, start_state, synch_time)
class transducer(object):
def __init__(self, d, start, synch):
'''Instantiates transducer.
Parameters
----------
d : Python dict
Tuple to Tuple mapping. Maps (current machine state, input symbol) to
(next machine state, output symbol).
start: str
Start state for the transducer.
synch: int
Number of synchronization steps needed to reach recurrent component of transducer.
Instance Variables
------------------
self._transducer:
Python dict mapping current machine state and input symbol to next machine state and output symbol
self._start_state:
Start state for the machine.
self._synch:
Number of synchronization steps.
'''
self._transducer = d
self._start = start
self._synch = synch
def scan(self, data, direction = 'right'):
'''
Takes data string as transducer input and returns corresponding transducer output
Parameters
----------
data: array_like
Input string to be scanned by transducer
direction: str
Direction 'right' or 'left' which transducer will scan the input data. Default to 'right'.
Returns
-------
output: array
Array of raw transducer output.
'''
length = np.size(data)
output = np.zeros(length, dtype = int)
#initialize machine state to start state value
machine = self._start
        #run loop in the appropriate direction to scan the data with the transducer, using the transducer's
        #dictionary / mapping, updating machine state and output at each step
for c in xrange(length + self._synch):
if direction == 'right':
machine, output[c % length] = self._transducer[(machine, data[c % length])]
elif direction == 'left':
i = length - (c+1)
machine, output[i % length] = self._transducer[(machine, data[i % length])]
return output
def mask(self, data, alphabet_size, direction = 'right'):
'''
        Takes data string as transducer input and returns the corresponding transducer output overlaid
        on top of the original data, i.e. domains retain their structure.
Parameters
----------
data: array_like
Input string to be scanned by transducer.
direction: str
Direction 'right' or 'left' which transducer will scan the input data. Default to 'right'.
Returns
-------
output: array
            Array of transducer output overlaid on top of the original data.
'''
length = np.size(data)
mask = np.zeros(length, dtype = int)
hold = np.copy(data)
#initialize machine state
machine = self._start
#scan data with transducer, updating machine state and output each step
for c in xrange(length + self._synch):
if direction == 'right':
machine, mask[c % length] = self._transducer[(machine, data[c % length])]
elif direction == 'left':
i = length - (c+1)
machine, mask[i % length] = self._transducer[(machine, data[i % length])]
#clear non-domain values so mask can be simply added on top of original data, but not have
#interference with non-zero non-domain values.
hold[mask != 0] = 0
#transducer output is zero for all state values in domain, so must add (alphabet_size - 1)
#onto transducer output to give unique values to defects above the original alphabet, which
#may be present in the domain.
mask[mask != 0] += (alphabet_size - 1)
return hold + mask
def domain_filter(self, data, fill = False):
'''
Takes data string as transducer input and returns simplified transducer output with only two symbols;
domain (0) or not domain (1).
Parameters
----------
data: array_like
Input string to be scanned by transducer.
fill: bool
            Set to True if there is a discrepancy between the left and right transducer scans. This will
            run both directions and fill in between them.
            False returns the simplified output of a single-direction scan.
Returns
-------
output: array
Array of simplified transducer output.
'''
scanL = self.scan(data, 'left')
scanR = self.scan(data, 'right')
length = np.size(data)
hold = np.zeros(length, dtype = int)
if fill:
go = False
for c in xrange(length):
if scanL[c] != 0 and go == False:
go = True
hold[c] = 1
elif (scanL[c] == 0) and (scanR[c] == 0) and (go == True):
hold[c] = 1
elif scanR[c] != 0 and scanL[c] == 0 and go == True:
hold[c] = 1
go = False
else:
hold = np.copy(scanR)
hold[hold != 0] = 1
return hold
def spacetime_scan(self, data, direction = 'right'):
'''
'''
rows, columns = np.shape(data)
output = np.zeros((rows, columns), dtype = int)
for i in xrange(rows):
output[i] = self.scan(data[i], direction)
return output
def spacetime_mask(self, data, alphabet_size, direction = 'right'):
'''
'''
rows, columns = np.shape(data)
output = np.zeros((rows, columns), dtype = int)
for i in xrange(rows):
output[i] = self.mask(data[i], alphabet_size, direction)
return output
def spacetime_filter(self, data, fill = False):
'''
'''
rows, columns = np.shape(data)
output = np.zeros((rows, columns), dtype = int)
for i in xrange(rows):
output[i] = self.domain_filter(data[i], fill)
return output
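# Minimal usage sketch (illustrative only; the input configuration below is a
# made-up example):
#
# t18, start, synch = transducer_18()
# machine = transducer(t18, start, synch)
# data = np.array([0, 1, 1, 0, 0, 1, 0, 0])
# raw = machine.scan(data)               # raw transducer output
# filt = machine.domain_filter(data)     # 0 = domain, 1 = not domain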
| bsd-3-clause |
drewleonard42/CoronaTemps | temperature.py | 1 | 13293 |
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 5 15:15:09 2014
@author: drew
"""
from matplotlib import use
use('agg')
import matplotlib.pyplot as plt
from matplotlib import cm, _cm
from matplotlib import patches
import numpy as np
import sunpy
from sunpy.map import Map, GenericMap
from sunpy.instr.aia import aiaprep
from sys import argv
from os import path, makedirs
import subprocess32 as subp
from scipy.io.idl import readsav as read
import glob
home = path.expanduser('~')
cortemps = path.join(home, 'CoronaTemps')
class TemperatureMap(GenericMap):
def __init__(self, date=None, n_params=1, data_dir=None, maps_dir=None,
fname=None, infofile=None, submap=None, verbose=False,
force_temp_scan=False):
if (not fname and not date) or (fname and date):
print """You must specify either a date and time for which to create
temperatures or the name of a file containing a valid
TemperatureMap object."""
return
if date:
date = sunpy.time.parse_time(date)
if data_dir is None:
data_dir = '/media/huw/SDO_data/'
if maps_dir is None:
maps_dir='/media/huw/temperature_maps/{}pars/'.format(n_params)
fname = path.join(maps_dir, '{:%Y-%m-%dT%H_%M_%S}.fits'.format(date))
if infofile:
data_dir = None
maps_dir = open(infofile).readline()[:-1]
fname = path.join(maps_dir, '{:%Y-%m-%dT%H:%M:%S}.fits'.format(date))
fname.replace('/images/', '/data/')
if n_params != 1:
fname = fname.replace('.fits', '_full.fits')
if fname and not date:
data_dir = path.dirname(fname)
if verbose: print fname, data_dir
try:
newmap = Map(fname)
GenericMap.__init__(self, newmap.data[..., 0], newmap.meta)
self.goodness_of_fit = newmap.data[..., -1]
if newmap.data.shape[2] != 2:
self.dem_width = newmap.data[..., 1]
self.emission_measure = newmap.data[..., 2]
except ValueError:
cmdargs = ["python", path.join(cortemps, 'create_tempmap.py'),
date, n_params, data_dir, infofile, submap, verbose, force_temp_scan]
if n_params != 1:
cmdargs = ["mpiexec", "-n", 16] + cmdargs
cmdargs = [str(cmd) for cmd in cmdargs]
status = subp.call(cmdargs)
newmap = Map(path.join(cortemps, 'temporary.fits'))
subp.call(["rm", path.join(cortemps, 'temporary.fits')])
data, meta = newmap.data, newmap.meta
if verbose: print data.shape
GenericMap.__init__(self, data[..., 0], meta)
if data.shape[2] != 2:
data[data == 0] = np.nan
self.dem_width = data[..., 1]
self.emission_measure = data[..., 2]
self.goodness_of_fit = data[..., -1]
if verbose:
print self.shape
print self.goodness_of_fit.shape
if n_params != 1:
print self.dem_width.shape
print self.emission_measure.shape
lowx, highx = (self.xrange[0] / self.scale['x'],
self.xrange[1] / self.scale['x'])
lowy, highy = (self.yrange[0] / self.scale['y'],
self.yrange[1] / self.scale['y'])
x_grid, y_grid = np.mgrid[lowx:highx-1, lowy:highy-1]
r_grid = np.sqrt((x_grid ** 2.0) + (y_grid ** 2.0))
outer_rad = (self.rsun_arcseconds * 1.5) / self.scale['x']
self.data[r_grid > outer_rad] = None
self.meta['date-obs'] = str(date)
tmapcubehelix = _cm.cubehelix(s=2.8, r=0.7, h=2.0, gamma=1.0)
cm.register_cmap(name='temphelix', data=tmapcubehelix)
self.cmap = cm.get_cmap('temphelix')
self.data_dir = data_dir
self.maps_dir = maps_dir
self.temperature_scale = 'log'
self.region = None
self.region_coordinate = {'x': 0.0, 'y': 0.0}
if n_params == 3:
self.n_params = 3
else:
self.n_params = 1
return
@classmethod
def is_datasource_for(cls, data, header, **kwargs):
return header.get('instrume', '').startswith('temperature')
def region_map(self, region, mapsize=300, *args, **kwargs):
"""
A function to take as input a hek record or similar and create a submap
showing just the corresponding region
"""
x, y = region['hpc_coord']
newmap = self.submap([x-mapsize, x+mapsize], [y-mapsize, y+mapsize],
*args, **kwargs)
self.region_coordinate = {'x': x, 'y': y}
self.region = region
return newmap
def select_temps(self, mintemp, maxtemp):
"""
Function to highlight user-defined temperatures
"""
newdata = np.ones(self.data.shape) * np.NaN
indices = np.where((self.data > mintemp) * (self.data < maxtemp))
newdata[indices] = self.data[indices]
return Map(newdata, self.meta.copy())
def convert_scale(self, scale='linear'):
if self.temperature_scale == scale:
print "Temperatures are already measured on a {} scale.".format(
scale)
return
elif scale == 'linear':
self.data = (10.0 ** self.data) / 1.0e6
elif scale == 'log':
self.data = np.log10(self.data)
self.temperature_scale = scale
return
def compare(self, display_wlen='171', context_wlen=None, extra_maps=[]):
valid_wlens = ['94', '131', '171', '195', '211', '335', '304', 'hmi']
if display_wlen.lower() not in valid_wlens:
print "Display wavelength provided invalid or None."
output = self.plot()#*temp_args, **temp_kwargs)
return output
save_output = True
data_dir = self.data_dir
maps_dir = self.maps_dir
date = self.date
nmaps = 2 + len(extra_maps)
if context_wlen:
nrows = 2
else:
nrows = 1
fig = plt.figure(figsize=(24, 14))
fig.add_subplot(nrows, nmaps, nmaps, axisbg='k')
self.plot()#*temp_args, **temp_kwargs)
plt.colorbar(orientation='horizontal')
displaymap = Map(data_dir+'{0}/{1:%Y/%m/%d}/aia*{0}*t{1:%H?%M}*lev1?fits'\
.format(display_wlen, date))
if isinstance(displaymap, list):
displaymap = displaymap[0]
displaymap = aiaprep(displaymap)
displaymap /= displaymap.exposure_time
fig.add_subplot(nrows, nmaps, 1, axisbg='k')
displaymap.plot()#*wlen_args, **wlen_kwargs)
plt.colorbar(orientation='horizontal')
if context_wlen and self.region != None:
context_plot = fig.add_subplot(nrows, 1, nrows)
contextmap = Map(data_dir+'{0}/{1:%Y/%m/%d}/aia*{0}*t{1:%H?%M}*lev1?fits'.format(context_wlen, date))
if isinstance(contextmap, list):
contextmap = contextmap[0]
x, y = self.region_coordinate['x'], self.region_coordinate['y']
contextmap = contextmap.submap([-1000, 1000], [y-300, y+300])
# Need to figure out how to get 'subimsize' from self. Use the default 150'' for now
#rect = patches.Rectangle([x-subdx, y-subdx], subimsize[0], subimsize[1], color='white', fill=False)
rect = patches.Rectangle([x-150, y-150], 300, 300, color='white',
fill=False)
contextmap.plot()#*ctxt_args, **ctxt_kwargs)
context_plot.add_artist(rect)
for m, thismap in extra_maps:
fig.add_subplot(nrows, nmaps, 3+m)
thismap.plot()#*extr_args, **extr_kwargs)
if save_output:
savedir = path.join(maps_dir, 'maps/{:%Y/%m/%d}'.format(date))
if not path.exists(savedir):
makedirs(savedir)
            filename = path.join(maps_dir, '{:%Y-%m-%dT%H:%M:%S}_with{}'.format(date, display_wlen))
plt.savefig(filename)
if self.region != None:
reg_dir = path.join(maps_dir,
'maps/region_maps/{}/'. format(self.region))
if not path.exists(reg_dir):
makedirs(reg_dir)
plt.savefig(path.join(reg_dir, '{:%Y-%m-%dT%H:%M:%S}'.format(date)))
plt.close()
else:
plt.show()
return
def plot(self, vmin=None, vmax=None, *args, **kwargs):
mean = np.nanmean(self.data, dtype=np.float64)
std = np.nanstd(self.data, dtype=np.float64)
if vmin is None:
vmin = mean - (2.0 * std)
if vmax is None:
vmax = mean + (2.0 * std)
GenericMap.plot(self, vmin=vmin, vmax=vmax, *args, **kwargs)
return
def save(self):
date = sunpy.time.parse_time(self.date)
if not path.exists(self.maps_dir):
makedirs(self.maps_dir)
fname = path.join(self.maps_dir,
'{:%Y-%m-%dT%H_%M_%S}.fits'.format(date))
alldata = np.zeros((self.shape[0], self.shape[1], self.n_params+1))
alldata[..., 0] = self.data
if self.n_params != 1:
fname = fname.replace('.fits', '_full.fits')
alldata[..., 1] = self.dem_width
alldata[..., 2] = self.emission_measure
alldata[..., -1] = self.goodness_of_fit
outmap = Map(alldata, self.meta.copy())
outmap.save(fname, clobber=True)
def min(self):
return np.nanmin(self.data)
def mean(self):
return np.nanmean(self.data, dtype='float64')
def max(self):
return np.nanmax(self.data)
def std(self):
return np.nanstd(self.data, dtype='float64')
def calculate_em(self, wlen='171', dz=100, model=False):
"""
Calculate an approximation of the coronal EmissionMeasure using a given
TemperatureMap object and a particular AIA channel.
Parameters
----------
tmap : CoronaTemps.temperature.TemperatureMap
A TemperatureMap instance containing coronal temperature data
wlen : {'94' | '131' | '171' | '193' | '211' | '335'}
AIA wavelength used to approximate the emission measure. '171', '193'
and '211' are most likely to provide reliable results. Use of other
channels is not recommended.
"""
# Load the appropriate temperature response function
tresp = read('/imaps/holly/home/ajl7/CoronaTemps/aia_tresp')
resp = tresp['resp{}'.format(wlen)]
# Get some information from the TemperatureMap and set up filenames, etc
tempdata = self.data.copy()
tempdata[np.isnan(tempdata)] = 0.0
date = sunpy.time.parse_time(self.date)
if not model:
data_dir = self.data_dir
fits_dir = path.join(data_dir, '{:%Y/%m/%d}/{}'.format(date, wlen))
filename = path.join(fits_dir,
'*{0:%Y?%m?%d}?{0:%H?%M}*fits'.format(date))
if wlen == '94': filename = filename.replace('94', '094')
# Load and appropriately process AIA data
filelist = glob.glob(filename)
if filelist == []:
print 'AIA data not found :('
return
aiamap = Map(filename)
aiamap.data /= aiamap.exposure_time
aiamap = aiaprep(aiamap)
aiamap = aiamap.submap(self.xrange, self.yrange)
else:
fname = '/imaps/holly/home/ajl7/CoronaTemps/data/synthetic/{}/model.fits'.format(wlen)
if wlen == '94': fname = fname.replace('94', '094')
aiamap = Map(fname)
# Create new Map and put EM values in it
emmap = Map(self.data.copy(), self.meta.copy())
indices = np.round((tempdata - 4.0) / 0.05).astype(int)
indices[indices < 0] = 0
indices[indices > 100] = 100
#print emmap.shape, indices.shape, tempdata.shape, aiamap.shape, resp.shape
emmap.data = np.log10(aiamap.data / resp[indices])
#emmap.data = aiamap.data / resp[indices]
emmapcubehelix = _cm.cubehelix(s=2.8, r=-0.7, h=1.4, gamma=1.0)
cm.register_cmap(name='emhelix', data=emmapcubehelix)
emmap.cmap = cm.get_cmap('emhelix')
return emmap
sunpy.map.Map.register(TemperatureMap, TemperatureMap.is_datasource_for)
if __name__ == "__main__":
date = sunpy.time.parse_time(argv[1])
infofile = argv[2]
tmap = TemperatureMap(date, infofile=infofile)
tmap.save()
image_dir = open(infofile).readline()[:-1]
fname = path.join(image_dir, '{:%Y-%m-%dT%H_%M_%S}'.format(date))
print "Temperature map image saved to: {}".format(fname)
    fig = plt.figure(figsize=(16, 12))
tmap.plot()
plt.colorbar(orientation='vertical')
plt.savefig(fname)
plt.close()
| bsd-2-clause |
jayhetee/mpld3 | examples/drag_points.py | 19 | 2499 |
"""
Draggable Points Example
========================
This example shows how a D3 plugin can be created to make plot elements
draggable. A stopPropagation command is used to allow the drag behavior
and pan/zoom behavior to work in tandem.
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import mpld3
from mpld3 import plugins, utils
class DragPlugin(plugins.PluginBase):
JAVASCRIPT = r"""
mpld3.register_plugin("drag", DragPlugin);
DragPlugin.prototype = Object.create(mpld3.Plugin.prototype);
DragPlugin.prototype.constructor = DragPlugin;
DragPlugin.prototype.requiredProps = ["id"];
DragPlugin.prototype.defaultProps = {}
function DragPlugin(fig, props){
mpld3.Plugin.call(this, fig, props);
mpld3.insert_css("#" + fig.figid + " path.dragging",
{"fill-opacity": "1.0 !important",
"stroke-opacity": "1.0 !important"});
};
DragPlugin.prototype.draw = function(){
var obj = mpld3.get_element(this.props.id);
var drag = d3.behavior.drag()
.origin(function(d) { return {x:obj.ax.x(d[0]),
y:obj.ax.y(d[1])}; })
.on("dragstart", dragstarted)
.on("drag", dragged)
.on("dragend", dragended);
obj.elements()
.data(obj.offsets)
.style("cursor", "default")
.call(drag);
function dragstarted(d) {
d3.event.sourceEvent.stopPropagation();
d3.select(this).classed("dragging", true);
}
function dragged(d, i) {
d[0] = obj.ax.x.invert(d3.event.x);
d[1] = obj.ax.y.invert(d3.event.y);
d3.select(this)
.attr("transform", "translate(" + [d3.event.x,d3.event.y] + ")");
}
function dragended(d) {
d3.select(this).classed("dragging", false);
}
}
"""
def __init__(self, points):
if isinstance(points, mpl.lines.Line2D):
suffix = "pts"
else:
suffix = None
self.dict_ = {"type": "drag",
"id": utils.get_id(points, suffix)}
fig, ax = plt.subplots()
np.random.seed(0)
points = ax.plot(np.random.normal(size=20),
np.random.normal(size=20), 'or', alpha=0.5,
markersize=50, markeredgewidth=1)
ax.set_title("Click and Drag", fontsize=18)
plugins.connect(fig, DragPlugin(points[0]))
mpld3.show()
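# Illustrative alternative to mpld3.show(): the interactive figure can be
# exported as standalone HTML (the file name below is hypothetical).
# html_str = mpld3.fig_to_html(fig)
# open("drag_points.html", "w").write(html_str)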
| bsd-3-clause |
TakayukiSakai/tensorflow | tensorflow/contrib/learn/python/learn/estimators/classifier_test.py | 4 | 2674 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Classifier."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
def iris_input_fn():
iris = tf.contrib.learn.datasets.load_iris()
features = tf.cast(
tf.reshape(
tf.constant(iris.data), [-1, 4]), tf.float32)
target = tf.cast(
tf.reshape(
tf.constant(iris.target), [-1]), tf.int64)
return features, target
def logistic_model_fn(features, target, unused_mode):
target = tf.one_hot(target, 3, 1, 0)
prediction, loss = tf.contrib.learn.models.logistic_regression_zero_init(
features, target)
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
learning_rate=0.1)
return prediction, loss, train_op
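# Note on the model_fn contract above (inferred from this file rather than any
# external documentation): the function returns a (predictions, loss, train_op)
# tuple, which tf.contrib.learn.Classifier wires into fit/evaluate/predict.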
class ClassifierTest(tf.test.TestCase):
def testIrisAll(self):
iris = tf.contrib.learn.datasets.load_iris()
est = tf.contrib.learn.Classifier(model_fn=logistic_model_fn, n_classes=3)
est.fit(iris.data, iris.target, steps=100)
scores = est.evaluate(x=iris.data, y=iris.target)
predictions = est.predict(x=iris.data)
predictions_proba = est.predict_proba(x=iris.data)
self.assertEqual(predictions.shape[0], iris.target.shape[0])
self.assertAllClose(predictions, np.argmax(predictions_proba, axis=1))
other_score = _sklearn.accuracy_score(iris.target, predictions)
self.assertAllClose(other_score, scores['accuracy'])
def testIrisInputFn(self):
iris = tf.contrib.learn.datasets.load_iris()
est = tf.contrib.learn.Classifier(model_fn=logistic_model_fn, n_classes=3)
est.fit(input_fn=iris_input_fn, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
predictions = est.predict(x=iris.data)
self.assertEqual(predictions.shape[0], iris.target.shape[0])
if __name__ == '__main__':
tf.test.main()
|
apache-2.0
|
yaukwankiu/armor
|
colourbar.py
|
1
|
2351
|
from matplotlib import pyplot
import matplotlib as mpl
colourbar = {
    65: [255, 255, 255],
    60: [159,  49, 206],
    55: [255,   0, 255],
    50: [206,   0,   0],
    45: [255,   0,   0],
    40: [255,  99,  99],
    35: [255, 148,   0],
    30: [231, 198,   0],
    25: [255, 255,   0],
    20: [  0, 148,   0],
    15: [  0, 173,   0],
    10: [  0, 206,   0],
     5: [  0,   0, 255],  # VV i made these up: VV
     0: [  0,  99, 255],
    -5: [  0, 198, 255],
   -10: [156, 156, 156],
}
bounds = range(-10, 75, 5)
lowers = sorted(colourbar.keys())
cmap = mpl.colors.ListedColormap([[
1.*colourbar[v][0]/255,
1.*colourbar[v][1]/255,
1.*colourbar[v][2]/255
] for v in lowers
]) # [[0., .4, 1.], [0., .8, 1.], [1., .8, 0.], [1., .4, 0.]]
"""
cmap.set_over((1.*colourbar[65][0]/255,
1.*colourbar[65][1]/255,
1.*colourbar[65][2]/255))
"""
cmap.set_over((0,0,0)) #black!!
cmap.set_under((1.*colourbar[-10][0]/255,
1.*colourbar[-10][1]/255,
1.*colourbar[-10][2]/255))
"""
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
#fig = pyplot.figure()
#ax3 = fig.add_axes()
cb3 = mpl.colorbar.ColorbarBase(ax3, cmap=cmap,
norm=norm,
boundaries=[-10]+bounds+[10],
extend='both',
# Make the length of each extension
# the same as the length of the
# interior colors:
#extendfrac='auto',
ticks=bounds,
spacing='uniform',
orientation='horizontal'
)
cb3.set_label('Custom extension lengths, some other units')
"""
|
cc0-1.0
|
amozie/amozie
|
studzie/keras_rl_agent/dqn_test.py
|
1
|
2269
|
import numpy as np
import matplotlib.pyplot as plt
import gym
import time
import copy
from keras.models import Sequential, Model
from keras.layers import Dense, Activation, Flatten, Lambda, Input, Reshape, concatenate, Merge
from keras.optimizers import Adam, RMSprop
from keras.callbacks import History
from keras import backend as K
import tensorflow as tf
from gym import Env, Space, spaces
from gym.utils import seeding
from rl.agents.dqn import DQNAgent
from rl.policy import BoltzmannQPolicy, EpsGreedyQPolicy
from rl.memory import SequentialMemory, EpisodeParameterMemory
from rl.agents.cem import CEMAgent
from rl.agents import SARSAAgent
from rl.callbacks import TrainEpisodeLogger, CallbackList
# env = gym.make('MountainCar-v0')
env = gym.make('CartPole-v1')
env.seed()
nb_actions = env.action_space.n
x = Input((1,) + env.observation_space.shape)
y = Flatten()(x)
y = Dense(16)(y)
y = Activation('relu')(y)
y = Dense(16)(y)
y = Activation('relu')(y)
y = Dense(16)(y)
y = Activation('relu')(y)
y = Dense(nb_actions)(y)
y = Activation('linear')(y)
model = Model(x, y)
memory = SequentialMemory(limit=10000, window_length=1)
# policy = BoltzmannQPolicy()
policy = EpsGreedyQPolicy()
dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=1000, gamma=.9,
enable_dueling_network=False, dueling_type='avg', target_model_update=1e-2, policy=policy)
# dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=10,
# enable_dueling_network=True, dueling_type='avg', target_model_update=1e-2, policy=policy)
dqn.compile(Adam(lr=.001, decay=.001), metrics=['mae'])
rewards = []
callback = [TrainEpisodeLogger(), History()]
hist = dqn.fit(env, nb_steps=10000, visualize=False, verbose=2, callbacks=None)
rewards.extend(hist.history.get('episode_reward'))
plt.plot(rewards)
dqn.test(env, nb_episodes=5, visualize=True)
state = env.reset()
action = env.action_space.sample()
print(action)
state_list= []
for i in range(300):
state_list.append(state)
# action = np.argmax(dqn.model.predict(np.expand_dims(np.expand_dims(state, 0), 0))[0])
    state, reward, done, _ = env.step(action)  # CartPole-v1 has only actions 0 and 1; a hard-coded 2 would be rejected
env.render()
env.render(close=True)
state_arr = np.array(state_list)
plt.plot(state_arr)
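# Hedged follow-up sketch (the .h5f file name is arbitrary, not from the
# original script): keras-rl agents can persist their weights so the rollout
# section can be rerun without retraining.
dqn.save_weights('dqn_cartpole_weights.h5f', overwrite=True)
# dqn.load_weights('dqn_cartpole_weights.h5f')
plt.show()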
|
apache-2.0
|
wogsland/QSTK
|
build/lib.linux-x86_64-2.7/QSTK/qstkutil/qsdateutil.py
|
5
|
9008
|
'''
(c) 2011, 2012 Georgia Tech Research Corporation
This source code is released under the New BSD license. Please see
http://wiki.quantsoftware.org/index.php?title=QSTK_License
for license details.
Created on Jan 1, 2011
@author: Drew Bratcher
@contact: [email protected]
@summary: Date utilities for NYSE trading days (used by the backtester and report tutorials).
'''
import datetime as dt
from datetime import timedelta
import time as t
import numpy as np
import os
import pandas as pd
def _cache_dates():
''' Caches dates '''
try:
# filename = os.environ['QS'] + "/qstkutil/NYSE_dates.txt"
filename = os.path.join(os.path.dirname(__file__), 'NYSE_dates.txt')
except KeyError:
print "Please be sure you have NYSE_dates.txt in the qstkutil directory"
datestxt = np.loadtxt(filename, dtype=str)
dates = []
for i in datestxt:
dates.append(dt.datetime.strptime(i, "%m/%d/%Y"))
return pd.TimeSeries(index=dates, data=dates)
GTS_DATES = _cache_dates()
def getMonthNames():
return(['JAN','FEB','MAR','APR','MAY','JUN','JUL','AUG','SEP','OCT','NOV','DEC'])
def getYears(funds):
years=[]
for date in funds.index:
if(not(date.year in years)):
years.append(date.year)
return(years)
def getMonths(funds,year):
months=[]
for date in funds.index:
if((date.year==year) and not(date.month in months)):
months.append(date.month)
return(months)
def getDays(funds,year,month):
days=[]
for date in funds.index:
if((date.year==year) and (date.month==month)):
days.append(date)
return(days)
def getDaysBetween(ts_start, ts_end):
days=[]
for i in range(0,(ts_end-ts_start).days):
days.append(ts_start+timedelta(days=1)*i)
return(days)
def getFirstDay(funds,year,month):
for date in funds.index:
if((date.year==year) and (date.month==month)):
return(date)
return('ERROR')
def getLastDay(funds,year,month):
return_date = 'ERROR'
for date in funds.index:
if((date.year==year) and (date.month==month)):
return_date = date
return(return_date)
def getNextOptionClose(day, trade_days, offset=0):
#get third friday in month of day
#get first of month
year_off=0
if day.month+offset > 12:
year_off = 1
offset = offset - 12
first = dt.datetime(day.year+year_off, day.month+offset, 1, hour=16)
#get weekday
day_num = first.weekday()
#get first friday (friday - weekday) add 7 if less than 1
dif = 5 - day_num
if dif < 1:
dif = dif+7
#move to third friday
dif = dif + 14
friday = first+dt.timedelta(days=(dif-1))
#if friday is a holiday, options expire then
if friday in trade_days:
month_close = first + dt.timedelta(days=dif)
else:
month_close = friday
#if day is past the day after that
if month_close < day:
return_date = getNextOptionClose(day, trade_days, offset=1)
else:
return_date = month_close
return(return_date)
def getLastOptionClose(day, trade_days):
start = day
while getNextOptionClose(day, trade_days)>=start:
day= day - dt.timedelta(days=1)
return(getNextOptionClose(day, trade_days))
def getNYSEoffset(mark, offset):
''' Returns NYSE date offset by number of days '''
mark = mark.replace(hour=0, minute=0, second=0, microsecond=0)
i = GTS_DATES.index.searchsorted(mark, side='right')
# If there is no exact match, take first date in past
if GTS_DATES[i] != mark:
i -= 1
ret = GTS_DATES[i + offset]
ret = ret.replace(hour=16)
return ret
def getNYSEdays(startday = dt.datetime(1964,7,5), endday = dt.datetime(2020,12,31),
timeofday = dt.timedelta(0)):
"""
@summary: Create a list of timestamps between startday and endday (inclusive)
that correspond to the days there was trading at the NYSE. This function
    depends on a separately created file that lists all days since July 4,
1962 that the NYSE has been open, going forward to 2020 (based
on the holidays that NYSE recognizes).
@param startday: First timestamp to consider (inclusive)
@param endday: Last day to consider (inclusive)
@return list: of timestamps between startday and endday on which NYSE traded
@rtype datetime
"""
start = startday - timeofday
end = endday - timeofday
dates = GTS_DATES[start:end]
ret = [x + timeofday for x in dates]
return(ret)
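# A typical call pattern for the function above (hedged sketch; the date range
# is illustrative and the 16:00 offset matches the NYSE close used elsewhere
# in this module):
#
#     ldt_timestamps = getNYSEdays(dt.datetime(2010, 1, 1),
#                                  dt.datetime(2010, 12, 31),
#                                  dt.timedelta(hours=16))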
def getNextNNYSEdays(startday, days, timeofday):
"""
    @summary: Create a list of timestamps starting at startday that is `days` entries long
that correspond to the days there was trading at NYSE. This function
depends on the file used in getNYSEdays and assumes the dates within are
in order.
@param startday: First timestamp to consider (inclusive)
@param days: Number of timestamps to return
@return list: List of timestamps starting at startday on which NYSE traded
@rtype datetime
"""
try:
# filename = os.environ['QS'] + "/qstkutil/NYSE_dates.txt"
filename = os.path.join(os.path.dirname(__file__), 'NYSE_dates.txt')
except KeyError:
print "Please be sure to set the value for QS in config.sh or\n"
print "in local.sh and then \'source local.sh\'.\n"
datestxt = np.loadtxt(filename,dtype=str)
dates=[]
for i in datestxt:
if(len(dates)<days):
if((dt.datetime.strptime(i,"%m/%d/%Y")+timeofday)>=startday):
dates.append(dt.datetime.strptime(i,"%m/%d/%Y")+timeofday)
return(dates)
def getPrevNNYSEday(startday, timeofday):
"""
@summary: This function returns the last valid trading day before the start
day, or returns the start day if it is a valid trading day. This function
depends on the file used in getNYSEdays and assumes the dates within are
in order.
@param startday: First timestamp to consider (inclusive)
    @param timeofday: Time-of-day offset added to the returned timestamp
    @return datetime: The last trading day on or before startday
@rtype datetime
"""
try:
# filename = os.environ['QS'] + "/qstkutil/NYSE_dates.txt"
filename = os.path.join(os.path.dirname(__file__), 'NYSE_dates.txt')
except KeyError:
print "Please be sure to set the value for QS in config.sh or\n"
print "in local.sh and then \'source local.sh\'.\n"
datestxt = np.loadtxt(filename,dtype=str)
#''' Set return to first day '''
dtReturn = dt.datetime.strptime( datestxt[0],"%m/%d/%Y")+timeofday
#''' Loop through all but first '''
for i in datestxt[1:]:
dtNext = dt.datetime.strptime(i,"%m/%d/%Y")
#''' If we are > startday, then use previous valid day '''
if( dtNext > startday ):
break
dtReturn = dtNext + timeofday
return(dtReturn)
def ymd2epoch(year, month, day):
"""
@summary: Convert YMD info into a unix epoch value.
@param year: The year
@param month: The month
@param day: The day
@return epoch: number of seconds since epoch
"""
return(t.mktime(dt.date(year,month,day).timetuple()))
def epoch2date(ts):
"""
@summary Convert seconds since epoch into date
@param ts: Seconds since epoch
@return thedate: A date object
"""
tm = t.gmtime(ts)
return(dt.date(tm.tm_year,tm.tm_mon,tm.tm_mday))
def _trade_dates(dt_start, dt_end, s_period):
'''
@summary: Generate dates on which we need to trade
@param c_strat: Strategy config class
@param dt_start: Start date
@param dt_end: End date
'''
ldt_timestamps = getNYSEdays(dt_start,
dt_end, dt.timedelta(hours=16) )
# Use pandas reindex method instead
# Note, dates are index as well as values, we select based on index
# but return values since it is a numpy array of datetimes instead of
# pandas specific.
ts_dates = pd.TimeSeries(index=ldt_timestamps, data=ldt_timestamps)
# These are the dates we want
if s_period[:2] == 'BW':
# special case for biweekly
dr_range = pd.DateRange(dt_start, dt_end,
timeRule=s_period[1:])
dr_range = np.asarray(dr_range)
li_even = np.array(range(len(dr_range)))
dr_range = dr_range[li_even[li_even % 2 == 0]]
else:
dr_range = pd.DateRange(dt_start, dt_end,
timeRule=s_period)
dr_range = np.asarray(dr_range)
    # Warning: we MUST copy the date range; if we modify it, it will be returned
    # in its modified form the next time we use it.
dr_range = np.copy(dr_range)
dr_range += pd.DateOffset(hours=16)
ts_dates = ts_dates.reindex( dr_range, method='bfill' )
ldt_dates = ts_dates[ts_dates.notnull()].values
#Make unique
sdt_unique = set()
ldt_dates = [x for x in ldt_dates
if x not in sdt_unique and not sdt_unique.add(x)]
return ldt_dates
|
bsd-3-clause
|
ryfeus/lambda-packs
|
LightGBM_sklearn_scipy_numpy/source/sklearn/preprocessing/data.py
|
5
|
94481
|
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Eric Martin <[email protected]>
# Giorgio Patrini <[email protected]>
# License: BSD 3 clause
from __future__ import division
from itertools import chain, combinations
import numbers
import warnings
from itertools import combinations_with_replacement as combinations_w_r
import numpy as np
from scipy import sparse
from scipy import stats
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six import string_types
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.extmath import _incremental_mean_and_var
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale,
mean_variance_axis, incr_mean_variance_axis,
min_max_axis)
from ..utils.validation import (check_is_fitted, check_random_state,
FLOAT_DTYPES)
BOUNDS_THRESHOLD = 1e-7
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'QuantileTransformer',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
'quantile_transform',
]
def _handle_zeros_in_scale(scale, copy=True):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == .0:
scale = 1.
return scale
elif isinstance(scale, np.ndarray):
if copy:
# New array to avoid side-effects
scale = scale.copy()
scale[scale == 0.0] = 1.0
return scale
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : {array-like, sparse matrix}
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSC matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSC matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSC matrix.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
See also
--------
    StandardScaler: Performs scaling to unit variance using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
""" # noqa
X = check_array(X, accept_sparse='csc', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if with_std:
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var, copy=False)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
if with_mean:
mean_ = np.mean(X, axis)
if with_std:
scale_ = np.std(X, axis)
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
scale_ = _handle_zeros_in_scale(scale_, copy=False)
Xr /= scale_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# scale_ is very small so that mean_2 = mean_1/scale_ > 0, even
# if mean_1 was close to zero. The problem is thus essentially
# due to the lack of precision of mean_. A solution is then to
# subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
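# A short usage sketch for ``scale`` (assumed to be exercised through the
# public ``sklearn.preprocessing`` namespace; not part of this module):
#
#     >>> import numpy as np
#     >>> from sklearn.preprocessing import scale
#     >>> X = np.array([[1., -1.,  2.],
#     ...               [2.,  0.,  0.],
#     ...               [0.,  1., -1.]])
#     >>> X_scaled = scale(X)
#     >>> X_scaled.mean(axis=0)   # approximately zero per column
#     >>> X_scaled.std(axis=0)    # one per column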
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range : tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
data_min_ : ndarray, shape (n_features,)
Per feature minimum seen in the data
.. versionadded:: 0.17
*data_min_*
data_max_ : ndarray, shape (n_features,)
Per feature maximum seen in the data
.. versionadded:: 0.17
*data_max_*
data_range_ : ndarray, shape (n_features,)
Per feature range ``(data_max_ - data_min_)`` seen in the data
.. versionadded:: 0.17
*data_range_*
Examples
--------
>>> from sklearn.preprocessing import MinMaxScaler
>>>
>>> data = [[-1, 2], [-0.5, 6], [0, 10], [1, 18]]
>>> scaler = MinMaxScaler()
>>> print(scaler.fit(data))
MinMaxScaler(copy=True, feature_range=(0, 1))
>>> print(scaler.data_max_)
[ 1. 18.]
>>> print(scaler.transform(data))
[[ 0. 0. ]
[ 0.25 0.25]
[ 0.5 0.5 ]
[ 1. 1. ]]
>>> print(scaler.transform([[2, 2]]))
[[ 1.5 0. ]]
See also
--------
minmax_scale: Equivalent function without the estimator API.
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.min_
del self.n_samples_seen_
del self.data_min_
del self.data_max_
del self.data_range_
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of min and max on X for later scaling.
All of X is processed as a single batch. This is intended for cases
        when `fit` is not feasible due to a very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
if sparse.issparse(X):
raise TypeError("MinMaxScaler does no support sparse input. "
"You may consider to use MaxAbsScaler instead.")
X = check_array(X, copy=self.copy, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
data_min = np.min(X, axis=0)
data_max = np.max(X, axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next steps
else:
data_min = np.minimum(self.data_min_, data_min)
data_max = np.maximum(self.data_max_, data_max)
self.n_samples_seen_ += X.shape[0]
data_range = data_max - data_min
self.scale_ = ((feature_range[1] - feature_range[0]) /
_handle_zeros_in_scale(data_range))
self.min_ = feature_range[0] - data_min * self.scale_
self.data_min_ = data_min
self.data_max_ = data_max
self.data_range_ = data_range
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed. It cannot be sparse.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES)
X -= self.min_
X /= self.scale_
return X
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
.. versionadded:: 0.17
*minmax_scale* function interface
to :class:`sklearn.preprocessing.MinMaxScaler`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data.
feature_range : tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
See also
--------
    MinMaxScaler: Performs scaling to a given range using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
""" # noqa
# Unlike the scaler object, this function allows 1d input.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, copy=False, ensure_2d=False, warn_on_dtype=True,
dtype=FLOAT_DTYPES)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
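# Illustrative call for ``minmax_scale`` (a hedged sketch): unlike the
# ``MinMaxScaler`` estimator above, the function also accepts 1d input.
#
#     >>> from sklearn.preprocessing import minmax_scale
#     >>> minmax_scale([1., 3., 5.])                        # array([0. , 0.5, 1. ])
#     >>> minmax_scale([1., 3., 5.], feature_range=(-1, 1))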
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
    individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
    than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
This scaler can also be applied to sparse CSR or CSC matrices by passing
`with_mean=False` to avoid breaking the sparsity structure of the data.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_*
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
var_ : array of floats with shape [n_features]
The variance for each feature in the training set. Used to compute
`scale_`
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
Examples
--------
>>> from sklearn.preprocessing import StandardScaler
>>>
>>> data = [[0, 0], [0, 0], [1, 1], [1, 1]]
>>> scaler = StandardScaler()
>>> print(scaler.fit(data))
StandardScaler(copy=True, with_mean=True, with_std=True)
>>> print(scaler.mean_)
[ 0.5 0.5]
>>> print(scaler.transform(data))
[[-1. -1.]
[-1. -1.]
[ 1. 1.]
[ 1. 1.]]
>>> print(scaler.transform([[2, 2]]))
[[ 3. 3.]]
See also
--------
scale: Equivalent function without the estimator API.
:class:`sklearn.decomposition.PCA`
Further removes the linear correlation across features with 'whiten=True'.
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
""" # noqa
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.mean_
del self.var_
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of mean and std on X for later scaling.
All of X is processed as a single batch. This is intended for cases
        when `fit` is not feasible due to a very large number of `n_samples`
or because X is read from a continuous stream.
The algorithm for incremental mean and std is given in Equation 1.5a,b
in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms
for computing the sample variance: Analysis and recommendations."
The American Statistician 37.3 (1983): 242-247:
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES)
# Even in the case of `with_mean=False`, we update the mean anyway
# This is needed for the incremental computation of the var
# See incr_mean_variance_axis and _incremental_mean_variance_axis
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.with_std:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_, self.var_ = mean_variance_axis(X, axis=0)
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
self.mean_, self.var_, self.n_samples_seen_ = \
incr_mean_variance_axis(X, axis=0,
last_mean=self.mean_,
last_var=self.var_,
last_n=self.n_samples_seen_)
else:
self.mean_ = None
self.var_ = None
else:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_ = .0
self.n_samples_seen_ = 0
if self.with_std:
self.var_ = .0
else:
self.var_ = None
self.mean_, self.var_, self.n_samples_seen_ = \
_incremental_mean_and_var(X, self.mean_, self.var_,
self.n_samples_seen_)
if self.with_std:
self.scale_ = _handle_zeros_in_scale(np.sqrt(self.var_))
else:
self.scale_ = None
return self
def transform(self, X, y='deprecated', copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
y : (ignored)
.. deprecated:: 0.19
This parameter will be removed in 0.21.
copy : bool, optional (default: None)
Copy the input X or not.
"""
if not isinstance(y, string_types) or y != 'deprecated':
warnings.warn("The parameter y on transform() is "
"deprecated since 0.19 and will be removed in 0.21",
DeprecationWarning)
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.scale_ is not None:
inplace_column_scale(X, 1 / self.scale_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.scale_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
copy : bool, optional (default: None)
Copy the input X or not.
Returns
-------
X_tr : array-like, shape [n_samples, n_features]
Transformed array.
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.scale_ is not None:
inplace_column_scale(X, self.scale_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.scale_
if self.with_mean:
X += self.mean_
return X
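# A hedged sketch of the streaming workflow described in ``partial_fit``:
# fitting chunk by chunk accumulates the same statistics as a single ``fit``.
#
#     >>> import numpy as np
#     >>> from sklearn.preprocessing import StandardScaler
#     >>> X = np.random.RandomState(0).normal(size=(100, 3))
#     >>> scaler = StandardScaler()
#     >>> for chunk in np.array_split(X, 5):
#     ...     _ = scaler.partial_fit(chunk)
#     >>> bool(np.allclose(scaler.mean_, X.mean(axis=0)))   # True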
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
This estimator scales and translates each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
.. versionadded:: 0.17
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
max_abs_ : ndarray, shape (n_features,)
Per feature maximum absolute value.
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
See also
--------
maxabs_scale: Equivalent function without the estimator API.
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
def __init__(self, copy=True):
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.max_abs_
def fit(self, X, y=None):
"""Compute the maximum absolute value to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of max absolute value of X for later scaling.
All of X is processed as a single batch. This is intended for cases
        when `fit` is not feasible due to a very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
max_abs = np.maximum(np.abs(mins), np.abs(maxs))
else:
max_abs = np.abs(X).max(axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
max_abs = np.maximum(self.max_abs_, max_abs)
self.n_samples_seen_ += X.shape[0]
self.max_abs_ = max_abs
self.scale_ = _handle_zeros_in_scale(max_abs)
return self
def transform(self, X):
"""Scale the data
Parameters
----------
X : {array-like, sparse matrix}
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : {array-like, sparse matrix}
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
See also
--------
    MaxAbsScaler: Performs scaling to the [-1, 1] range using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
""" # noqa
# Unlike the scaler object, this function allows 1d input.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, accept_sparse=('csr', 'csc'), copy=False,
ensure_2d=False, dtype=FLOAT_DTYPES)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MaxAbsScaler(copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
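# Worked example for ``MaxAbsScaler``/``maxabs_scale`` (hedged): every column
# is divided by its maximum absolute value, so sign and sparsity are preserved.
#
#     >>> import numpy as np
#     >>> from sklearn.preprocessing import MaxAbsScaler
#     >>> X = np.array([[ 1., -1.,  2.],
#     ...               [ 2.,  0.,  0.],
#     ...               [ 0.,  1., -1.]])
#     >>> MaxAbsScaler().fit_transform(X)
#     array([[ 0.5, -1. ,  1. ],
#            [ 1. ,  0. ,  0. ],
#            [ 0. ,  1. , -0.5]])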
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the quantile range (defaults to IQR: Interquartile Range).
The IQR is the range between the 1st quartile (25th quantile)
and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the ``axis`` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the ``transform``
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
.. versionadded:: 0.17
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This will cause ``transform`` to raise an exception when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0
Default: (25.0, 75.0) = (1st quantile, 3rd quantile) = IQR
Quantile range used to calculate ``scale_``.
.. versionadded:: 0.18
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
.. versionadded:: 0.17
*scale_* attribute.
See also
--------
robust_scale: Equivalent function without the estimator API.
:class:`sklearn.decomposition.PCA`
Further removes the linear correlation across features with
'whiten=True'.
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
https://en.wikipedia.org/wiki/Median_(statistics)
https://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True,
quantile_range=(25.0, 75.0), copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.quantile_range = quantile_range
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q_min, q_max = self.quantile_range
if not 0 <= q_min <= q_max <= 100:
raise ValueError("Invalid quantile range: %s" %
str(self.quantile_range))
q = np.percentile(X, self.quantile_range, axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False)
return self
def transform(self, X):
"""Center and scale the data.
Can be called on sparse input, provided that ``RobustScaler`` has been
fitted to dense input and ``with_centering=False``.
Parameters
----------
X : {array-like, sparse matrix}
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
def robust_scale(X, axis=0, with_centering=True, with_scaling=True,
quantile_range=(25.0, 75.0), copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0
Default: (25.0, 75.0) = (1st quantile, 3rd quantile) = IQR
Quantile range used to calculate ``scale_``.
.. versionadded:: 0.18
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
See also
--------
RobustScaler: Performs centering and scaling using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
quantile_range=quantile_range, copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
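# Brief sketch for ``RobustScaler``/``robust_scale`` (hedged): the median is
# removed and the IQR is used as the scale, which damps the influence of
# outlying values.
#
#     >>> import numpy as np
#     >>> from sklearn.preprocessing import RobustScaler
#     >>> X = np.array([[ 1., -2.,  2.],
#     ...               [-2.,  1.,  3.],
#     ...               [ 4.,  1., -2.]])
#     >>> RobustScaler().fit_transform(X)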
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0., 0., 1.],
[ 1., 2., 3., 4., 6., 9.],
[ 1., 4., 5., 16., 20., 25.]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0.],
[ 1., 2., 3., 6.],
[ 1., 4., 5., 20.]])
Attributes
----------
powers_ : array, shape (n_output_features, n_input_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<sphx_glr_auto_examples_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(np.bincount(c, minlength=self.n_input_features_)
for c in combinations)
def get_feature_names(self, input_features=None):
"""
Return feature names for output features
Parameters
----------
input_features : list of string, length n_features, optional
String names for input features if available. By default,
"x0", "x1", ... "xn_features" is used.
Returns
-------
output_feature_names : list of string, length n_output_features
"""
powers = self.powers_
if input_features is None:
input_features = ['x%d' % i for i in range(powers.shape[1])]
feature_names = []
for row in powers:
inds = np.where(row)[0]
if len(inds):
name = " ".join("%s^%d" % (input_features[ind], exp)
if exp != 1 else input_features[ind]
for ind, exp in zip(inds, row[inds]))
else:
name = "1"
feature_names.append(name)
return feature_names
def fit(self, X, y=None):
"""
Compute number of output features.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data.
Returns
-------
self : instance
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X):
"""Transform data to polynomial features
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X, dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
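# Hedged illustration of ``get_feature_names`` for the default settings:
#
#     >>> import numpy as np
#     >>> from sklearn.preprocessing import PolynomialFeatures
#     >>> poly = PolynomialFeatures(degree=2).fit(np.arange(6).reshape(3, 2))
#     >>> poly.get_feature_names()
#     ['1', 'x0', 'x1', 'x0^2', 'x0 x1', 'x1^2']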
def normalize(X, norm='l2', axis=1, copy=True, return_norm=False):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
return_norm : boolean, default False
whether to return the computed norms
Returns
-------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Normalized input X.
norms : array, shape [n_samples] if axis=1 else [n_features]
An array of norms along given axis for X.
When X is sparse, a NotImplementedError will be raised
for norm 'l1' or 'l2'.
See also
--------
Normalizer: Performs normalization using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if return_norm and norm in ('l1', 'l2'):
raise NotImplementedError("return_norm=True is not implemented "
"for sparse matrices with norm 'l1' "
"or norm 'l2'")
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms_elementwise = norms.repeat(np.diff(X.indptr))
mask = norms_elementwise != 0
X.data[mask] /= norms_elementwise[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms, copy=False)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
if return_norm:
return X, norms
else:
return X
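# Example call for ``normalize`` (hedged): each row is rescaled to unit l2
# norm, e.g. [4, 1, 2, 2] has norm 5 and becomes [0.8, 0.2, 0.4, 0.4].
#
#     >>> from sklearn.preprocessing import normalize
#     >>> normalize([[4., 1., 2., 2.],
#     ...            [1., 3., 9., 3.],
#     ...            [5., 7., 5., 1.]], norm='l2')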
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
    classification or clustering. For instance, the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
    This estimator is stateless (besides constructor parameters); the
fit method does nothing but is useful when used in a pipeline.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
See also
--------
normalize: Equivalent function without the estimator API.
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
Parameters
----------
X : array-like
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y='deprecated', copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
y : (ignored)
.. deprecated:: 0.19
This parameter will be removed in 0.21.
copy : bool, optional (default: None)
Copy the input X or not.
"""
if not isinstance(y, string_types) or y != 'deprecated':
warnings.warn("The parameter y on transform() is "
"deprecated since 0.19 and will be removed in 0.21",
DeprecationWarning)
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix and if axis is 1).
See also
--------
Binarizer: Performs binarization using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
binarize: Equivalent function without the estimator API.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
Parameters
----------
X : array-like
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y='deprecated', copy=None):
"""Binarize each element of X
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
y : (ignored)
.. deprecated:: 0.19
This parameter will be removed in 0.21.
copy : bool
Copy the input X or not.
"""
if not isinstance(y, string_types) or y != 'deprecated':
warnings.warn("The parameter y on transform() is "
"deprecated since 0.19 and will be removed in 0.21",
DeprecationWarning)
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
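# Illustrative usage sketch for binarize/Binarizer (editor's addition):
#     from sklearn.preprocessing import Binarizer
#     X = [[1., -1., 2.], [2., 0., 0.], [0., 1., -1.]]
#     Binarizer(threshold=0.0).fit_transform(X)
#     # -> [[1., 0., 1.],
#     #     [1., 0., 0.],
#     #     [0., 1., 0.]]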
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
    normalizes to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K, dtype=FLOAT_DTYPES)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y='deprecated', copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
y : (ignored)
.. deprecated:: 0.19
This parameter will be removed in 0.21.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
if not isinstance(y, string_types) or y != 'deprecated':
warnings.warn("The parameter y on transform() is "
"deprecated since 0.19 and will be removed in 0.21",
DeprecationWarning)
check_is_fitted(self, 'K_fit_all_')
K = check_array(K, copy=copy, dtype=FLOAT_DTYPES)
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
@property
def _pairwise(self):
return True
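# Editor's sketch of the equivalence stated in the KernelCenterer docstring,
# assuming a linear kernel K = X.dot(X.T):
#     import numpy as np
#     from sklearn.preprocessing import KernelCenterer, StandardScaler
#     X = np.array([[1., 2.], [2., 0.], [0., 1.]])
#     K = X.dot(X.T)
#     Kc = KernelCenterer().fit_transform(K)
#     Xc = StandardScaler(with_std=False).fit_transform(X)
#     np.allclose(Kc, Xc.dot(Xc.T))   # -> True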
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : {array, sparse matrix}, shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'], dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
X = check_array(X, accept_sparse='csc', copy=copy, dtype=FLOAT_DTYPES)
if isinstance(selected, six.string_types) and selected == "all":
return transform(X)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Note: a one-hot encoding of y labels should use a LabelBinarizer
instead.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : number of categorical values per feature.
Each feature value should be in ``range(n_values)``
- array : ``n_values[i]`` is the number of categorical values in
``X[:, i]``. Each feature value should be
in ``range(n_values[i])``
categorical_features : "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
        Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and four samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'numpy.float64'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
sklearn.preprocessing.LabelBinarizer : binarizes labels in a one-vs-all
fashion.
sklearn.preprocessing.MultiLabelBinarizer : transforms between iterable of
iterables and a multilabel format, e.g. a (samples x classes) binary
matrix indicating the presence of a class label.
sklearn.preprocessing.LabelEncoder : encodes labels with values between 0
and n_classes-1.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float64, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape [n_samples, n_feature]
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
                                % type(self.n_values))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
Parameters
----------
X : array-like, shape [n_samples, n_feature]
Input array of type int.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
        # We use only those categorical features of X that were seen during fit,
        # i.e. those with values less than n_values_, selected via a mask.
        # This means that if self.handle_unknown is "ignore", the row_indices and
        # col_indices corresponding to the unknown categorical features are
        # ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X.ravel()[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
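# Editor's sketch: mixing categorical and continuous columns. Only column 0 is
# one-hot encoded; the continuous column is stacked to the right (see
# `categorical_features` above):
#     enc = OneHotEncoder(categorical_features=[0], sparse=False)
#     enc.fit([[0, 10.5], [1, 3.2], [2, 7.1]])
#     enc.transform([[1, 4.0]])   # -> array([[ 0.,  1.,  0.,  4.]])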
class QuantileTransformer(BaseEstimator, TransformerMixin):
"""Transform features using quantiles information.
This method transforms the features to follow a uniform or a normal
distribution. Therefore, for a given feature, this transformation tends
to spread out the most frequent values. It also reduces the impact of
(marginal) outliers: this is therefore a robust preprocessing scheme.
The transformation is applied on each feature independently.
    The cumulative distribution function of a feature is used to project the
    original values. Feature values of new/unseen data that fall below
or above the fitted range will be mapped to the bounds of the output
distribution. Note that this transform is non-linear. It may distort linear
correlations between variables measured at the same scale but renders
variables measured at different scales more directly comparable.
Read more in the :ref:`User Guide <preprocessing_transformer>`.
Parameters
----------
n_quantiles : int, optional (default=1000)
Number of quantiles to be computed. It corresponds to the number
        of landmarks used to discretize the cumulative distribution function.
output_distribution : str, optional (default='uniform')
Marginal distribution for the transformed data. The choices are
'uniform' (default) or 'normal'.
ignore_implicit_zeros : bool, optional (default=False)
Only applies to sparse matrices. If True, the sparse entries of the
matrix are discarded to compute the quantile statistics. If False,
these entries are treated as zeros.
subsample : int, optional (default=1e5)
Maximum number of samples used to estimate the quantiles for
computational efficiency. Note that the subsampling procedure may
differ for value-identical sparse and dense matrices.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by np.random. Note that this is used by subsampling and smoothing
noise.
copy : boolean, optional, (default=True)
Set to False to perform inplace transformation and avoid a copy (if the
input is already a numpy array).
Attributes
----------
quantiles_ : ndarray, shape (n_quantiles, n_features)
        The values corresponding to the quantiles of reference.
references_ : ndarray, shape(n_quantiles, )
Quantiles of references.
Examples
--------
>>> import numpy as np
>>> from sklearn.preprocessing import QuantileTransformer
>>> rng = np.random.RandomState(0)
>>> X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0)
>>> qt = QuantileTransformer(n_quantiles=10, random_state=0)
>>> qt.fit_transform(X) # doctest: +ELLIPSIS
array([...])
See also
--------
quantile_transform : Equivalent function without the estimator API.
StandardScaler : perform standardization that is faster, but less robust
to outliers.
RobustScaler : perform robust standardization that removes the influence
of outliers but does not put outliers and inliers on the same scale.
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
def __init__(self, n_quantiles=1000, output_distribution='uniform',
ignore_implicit_zeros=False, subsample=int(1e5),
random_state=None, copy=True):
self.n_quantiles = n_quantiles
self.output_distribution = output_distribution
self.ignore_implicit_zeros = ignore_implicit_zeros
self.subsample = subsample
self.random_state = random_state
self.copy = copy
def _dense_fit(self, X, random_state):
"""Compute percentiles for dense matrices.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
The data used to scale along the features axis.
"""
if self.ignore_implicit_zeros:
warnings.warn("'ignore_implicit_zeros' takes effect only with"
" sparse matrix. This parameter has no effect.")
n_samples, n_features = X.shape
        # for a compatibility issue with numpy <= 1.8.X, references
        # need to be a list scaled between 0 and 100
references = (self.references_ * 100).tolist()
self.quantiles_ = []
for col in X.T:
if self.subsample < n_samples:
subsample_idx = random_state.choice(n_samples,
size=self.subsample,
replace=False)
col = col.take(subsample_idx, mode='clip')
self.quantiles_.append(np.percentile(col, references))
self.quantiles_ = np.transpose(self.quantiles_)
def _sparse_fit(self, X, random_state):
"""Compute percentiles for sparse matrices.
Parameters
----------
X : sparse matrix CSC, shape (n_samples, n_features)
The data used to scale along the features axis. The sparse matrix
needs to be nonnegative.
"""
n_samples, n_features = X.shape
        # for a compatibility issue with numpy <= 1.8.X, references
        # need to be a list scaled between 0 and 100
references = list(map(lambda x: x * 100, self.references_))
self.quantiles_ = []
for feature_idx in range(n_features):
column_nnz_data = X.data[X.indptr[feature_idx]:
X.indptr[feature_idx + 1]]
if len(column_nnz_data) > self.subsample:
column_subsample = (self.subsample * len(column_nnz_data) //
n_samples)
if self.ignore_implicit_zeros:
column_data = np.zeros(shape=column_subsample,
dtype=X.dtype)
else:
column_data = np.zeros(shape=self.subsample, dtype=X.dtype)
column_data[:column_subsample] = random_state.choice(
column_nnz_data, size=column_subsample, replace=False)
else:
if self.ignore_implicit_zeros:
column_data = np.zeros(shape=len(column_nnz_data),
dtype=X.dtype)
else:
column_data = np.zeros(shape=n_samples, dtype=X.dtype)
column_data[:len(column_nnz_data)] = column_nnz_data
if not column_data.size:
# if no nnz, an error will be raised for computing the
# quantiles. Force the quantiles to be zeros.
self.quantiles_.append([0] * len(references))
else:
self.quantiles_.append(
np.percentile(column_data, references))
self.quantiles_ = np.transpose(self.quantiles_)
def fit(self, X, y=None):
"""Compute the quantiles used for transforming.
Parameters
----------
X : ndarray or sparse matrix, shape (n_samples, n_features)
The data used to scale along the features axis. If a sparse
matrix is provided, it will be converted into a sparse
``csc_matrix``. Additionally, the sparse matrix needs to be
nonnegative if `ignore_implicit_zeros` is False.
Returns
-------
self : object
Returns self
"""
if self.n_quantiles <= 0:
raise ValueError("Invalid value for 'n_quantiles': %d. "
"The number of quantiles must be at least one."
% self.n_quantiles)
if self.subsample <= 0:
raise ValueError("Invalid value for 'subsample': %d. "
"The number of subsamples must be at least one."
% self.subsample)
if self.n_quantiles > self.subsample:
raise ValueError("The number of quantiles cannot be greater than"
" the number of samples used. Got {} quantiles"
" and {} samples.".format(self.n_quantiles,
self.subsample))
X = self._check_inputs(X)
rng = check_random_state(self.random_state)
# Create the quantiles of reference
self.references_ = np.linspace(0, 1, self.n_quantiles,
endpoint=True)
if sparse.issparse(X):
self._sparse_fit(X, rng)
else:
self._dense_fit(X, rng)
return self
def _transform_col(self, X_col, quantiles, inverse):
"""Private function to transform a single feature"""
if self.output_distribution == 'normal':
output_distribution = 'norm'
else:
output_distribution = self.output_distribution
output_distribution = getattr(stats, output_distribution)
# older version of scipy do not handle tuple as fill_value
# clipping the value before transform solve the issue
if not inverse:
lower_bound_x = quantiles[0]
upper_bound_x = quantiles[-1]
lower_bound_y = 0
upper_bound_y = 1
else:
lower_bound_x = 0
upper_bound_x = 1
lower_bound_y = quantiles[0]
upper_bound_y = quantiles[-1]
# for inverse transform, match a uniform PDF
X_col = output_distribution.cdf(X_col)
# find index for lower and higher bounds
lower_bounds_idx = (X_col - BOUNDS_THRESHOLD <
lower_bound_x)
upper_bounds_idx = (X_col + BOUNDS_THRESHOLD >
upper_bound_x)
if not inverse:
# Interpolate in one direction and in the other and take the
# mean. This is in case of repeated values in the features
# and hence repeated quantiles
#
            # If we don't do this, only one extreme of the duplicated pair is
            # used (the upper when interpolating in ascending order, the
            # lower in descending order). We take the mean of these two.
X_col = .5 * (np.interp(X_col, quantiles, self.references_)
- np.interp(-X_col, -quantiles[::-1],
-self.references_[::-1]))
else:
X_col = np.interp(X_col, self.references_, quantiles)
X_col[upper_bounds_idx] = upper_bound_y
X_col[lower_bounds_idx] = lower_bound_y
# for forward transform, match the output PDF
if not inverse:
X_col = output_distribution.ppf(X_col)
# find the value to clip the data to avoid mapping to
# infinity. Clip such that the inverse transform will be
# consistent
clip_min = output_distribution.ppf(BOUNDS_THRESHOLD -
np.spacing(1))
clip_max = output_distribution.ppf(1 - (BOUNDS_THRESHOLD -
np.spacing(1)))
X_col = np.clip(X_col, clip_min, clip_max)
return X_col
def _check_inputs(self, X, accept_sparse_negative=False):
"""Check inputs before fit and transform"""
X = check_array(X, accept_sparse='csc', copy=self.copy,
dtype=[np.float64, np.float32])
        # We only accept a non-negative sparse matrix when ignore_implicit_zeros
        # is False and we are calling fit or transform.
if (not accept_sparse_negative and not self.ignore_implicit_zeros and
(sparse.issparse(X) and np.any(X.data < 0))):
raise ValueError('QuantileTransformer only accepts non-negative'
' sparse matrices.')
# check the output PDF
if self.output_distribution not in ('normal', 'uniform'):
raise ValueError("'output_distribution' has to be either 'normal'"
" or 'uniform'. Got '{}' instead.".format(
self.output_distribution))
return X
def _check_is_fitted(self, X):
"""Check the inputs before transforming"""
check_is_fitted(self, 'quantiles_')
# check that the dimension of X are adequate with the fitted data
if X.shape[1] != self.quantiles_.shape[1]:
raise ValueError('X does not have the same number of features as'
' the previously fitted data. Got {} instead of'
' {}.'.format(X.shape[1],
self.quantiles_.shape[1]))
def _transform(self, X, inverse=False):
"""Forward and inverse transform.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
The data used to scale along the features axis.
inverse : bool, optional (default=False)
If False, apply forward transform. If True, apply
inverse transform.
Returns
-------
X : ndarray, shape (n_samples, n_features)
Projected data
"""
if sparse.issparse(X):
for feature_idx in range(X.shape[1]):
column_slice = slice(X.indptr[feature_idx],
X.indptr[feature_idx + 1])
X.data[column_slice] = self._transform_col(
X.data[column_slice], self.quantiles_[:, feature_idx],
inverse)
else:
for feature_idx in range(X.shape[1]):
X[:, feature_idx] = self._transform_col(
X[:, feature_idx], self.quantiles_[:, feature_idx],
inverse)
return X
def transform(self, X):
"""Feature-wise transformation of the data.
Parameters
----------
X : ndarray or sparse matrix, shape (n_samples, n_features)
The data used to scale along the features axis. If a sparse
matrix is provided, it will be converted into a sparse
``csc_matrix``. Additionally, the sparse matrix needs to be
nonnegative if `ignore_implicit_zeros` is False.
Returns
-------
Xt : ndarray or sparse matrix, shape (n_samples, n_features)
The projected data.
"""
X = self._check_inputs(X)
self._check_is_fitted(X)
return self._transform(X, inverse=False)
def inverse_transform(self, X):
"""Back-projection to the original space.
Parameters
----------
X : ndarray or sparse matrix, shape (n_samples, n_features)
The data used to scale along the features axis. If a sparse
matrix is provided, it will be converted into a sparse
``csc_matrix``. Additionally, the sparse matrix needs to be
nonnegative if `ignore_implicit_zeros` is False.
Returns
-------
Xt : ndarray or sparse matrix, shape (n_samples, n_features)
The projected data.
"""
X = self._check_inputs(X, accept_sparse_negative=True)
self._check_is_fitted(X)
return self._transform(X, inverse=True)
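# Editor's sketch of a typical round trip with QuantileTransformer (X as in the
# class docstring example above); inverse_transform approximately recovers the
# original values:
#     qt = QuantileTransformer(n_quantiles=10, output_distribution='normal',
#                              random_state=0)
#     Xt = qt.fit_transform(X)           # features mapped towards a normal distribution
#     X_back = qt.inverse_transform(Xt)  # close to X, up to interpolation error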
def quantile_transform(X, axis=0, n_quantiles=1000,
output_distribution='uniform',
ignore_implicit_zeros=False,
subsample=int(1e5),
random_state=None,
copy=False):
"""Transform features using quantiles information.
This method transforms the features to follow a uniform or a normal
distribution. Therefore, for a given feature, this transformation tends
to spread out the most frequent values. It also reduces the impact of
(marginal) outliers: this is therefore a robust preprocessing scheme.
The transformation is applied on each feature independently.
    The cumulative distribution function of a feature is used to project the
    original values. Feature values of new/unseen data that fall below
or above the fitted range will be mapped to the bounds of the output
distribution. Note that this transform is non-linear. It may distort linear
correlations between variables measured at the same scale but renders
variables measured at different scales more directly comparable.
Read more in the :ref:`User Guide <preprocessing_transformer>`.
Parameters
----------
X : array-like, sparse matrix
The data to transform.
axis : int, (default=0)
Axis used to compute the means and standard deviations along. If 0,
transform each feature, otherwise (if 1) transform each sample.
n_quantiles : int, optional (default=1000)
Number of quantiles to be computed. It corresponds to the number
        of landmarks used to discretize the cumulative distribution function.
output_distribution : str, optional (default='uniform')
Marginal distribution for the transformed data. The choices are
'uniform' (default) or 'normal'.
ignore_implicit_zeros : bool, optional (default=False)
Only applies to sparse matrices. If True, the sparse entries of the
matrix are discarded to compute the quantile statistics. If False,
these entries are treated as zeros.
subsample : int, optional (default=1e5)
Maximum number of samples used to estimate the quantiles for
computational efficiency. Note that the subsampling procedure may
differ for value-identical sparse and dense matrices.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by np.random. Note that this is used by subsampling and smoothing
noise.
    copy : boolean, optional, (default=False)
Set to False to perform inplace transformation and avoid a copy (if the
input is already a numpy array).
Attributes
----------
quantiles_ : ndarray, shape (n_quantiles, n_features)
        The values corresponding to the quantiles of reference.
references_ : ndarray, shape(n_quantiles, )
Quantiles of references.
Examples
--------
>>> import numpy as np
>>> from sklearn.preprocessing import quantile_transform
>>> rng = np.random.RandomState(0)
>>> X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0)
>>> quantile_transform(X, n_quantiles=10, random_state=0)
... # doctest: +ELLIPSIS
array([...])
See also
--------
QuantileTransformer : Performs quantile-based scaling using the
``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`).
scale : perform standardization that is faster, but less robust
to outliers.
robust_scale : perform robust standardization that removes the influence
of outliers but does not put outliers and inliers on the same scale.
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
n = QuantileTransformer(n_quantiles=n_quantiles,
output_distribution=output_distribution,
subsample=subsample,
ignore_implicit_zeros=ignore_implicit_zeros,
random_state=random_state,
copy=copy)
if axis == 0:
return n.fit_transform(X)
elif axis == 1:
return n.fit_transform(X.T).T
else:
raise ValueError("axis should be either equal to 0 or 1. Got"
" axis={}".format(axis))
|
mit
|
ahye/FYS2140-Resources
|
src/TUSL/infinitewell.py
|
1
|
3632
|
####################################################################################
###
### Program to find eigenenergies of the infinite square well.
###
####################################################################################
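# Method (editor's note): the dimensionless stationary Schroedinger equation
# psi''(z) = -(epsilon - W(z))*psi(z) is integrated with the three-point
# recurrence used in the loop below,
#     Psi[j+1] = (2 - dz**2*(epsilon - W[j+1]))*Psi[j] - Psi[j-1],
# and a trial epsilon is accepted as an eigenvalue when the integrated wave
# function (nearly) vanishes at the right wall. For the infinite well the
# analytic eigenvalues are epsilon_n = (n*pi)**2, i.e.
# E_n = n**2*pi**2*(hbar*c)**2/(2*m*c**2*a**2), which is what the analytic
# section at the bottom of this script computes.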
# Importing useful stuff
from numpy import *
from matplotlib.pyplot import *
import scipy.integrate
import numpy as np
import matplotlib.pyplot as plt
# Defining potential
def infinite_well(z):
W = zeros(len(z))
return W
# Constants and parameters
N = 500 # number of points
z = np.linspace(0,1,N) # position array
dz = z[1]-z[0] # step length
tol = 0.1 # tolerance level
W = infinite_well(z) # getting potential
a = 0.4 # width of well [nm]
hbarc = 197.3 # eV nm
mc2 = 0.511*10**6 # eV
Psi = np.zeros(N) # wave function
Psi[0] = 0   # initial condition (wave function must vanish at the endpoints)
Psi[1] = 0.1 # initial condition
epsilon = [] # list to be filled with epsilon
epsilon_anal = []   # analytic energy list to be filled
E_n = [] # analytical energies
E = [] # numerical energies
lastpsi = [] # value of last psi
Psi_list = [] # list to store the best Psi
epsilon_trial = 9 # trial eigenvalue
# For plotting numerical solutions with index
number = 0 # in use when labelling wavefunctions in plot
colors = 'cmygbcmygb' # for different colors in plot
color_index = 0
# Search for correct eigenvalue
while epsilon_trial < 160:
# Calculating wave function
for j in range(1,N-1):
Psi[j+1] = (2 - dz**2*(epsilon_trial-W[j+1]))*Psi[j] - Psi[j-1]
# Normalizing
Psi /= sqrt(scipy.integrate.simps(abs(Psi)**2,dx=1e-3))
# Store value of last element in Psi
Psi_end = abs(Psi[-1])
# Check if last element is within tolerance
if Psi_end < tol:
epsilon.append(epsilon_trial)
lastpsi.append(Psi_end)
Psi_list.append(list(Psi)) # add as list to make it behave well
# Only keep those epsilon and Psi giving minimal value of Psi[-1]
if len(lastpsi) > 1 and (epsilon[-1] - epsilon[-2]) < 2:
if lastpsi[-1] < lastpsi[-2]:
lastpsi.remove(lastpsi[-2])
epsilon.remove(epsilon[-2])
Psi_list.remove(Psi_list[-2])
if lastpsi[-1] > lastpsi[-2]:
lastpsi.remove(lastpsi[-1])
epsilon.remove(epsilon[-1])
Psi_list.remove(Psi_list[-1])
# Update trial eigenvalue
epsilon_trial += 0.4
# Physical energies
for i in range(0,len(epsilon)):
eps = epsilon[i]
E_phys = eps*hbarc**2/(2*mc2*a**2)
E.append(E_phys)
# ANALYTIC SOLUTIONS
num = [1,2,3,4]
# Determining energy and wavefunction:
for n in num:
E_physical = n**2*hbarc**2*pi**2/(2*mc2*a**2)
E_n.append(E_physical)
Psi_anal = sin(pi*z*n)
# Normalizing:
Psi_anal /= sqrt(scipy.integrate.simps(abs(Psi_anal)**2,dx=1e-3))
plot(z,Psi_anal,'k--')
# Print lists of energies
print('-------------------------------------------------------------------------------------------------')
print('Energy levels of infinite potential well of width %.2f nm:' % a)
print('-------------------------------------------------------------------------------------------------')
print('Epsilon: ', epsilon)
print('Numerical energies E [eV]: ', E)
print('Analytical energies En [eV]: ', E_n)
print('-------------------------------------------------------------------------------------------------')
# Plotting
for i in range(len(Psi_list)):
Legend = '$\psi_%d$' % (number)
plot(z,Psi_list[i],color=colors[color_index],label=Legend)
number += 1
color_index += 1
# Axes and title
plt.title('$Infinite \ well$',size=20)
plt.xlabel('$z = x/a$',size=18)
plt.ylabel('$\psi(z)$',size=18)
plot([0,0],[0,0],'k--',label='$Analytical$')
plt.legend(loc='best')
show()
|
mit
|
PECOS-KNL/kernels
|
knl-paper/matplotlib_scripts/DNS_Parallelism.py
|
1
|
1729
|
#!/bin/py
# This Python script plots DNS elapsed time against the number of MPI tasks
# for several MPI x OpenMP decompositions.
import numpy as np
import scipy
import h5py as h5
import math
import glob
import pylab
import sys
import shutil
import matplotlib.pyplot as plt
from matplotlib import rcParams
from matplotlib.ticker import MaxNLocator
from numpy import matrix
from numpy import linalg
rcParams['font.family'] = 'serif'
rcParams['font.serif'] = ['Computer Modern Roman']
rcParams['text.usetex'] = True
# rcParams['xtick.major.size']=20
# rcParams['ytick.major.size']=20
rcParams['xtick.minor.size']=0
# rcParams['ytick.minor.size']=10
rcParams['xtick.labelsize']=10
# rcParams['ytick.labelsize']=30
# rcParams['contour.negative_linestyle'] = 'solid'
Cores_256=[1, 2, 4, 8, 16, 32, 64, 128, 256]
Cores_128=[1, 2, 4, 8, 16, 32, 64, 128]
Cores_64=[1, 2, 4, 8, 16, 32, 64]
CPU_256=[86.0533051, 35.8494295, 15.6884386, 8.7995897, 3.4137126, 2.0844045, 1.6152648, 1.6390807, 1.4453344]
CPU_128=[78.8591899, 31.5992704, 12.5380067, 8.7481251, 3.609148, 2.0546135, 1.9851383, 2.0864243]
CPU_64=[74.4944213, 27.9063423, 10.7003363, 5.691174, 2.9290911, 2.7930199, 2.6874417]
fig=plt.figure(1,figsize=(6,4))
plt.plot(Cores_64,CPU_64,'b-o',label=r'MPI$\times$OpenMP=64')
plt.plot(Cores_128,CPU_128,'r-s',label=r'MPI$\times$OpenMP=128')
plt.plot(Cores_256,CPU_256,'g-^',label=r'MPI$\times$OpenMP=256')
plt.xscale('log')
plt.xlim([0.5,512])
plt.yscale('log')
plt.legend(frameon=False,numpoints=1,fontsize=12)
plt.xticks(Cores_256)
xlabels = ["1","2","4", "8", "16", "32", "64","128","256"]
fig.gca().set_xticklabels(xlabels)
plt.xlabel('MPI tasks')
plt.ylabel('Elapsed time (sec)')
plt.savefig('DNS_Parallelism.pdf',bbox_inches='tight')
plt.close()
|
lgpl-2.1
|
wasade/networkx
|
doc/make_examples_rst.py
|
35
|
5461
|
"""
generate the rst files for the examples by iterating over the networkx examples
"""
# This code was developed from the Matplotlib gen_rst.py module
# and is distributed with the same license as Matplotlib
from __future__ import print_function
import os, glob
import os
import re
import sys
#fileList = []
#rootdir = '../../examples'
def out_of_date(original, derived):
"""
Returns True if derivative is out-of-date wrt original,
both of which are full file paths.
    TODO: this check isn't adequate in some cases. E.g., if we discover
    a bug when building the examples, the original and derived
    will be unchanged but we still want to force a rebuild. We can
manually remove from _static, but we may need another solution
"""
return (not os.path.exists(derived) or
os.stat(derived).st_mtime < os.stat(original).st_mtime)
def main(exampledir,sourcedir):
noplot_regex = re.compile(r"#\s*-\*-\s*noplot\s*-\*-")
datad = {}
for root, subFolders, files in os.walk(exampledir):
for fname in files:
if ( fname.startswith('.') or fname.startswith('#') or fname.startswith('_') or
fname.find('.svn')>=0 or not fname.endswith('.py') ):
continue
fullpath = os.path.join(root,fname)
            contents = open(fullpath).read()
# indent
relpath = os.path.split(root)[-1]
datad.setdefault(relpath, []).append((fullpath, fname, contents))
    subdirs = sorted(datad.keys())
output_dir=os.path.join(sourcedir,'examples')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
    fhindex = open(os.path.join(sourcedir,'examples','index.rst'), 'w')
fhindex.write("""\
.. _examples-index:
*****************
NetworkX Examples
*****************
.. only:: html
:Release: |version|
:Date: |today|
.. toctree::
:maxdepth: 2
""")
for subdir in subdirs:
output_dir= os.path.join(sourcedir,'examples',subdir)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
static_dir = os.path.join(sourcedir, 'static', 'examples')
if not os.path.exists(static_dir):
os.makedirs(static_dir)
subdirIndexFile = os.path.join(subdir, 'index.rst')
        fhsubdirIndex = open(os.path.join(output_dir,'index.rst'), 'w')
fhindex.write(' %s\n\n'%subdirIndexFile)
#thumbdir = '../_static/plot_directive/mpl_examples/%s/thumbnails/'%subdir
#for thumbname in glob.glob(os.path.join(thumbdir,'*.png')):
# fhindex.write(' %s\n'%thumbname)
fhsubdirIndex.write("""\
.. _%s-examples-index:
##############################################
%s
##############################################
.. only:: html
:Release: |version|
:Date: |today|
.. toctree::
:maxdepth: 1
"""%(subdir, subdir.title()))
data = datad[subdir]
data.sort()
#parts = os.path.split(static_dir)
#thumb_dir = ('../'*(len(parts)-1)) + os.path.join(static_dir, 'thumbnails')
for fullpath, fname, contents in data:
basename, ext = os.path.splitext(fname)
static_file = os.path.join(static_dir, fname)
#thumbfile = os.path.join(thumb_dir, '%s.png'%basename)
#print ' static_dir=%s, basename=%s, fullpath=%s, fname=%s, thumb_dir=%s, thumbfile=%s'%(static_dir, basename, fullpath, fname, thumb_dir, thumbfile)
rstfile = '%s.rst'%basename
outfile = os.path.join(output_dir, rstfile)
fhsubdirIndex.write(' %s\n'%rstfile)
if (not out_of_date(fullpath, static_file) and
not out_of_date(fullpath, outfile)):
continue
print('%s/%s' % (subdir,fname))
            fhstatic = open(static_file, 'w')
fhstatic.write(contents)
fhstatic.close()
            fh = open(outfile, 'w')
fh.write('.. _%s-%s:\n\n'%(subdir, basename))
base=fname.partition('.')[0]
title = '%s'%(base.replace('_',' ').title())
#title = '<img src=%s> %s example code: %s'%(thumbfile, subdir, fname)
fh.write(title + '\n')
fh.write('='*len(title) + '\n\n')
pngname=base+".png"
png=os.path.join(static_dir,pngname)
linkname = os.path.join('..', '..', 'static', 'examples')
if os.path.exists(png):
fh.write('.. image:: %s \n\n'%os.path.join(linkname,pngname))
linkname = os.path.join('..', '..', '_static', 'examples')
fh.write("[`source code <%s>`_]\n\n::\n\n" % os.path.join(linkname,fname))
# indent the contents
contents = '\n'.join([' %s'%row.rstrip() for row in contents.split('\n')])
fh.write(contents)
# fh.write('\n\nKeywords: python, matplotlib, pylab, example, codex (see :ref:`how-to-search-examples`)')
fh.close()
fhsubdirIndex.close()
fhindex.close()
if __name__ == '__main__':
import sys
try:
arg0,arg1,arg2=sys.argv[:3]
except:
arg0=sys.argv[0]
print("""
Usage: %s exampledir sourcedir
exampledir: a directory containing the python code for the examples.
sourcedir: a directory to put the generated documentation source for these examples.
""" % (arg0))
else:
main(arg1,arg2)
|
bsd-3-clause
|
seaotterman/tensorflow
|
tensorflow/examples/learn/wide_n_deep_tutorial.py
|
29
|
8985
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example code for TensorFlow Wide & Deep Tutorial using TF.Learn API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tempfile
from six.moves import urllib
import pandas as pd
import tensorflow as tf
COLUMNS = ["age", "workclass", "fnlwgt", "education", "education_num",
"marital_status", "occupation", "relationship", "race", "gender",
"capital_gain", "capital_loss", "hours_per_week", "native_country",
"income_bracket"]
LABEL_COLUMN = "label"
CATEGORICAL_COLUMNS = ["workclass", "education", "marital_status", "occupation",
"relationship", "race", "gender", "native_country"]
CONTINUOUS_COLUMNS = ["age", "education_num", "capital_gain", "capital_loss",
"hours_per_week"]
def maybe_download(train_data, test_data):
"""Maybe downloads training data and returns train and test file names."""
if train_data:
train_file_name = train_data
else:
train_file = tempfile.NamedTemporaryFile(delete=False)
urllib.request.urlretrieve("http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.data", train_file.name) # pylint: disable=line-too-long
train_file_name = train_file.name
train_file.close()
print("Training data is downloaded to %s" % train_file_name)
if test_data:
test_file_name = test_data
else:
test_file = tempfile.NamedTemporaryFile(delete=False)
urllib.request.urlretrieve("http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.test", test_file.name) # pylint: disable=line-too-long
test_file_name = test_file.name
test_file.close()
print("Test data is downloaded to %s" % test_file_name)
return train_file_name, test_file_name
def build_estimator(model_dir, model_type):
"""Build an estimator."""
# Sparse base columns.
gender = tf.contrib.layers.sparse_column_with_keys(column_name="gender",
keys=["female", "male"])
education = tf.contrib.layers.sparse_column_with_hash_bucket(
"education", hash_bucket_size=1000)
relationship = tf.contrib.layers.sparse_column_with_hash_bucket(
"relationship", hash_bucket_size=100)
workclass = tf.contrib.layers.sparse_column_with_hash_bucket(
"workclass", hash_bucket_size=100)
occupation = tf.contrib.layers.sparse_column_with_hash_bucket(
"occupation", hash_bucket_size=1000)
native_country = tf.contrib.layers.sparse_column_with_hash_bucket(
"native_country", hash_bucket_size=1000)
# Continuous base columns.
age = tf.contrib.layers.real_valued_column("age")
education_num = tf.contrib.layers.real_valued_column("education_num")
capital_gain = tf.contrib.layers.real_valued_column("capital_gain")
capital_loss = tf.contrib.layers.real_valued_column("capital_loss")
hours_per_week = tf.contrib.layers.real_valued_column("hours_per_week")
# Transformations.
age_buckets = tf.contrib.layers.bucketized_column(age,
boundaries=[
18, 25, 30, 35, 40, 45,
50, 55, 60, 65
])
# Wide columns and deep columns.
wide_columns = [gender, native_country, education, occupation, workclass,
relationship, age_buckets,
tf.contrib.layers.crossed_column([education, occupation],
hash_bucket_size=int(1e4)),
tf.contrib.layers.crossed_column(
[age_buckets, education, occupation],
hash_bucket_size=int(1e6)),
tf.contrib.layers.crossed_column([native_country, occupation],
hash_bucket_size=int(1e4))]
deep_columns = [
tf.contrib.layers.embedding_column(workclass, dimension=8),
tf.contrib.layers.embedding_column(education, dimension=8),
tf.contrib.layers.embedding_column(gender, dimension=8),
tf.contrib.layers.embedding_column(relationship, dimension=8),
tf.contrib.layers.embedding_column(native_country,
dimension=8),
tf.contrib.layers.embedding_column(occupation, dimension=8),
age,
education_num,
capital_gain,
capital_loss,
hours_per_week,
]
if model_type == "wide":
m = tf.contrib.learn.LinearClassifier(model_dir=model_dir,
feature_columns=wide_columns)
elif model_type == "deep":
m = tf.contrib.learn.DNNClassifier(model_dir=model_dir,
feature_columns=deep_columns,
hidden_units=[100, 50])
else:
m = tf.contrib.learn.DNNLinearCombinedClassifier(
model_dir=model_dir,
linear_feature_columns=wide_columns,
dnn_feature_columns=deep_columns,
dnn_hidden_units=[100, 50],
fix_global_step_increment_bug=True)
return m
def input_fn(df):
"""Input builder function."""
# Creates a dictionary mapping from each continuous feature column name (k) to
# the values of that column stored in a constant Tensor.
continuous_cols = {k: tf.constant(df[k].values) for k in CONTINUOUS_COLUMNS}
# Creates a dictionary mapping from each categorical feature column name (k)
# to the values of that column stored in a tf.SparseTensor.
categorical_cols = {
k: tf.SparseTensor(
indices=[[i, 0] for i in range(df[k].size)],
values=df[k].values,
dense_shape=[df[k].size, 1])
for k in CATEGORICAL_COLUMNS}
# Merges the two dictionaries into one.
feature_cols = dict(continuous_cols)
feature_cols.update(categorical_cols)
# Converts the label column into a constant Tensor.
label = tf.constant(df[LABEL_COLUMN].values)
# Returns the feature columns and the label.
return feature_cols, label
def train_and_eval(model_dir, model_type, train_steps, train_data, test_data):
"""Train and evaluate the model."""
train_file_name, test_file_name = maybe_download(train_data, test_data)
df_train = pd.read_csv(
tf.gfile.Open(train_file_name),
names=COLUMNS,
skipinitialspace=True,
engine="python")
df_test = pd.read_csv(
tf.gfile.Open(test_file_name),
names=COLUMNS,
skipinitialspace=True,
skiprows=1,
engine="python")
# remove NaN elements
df_train = df_train.dropna(how='any', axis=0)
df_test = df_test.dropna(how='any', axis=0)
df_train[LABEL_COLUMN] = (
df_train["income_bracket"].apply(lambda x: ">50K" in x)).astype(int)
df_test[LABEL_COLUMN] = (
df_test["income_bracket"].apply(lambda x: ">50K" in x)).astype(int)
model_dir = tempfile.mkdtemp() if not model_dir else model_dir
print("model directory = %s" % model_dir)
m = build_estimator(model_dir, model_type)
m.fit(input_fn=lambda: input_fn(df_train), steps=train_steps)
results = m.evaluate(input_fn=lambda: input_fn(df_test), steps=1)
for key in sorted(results):
print("%s: %s" % (key, results[key]))
FLAGS = None
def main(_):
train_and_eval(FLAGS.model_dir, FLAGS.model_type, FLAGS.train_steps,
FLAGS.train_data, FLAGS.test_data)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--model_dir",
type=str,
default="",
help="Base directory for output models."
)
parser.add_argument(
"--model_type",
type=str,
default="wide_n_deep",
help="Valid model types: {'wide', 'deep', 'wide_n_deep'}."
)
parser.add_argument(
"--train_steps",
type=int,
default=200,
help="Number of training steps."
)
parser.add_argument(
"--train_data",
type=str,
default="",
help="Path to the training data."
)
parser.add_argument(
"--test_data",
type=str,
default="",
help="Path to the test data."
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
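# Example invocation (editor's sketch, using the flags defined above). The census
# train/test files are downloaded automatically when --train_data/--test_data are
# left empty (see maybe_download):
#     python wide_n_deep_tutorial.py --model_type=wide_n_deep --train_steps=200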
|
apache-2.0
|
rabimba/ns-3.18
|
src/core/examples/sample-rng-plot.py
|
188
|
1246
|
# -*- Mode:Python; -*-
# /*
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License version 2 as
# * published by the Free Software Foundation
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# */
# Demonstrate use of ns-3 as a random number generator integrated with
# plotting tools; adapted from Gustavo Carneiro's ns-3 tutorial
import numpy as np
import matplotlib.pyplot as plt
import ns.core
# mu, var = 100, 225
rng = ns.core.NormalVariable(100.0, 225.0)
x = [rng.GetValue() for t in range(10000)]
# the histogram of the data
n, bins, patches = plt.hist(x, 50, normed=1, facecolor='g', alpha=0.75)
plt.title('ns-3 histogram')
plt.text(60, .025, r'$\mu=100,\ \sigma=15$')
plt.axis([40, 160, 0, 0.03])
plt.grid(True)
plt.show()
|
gpl-2.0
|
codingpoets/tigl
|
misc/math-scripts/ms_optAlgs.py
|
2
|
7709
|
#
# Copyright (C) 2007-2013 German Aerospace Center (DLR/SC)
#
# Created: 2012-12-17 Martin Siggel <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @file ms_optAlgs.py
# @brief Implementation of some basic optimization algorithms
#
#import pkg_resources
#pkg_resources.require("matplotlib")
from numpy import *
import matplotlib.pyplot as plt
def mb_backtrackingLineSearch(objFunc,objFuncValue,x,dx,direc):
# 2010 [email protected]
# backtracking line search using armijo criterion
#
# objFunc - handle for objective function
# objFuncValue - current objective function value @ x
# x - x
# dx - dx
# dir - search direction
#
# example : mb_backtrackingLineSearch(objFunc,objFuncValue,x,dx,dir)
alphaMax = 1.; # this is the maximum step length
alpha = alphaMax;
fac = 1./2.; # < 1 reduction factor of alpha
c_1 = 1e-1;
while objFunc(x+alpha*direc) > objFuncValue + c_1*alpha*dot(direc,dx):
alpha = fac*alpha;
if alpha < 10*spacing(1):
raise NameError('Error in Line search - alpha close to working precision');
return alpha
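# Editor's note: the loop above enforces the Armijo (sufficient decrease)
# condition
#     objFunc(x + alpha*direc) <= objFunc(x) + c_1*alpha*dot(direc, dx)
# halving alpha (fac = 1/2) until it holds, starting from alphaMax = 1.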
def mb_quadraticApproximationLineSearch(objFunc,objFuncValue,x,dx,direc,alpha_in):
# 2010 [email protected]
# line search using a quadratic approximation we are setting to minimum of
# the quadratic function phi(t) = a*t^2+b*t+c which is determined by three
# points: objFunc(x), gradFunc'(x) and objFunc(x+alpha*dir)
#
# objFunc - handle for objective function
# objFuncValue - current objective function value @ x
# x - x
# dx - dx
# dir - search direction
#
# example : mb_backtrackingLineSearch(objFunc,objFuncValue,x,dx,dir)
alpha = alpha_in;
c = objFuncValue;
b = dot(direc,dx)
ofprime = objFunc(x+alpha*direc)
a = (ofprime - b*alpha - c) / alpha**2;
alpha = - b / (2*a);
ofprime = objFunc(x+alpha*direc)
numOfQuadApprox = 0;
c_1 = 1e-1;
# check if armijo criterion fulfilled
while ofprime > objFuncValue + c_1*alpha*dot(direc,dx):
numOfQuadApprox = numOfQuadApprox + 1;
a = ( ofprime - b*alpha - c) / alpha**2;
alpha = - b / (2*a);
ofprime =objFunc(x+alpha*direc)
if numOfQuadApprox > 10:
            print('Error in Line search - quadratic approximation failed more than 10 times\n Starting backtracking line search\n')
return mb_backtrackingLineSearch(objFunc,objFuncValue,x,dx,direc)
return alpha
def ms_optSteepestDescent(objFunc, gradFunc, x_start):
return ms_optCG(objFunc, gradFunc, x_start, 'gradient')
# conjugate gradient, fletcher reeves
def ms_optCG(objFunc, gradFunc, x_start, type='fr'):
x = x_start;
oldx = x;
objFuncValue = objFunc(x);
oldObjFuncValue = objFuncValue * 2.;
alpha = 1.
dx = gradFunc(x);
direc = -dx
# iterate
iteration = 0;
numOfIter = 100;
prec = 1e-7;
# convergence if gradient smaller than prec, change in objective function
# smaller than prec or maximum number of iteration reached...
while (iteration < numOfIter) and (abs((oldObjFuncValue-objFuncValue)/max(1,objFuncValue))>prec) and (linalg.norm(dx)>prec):
# iteration counter
iteration = iteration + 1;
alpha = mb_quadraticApproximationLineSearch(objFunc,objFuncValue,x,dx,direc,alpha);
#alpha = mb_backtrackingLineSearch(objFunc,objFuncValue,x,dx,direc);
# update x
x = x + alpha*direc;
plt.plot([x[0], oldx[0]], [x[1], oldx[1]],'r');
oldx = x
# update obj func values
oldObjFuncValue = objFuncValue;
objFuncValue = objFunc(x);
# update dx
oldDx = dx;
dx = gradFunc(x);
        # Fletcher-Reeves
if type == 'fr':
beta = dot(dx,dx)/dot(oldDx, oldDx)
        # Hestenes-Stiefel
elif type == 'hs':
beta = dot(dx, dx-oldDx)/dot(direc, dx-oldDx)
        # Polak-Ribiere
elif type == 'pr':
beta = dot(dx, dx-oldDx)/dot(oldDx, oldDx)
else:
beta = 0.
# update search direction
direc = -dx + direc * beta
        print('Iter {}: of = {} @ x = {} alpha = {}.'.format(iteration, objFuncValue, x, alpha))
return x
def ms_optNewton(objFunc, gradFunc, hessFunc, x_start):
x = x_start;
oldx = x;
objFuncValue = objFunc(x);
oldObjFuncValue = objFuncValue * 2. + 1;
dx = gradFunc(x);
hess = hessFunc(x);
alpha = 1.
direc = -linalg.solve(hess, dx)
# iterate
iteration = 0;
numOfIter = 100;
prec = 1e-7;
# convergence if gradient smaller than prec, change in objective function
# smaller than prec or maximum number of iteration reached...
while (iteration < numOfIter) and (abs((oldObjFuncValue-objFuncValue)/max(1,objFuncValue))>prec) and (linalg.norm(dx)>prec):
# iteration counter
iteration = iteration + 1;
if dot(dx,direc) >= 0:
            print('Warning: Hessian not positive definite. Going along the gradient instead.')
direc = -dx
alpha = mb_backtrackingLineSearch(objFunc,objFuncValue,x,dx,direc);
# update x
x = x + alpha*direc;
plt.plot([x[0], oldx[0]], [x[1], oldx[1]],'r');
oldx = x
# update obj func values
oldObjFuncValue = objFuncValue;
objFuncValue = objFunc(x);
# update dx
dx = gradFunc(x);
hess = hessFunc(x);
# update search direction
direc = -linalg.solve(hess, dx)
        print('Iter {}: of = {} @ x = {} alpha = {}.'.format(iteration, objFuncValue, x, alpha))
return x
def ms_optQuasiNewton(objFunc, gradFunc, x_start, type='bfgs'):
x = x_start;
oldx = x;
objFuncValue = objFunc(x);
oldObjFuncValue = objFuncValue * 2.;
dx = gradFunc(x);
hess = eye(x.size);
direc = -linalg.solve(hess, dx)
# iterate
iteration = 0;
numOfIter = 100;
prec = 1e-7;
alpha = 1.
# convergence if gradient smaller than prec, change in objective function
# smaller than prec or maximum number of iteration reached...
while (iteration < numOfIter) and (abs((oldObjFuncValue-objFuncValue)/max(1,objFuncValue))>prec) and (linalg.norm(dx)>prec):
# iteration counter
iteration = iteration + 1;
alpha = mb_backtrackingLineSearch(objFunc,objFuncValue,x,dx,direc);
#alpha = mb_quadraticApproximationLineSearch(objFunc,objFuncValue,x,dx,direc,alpha);
# update x
p = alpha*direc
x = x + p;
plt.plot([x[0], oldx[0]], [x[1], oldx[1]],'r');
oldx = x
# update obj func values
oldObjFuncValue = objFuncValue;
objFuncValue = objFunc(x);
# update dx
oldDx = dx;
dx = gradFunc(x);
# calculate difference of gradients
q = dx-oldDx;
# update hessian
if type=='bfgs':
hess = hess + outer(q,q)/dot(q,p) - outer(dot(hess,p), dot(hess,p))/dot(p, dot(hess, p))
elif type=='sr1':
hess = hess + outer(q-dot(hess,p), q-dot(hess,p))/dot(q-dot(hess,p),p);
# update search direction
direc = -linalg.solve(hess, dx)
print 'Iter {}: of = {} @ x = {} alpha = {}.'.format(iteration, objFuncValue, x, alpha)
return x
def ms_numGrad(objfun, x, h):
fcur = objfun(x)
dx = x * 0
for i in range(0,size(x)):
xnew = copy(x)
xnew[i] = x[i] + h
dx[i] = (objfun(xnew) - fcur)/h
return dx
def ms_numHess(objGrad, x, h):
curdx = objGrad(x)
H = zeros((size(x), size(x)))
for i in range(0,size(x)):
xnew = copy(x)
xnew[i] = x[i] + h
H[i,:] = (objGrad(xnew) - curdx)/h
return H
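# ----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): minimise a toy
# two-dimensional quadratic with the quasi-Newton routine above, using the
# numerical-gradient helper. The objective and starting point are hypothetical;
# the sketch assumes this file's own imports (numpy names available unqualified
# and matplotlib.pyplot as plt), as used throughout the functions above.
if __name__ == "__main__":
    toy_objective = lambda z: 0.5 * (z[0] - 1.) ** 2 + 2. * (z[1] + 0.5) ** 2
    # approximate the gradient numerically instead of deriving it by hand
    toy_gradient = lambda z: ms_numGrad(toy_objective, z, 1e-6)
    x_opt = ms_optQuasiNewton(toy_objective, toy_gradient, array([3., 4.]), type='bfgs')
    print 'Quasi-Newton minimum (expected near [1., -0.5]): {}'.format(x_opt)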
|
apache-2.0
|
SuLab/scheduled-bots
|
scheduled_bots/disease_ontology/mesh_changes.py
|
1
|
9994
|
"""
Instead of the change detector looking at each revision of an item,
the goal here is to compare the current state of an item's key/value pairs (those defined below) with another
set of data (a reference dataset, from an owl/obographs json file).
Steps:
- Run a sparql query against wikidata to get all mesh IDs on all items with a DOID, looking for a mapping relation type (P4390)
if available. If no mapping relation type is specified, default to oboInOwl:hasDbXref.
- Run a sparql query against the latest doid.owl release file looking for mesh terms using the relations:
{skos:closeMatch skos:narrowMatch skos:broadMatch skos:relatedMatch skos:exactMatch oboInOwl:hasDbXref}
- Compare the mesh IDs in wikidata against what is in DO. Returns a table listing all of the differences.
"""
import subprocess
from collections import defaultdict
import pandas as pd
import requests
from rdflib import Graph
from rdflib import URIRef, Literal
from tqdm import tqdm
from wikidataintegrator.wdi_core import WDItemEngine
from wikidataintegrator.wdi_helpers import id_mapper
BIOPORTAL_KEY = "a1ac23bb-23cb-44cf-bf5e-bcdd7446ef37"
DOID_QID = id_mapper("P699")
DO_OWL_PATH = "doid.owl"
QID_MAP_REL_TYPE_CURIE = {'Q39893184': 'skos:closeMatch',
'Q39893967': 'skos:narrowMatch',
'Q39894595': 'skos:broadMatch',
'Q39894604': 'skos:relatedMatch',
'Q39893449': 'skos:exactMatch'}
QID_MAP_REL_TYPE_CURIE = defaultdict(lambda: "oboInOwl:hasDbXref", QID_MAP_REL_TYPE_CURIE)
"""
MAP_REL_TYPE_QID = {'http://www.w3.org/2004/02/skos/core#broadMatch': 'Q39894595',
'http://www.w3.org/2004/02/skos/core#closeMatch': 'Q39893184',
'http://www.w3.org/2004/02/skos/core#exactMatch': 'Q39893449',
'http://www.w3.org/2004/02/skos/core#narrowMatch': 'Q39893967',
'http://www.w3.org/2004/02/skos/core#relatedMatch': 'Q39894604'}
"""
PREFIX_TO_CURIE = {
'http://www.w3.org/2004/02/skos/core#': 'skos',
'http://www.geneontology.org/formats/oboInOwl#': 'oboInOwl'
}
purl_to_curie = lambda s: s.replace("http://purl.obolibrary.org/obo/", "").replace("_", ":")
curie_to_purl = lambda s: "http://purl.obolibrary.org/obo/" + s.replace(":", "_")
def get_wikidata_do_mesh():
# get mesh xrefs, including the mapping relation type
# e.g. {'DOID:0050856': {'skos:broadMatch_MESH:D019958'}}
query = """
select ?item ?doid ?mesh ?mesh_rt where {
?item wdt:P699 ?doid .
?item p:P486 ?mesh_s .
?mesh_s ps:P486 ?mesh .
optional { ?mesh_s pq:P4390 ?mesh_rt }
}"""
results = WDItemEngine.execute_sparql_query(query)['results']['bindings']
results = [{k: v['value'].replace("http://www.wikidata.org/entity/", "") for k, v in item.items()} for item in
results]
df = pd.DataFrame(results)
df['mesh_rt'] = df.apply(lambda row: QID_MAP_REL_TYPE_CURIE[row.mesh_rt] + "_MESH:" + row.mesh, axis=1)
df['_item'] = df['item']
r = df.groupby("_item").aggregate(lambda x: set(y for y in x if not pd.isnull(y))).to_dict("records")
wd = {list(x['doid'])[0]: x for x in r}
wd = {k: v['mesh_rt'] for k, v in wd.items()}
wd = {k: v for k, v in wd.items() if v}
return wd
def getConceptLabel(qid):
return getConceptLabels((qid,))[qid]
def getConceptLabels(qids):
qids = "|".join({qid.replace("wd:", "") if qid.startswith("wd:") else qid for qid in qids})
params = {'action': 'wbgetentities', 'ids': qids, 'languages': 'en', 'format': 'json', 'props': 'labels'}
r = requests.get("https://www.wikidata.org/w/api.php", params=params)
print(r.url)
r.raise_for_status()
wd = r.json()['entities']
return {k: v['labels']['en']['value'] for k, v in wd.items()}
def get_do_metadata():
# from the do owl file, get do labels, descriptions
g = Graph()
g.parse(DO_OWL_PATH)
disease_ontology = Literal('disease_ontology', datatype=URIRef('http://www.w3.org/2001/XMLSchema#string'))
query = """
SELECT * WHERE {
?id oboInOwl:hasOBONamespace ?disease_ontology .
?id rdfs:label ?label .
OPTIONAL {?id obo:IAO_0000115 ?descr}
FILTER NOT EXISTS {?id owl:deprecated ?dep}
}
"""
rows = g.query(query, initBindings={'disease_ontology': disease_ontology})
res = [{str(k): str(v) for k, v in binding.items()} for binding in rows.bindings]
df = pd.DataFrame(res)
df.drop_duplicates(subset=['id'], inplace=True)
df.fillna("", inplace=True)
do = df.to_dict("records")
do = {purl_to_curie(x['id']): x for x in do}
return do
def parse_do_owl():
"""
Parse xrefs and skos matches from owl file.
Returns dict. key: doid curie, value: set of xrefs in the format: relation type + "_" + xref. (ex: oboInOwl:hasDbXref_MESH:D007690)
:return:
"""
g = Graph()
g.parse(DO_OWL_PATH)
disease_ontology = Literal('disease_ontology', datatype=URIRef('http://www.w3.org/2001/XMLSchema#string'))
true = Literal('true', datatype=URIRef('http://www.w3.org/2001/XMLSchema#boolean'))
query = """
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
SELECT ?id ?rel_type ?xref WHERE {
?id oboInOwl:hasOBONamespace ?disease_ontology .
OPTIONAL {
values ?rel_type {skos:closeMatch skos:narrowMatch skos:broadMatch skos:relatedMatch skos:exactMatch oboInOwl:hasDbXref}
?id ?rel_type ?xref .
}
FILTER NOT EXISTS {?id owl:deprecated ?true}
}
"""
rows = g.query(query, initBindings={'disease_ontology': disease_ontology, 'true': true})
res = [{str(k): str(v) for k, v in binding.items()} for binding in rows.bindings]
df = pd.DataFrame(res)
df["doid"] = df["id"]
df.dropna(subset=['xref'], inplace=True)
df.rel_type = df.rel_type.apply(
lambda x: x.replace(x.split("#")[0] + "#", PREFIX_TO_CURIE[x.split("#")[0] + "#"] + ":"))
df.xref = df.apply(lambda row: row.rel_type + "_" + row.xref, axis=1)
r = df.groupby("id").aggregate(lambda x: set(y for y in x if not pd.isnull(y))).to_dict("records")
do = {purl_to_curie(list(x['doid'])[0]): x for x in r}
do = {k: v['xref'] for k, v in do.items()}
# filter mesh xrefs only
do = {k: set([x for x in v if "MESH:" in x]) for k, v in do.items()}
do = {k: v for k, v in do.items() if v}
# do['DOID:5570']
return do
def compare(wd, do):
# for each DO item, does wd have everything it should? What else does it have?
wd = defaultdict(set, wd)
do = defaultdict(set, do)
leftover_in_wd = dict()
leftover_in_do = dict()
doids = set(wd.keys()) | set(do.keys())
missing = []
for doid in doids:
leftover_in_wd[doid] = set()
leftover_in_do[doid] = set()
if doid not in wd:
missing.append(doid)
continue
leftover_in_wd[doid] = wd[doid] - do[doid]
leftover_in_do[doid] = do[doid] - wd[doid]
leftover_in_wd = {k: v for k, v in leftover_in_wd.items() if v}
leftover_in_do = {k: v for k, v in leftover_in_do.items() if v}
print("Items missing in wikidata: {}".format(missing))
return leftover_in_wd, leftover_in_do
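# Illustrative sketch of what compare() returns (hypothetical DOIDs and MESH IDs,
# not taken from the live data):
#   wd = {'DOID:1': {'oboInOwl:hasDbXref_MESH:D001'}}
#   do = {'DOID:1': {'oboInOwl:hasDbXref_MESH:D001', 'skos:exactMatch_MESH:D002'},
#         'DOID:2': {'oboInOwl:hasDbXref_MESH:D003'}}
#   compare(wd, do) -> ({}, {'DOID:1': {'skos:exactMatch_MESH:D002'}})
# and 'DOID:2' is only printed as missing from wikidata, not included in either dict.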
def get_changes():
wd = get_wikidata_do_mesh()
do = parse_do_owl()
leftover_in_wd, leftover_in_do = compare(wd, do)
return leftover_in_wd, leftover_in_do
def get_mesh_info(mesh_id):
url = "http://data.bioontology.org/ontologies/MESH/classes/http%3A%2F%2Fpurl.bioontology.org%2Fontology%2FMESH%2F{}"
d = requests.get(url.format(mesh_id), params={'apikey': BIOPORTAL_KEY}).json()
if "errors" in d:
return {'mesh_label': '', 'mesh_descr': ''}
d = {'mesh_label': d['prefLabel'], 'mesh_descr': d['definition'], 'mesh_synonyms': ";".join(d['synonym'])}
d['mesh_descr'] = d['mesh_descr'][0] if d['mesh_descr'] else ''
return d
def get_mesh_changes(leftover_in_wd):
# from the things added to wikidata, make a table with the metadata about the change
# starting with things added to wd
mesh_info = []
mesh_url = "https://meshb.nlm.nih.gov/record/ui?ui={}"
do_metadata = get_do_metadata()
for doid, meshs in tqdm(leftover_in_wd.items()):
for mesh in meshs:
relation, mesh = mesh.split("_")
mesh = mesh.split(":")[1]
qid = DOID_QID[doid]
do_node = do_metadata.get(doid, dict())
x = {'qid': qid, 'wd_label': getConceptLabel(qid),
'doid': doid, 'do_label': do_node.get("label"), 'doid_url': curie_to_purl(doid),
'do_def': do_node.get("descr"),
'mesh': mesh, 'mesh_url': mesh_url.format(mesh),
'relation': relation}
x.update(get_mesh_info(mesh))
mesh_info.append(x)
df = pd.DataFrame(mesh_info)
df = df[['doid', 'do_label', 'do_def', 'doid_url', 'mesh', 'mesh_label',
'mesh_descr', 'mesh_synonyms', 'mesh_url', 'qid', 'wd_label', 'relation']]
print(df.head(2))
remove_me = df[df.mesh_label.isnull()]
if not remove_me.empty:
print("you should remove these")
print(remove_me)
# make a formatted df
df_fmt = df.copy()
df_fmt.doid = df_fmt.apply(lambda x: "[" + x.doid + "](" + x.doid_url + ")", 1)
del df_fmt['doid_url']
df_fmt.mesh = df_fmt.apply(lambda x: "[" + x.mesh + "](" + x.mesh_url + ")", 1)
del df_fmt['mesh_url']
df_fmt.qid = df_fmt.qid.apply(lambda x: "[" + x + "](https://www.wikidata.org/wiki/" + x + ")")
return df, df_fmt
def download_do_owl(release):
url = "https://github.com/DiseaseOntology/HumanDiseaseOntology/raw/master/src/ontology/releases/{}/doid.owl"
subprocess.check_call(["wget", "-N", url.format(release)])
def main(release):
# release = "2017-11-28"
download_do_owl(release)
leftover_in_wd, leftover_in_do = get_changes()
df, df_fmt = get_mesh_changes(leftover_in_wd)
return df, df_fmt
|
mit
|
potash/scikit-learn
|
sklearn/tests/test_base.py
|
16
|
11355
|
# Author: Gael Varoquaux
# License: BSD 3 clause
import sys
import numpy as np
import scipy.sparse as sp
import sklearn
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn import datasets
from sklearn.utils import deprecated
from sklearn.base import TransformerMixin
from sklearn.utils.mocking import MockDataFrame
import pickle
#############################################################################
# A few test classes
class MyEstimator(BaseEstimator):
def __init__(self, l1=0, empty=None):
self.l1 = l1
self.empty = empty
class K(BaseEstimator):
def __init__(self, c=None, d=None):
self.c = c
self.d = d
class T(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class ModifyInitParams(BaseEstimator):
"""Deprecated behavior.
Equal parameters but with a type cast.
Doesn't fulfill the ``a is a`` identity.
"""
def __init__(self, a=np.array([0])):
self.a = a.copy()
class DeprecatedAttributeEstimator(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
if b is not None:
DeprecationWarning("b is deprecated and renamed 'a'")
self.a = b
@property
@deprecated("Parameter 'b' is deprecated and renamed to 'a'")
def b(self):
return self._b
class Buggy(BaseEstimator):
" A buggy estimator that does not set its parameters right. "
def __init__(self, a=None):
self.a = 1
class NoEstimator(object):
def __init__(self):
pass
def fit(self, X=None, y=None):
return self
def predict(self, X=None):
return None
class VargEstimator(BaseEstimator):
"""scikit-learn estimators shouldn't have vargs."""
def __init__(self, *vargs):
pass
#############################################################################
# The tests
def test_clone():
# Tests that clone creates a correct deep copy.
# We create an estimator, make a copy of its original state
# (which, in this case, is the current state of the estimator),
# and check that the obtained copy is a correct deep copy.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
new_selector = clone(selector)
assert_true(selector is not new_selector)
assert_equal(selector.get_params(), new_selector.get_params())
selector = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
new_selector = clone(selector)
assert_true(selector is not new_selector)
def test_clone_2():
# Tests that clone doesn't copy everything.
# We first create an estimator, give it an own attribute, and
# make a copy of its original state. Then we check that the copy doesn't
# have the specific attribute we manually added to the initial estimator.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
selector.own_attribute = "test"
new_selector = clone(selector)
assert_false(hasattr(new_selector, "own_attribute"))
def test_clone_buggy():
# Check that clone raises an error on buggy estimators.
buggy = Buggy()
buggy.a = 2
assert_raises(RuntimeError, clone, buggy)
no_estimator = NoEstimator()
assert_raises(TypeError, clone, no_estimator)
varg_est = VargEstimator()
assert_raises(RuntimeError, clone, varg_est)
def test_clone_empty_array():
# Regression test for cloning estimators with empty arrays
clf = MyEstimator(empty=np.array([]))
clf2 = clone(clf)
assert_array_equal(clf.empty, clf2.empty)
clf = MyEstimator(empty=sp.csr_matrix(np.array([[0]])))
clf2 = clone(clf)
assert_array_equal(clf.empty.data, clf2.empty.data)
def test_clone_nan():
# Regression test for cloning estimators with default parameter as np.nan
clf = MyEstimator(empty=np.nan)
clf2 = clone(clf)
assert_true(clf.empty is clf2.empty)
def test_clone_copy_init_params():
# test for deprecation warning when copying or casting an init parameter
est = ModifyInitParams()
message = ("Estimator ModifyInitParams modifies parameters in __init__. "
"This behavior is deprecated as of 0.18 and support "
"for this behavior will be removed in 0.20.")
assert_warns_message(DeprecationWarning, message, clone, est)
def test_clone_sparse_matrices():
sparse_matrix_classes = [
getattr(sp, name)
for name in dir(sp) if name.endswith('_matrix')]
PY26 = sys.version_info[:2] == (2, 6)
if PY26:
# sp.dok_matrix can not be deepcopied in Python 2.6
sparse_matrix_classes.remove(sp.dok_matrix)
for cls in sparse_matrix_classes:
sparse_matrix = cls(np.eye(5))
clf = MyEstimator(empty=sparse_matrix)
clf_cloned = clone(clf)
assert_true(clf.empty.__class__ is clf_cloned.empty.__class__)
assert_array_equal(clf.empty.toarray(), clf_cloned.empty.toarray())
def test_repr():
# Smoke test the repr of the base estimator.
my_estimator = MyEstimator()
repr(my_estimator)
test = T(K(), K())
assert_equal(
repr(test),
"T(a=K(c=None, d=None), b=K(c=None, d=None))"
)
some_est = T(a=["long_params"] * 1000)
assert_equal(len(repr(some_est)), 415)
def test_str():
# Smoke test the str of the base estimator
my_estimator = MyEstimator()
str(my_estimator)
def test_get_params():
test = T(K(), K())
assert_true('a__d' in test.get_params(deep=True))
assert_true('a__d' not in test.get_params(deep=False))
test.set_params(a__d=2)
assert_true(test.a.d == 2)
assert_raises(ValueError, test.set_params, a__a=2)
def test_get_params_deprecated():
# deprecated attribute should not show up as params
est = DeprecatedAttributeEstimator(a=1)
assert_true('a' in est.get_params())
assert_true('a' in est.get_params(deep=True))
assert_true('a' in est.get_params(deep=False))
assert_true('b' not in est.get_params())
assert_true('b' not in est.get_params(deep=True))
assert_true('b' not in est.get_params(deep=False))
def test_is_classifier():
svc = SVC()
assert_true(is_classifier(svc))
assert_true(is_classifier(GridSearchCV(svc, {'C': [0.1, 1]})))
assert_true(is_classifier(Pipeline([('svc', svc)])))
assert_true(is_classifier(Pipeline(
[('svc_cv', GridSearchCV(svc, {'C': [0.1, 1]}))])))
def test_set_params():
# test nested estimator parameter setting
clf = Pipeline([("svc", SVC())])
# non-existing parameter in svc
assert_raises(ValueError, clf.set_params, svc__stupid_param=True)
# non-existing parameter of pipeline
assert_raises(ValueError, clf.set_params, svm__stupid_param=True)
# we don't currently catch if the things in pipeline are estimators
# bad_pipeline = Pipeline([("bad", NoEstimator())])
# assert_raises(AttributeError, bad_pipeline.set_params,
# bad__stupid_param=True)
def test_score_sample_weight():
rng = np.random.RandomState(0)
# test both ClassifierMixin and RegressorMixin
estimators = [DecisionTreeClassifier(max_depth=2),
DecisionTreeRegressor(max_depth=2)]
sets = [datasets.load_iris(),
datasets.load_boston()]
for est, ds in zip(estimators, sets):
est.fit(ds.data, ds.target)
# generate random sample weights
sample_weight = rng.randint(1, 10, size=len(ds.target))
# check that the score with and without sample weights are different
assert_not_equal(est.score(ds.data, ds.target),
est.score(ds.data, ds.target,
sample_weight=sample_weight),
msg="Unweighted and weighted scores "
"are unexpectedly equal")
def test_clone_pandas_dataframe():
class DummyEstimator(BaseEstimator, TransformerMixin):
"""This is a dummy class for generating numerical features
This feature extractor extracts numerical features from pandas data
frame.
Parameters
----------
df: pandas data frame
The pandas data frame parameter.
Notes
-----
"""
def __init__(self, df=None, scalar_param=1):
self.df = df
self.scalar_param = scalar_param
def fit(self, X, y=None):
pass
def transform(self, X, y=None):
pass
# build and clone estimator
d = np.arange(10)
df = MockDataFrame(d)
e = DummyEstimator(df, scalar_param=1)
cloned_e = clone(e)
# the test
assert_true((e.df == cloned_e.df).values.all())
assert_equal(e.scalar_param, cloned_e.scalar_param)
class TreeNoVersion(DecisionTreeClassifier):
def __getstate__(self):
return self.__dict__
class TreeBadVersion(DecisionTreeClassifier):
def __getstate__(self):
return dict(self.__dict__.items(), _sklearn_version="something")
def test_pickle_version_warning():
# check that warnings are raised when unpickling in a different version
# first, check no warning when in the same version:
iris = datasets.load_iris()
tree = DecisionTreeClassifier().fit(iris.data, iris.target)
tree_pickle = pickle.dumps(tree)
assert_true(b"version" in tree_pickle)
assert_no_warnings(pickle.loads, tree_pickle)
# check that warning is raised on different version
tree = TreeBadVersion().fit(iris.data, iris.target)
tree_pickle_other = pickle.dumps(tree)
message = ("Trying to unpickle estimator TreeBadVersion from "
"version {0} when using version {1}. This might lead to "
"breaking code or invalid results. "
"Use at your own risk.".format("something",
sklearn.__version__))
assert_warns_message(UserWarning, message, pickle.loads, tree_pickle_other)
# check that not including any version also works:
# TreeNoVersion has no getstate, like pre-0.18
tree = TreeNoVersion().fit(iris.data, iris.target)
tree_pickle_noversion = pickle.dumps(tree)
assert_false(b"version" in tree_pickle_noversion)
message = message.replace("something", "pre-0.18")
message = message.replace("TreeBadVersion", "TreeNoVersion")
# check we got the warning about using pre-0.18 pickle
assert_warns_message(UserWarning, message, pickle.loads,
tree_pickle_noversion)
# check that no warning is raised for external estimators
TreeNoVersion.__module__ = "notsklearn"
assert_no_warnings(pickle.loads, tree_pickle_noversion)
|
bsd-3-clause
|
kmike/scikit-learn
|
sklearn/decomposition/nmf.py
|
2
|
18353
|
""" Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Lars Buitinck <[email protected]>
# Author: Chih-Jen Lin, National Taiwan University (original projected gradient
# NMF implementation)
# Author: Anthony Di Franco (original Python and NumPy port)
# License: BSD
from __future__ import division
from math import sqrt
import warnings
import numbers
import numpy as np
from scipy.optimize import nnls
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils import atleast2d_or_csr, check_random_state
from ..utils.extmath import randomized_svd, safe_sparse_dot
def safe_vstack(Xs):
if any(sp.issparse(X) for X in Xs):
return sp.vstack(Xs)
else:
return np.vstack(Xs)
def _pos(x):
"""Positive part of a vector / matrix"""
return (x >= 0) * x
def _neg(x):
"""Negative part of a vector / matrix"""
neg_x = -x
neg_x *= x < 0
return neg_x
def norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
x = x.ravel()
return np.sqrt(np.dot(x.T, x))
def _sparseness(x):
"""Hoyer's measure of sparsity for a vector"""
sqrt_n = np.sqrt(len(x))
return (sqrt_n - np.linalg.norm(x, 1) / norm(x)) / (sqrt_n - 1)
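# Illustrative sketch of Hoyer's measure (values worked out by hand):
#   _sparseness(np.array([1., 0., 0., 0.])) == 1.0  (a one-hot vector is maximally sparse)
#   _sparseness(np.ones(4)) == 0.0                  (a constant vector is maximally dense)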
def check_non_negative(X, whom):
X = X.data if sp.issparse(X) else X
if (X < 0).any():
raise ValueError("Negative values in data passed to %s" % whom)
def _initialize_nmf(X, n_components, variant=None, eps=1e-6,
random_state=None):
"""NNDSVD algorithm for NMF initialization.
Computes a good initial guess for the non-negative
rank k matrix approximation for X: X = WH
Parameters
----------
X : array, [n_samples, n_features]
The data matrix to be decomposed.
n_components : int
The number of components desired in the approximation.
variant : None | 'a' | 'ar'
The variant of the NNDSVD algorithm.
Accepts None, 'a', 'ar'
None: leaves the zero entries as zero
'a': Fills the zero entries with the average of X
'ar': Fills the zero entries with standard normal random variates.
Default: None
eps: float
Truncate all values less than this in output to zero.
random_state : numpy.RandomState | int, optional
The generator used to fill in the zeros, when using variant='ar'
Default: numpy.random
Returns
-------
(W, H) :
Initial guesses for solving X ~= WH such that
the number of columns in W is n_components.
Remarks
-------
This implements the algorithm described in
C. Boutsidis, E. Gallopoulos: SVD based
initialization: A head start for nonnegative
matrix factorization - Pattern Recognition, 2008
http://scgroup.hpclab.ceid.upatras.gr/faculty/stratis/Papers/HPCLAB020107.pdf
"""
check_non_negative(X, "NMF initialization")
if variant not in (None, 'a', 'ar'):
raise ValueError("Invalid variant name")
U, S, V = randomized_svd(X, n_components)
W, H = np.zeros(U.shape), np.zeros(V.shape)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in range(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = _pos(x), _pos(y)
x_n, y_n = _neg(x), _neg(y)
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
if variant == "a":
avg = X.mean()
W[W == 0] = avg
H[H == 0] = avg
elif variant == "ar":
random_state = check_random_state(random_state)
avg = X.mean()
W[W == 0] = abs(avg * random_state.randn(len(W[W == 0])) / 100)
H[H == 0] = abs(avg * random_state.randn(len(H[H == 0])) / 100)
return W, H
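# Illustrative sketch (hedged; shapes only, the values depend on the SVD):
#   X = np.abs(np.random.RandomState(0).randn(6, 4))
#   W, H = _initialize_nmf(X, n_components=2, variant='a', random_state=0)
#   W.shape == (6, 2), H.shape == (2, 4), and np.dot(W, H) gives a rough
#   non-negative rank-2 starting approximation of X.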
def _nls_subproblem(V, W, H_init, tol, max_iter):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the
projected gradient descent algorithm.
min || WH - V ||_2
Parameters
----------
V, W : array-like
Constant matrices.
H_init : array-like
Initial guess for the solution.
tol : float
Tolerance of the stopping condition.
max_iter : int
Maximum number of iterations before timing out.
Returns
-------
H : array-like
Solution to the non-negative least squares problem.
grad : array-like
The gradient.
n_iter : int
The number of iterations done by the algorithm.
"""
if (H_init < 0).any():
raise ValueError("Negative values in H_init passed to NLS solver.")
H = H_init
WtV = safe_sparse_dot(W.T, V, dense_output=True)
WtW = safe_sparse_dot(W.T, W, dense_output=True)
# values justified in the paper
alpha = 1
beta = 0.1
for n_iter in range(1, max_iter + 1):
grad = np.dot(WtW, H) - WtV
proj_gradient = norm(grad[np.logical_or(grad < 0, H > 0)])
if proj_gradient < tol:
break
for inner_iter in range(1, 20):
Hn = H - alpha * grad
# Hn = np.where(Hn > 0, Hn, 0)
Hn = _pos(Hn)
d = Hn - H
gradd = np.sum(grad * d)
dQd = np.sum(np.dot(WtW, d) * d)
# sufficient decrease condition: (1 - sigma) * gradd + 0.5 * dQd < 0 with sigma = 0.01 (Lin, 2007)
suff_decr = 0.99 * gradd + 0.5 * dQd < 0
if inner_iter == 1:
decr_alpha = not suff_decr
Hp = H
if decr_alpha:
if suff_decr:
H = Hn
break
else:
alpha *= beta
elif not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
alpha /= beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.")
return H, grad, n_iter
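# Illustrative sketch of the call convention (internal helper; the matrices
# below are hypothetical non-negative random data):
#   V = np.abs(np.random.RandomState(0).randn(5, 4))
#   W = np.abs(np.random.RandomState(1).randn(5, 2))
#   H0 = np.abs(np.random.RandomState(2).randn(2, 4))
#   H, grad, n_iter = _nls_subproblem(V, W, H0, tol=1e-4, max_iter=200)
#   H is non-negative and np.dot(W, H) approximates V in the least-squares sense.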
class ProjectedGradientNMF(BaseEstimator, TransformerMixin):
"""Non-Negative matrix factorization by Projected Gradient (NMF)
Parameters
----------
n_components : int or None
Number of components; if n_components is not set, all components
are kept.
init : 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'random'
Method used to initialize the procedure.
Default: 'nndsvd' if n_components < n_features, otherwise 'random'.
Valid options::
'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
'random': non-negative random matrices
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness.
eta : double, default: 0.1
Degree of correctness to maintain, if sparseness is not None. Smaller
values mean larger error.
tol : double, default: 1e-4
Tolerance value used in stopping conditions.
max_iter : int, default: 200
Number of iterations to compute.
nls_max_iter : int, default: 2000
Number of iterations in NLS subproblem.
random_state : int or RandomState
Random number generator seed control.
Attributes
----------
`components_` : array, [n_components, n_features]
Non-negative components of the data.
`reconstruction_err_` : number
Frobenius norm of the matrix difference between
the training data and the reconstructed data from
the fit produced by the model. ``|| X - WH ||_2``
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import ProjectedGradientNMF
>>> model = ProjectedGradientNMF(n_components=2, init='random',
... random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0, sparseness=None,
tol=0.0001)
>>> model.components_
array([[ 0.77032744, 0.11118662],
[ 0.38526873, 0.38228063]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.00746...
>>> model = ProjectedGradientNMF(n_components=2,
... sparseness='components', init='random', random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0,
sparseness='components', tol=0.0001)
>>> model.components_
array([[ 1.67481991, 0.29614922],
[-0. , 0.4681982 ]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.513...
Notes
-----
This implements
C.-J. Lin. Projected gradient methods
for non-negative matrix factorization. Neural
Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
P. Hoyer. Non-negative Matrix Factorization with
Sparseness Constraints. Journal of Machine Learning
Research 2004.
NNDSVD is introduced in
C. Boutsidis, E. Gallopoulos: SVD based
initialization: A head start for nonnegative
matrix factorization - Pattern Recognition, 2008
http://scgroup.hpclab.ceid.upatras.gr/faculty/stratis/Papers/HPCLAB020107.pdf
"""
def __init__(self, n_components=None, init=None, sparseness=None, beta=1,
eta=0.1, tol=1e-4, max_iter=200, nls_max_iter=2000,
random_state=None):
self.n_components = n_components
self.init = init
self.tol = tol
if sparseness not in (None, 'data', 'components'):
raise ValueError(
'Invalid sparseness parameter: got %r instead of one of %r' %
(sparseness, (None, 'data', 'components')))
self.sparseness = sparseness
self.beta = beta
self.eta = eta
self.max_iter = max_iter
self.nls_max_iter = nls_max_iter
self.random_state = random_state
def _init(self, X):
n_samples, n_features = X.shape
init = self.init
if init is None:
if self.n_components_ < n_features:
init = 'nndsvd'
else:
init = 'random'
if isinstance(init, (numbers.Integral, np.random.RandomState)):
random_state = check_random_state(init)
init = "random"
warnings.warn("Passing a random seed or generator as init "
"is deprecated and will be removed in 0.15. Use "
"init='random' and random_state instead.",
DeprecationWarning)
else:
random_state = self.random_state
if init == 'nndsvd':
W, H = _initialize_nmf(X, self.n_components_)
elif init == 'nndsvda':
W, H = _initialize_nmf(X, self.n_components_, variant='a')
elif init == 'nndsvdar':
W, H = _initialize_nmf(X, self.n_components_, variant='ar')
elif init == "random":
rng = check_random_state(random_state)
W = rng.randn(n_samples, self.n_components_)
# we do not write np.abs(W, out=W) to stay compatible with
# numpy 1.5 and earlier where the 'out' keyword is not
# supported as a kwarg on ufuncs
np.abs(W, W)
H = rng.randn(self.n_components_, n_features)
np.abs(H, H)
else:
raise ValueError(
'Invalid init parameter: got %r instead of one of %r' %
(init, (None, 'nndsvd', 'nndsvda', 'nndsvdar', 'random')))
return W, H
def _update_W(self, X, H, W, tolW):
n_samples, n_features = X.shape
if self.sparseness is None:
W, gradW, iterW = _nls_subproblem(X.T, H.T, W.T, tolW,
self.nls_max_iter)
elif self.sparseness == 'data':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T, np.zeros((1, n_samples))]),
safe_vstack([H.T, np.sqrt(self.beta) * np.ones((1,
self.n_components_))]),
W.T, tolW, self.nls_max_iter)
elif self.sparseness == 'components':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T,
np.zeros((self.n_components_, n_samples))]),
safe_vstack([H.T,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
W.T, tolW, self.nls_max_iter)
return W, gradW, iterW
def _update_H(self, X, H, W, tolH):
n_samples, n_features = X.shape
if self.sparseness is None:
H, gradH, iterH = _nls_subproblem(X, W, H, tolH,
self.nls_max_iter)
elif self.sparseness == 'data':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((self.n_components_, n_features))]),
safe_vstack([W,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
H, tolH, self.nls_max_iter)
elif self.sparseness == 'components':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((1, n_features))]),
safe_vstack([W,
np.sqrt(self.beta)
* np.ones((1, self.n_components_))]),
H, tolH, self.nls_max_iter)
return H, gradH, iterH
def fit_transform(self, X, y=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
X = atleast2d_or_csr(X)
check_non_negative(X, "NMF.fit")
n_samples, n_features = X.shape
if not self.n_components:
self.n_components_ = n_features
else:
self.n_components_ = self.n_components
W, H = self._init(X)
gradW = (np.dot(W, np.dot(H, H.T))
- safe_sparse_dot(X, H.T, dense_output=True))
gradH = (np.dot(np.dot(W.T, W), H)
- safe_sparse_dot(W.T, X, dense_output=True))
init_grad = norm(np.r_[gradW, gradH.T])
tolW = max(0.001, self.tol) * init_grad # why max?
tolH = tolW
for n_iter in range(1, self.max_iter + 1):
# stopping condition
# as discussed in paper
proj_norm = norm(np.r_[gradW[np.logical_or(gradW < 0, W > 0)],
gradH[np.logical_or(gradH < 0, H > 0)]])
if proj_norm < self.tol * init_grad:
break
# update W
W, gradW, iterW = self._update_W(X, H, W, tolW)
W = W.T
gradW = gradW.T
if iterW == 1:
tolW = 0.1 * tolW
# update H
H, gradH, iterH = self._update_H(X, H, W, tolH)
if iterH == 1:
tolH = 0.1 * tolH
self.comp_sparseness_ = _sparseness(H.ravel())
self.data_sparseness_ = _sparseness(W.ravel())
if not sp.issparse(X):
self.reconstruction_err_ = norm(X - np.dot(W, H))
else:
norm2X = np.sum(X.data ** 2) # Ok because X is CSR
normWHT = np.trace(np.dot(np.dot(H.T, np.dot(W.T, W)), H))
cross_prod = np.trace(np.dot((X * H.T).T, W))
self.reconstruction_err_ = sqrt(norm2X + normWHT
- 2. * cross_prod)
self.components_ = H
if n_iter == self.max_iter:
warnings.warn("Iteration limit reached during fit")
return W
def fit(self, X, y=None, **params):
"""Learn a NMF model for the data X.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
self
"""
self.fit_transform(X, **params)
return self
def transform(self, X):
"""Transform the data X according to the fitted NMF model
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be transformed by the model
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
X = atleast2d_or_csr(X)
W = np.zeros((X.shape[0], self.n_components_))
for j in range(0, X.shape[0]):
W[j, :], _ = nnls(self.components_.T, X[j, :])
return W
class NMF(ProjectedGradientNMF):
__doc__ = ProjectedGradientNMF.__doc__
pass
|
bsd-3-clause
|
khkaminska/scikit-learn
|
sklearn/metrics/cluster/tests/test_supervised.py
|
206
|
7643
|
import numpy as np
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.cluster import homogeneity_score
from sklearn.metrics.cluster import completeness_score
from sklearn.metrics.cluster import v_measure_score
from sklearn.metrics.cluster import homogeneity_completeness_v_measure
from sklearn.metrics.cluster import adjusted_mutual_info_score
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics.cluster import mutual_info_score
from sklearn.metrics.cluster import expected_mutual_information
from sklearn.metrics.cluster import contingency_matrix
from sklearn.metrics.cluster import entropy
from sklearn.utils.testing import assert_raise_message
from nose.tools import assert_almost_equal
from nose.tools import assert_equal
from numpy.testing import assert_array_almost_equal
score_funcs = [
adjusted_rand_score,
homogeneity_score,
completeness_score,
v_measure_score,
adjusted_mutual_info_score,
normalized_mutual_info_score,
]
def test_error_messages_on_wrong_input():
for score_func in score_funcs:
expected = ('labels_true and labels_pred must have same size,'
' got 2 and 3')
assert_raise_message(ValueError, expected, score_func,
[0, 1], [1, 1, 1])
expected = "labels_true must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[[0, 1], [1, 0]], [1, 1, 1])
expected = "labels_pred must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[0, 1, 0], [[1, 1], [0, 0]])
def test_perfect_matches():
for score_func in score_funcs:
assert_equal(score_func([], []), 1.0)
assert_equal(score_func([0], [1]), 1.0)
assert_equal(score_func([0, 0, 0], [0, 0, 0]), 1.0)
assert_equal(score_func([0, 1, 0], [42, 7, 42]), 1.0)
assert_equal(score_func([0., 1., 0.], [42., 7., 42.]), 1.0)
assert_equal(score_func([0., 1., 2.], [42., 7., 2.]), 1.0)
assert_equal(score_func([0, 1, 2], [42, 7, 2]), 1.0)
def test_homogeneous_but_not_complete_labeling():
# homogeneous but not complete clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 2, 2])
assert_almost_equal(h, 1.00, 2)
assert_almost_equal(c, 0.69, 2)
assert_almost_equal(v, 0.81, 2)
def test_complete_but_not_homogeneous_labeling():
# complete but not homogeneous clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 1, 1, 2, 2],
[0, 0, 1, 1, 1, 1])
assert_almost_equal(h, 0.58, 2)
assert_almost_equal(c, 1.00, 2)
assert_almost_equal(v, 0.73, 2)
def test_not_complete_and_not_homogeneous_labeling():
# neither complete nor homogeneous but not so bad either
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
def test_non_consecutive_labels():
# regression tests for labels with gaps
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 2, 2, 2],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 4, 0, 4, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
ari_1 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2])
ari_2 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2])
assert_almost_equal(ari_1, 0.24, 2)
assert_almost_equal(ari_2, 0.24, 2)
def uniform_labelings_scores(score_func, n_samples, k_range, n_runs=10,
seed=42):
# Compute score for random uniform cluster labelings
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(k_range), n_runs))
for i, k in enumerate(k_range):
for j in range(n_runs):
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
def test_adjustment_for_chance():
# Check that adjusted scores are almost zero on random labels
n_clusters_range = [2, 10, 50, 90]
n_samples = 100
n_runs = 10
scores = uniform_labelings_scores(
adjusted_rand_score, n_samples, n_clusters_range, n_runs)
max_abs_scores = np.abs(scores).max(axis=1)
assert_array_almost_equal(max_abs_scores, [0.02, 0.03, 0.03, 0.02], 2)
def test_adjusted_mutual_info_score():
# Compute the Adjusted Mutual Information and test against known values
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
# Mutual information
mi = mutual_info_score(labels_a, labels_b)
assert_almost_equal(mi, 0.41022, 5)
# Expected mutual information
C = contingency_matrix(labels_a, labels_b)
n_samples = np.sum(C)
emi = expected_mutual_information(C, n_samples)
assert_almost_equal(emi, 0.15042, 5)
# Adjusted mutual information
ami = adjusted_mutual_info_score(labels_a, labels_b)
assert_almost_equal(ami, 0.27502, 5)
ami = adjusted_mutual_info_score([1, 1, 2, 2], [2, 2, 3, 3])
assert_equal(ami, 1.0)
# Test with a very large array
a110 = np.array([list(labels_a) * 110]).flatten()
b110 = np.array([list(labels_b) * 110]).flatten()
ami = adjusted_mutual_info_score(a110, b110)
# This is not accurate to more than 2 places
assert_almost_equal(ami, 0.37, 2)
def test_entropy():
ent = entropy([0, 0, 42.])
assert_almost_equal(ent, 0.6365141, 5)
assert_almost_equal(entropy([]), 1)
def test_contingency_matrix():
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
C = contingency_matrix(labels_a, labels_b)
C2 = np.histogram2d(labels_a, labels_b,
bins=(np.arange(1, 5),
np.arange(1, 5)))[0]
assert_array_almost_equal(C, C2)
C = contingency_matrix(labels_a, labels_b, eps=.1)
assert_array_almost_equal(C, C2 + .1)
def test_exactly_zero_info_score():
# Check numerical stability when information is exactly zero
for i in np.logspace(1, 4, 4).astype(np.int):
labels_a, labels_b = np.ones(i, dtype=np.int),\
np.arange(i, dtype=np.int)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(v_measure_score(labels_a, labels_b), 0.0)
assert_equal(adjusted_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
def test_v_measure_and_mutual_information(seed=36):
# Check relation between v_measure, entropy and mutual information
for i in np.logspace(1, 4, 4).astype(np.int):
random_state = np.random.RandomState(seed)
labels_a, labels_b = random_state.random_integers(0, 10, i),\
random_state.random_integers(0, 10, i)
assert_almost_equal(v_measure_score(labels_a, labels_b),
2.0 * mutual_info_score(labels_a, labels_b) /
(entropy(labels_a) + entropy(labels_b)), 0)
|
bsd-3-clause
|
pyoceans/python-oceans
|
setup.py
|
2
|
1837
|
import os
from setuptools import find_packages, setup
import versioneer
rootpath = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
return open(os.path.join(rootpath, *parts)).read()
email = "[email protected]"
maintainer = "Filipe Fernandes"
authors = ["André Palóczy", "Arnaldo Russo", "Filipe Fernandes"]
# Dependencies.
hard = ["gsw", "matplotlib", "numpy", "seawater"]
soft = {"full": ["cartopy", "iris", "netcdf4", "pandas", "scipy"]}
setup(
name="oceans",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
packages=find_packages(),
package_data={"oceans": ["colormaps/cmap_data/*.dat"]},
license="BSD-3-Clause",
long_description=f'{read("README.md")}',
long_description_content_type="text/markdown",
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Education",
"Topic :: Scientific/Engineering",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
description="Misc functions for oceanographic data analysis",
author=authors,
author_email=email,
maintainer="Filipe Fernandes",
maintainer_email=email,
url="https://pypi.python.org/pypi/oceans/",
platforms="any",
keywords=["oceanography", "data analysis"],
extras_require=soft,
install_requires=hard,
python_requires=">=3.6",
tests_require=["pytest"],
)
|
bsd-3-clause
|
kjung/scikit-learn
|
sklearn/decomposition/tests/test_online_lda.py
|
50
|
13330
|
import numpy as np
from scipy.linalg import block_diag
from scipy.sparse import csr_matrix
from scipy.special import psi
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.decomposition._online_lda import (_dirichlet_expectation_1d,
_dirichlet_expectation_2d)
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.exceptions import NotFittedError
from sklearn.externals.six.moves import xrange
def _build_sparse_mtx():
# Create 3 topics and each topic has 3 distinct words.
# (Each word only belongs to a single topic.)
n_topics = 3
block = n_topics * np.ones((3, 3))
blocks = [block] * n_topics
X = block_diag(*blocks)
X = csr_matrix(X)
return (n_topics, X)
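# Illustrative note (sketch): with n_topics = 3 the matrix built above is a
# 9 x 9 block-diagonal array of three 3 x 3 blocks filled with the value 3,
# so documents 0-2 only contain words 0-2, documents 3-5 only words 3-5, etc.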
def test_lda_default_prior_params():
# default prior parameter should be `1 / n_topics`
# and verbose params should not affect result
n_topics, X = _build_sparse_mtx()
prior = 1. / n_topics
lda_1 = LatentDirichletAllocation(n_topics=n_topics, doc_topic_prior=prior,
topic_word_prior=prior, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, random_state=0)
topic_distr_1 = lda_1.fit_transform(X)
topic_distr_2 = lda_2.fit_transform(X)
assert_almost_equal(topic_distr_1, topic_distr_2)
def test_lda_fit_batch():
# Test LDA batch learning (`fit` method with 'batch' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, evaluate_every=1,
learning_method='batch', random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_fit_online():
# Test LDA online learning (`fit` method with 'online' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
evaluate_every=1, learning_method='online',
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_partial_fit():
# Test LDA online learning (`partial_fit` method)
# (same as test_lda_batch)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
total_samples=100, random_state=rng)
for i in xrange(3):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_dense_input():
# Test LDA with dense input.
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_method='batch',
random_state=rng)
lda.fit(X.toarray())
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_transform():
# Test LDA transform.
# Transform result cannot be negative and should be normalized
rng = np.random.RandomState(0)
X = rng.randint(5, size=(20, 10))
n_topics = 3
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
X_trans = lda.fit_transform(X)
assert_true((X_trans > 0.0).any())
assert_array_almost_equal(np.sum(X_trans, axis=1), np.ones(X_trans.shape[0]))
def test_lda_fit_transform():
# Test LDA fit_transform & transform
# fit_transform and transform result should be the same
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
X = rng.randint(10, size=(50, 20))
lda = LatentDirichletAllocation(n_topics=5, learning_method=method,
random_state=rng)
X_fit = lda.fit_transform(X)
X_trans = lda.transform(X)
assert_array_almost_equal(X_fit, X_trans, 4)
def test_lda_partial_fit_dim_mismatch():
# test `n_features` mismatch in `partial_fit`
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_col = rng.randint(6, 10)
X_1 = np.random.randint(4, size=(10, n_col))
X_2 = np.random.randint(4, size=(10, n_col + 1))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.partial_fit(X_1)
assert_raises_regexp(ValueError, r"^The provided data has",
lda.partial_fit, X_2)
def test_invalid_params():
# test `_check_params` method
X = np.ones((5, 10))
invalid_models = (
('n_topics', LatentDirichletAllocation(n_topics=0)),
('learning_method',
LatentDirichletAllocation(learning_method='unknown')),
('total_samples', LatentDirichletAllocation(total_samples=0)),
('learning_offset', LatentDirichletAllocation(learning_offset=-1)),
)
for param, model in invalid_models:
regex = r"^Invalid %r parameter" % param
assert_raises_regexp(ValueError, regex, model.fit, X)
def test_lda_negative_input():
# test passing a dense matrix with negative values
X = -np.ones((5, 10))
lda = LatentDirichletAllocation()
regex = r"^Negative values in data passed"
assert_raises_regexp(ValueError, regex, lda.fit, X)
def test_lda_no_component_error():
# test `transform` and `perplexity` before `fit`
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
lda = LatentDirichletAllocation()
regex = r"^no 'components_' attribute"
assert_raises_regexp(NotFittedError, regex, lda.transform, X)
assert_raises_regexp(NotFittedError, regex, lda.perplexity, X)
def test_lda_transform_mismatch():
# test `n_features` mismatch in partial_fit and transform
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
X_2 = rng.randint(4, size=(10, 8))
n_topics = rng.randint(3, 6)
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
lda.partial_fit(X)
assert_raises_regexp(ValueError, r"^The provided data has",
lda.partial_fit, X_2)
@if_safe_multiprocessing_with_blas
def test_lda_multi_jobs():
n_topics, X = _build_sparse_mtx()
# Test LDA batch training with multi CPU
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=2,
learning_method=method,
evaluate_every=1,
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
@if_safe_multiprocessing_with_blas
def test_lda_partial_fit_multi_jobs():
# Test LDA online training with multi CPU
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=2,
learning_offset=5., total_samples=30,
random_state=rng)
for i in range(2):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_perplexity_mismatch():
# test dimension mismatch in `perplexity` method
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_samples = rng.randint(6, 10)
X = np.random.randint(4, size=(n_samples, 10))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.fit(X)
# invalid samples
invalid_n_samples = rng.randint(4, size=(n_samples + 1, n_topics))
assert_raises_regexp(ValueError, r'Number of samples', lda.perplexity, X,
invalid_n_samples)
# invalid topic number
invalid_n_topics = rng.randint(4, size=(n_samples, n_topics + 1))
assert_raises_regexp(ValueError, r'Number of topics', lda.perplexity, X,
invalid_n_topics)
def test_lda_perplexity():
# Test LDA perplexity for batch training
# perplexity should be lower after each iteration
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
learning_method=method,
total_samples=100, random_state=0)
distr_1 = lda_1.fit_transform(X)
perp_1 = lda_1.perplexity(X, distr_1, sub_sampling=False)
distr_2 = lda_2.fit_transform(X)
perp_2 = lda_2.perplexity(X, distr_2, sub_sampling=False)
assert_greater_equal(perp_1, perp_2)
perp_1_subsampling = lda_1.perplexity(X, distr_1, sub_sampling=True)
perp_2_subsampling = lda_2.perplexity(X, distr_2, sub_sampling=True)
assert_greater_equal(perp_1_subsampling, perp_2_subsampling)
def test_lda_score():
# Test LDA score for batch training
# score should be higher after each iteration
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
learning_method=method,
total_samples=100, random_state=0)
lda_1.fit_transform(X)
score_1 = lda_1.score(X)
lda_2.fit_transform(X)
score_2 = lda_2.score(X)
assert_greater_equal(score_2, score_1)
def test_perplexity_input_format():
# Test LDA perplexity for sparse and dense input
# score should be the same for both dense and sparse input
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method='batch',
total_samples=100, random_state=0)
distr = lda.fit_transform(X)
perp_1 = lda.perplexity(X)
perp_2 = lda.perplexity(X, distr)
perp_3 = lda.perplexity(X.toarray(), distr)
assert_almost_equal(perp_1, perp_2)
assert_almost_equal(perp_1, perp_3)
def test_lda_score_perplexity():
# Test the relationship between LDA score and perplexity
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
random_state=0)
distr = lda.fit_transform(X)
perplexity_1 = lda.perplexity(X, distr, sub_sampling=False)
score = lda.score(X)
perplexity_2 = np.exp(-1. * (score / np.sum(X.data)))
assert_almost_equal(perplexity_1, perplexity_2)
def test_lda_empty_docs():
"""Test LDA on empty document (all-zero rows)."""
Z = np.zeros((5, 4))
for X in [Z, csr_matrix(Z)]:
lda = LatentDirichletAllocation(max_iter=750).fit(X)
assert_almost_equal(lda.components_.sum(axis=0),
np.ones(lda.components_.shape[1]))
def test_dirichlet_expectation():
"""Test Cython version of Dirichlet expectation calculation."""
x = np.logspace(-100, 10, 10000)
expectation = np.empty_like(x)
_dirichlet_expectation_1d(x, 0, expectation)
assert_allclose(expectation, np.exp(psi(x) - psi(np.sum(x))),
atol=1e-19)
x = x.reshape(100, 100)
assert_allclose(_dirichlet_expectation_2d(x),
psi(x) - psi(np.sum(x, axis=1)[:, np.newaxis]),
rtol=1e-11, atol=3e-9)
|
bsd-3-clause
|
jkarnows/scikit-learn
|
examples/svm/plot_iris.py
|
62
|
3251
|
"""
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different SVM classifiers on a 2D projection of the iris
dataset. We only consider the first 2 features of this dataset:
- Sepal length
- Sepal width
This example shows how to plot the decision surface for four SVM classifiers
with different kernels.
The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly
different decision boundaries. This can be a consequence of the following
differences:
- ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the
regular hinge loss.
- ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass
reduction while ``SVC`` uses the One-vs-One multiclass reduction.
Both linear models have linear decision boundaries (intersecting hyperplanes)
while the non-linear kernel models (polynomial or Gaussian RBF) have more
flexible non-linear decision boundaries with shapes that depend on the kind of
kernel and its parameters.
.. NOTE:: while plotting the decision function of classifiers for toy 2D
datasets can help get an intuitive understanding of their respective
expressive power, be aware that those intuitions don't always generalize to
more realistic high-dimensional problems.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
C = 1.0 # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)
lin_svc = svm.LinearSVC(C=C).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['SVC with linear kernel',
'LinearSVC (linear kernel)',
'SVC with RBF kernel',
'SVC with polynomial (degree 3) kernel']
for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max] x [y_min, y_max].
plt.subplot(2, 2, i + 1)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title(titles[i])
plt.show()
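# Hedged aside (not part of the original example): because of the loss and
# multiclass differences described in the module docstring, the two linear
# models can disagree on a few points; this quick check quantifies that.
disagreement = np.mean(svc.predict(X) != lin_svc.predict(X))
print("Fraction of points where SVC(kernel='linear') and LinearSVC disagree: %.3f"
      % disagreement)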
|
bsd-3-clause
|
Stargrazer82301/CAAPR
|
CAAPR/CAAPR_AstroMagic/PTS/pts/core/plot/wavelengthgrid.py
|
1
|
8839
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.modeling.plotting.wavelengthgrid Contains the WavelengthGridPlotter class.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
import copy
import numpy as np
from textwrap import wrap
import matplotlib.pyplot as plt
from collections import OrderedDict
from matplotlib.ticker import FormatStrFormatter
# Import the relevant PTS classes and modules
from ..tools.logging import log
# -----------------------------------------------------------------
line_styles = ['-', '--', '-.', ':']
filled_markers = ['o', 'v', '^', '<', '>', '8', 's', 'p', '*', 'h', 'H', 'D', 'd']
pretty_colors = ["r", "dodgerblue", "purple", "darkorange", "lawngreen", "yellow", "darkblue", "teal", "darkgreen", "lightcoral", "crimson", "saddlebrown"]
# -----------------------------------------------------------------
class WavelengthGridPlotter(object):
"""
This class ...
"""
def __init__(self, title=None):
"""
This function ...
:return:
"""
# Set the title
self.title = title
# The wavelength grids
self.grids = OrderedDict()
# SEDs to plot together with the wavelength grids
self.seds = OrderedDict()
# Emission lines
self.emission_lines = []
# The axis limits
self.min_wavelength = None
self.max_wavelength = None
# The figure
self._figure = None
# Properties
self.size = (20, 5)
self.colormap = "rainbow" # or "nipy_spectral"
self.format = None
self.transparent = False
# -----------------------------------------------------------------
def set_title(self, title):
"""
This function ...
:param title:
:return:
"""
self.title = title
# -----------------------------------------------------------------
def add_wavelength_grid(self, grid, label):
"""
This function ...
:param grid:
:param label:
:return:
"""
self.grids[label] = copy.deepcopy(grid)
# -----------------------------------------------------------------
def add_wavelength_point(self, label, wavelength):
"""
This function ...
:param label:
:param wavelength:
:return:
"""
self.grids[label].add_point(wavelength)
# -----------------------------------------------------------------
def add_sed(self, sed, label):
"""
This function ...
:param sed:
:param label:
:return:
"""
self.seds[label] = sed
# -----------------------------------------------------------------
def add_emission_line(self, line):
"""
This function ...
:param line:
:return:
"""
self.emission_lines.append(line)
# -----------------------------------------------------------------
def run(self, output_path, min_wavelength=0.019, max_wavelength=2050):
"""
This function ...
:param output_path:
:param min_wavelength:
:param max_wavelength:
:return:
"""
# Set the axis limits
self.min_wavelength = min_wavelength
self.max_wavelength = max_wavelength
# Make the plot
self.plot(output_path)
# -----------------------------------------------------------------
def clear(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Clearing the wavelength grid plotter ...")
# Set default values for all attributes
self.title = None
self.grids = OrderedDict()
self.seds = OrderedDict()
self.emission_lines = []
self.min_wavelength = None
self.max_wavelength = None
self._figure = None
self.colormap = "rainbow"
self.format = None
self.transparent = False
# -----------------------------------------------------------------
def plot(self, path):
"""
This function ...
:param path:
:return:
"""
# Inform the user
log.info("Plotting the wavelength grid ...")
# Create the figure
self._figure = plt.figure(figsize=self.size)
        plt.ylabel(r'$T_\lambda$', fontsize=28)
        plt.xlabel(r'$\lambda/\mu$m', fontsize=28)
# -----------------------------------------------------------------
plt.subplot2grid((4, 1), (0, 0), rowspan=3)
# Setup x axis
plt.xlim(self.min_wavelength, self.max_wavelength)
plt.xscale('log')
plt.gca().xaxis.set_ticklabels([])
# Set up y axis
plt.ylim(1, 4)
plt.gca().yaxis.set_visible(False)
plt.grid('on')
# Loop over the SEDs
for label in self.seds:
# Get the wavelengths and fluxes array
wavelengths = self.seds[label].wavelengths(asarray=True)
fluxes = self.seds[label].fluxes(asarray=True)
nonzero = fluxes != 0
wavelengths = wavelengths[nonzero]
fluxes = fluxes[nonzero]
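            # Rescale the SED logarithmically (peak anchored at y = 4) so it fits this panel's 1-4 band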
log_fluxes = 4. + 0.3 * np.log(fluxes / fluxes.max())
# Plot the SED
plt.plot(wavelengths, log_fluxes, color='c', lw=0.3)
# Loop over the wavelength grids
for label in self.grids:
# Get the wavelength points
grid = self.grids[label]
wavelengths = grid.wavelengths(asarray=True)
nwavelengths = grid.nwavelengths
# Plot the grid points
plt.scatter(wavelengths, [1.2 for _ in wavelengths], 10, marker='.', color='b', linewidths=0)
t = plt.text(0.021, 1.3, "{} grid points".format(nwavelengths), horizontalalignment='left',
fontsize='xx-small', color='b', backgroundcolor='w')
t.set_bbox(dict(color='w', alpha=0.5, edgecolor='w'))
# Plot a vertical line for each grid point
for w in wavelengths: plt.vlines(w, 1, 4, color='b', lw=0.2, alpha=0.2)
# Plot the deltas
plt.subplot2grid((4, 1), (3, 0), rowspan=1)
plt.plot(wavelengths[1:], np.log10(wavelengths[1:]) - np.log10(wavelengths[:-1]), 'm-', lw=0.6)
plt.xlim(0.019, 2050)
plt.xscale('log')
plt.gca().xaxis.set_major_formatter(FormatStrFormatter("%g"))
plt.xlabel(r"$\lambda\,(\mu \mathrm{m})$", fontsize='large')
plt.ylim(0, 0.05)
plt.ylabel(r"$\Delta\lambda\,(\mathrm{dex})$", fontsize='large')
plt.grid('on')
# plot a labeled vertical line for each emission line
colors = ['m', 'r', 'y', 'g']
positions = [1.3, 1.45, 1.6, 1.75]
index = 0
# Loop over the emission lines
for line in self.emission_lines:
# Get center wavelength and label
center = line.center
label = line.label
if len(label) > 0:
plt.vlines(center, 1, 2, color=colors[index], lw=0.3)
t = plt.text(center, positions[index], label, horizontalalignment='center',
fontsize='xx-small', color=colors[index], backgroundcolor='w')
t.set_bbox(dict(color='w', alpha=0.5, edgecolor='w'))
index = (index + 1) % 4
# -----------------------------------------------------------------
# Set the title
if self.title is not None: self._figure.suptitle("\n".join(wrap(self.title, 60)))
# Finish
self.finish_plot(path)
# -----------------------------------------------------------------
def finish_plot(self, path):
"""
This function ...
:return:
"""
# Debugging
if type(path).__name__ == "BytesIO": log.debug("Saving the SED plot to a buffer ...")
elif path is None: log.debug("Showing the SED plot ...")
else: log.debug("Saving the SED plot to " + str(path) + " ...")
if path is not None:
# Save the figure
plt.savefig(path, bbox_inches='tight', pad_inches=0.25, transparent=self.transparent, format=self.format)
else: plt.show()
plt.close()
# -----------------------------------------------------------------
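# Hedged usage sketch (comment only; assumes a `grid` object exposing the
# wavelengths()/nwavelengths/add_point interface relied on above):
#
#     plotter = WavelengthGridPlotter(title="Reference wavelength grid")
#     plotter.add_wavelength_grid(grid, "reference")
#     plotter.run("wavelength_grid.pdf")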
|
mit
|
comses/docker-cml-examples
|
projects/adapted_axtell/plotting.py
|
1
|
19774
|
import parameters
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
import ggplot as gg
try:
plt.style.use('ggplot')
except AttributeError:
pass
def firms_dynamics_plot(decision):
data = pd.read_csv(os.path.join(parameters.OUTPUT_PATH, "temp_general_firms_pop_%s_decision_%s_time_%s.txt" %
(parameters.pop_reducer, decision, parameters.final_Time)), sep=",", header=None,
decimal=",").astype(float)
# Renaming the columns names
data.columns = ['time', 'total_firms', 'average_output', 'average_age', 'average_size', 'new_firms', 'exit_firms',
'max_size', 'total_effort', 'average_effort']
# Logical test to control the process of initial time exclusion from the plots
if parameters.adjustment_time > 0:
data = data.loc[(data['time']).astype(int) >= parameters.adjustment_time, :]
# Variable to add to plot's title
title_pop_val = float(parameters.pop_reducer) * 100
# Creating a list of years to plot
list_of_years_division = list(range(int(data['time'].min()), int(data['time'].max()), 12)) \
+ [data['time'].max() + 1]
list_of_years = [int(i / 12) for i in list_of_years_division]
# Graphics parameters
dpi_var_plot = 700
width_var_plot = 15
height_var_plot = 10
###################################################################################################################
    # Plotting FIRMS DYNAMICS
# Total firms
plot_data = gg.ggplot(data, gg.aes('time', 'total_firms')) + gg.geom_line() + gg.scale_y_continuous(breaks=11) + \
gg.scale_x_discrete(breaks=list_of_years_division, labels=list_of_years) + \
gg.ggtitle('Total firms') + gg.xlab('Years') + gg.ylab('Total of Firms') + gg.theme_bw()
# Logical test to verify presence of plot. If TRUE, old plot is deleted before saving the new one
if os.path.isfile(os.path.join(parameters.OUTPUT_PATH, ('temp_general_total_firms_%s_%s_%s.png' %
(decision, title_pop_val, parameters.final_Time)))) \
is True:
os.remove(os.path.join(parameters.OUTPUT_PATH, ('temp_general_total_firms_%s_%s_%s.png' %
(decision, title_pop_val, parameters.final_Time))))
# Saving the plot
gg.ggsave(plot_data, os.path.join(parameters.OUTPUT_PATH, ('temp_general_total_firms_%s_%s_%s.png' %
(decision, title_pop_val, parameters.final_Time))),
width=width_var_plot, height=height_var_plot, units="in")
# Plot of average of output
plot_data = gg.ggplot(data, gg.aes('time', 'average_output')) + gg.geom_line() + gg.scale_y_continuous(breaks=11) + \
gg.scale_x_discrete(breaks=list_of_years_division, labels=list_of_years) \
+ gg.ggtitle('Average of output') + gg.xlab('Years') + gg.ylab('Units')+ gg.theme_bw()
# Logical test to verify presence of plot. If TRUE, old plot is deleted before saving the new one
if os.path.isfile(os.path.join(parameters.OUTPUT_PATH, ('temp_general_average_output_%s_%s_%s.png' %
(decision, title_pop_val, parameters.final_Time)))) \
is True:
os.remove(os.path.join(parameters.OUTPUT_PATH, ('temp_general_average_output_%s_%s_%s.png' %
(decision, title_pop_val, parameters.final_Time))))
# Saving the plot
gg.ggsave(plot_data, os.path.join(parameters.OUTPUT_PATH, ('temp_general_average_output_%s_%s_%s.png' %
(decision, title_pop_val, parameters.final_Time))),
width=width_var_plot, height=height_var_plot, units="in")
# Plot of average of age
plot_data = gg.ggplot(data, gg.aes('time', 'average_age')) + gg.geom_line() + gg.scale_y_continuous(breaks=11) + \
gg.scale_x_discrete(breaks=list_of_years_division, labels=list_of_years)\
+ gg.ggtitle('Average of age of firms') + gg.xlab('Years') + gg.ylab('Age of Firms') + gg.theme_bw()
# Logical test to verify presence of plot. If TRUE, old plot is deleted before saving the new one
if os.path.isfile(os.path.join(parameters.OUTPUT_PATH, ('temp_general_average_age_%s_%s_%s.png' %
(decision, title_pop_val, parameters.final_Time)))) \
is True:
os.remove(os.path.join(parameters.OUTPUT_PATH, ('temp_general_average_age_%s_%s_%s.png' %
(decision, title_pop_val, parameters.final_Time))))
# Saving the plot
gg.ggsave(plot_data, os.path.join(parameters.OUTPUT_PATH, ('temp_general_average_age_%s_%s_%s.png' %
(decision, title_pop_val, parameters.final_Time))),
width=width_var_plot, height=height_var_plot, units="in")
# Average of size
plot_data = gg.ggplot(data, gg.aes('time', 'average_size')) + gg.geom_line() + gg.scale_y_continuous(breaks=11) + \
gg.scale_x_discrete(breaks=list_of_years_division, labels=list_of_years) \
+ gg.ggtitle('Average of size of firms') + gg.xlab('Years') + gg.ylab('Units') + gg.theme_bw()
# Logical test to verify presence of plot. If TRUE, old plot is deleted before saving the new one
if os.path.isfile(os.path.join(parameters.OUTPUT_PATH, ('temp_general_average_size_%s_%s_%s.png' %
(decision, title_pop_val, parameters.final_Time)))) \
is True:
os.remove(os.path.join(parameters.OUTPUT_PATH, ('temp_general_average_size_%s_%s_%s.png' %
(decision, title_pop_val, parameters.final_Time))))
# Saving the plot
gg.ggsave(plot_data, os.path.join(parameters.OUTPUT_PATH, ('temp_general_average_size_%s_%s_%s.png' %
(decision, title_pop_val, parameters.final_Time))),
width=width_var_plot, height=height_var_plot, units="in")
# Plot of number of new firms
plot_data = gg.ggplot(data, gg.aes('time', 'new_firms')) + gg.geom_line() + gg.scale_y_continuous(breaks=11) + \
gg.scale_x_discrete(breaks=list_of_years_division, labels=list_of_years)\
+ gg.ggtitle('Number of new firms') + gg.xlab('Years') + gg.ylab('Units') + gg.theme_bw()
# Logical test to verify presence of plot. If TRUE, old plot is deleted before saving the new one
if os.path.isfile(os.path.join(parameters.OUTPUT_PATH, ('temp_general_number_of_new_firms_%s_%s_%s.png' %
(decision, title_pop_val, parameters.final_Time)))) \
is True:
os.remove(os.path.join(parameters.OUTPUT_PATH, ('temp_general_number_of_new_firms_%s_%s_%s.png' %
(decision, title_pop_val, parameters.final_Time))))
# Saving the plot
gg.ggsave(plot_data, os.path.join(parameters.OUTPUT_PATH, ('temp_general_number_of_new_firms_%s_%s_%s.png' %
(decision, title_pop_val, parameters.final_Time))),
width=width_var_plot, height=height_var_plot, units="in")
# Number of exit firms
plot_data = gg.ggplot(data, gg.aes('time', 'exit_firms')) + gg.geom_line() + gg.scale_y_continuous(breaks=11) + \
gg.scale_x_discrete(breaks=list_of_years_division, labels=list_of_years) \
+ gg.ggtitle('Number of firms out') + gg.xlab('Years') + gg.ylab('Units') + gg.theme_bw()
# Logical test to verify presence of plot. If TRUE, old plot is deleted before saving the new one
if os.path.isfile(os.path.join(parameters.OUTPUT_PATH, ('temp_general_number_of_firms_out_%s_%s_%s.png' %
(decision, title_pop_val, parameters.final_Time)))) \
is True:
os.remove(os.path.join(parameters.OUTPUT_PATH, ('temp_general_number_of_firms_out_%s_%s_%s.png' %
(decision, title_pop_val, parameters.final_Time))))
# Saving the plot
gg.ggsave(plot_data, os.path.join(parameters.OUTPUT_PATH, ('temp_general_number_of_firms_out_%s_%s_%s.png' %
(decision, title_pop_val, parameters.final_Time))),
width=width_var_plot, height=height_var_plot, units="in")
    # Average and total effort of employees
dat_merged = pd.concat([data.iloc[:, data.columns == 'average_effort'],
data.iloc[:, data.columns == 'total_effort']], axis=1)
plot_data = dat_merged.plot(title='Average and maximum effort of employees')
plot_data.set_xlabel('Years')
plot_data.set_ylabel('Values units of effort')
plot_data.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plot_data.set_xticks(list_of_years_division)
plot_data.set_xticklabels(list_of_years)
plot_data.set_axis_bgcolor('w')
fig = plot_data.get_figure()
fig.set_size_inches(width_var_plot, height_var_plot)
# Logical test to verify presence of plot. If TRUE, old plot is deleted before saving the new one
if os.path.isfile(os.path.join(parameters.OUTPUT_PATH, ('temp_average_and_maximum_effort_of_firms_out_%s_%s_%s.png' %
(decision, title_pop_val, parameters.final_Time)))) \
is True:
os.remove(os.path.join(parameters.OUTPUT_PATH, ('temp_average_and_maximum_effort_of_firms_out_%s_%s_%s.png' %
(decision, title_pop_val, parameters.final_Time))))
# Saving the plot
fig.savefig(os.path.join(parameters.OUTPUT_PATH, ('temp_average_and_maximum_effort_of_firms_out_%s_%s_%s.png' %
(decision, title_pop_val, parameters.final_Time))),
dpi=dpi_var_plot)
dat_merged = pd.concat([data.iloc[:, data.columns == 'average_size'],
data.iloc[:, data.columns == 'max_size']], axis=1)
plot_data = dat_merged.plot(title='Average and maximum size firms')
plot_data.set_xlabel('Years')
plot_data.set_ylabel('Number of employees')
plot_data.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plot_data.set_xticks(list_of_years_division)
plot_data.set_xticklabels(list_of_years)
plot_data.set_axis_bgcolor('w')
fig = plot_data.get_figure()
fig.set_size_inches(width_var_plot, height_var_plot)
# Logical test to verify presence of plot. If TRUE, old plot is deleted before saving the new one
if os.path.isfile(os.path.join(parameters.OUTPUT_PATH, ('temp_average_size_and_maximum_of_firms_out_%s_%s_%s.png' %
(decision, title_pop_val, parameters.final_Time)))) \
is True:
os.remove(os.path.join(parameters.OUTPUT_PATH, ('temp_average_size_and_maximum_of_firms_out_%s_%s_%s.png' %
(decision, title_pop_val, parameters.final_Time))))
# Saving the plot
fig.savefig(os.path.join(parameters.OUTPUT_PATH, ('temp_average_size_and_maximum_of_firms_out_%s_%s_%s.png' %
(decision, title_pop_val, parameters.final_Time))),
dpi=dpi_var_plot)
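# Hedged refactoring sketch (hypothetical helper, not used by the functions in
# this module): every figure above repeats the same "delete the old file, then
# save" block, and the explicit `is True` comparisons are redundant because
# os.path.isfile() already returns a boolean. A helper like this captures the
# pattern once.
def _save_replacing(figure, filename, width, height, dpi=700, use_ggsave=False):
    """Remove an existing file under parameters.OUTPUT_PATH, then save `figure` there."""
    target = os.path.join(parameters.OUTPUT_PATH, filename)
    if os.path.isfile(target):
        os.remove(target)
    if use_ggsave:
        gg.ggsave(figure, target, width=width, height=height, units="in")
    else:
        figure.savefig(target, dpi=dpi)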
def agents_dynamics_plot(decision):
data = pd.read_csv(os.path.join(parameters.OUTPUT_PATH,"temp_general_agents_pop_%s_decision_%s_time_%s.txt" %
(parameters.pop_reducer, decision, parameters.final_Time)), sep=",", header=None,
decimal=",").astype(float)
data.columns = ['time','municipality','average_utility','average_effort']
# Logical test to control the initial adjustment time for the plots
if parameters.adjustment_time > 0:
data = data.loc[(data['time']).astype(int) >= parameters.adjustment_time, :]
# Time adjusted
year, months = divmod(parameters.adjustment_time, 12)
# Variable to add to plot title
title_pop_val = float(parameters.pop_reducer) * 100
# Graph parameters
dpi_var_plot = 700
width_var_plot = 15
height_var_plot = 10
    # Create a list of years to plot
list_of_years_division = list(range(int(data['time'].min()), int(data['time'].max()), 12)) + [data['time'].max() + 1]
list_of_years = [int(i / 12) for i in list_of_years_division]
###################################################################################################################
# Plotting AGENTS UTILITY
data_utility = data.pivot(index='time', columns='municipality', values='average_utility')
plot_data = data_utility.plot(title='Average utility agents by municipality, by time')
plot_data.set_xlabel('Years')
plot_data.set_ylabel('Values units')
plot_data.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plot_data.set_xticks(list_of_years_division)
plot_data.set_xticklabels(list_of_years)
plot_data.set_axis_bgcolor('w')
fig = plot_data.get_figure()
fig.set_size_inches(width_var_plot, height_var_plot)
# Logical test to verify presence of plot. If TRUE, old plot is deleted before saving the new one
if os.path.isfile(os.path.join(parameters.OUTPUT_PATH, ('agents_utility_by_region_decision_%s_%s_%s.png' %
(decision, title_pop_val, parameters.final_Time)))) \
is True:
os.remove(os.path.join(parameters.OUTPUT_PATH, ('agents_utility_by_region_decision_%s_%s_%s.png' %
(decision, title_pop_val, parameters.final_Time))))
# Saving the plot
fig.savefig(os.path.join(parameters.OUTPUT_PATH, ('agents_utility_by_region_decision_%s_%s_%s.png' %
(decision, title_pop_val, parameters.final_Time))),
dpi=dpi_var_plot)
# AGENTS EFFORT
data_effort = data.pivot(index='time', columns='municipality', values='average_effort')
plot_data = data_effort.plot(title='Average effort agents by municipality, by time')
plot_data.set_xlabel('Years')
plot_data.set_ylabel('Values units')
plot_data.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plot_data.set_xticks(list_of_years_division)
plot_data.set_xticklabels(list_of_years)
plot_data.set_axis_bgcolor('w')
fig = plot_data.get_figure()
fig.set_size_inches(width_var_plot, height_var_plot)
# Logical test to verify presence of plot. If TRUE, old plot is deleted before saving the new one
if os.path.isfile(os.path.join(parameters.OUTPUT_PATH, ('agents_effort_by_region_decision_%s_%s_%s.png' %
(decision, title_pop_val, parameters.final_Time)))) \
is True:
os.remove(os.path.join(parameters.OUTPUT_PATH, ('agents_effort_by_region_decision_%s_%s_%s.png' %
(decision, title_pop_val, parameters.final_Time))))
# Saving the plot
fig.savefig(os.path.join(parameters.OUTPUT_PATH, ('agents_effort_by_region_decision_%s_%s_%s.png' %
(decision, title_pop_val, parameters.final_Time))),
dpi=dpi_var_plot)
def firms_together_plot(decision):
data = pd.read_csv(os.path.join(parameters.OUTPUT_PATH, "temp_general_firms_pop_%s_decision_%s_time_%s.txt" %
(parameters.pop_reducer, decision, parameters.final_Time)), sep=",", header=None,
decimal=",").astype(float)
data.columns = ['time', 'total_firms', 'average_output', 'average_age', 'average_size', 'new_firms', 'exit_firms',
'max_size', 'total_effort', 'average_effort']
# Logical test to control initial time adjustment
if parameters.adjustment_time > 0:
data = data.loc[(data['time']).astype(int) >= parameters.adjustment_time, :]
# Time adjusted
year, months = divmod(parameters.adjustment_time, 12)
# variable to add in the plot title
title_pop_val = float(parameters.pop_reducer) * 100
# Graph parameters
dpi_var_plot = 700
width_var_plot = 15
height_var_plot = 10
    # Creating a list of years to plot
list_of_years_division = list(range(int(data['time'].min()), int(data['time'].max()), 12)) + [data['time'].max()
+ 1]
list_of_years = [int(i / 12) for i in list_of_years_division]
###############################################################################################################
    # Plotting FIRMS VARIABLES together
data = data.iloc[:, data.columns != 'average_output']
data = data.iloc[:, data.columns != 'average_size']
data = data.iloc[:, data.columns != 'average_age']
data = data.iloc[:, data.columns != 'time']
data = pd.concat([data.iloc[:, data.columns == 'total_firms'],
data.iloc[:, data.columns == 'new_firms'],
data.iloc[:, data.columns == 'exit_firms'],
data.iloc[:, data.columns == 'max_size'],
data.iloc[:, data.columns == 'total_effort'],
data.iloc[:, data.columns == 'average_effort']], axis=1)
plot_data = data.plot(title='Firms variables, by time')
plot_data.set_xlabel('Years')
plot_data.set_ylabel('Values in units')
plot_data.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plot_data.set_xticks(list_of_years_division)
plot_data.set_xticklabels(list_of_years)
plot_data.set_axis_bgcolor('w')
    plot_data.legend(labels=['Total firms', 'New firms', 'Closed firms', 'Maximum size', 'Effort',
                             'Average of total effort'])
plot_data.grid('on', which='major', axis='both')
fig = plot_data.get_figure()
fig.set_size_inches(width_var_plot, height_var_plot)
# Logical test to verify presence of plot. If TRUE, old plot is deleted before saving the new one
if os.path.isfile(os.path.join(parameters.OUTPUT_PATH, ('firms_new_exit_total_decision_%s_%s_%s.png' %
(decision, title_pop_val, parameters.final_Time)))) \
is True:
os.remove(os.path.join(parameters.OUTPUT_PATH, ('firms_new_exit_total_decision_%s_%s_%s.png' %
(decision, title_pop_val, parameters.final_Time))))
# Saving the plot
fig.savefig(os.path.join(parameters.OUTPUT_PATH, ('firms_new_exit_total_decision_%s_%s_%s.png' %
(decision, title_pop_val, parameters.final_Time))),
dpi=dpi_var_plot)
|
gpl-3.0
|
sinhrks/scikit-learn
|
sklearn/ensemble/tests/test_base.py
|
284
|
1328
|
"""
Testing for the base module (sklearn.ensemble.base).
"""
# Authors: Gilles Louppe
# License: BSD 3 clause
from numpy.testing import assert_equal
from nose.tools import assert_true
from sklearn.utils.testing import assert_raise_message
from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.linear_model import Perceptron
def test_base():
# Check BaseEnsemble methods.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=3)
iris = load_iris()
ensemble.fit(iris.data, iris.target)
ensemble.estimators_ = [] # empty the list and create estimators manually
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator(append=False)
assert_equal(3, len(ensemble))
assert_equal(3, len(ensemble.estimators_))
assert_true(isinstance(ensemble[0], Perceptron))
def test_base_zero_n_estimators():
# Check that instantiating a BaseEnsemble with n_estimators<=0 raises
# a ValueError.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=0)
iris = load_iris()
assert_raise_message(ValueError,
"n_estimators must be greater than zero, got 0.",
ensemble.fit, iris.data, iris.target)
|
bsd-3-clause
|
robin-lai/scikit-learn
|
sklearn/linear_model/tests/test_randomized_l1.py
|
214
|
4690
|
# Authors: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.linear_model.randomized_l1 import (lasso_stability_path,
RandomizedLasso,
RandomizedLogisticRegression)
from sklearn.datasets import load_diabetes, load_iris
from sklearn.feature_selection import f_regression, f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model.base import center_data
diabetes = load_diabetes()
X = diabetes.data
y = diabetes.target
X = StandardScaler().fit_transform(X)
X = X[:, [2, 3, 6, 7, 8]]
# Univariate F-scores of the features; the tests below check that the
# randomized estimators rank the same best features.
F, _ = f_regression(X, y)
def test_lasso_stability_path():
# Check lasso stability path
# Load diabetes data and add noisy features
scaling = 0.3
coef_grid, scores_path = lasso_stability_path(X, y, scaling=scaling,
random_state=42,
n_resampling=30)
assert_array_equal(np.argsort(F)[-3:],
np.argsort(np.sum(scores_path, axis=1))[-3:])
def test_randomized_lasso():
# Check randomized lasso
scaling = 0.3
selection_threshold = 0.5
    # with a single alpha
clf = RandomizedLasso(verbose=False, alpha=1, random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
# or with many alphas
clf = RandomizedLasso(verbose=False, alpha=[1, 0.8], random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_equal(clf.all_scores_.shape, (X.shape[1], 2))
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
X_r = clf.transform(X)
X_full = clf.inverse_transform(X_r)
assert_equal(X_r.shape[1], np.sum(feature_scores > selection_threshold))
assert_equal(X_full.shape, X.shape)
clf = RandomizedLasso(verbose=False, alpha='aic', random_state=42,
scaling=scaling)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(feature_scores, X.shape[1] * [1.])
clf = RandomizedLasso(verbose=False, scaling=-0.1)
assert_raises(ValueError, clf.fit, X, y)
clf = RandomizedLasso(verbose=False, scaling=1.1)
assert_raises(ValueError, clf.fit, X, y)
def test_randomized_logistic():
# Check randomized sparse logistic regression
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
X_orig = X.copy()
feature_scores = clf.fit(X, y).scores_
assert_array_equal(X, X_orig) # fit does not modify X
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
clf = RandomizedLogisticRegression(verbose=False, C=[1., 0.5],
random_state=42, scaling=scaling,
n_resampling=50, tol=1e-3)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
def test_randomized_logistic_sparse():
# Check randomized sparse logistic regression on sparse data
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
# center here because sparse matrices are usually not centered
X, y, _, _, _ = center_data(X, y, True, True)
X_sp = sparse.csr_matrix(X)
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores = clf.fit(X, y).scores_
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores_sp = clf.fit(X_sp, y).scores_
assert_array_equal(feature_scores, feature_scores_sp)
|
bsd-3-clause
|
natj/bender
|
paper/figs/fig8.py
|
1
|
6241
|
import numpy as np
import math
from pylab import *
from palettable.wesanderson import Zissou_5 as wsZ
import matplotlib.ticker as mtick
from scipy.interpolate import interp1d
from scipy.interpolate import griddata
import scipy.ndimage as ndimage
#read JP and TH files
#def read_JP_files(fname):
# da = np.genfromtxt(fname, delimiter=" ", comments='#')
# return da[:,0], da[:,1], da[:,2], da[:,3],da[:,4],da[:,5]
#Read JN files
def read_JN_files(fname):
da = np.genfromtxt(fname, delimiter=",")
return da[:,0],da[:,1],da[:,2],da[:,3],da[:,4],da[:,5],da[:,6],da[:,7],da[:,8]
## Plot
fig = figure(figsize=(8,3), dpi=80)
rc('font', family='serif')
rc('xtick', labelsize='xx-small')
rc('ytick', labelsize='xx-small')
gs = GridSpec(1, 100)
gs.update(wspace = 0.34)
#gs.update(hspace = 0.4)
lsize = 7.0
#phase limits
xmin = 0.0
xmax = 90.0
ymin = 0.0
ymax = 90.0
#figure shape parameters
panelh = 70
skiph = 30
mfiglim = 0
#path to files
path_files = "../../out_skymaps/"
#labels size
tsize = 8.0
#general parameters
nu = 'f600'
bprof = 'pbb'
rad = 'r15'
mass = 'm1.6'
rho = 'x10'
incls = ['i5','i10','i20','i30','i40','i50','i60','i70','i80','i90']
incls_g = [5,10,20,30,40,50,60,70,80,90]
colat_g = [10,30,50,70,90]
#pre-read one file to get initial values
colat = 'd10'
incl = incls[0]
fname = path_files + nu+bprof+rad+mass+colat+incl+rho
phase_g, N2kev, N6kev, N12kev, Nbol, Fbol, F2kev, F6kev, F12kev = read_JN_files(fname+'.csv')
Nt = len(phase_g)
phase_t = np.linspace(0.0, 1.0, 200)
incls_t = np.linspace(0.0, 90.0, 100)
maxflux = 0.0
fig.text(0.3, 0.92, 'One spot', ha='center', va='center', size=10)
fig.text(0.7, 0.92, 'Two antipodal spots', ha='center', va='center', size=10)
#empty matrix
pfracs = np.zeros((len(colat_g)+1, len(incls_g)+1))
for k in range(2):
#frame for the main pulse profile fig
#ax1 = subplot(gs[mfiglim:mfiglim+panelh, k])
if k == 0:
#ax1 = subplot(gs[mfiglim:mfiglim+panelh, 0:46])
ax1 = subplot(gs[0, 0:46])
else:
#ax1 = subplot(gs[mfiglim:mfiglim+panelh, 49:95])
ax1 = subplot(gs[0, 49:95])
ax1.minorticks_on()
ax1.set_xlim(xmin, xmax)
ax1.set_ylim(ymin, ymax)
ax1.set_xlabel('Inclination $i$', size=lsize)
if k == 0:
ax1.set_ylabel('Spot colatitude $\\theta_s$', size=lsize)
elif k == 1:
ax1.set_yticklabels([])
for j in range(5):
if j == 0:
colat = '10'
elif j == 1:
colat = '30'
elif j == 2:
colat = '50'
elif j == 3:
colat = '70'
elif j == 4:
colat = '90'
#skymap = np.zeros((Nt, len(incls)))
#skymap = np.zeros((len(incls), Nt))
for q in range(len(incls)):
incl = incls[q]
#incl = incls[0]
fname = path_files + nu+bprof+rad+mass+'d'+colat+incl+rho
phase, N2kev, N6kev, N12kev, Nbol, Fbol, F2kev, F6kev, F12kev = read_JN_files(fname+'.csv')
#add second spot
if k == 1:
phase2, N2kev2, N6kev2, N12kev2, Nbol2, Fbol2, F2kev2, F6kev2, F12kev2 = read_JN_files(fname+'_2nd.csv')
N2kev += N2kev2
N6kev += N6kev2
N12kev += N12kev2
Nbol += Nbol2
Fbol += Fbol2
F2kev += F2kev2
F6kev += F6kev2
F12kev += F12kev2
#build flux matrix
flux = Fbol
#flux = Fbol / flux.max()
#skymap[:,q] = flux
#skymap[q,:] = flux
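            # Pulse fraction of the bolometric flux: (F_max - F_min) / (F_max + F_min)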
pfracs[j+1,q+1] = (flux.max() - flux.min()) / (flux.max() + flux.min())
#print skymap.max()
#print shape(skymap)
#print skymap
#skymap_interp = griddata((phase_g, incls_g), skymap, (phase_t, incls_t), method='cubic')
#skymap_interp = griddata((phase_g, incls_g), skymap, np.meshgrid(phase_t, incls_t), method='cubic')
#print skymap_interp
hdata = pfracs
#xr0 = incls_g[0]
xr0 = 0.0
xr1 = incls_g[-1]
#yr0 = colat_g[0]
yr0 = 0.0
yr1 = colat_g[-1]
#print xr0, xr1, yr0, yr1
extent = [xr0, xr1, yr0, yr1]
hdata_smooth = ndimage.gaussian_filter(hdata, sigma=1.0, order=0)
#hdata_masked = np.ma.masked_where(hdata <= 0.001, hdata)
#im = ax1.imshow(hdata_masked.T,
im = ax1.imshow(hdata.T,
#interpolation='nearest',
interpolation='gaussian',
origin='lower',
extent=extent,
#cmap='Reds',
#cmap='jet',
#cmap='YlGnBu',
cmap='plasma_r',
vmin=0.0,
#vmax=0.4,
vmax=1.0,
aspect='auto')
#levels = [0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
levels = [0.1, 0.2, 0.4, 0.6]
#levels = [0.05, 0.1, 0.2, 0.25, 0.3, 0.35]
if (k==0):
manual_locs = [(70, 12),
(70, 18,),
(70, 26),
(70, 33)]
else:
manual_locs = [(70, 12),
(70, 22,),
(70, 33),
(70, 50)]
#cs1 = ax1.contour(hdata_smooth.T,
cs1 = ax1.contour(hdata.T,
levels,
colors = 'k',
origin='lower',
extent=extent)
clabel(cs1, inline=1, fontsize=8, fmt='%1.1f',manual=manual_locs)
#zc = cs1.collections[0]
#setp(zc, linewidth=1)
    print(hdata)
if k == 1:
#mfiglim:mfiglim+panelh, 0:40])
#cbaxes = fig.add_axes([0.90, (mfiglim+panelh)/500, 0.05, panelh/500.0])
cbaxes = subplot(gs[0, 95:97])
cb = colorbar(im,
#label='Probability density',
cax=cbaxes)
cb.set_label('Pulse fraction',size=lsize)
#fig.text(0.5, 0.91-j*0.16, '$\\theta_{\mathrm{s}}$ = '+colat, ha='center', va='center', size=tsize)
mfiglim += panelh+skiph
savefig('fig8.pdf', bbox_inches='tight')
|
mit
|
RebeccaWPerry/vispy
|
vispy/color/colormap.py
|
4
|
38235
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division # just to be safe...
import inspect
import numpy as np
from .color_array import ColorArray
from ..ext.six import string_types
from ..ext.cubehelix import cubehelix
from ..ext.husl import husl_to_rgb
###############################################################################
# Color maps
# Utility functions for interpolation in NumPy.
def _vector_or_scalar(x, type='row'):
"""Convert an object to either a scalar or a row or column vector."""
if isinstance(x, (list, tuple)):
x = np.array(x)
if isinstance(x, np.ndarray):
assert x.ndim == 1
if type == 'column':
x = x[:, None]
return x
def _vector(x, type='row'):
"""Convert an object to a row or column vector."""
if isinstance(x, (list, tuple)):
x = np.array(x, dtype=np.float32)
elif not isinstance(x, np.ndarray):
x = np.array([x], dtype=np.float32)
assert x.ndim == 1
if type == 'column':
x = x[:, None]
return x
def _find_controls(x, controls=None, clip=None):
x_controls = np.clip(np.searchsorted(controls, x) - 1, 0, clip)
return x_controls.astype(np.int32)
# Normalization
def _normalize(x, cmin=None, cmax=None, clip=True):
"""Normalize an array from the range [cmin, cmax] to [0,1],
with optional clipping."""
if not isinstance(x, np.ndarray):
x = np.array(x)
if cmin is None:
cmin = x.min()
if cmax is None:
cmax = x.max()
if cmin == cmax:
return .5 * np.ones(x.shape)
else:
cmin, cmax = float(cmin), float(cmax)
y = (x - cmin) * 1. / (cmax - cmin)
if clip:
y = np.clip(y, 0., 1.)
return y
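# Worked example (illustrative comment only): _normalize([0., 5., 20.], cmin=0., cmax=10.)
# maps 0 -> 0.0 and 5 -> 0.5, and clips 20 to 1.0; when cmin == cmax every value maps to 0.5.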
# Interpolation functions in NumPy.
def _mix_simple(a, b, x):
"""Mix b (with proportion x) with a."""
x = np.clip(x, 0.0, 1.0)
return (1.0 - x)*a + x*b
def _interpolate_multi(colors, x, controls):
x = x.ravel()
n = len(colors)
# For each element in x, the control index of its bin's left boundary.
x_step = _find_controls(x, controls, n-2)
# The length of each bin.
controls_length = np.diff(controls).astype(np.float32)
# Prevent division by zero error.
controls_length[controls_length == 0.] = 1.
# Like x, but relative to each bin.
_to_clip = x - controls[x_step]
_to_clip /= controls_length[x_step]
x_rel = np.clip(_to_clip, 0., 1.)
return (colors[x_step],
colors[x_step + 1],
x_rel[:, None])
def mix(colors, x, controls=None):
a, b, x_rel = _interpolate_multi(colors, x, controls)
return _mix_simple(a, b, x_rel)
def smoothstep(edge0, edge1, x):
""" performs smooth Hermite interpolation
between 0 and 1 when edge0 < x < edge1. """
# Scale, bias and saturate x to 0..1 range
x = np.clip((x - edge0)/(edge1 - edge0), 0.0, 1.0)
# Evaluate polynomial
return x*x*(3 - 2*x)
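# Worked example (illustrative comment only): smoothstep(0., 1., 0.5) == 0.5, and the
# cubic 3*x**2 - 2*x**3 has zero slope at both edges, so the blend eases in and out.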
def step(colors, x, controls=None):
    """Step interpolation from a set of colors. x belongs in [0, 1]."""
    x = x.ravel()
assert (controls[0], controls[-1]) == (0., 1.)
ncolors = len(colors)
assert ncolors == len(controls) - 1
assert ncolors >= 2
x_step = _find_controls(x, controls, ncolors-1)
return colors[x_step, ...]
# GLSL interpolation functions.
def _glsl_mix(controls=None):
"""Generate a GLSL template function from a given interpolation patterns
and control points."""
assert (controls[0], controls[-1]) == (0., 1.)
ncolors = len(controls)
assert ncolors >= 2
if ncolors == 2:
s = " return mix($color_0, $color_1, t);\n"
else:
s = ""
for i in range(ncolors-1):
if i == 0:
ifs = 'if (t < %.6f)' % (controls[i+1])
elif i == (ncolors-2):
ifs = 'else'
else:
ifs = 'else if (t < %.6f)' % (controls[i+1])
adj_t = '(t - %s) / %s' % (controls[i],
controls[i+1] - controls[i])
s += ("%s {\n return mix($color_%d, $color_%d, %s);\n} " %
(ifs, i, i+1, adj_t))
return "vec4 colormap(float t) {\n%s\n}" % s
def _glsl_step(controls=None):
assert (controls[0], controls[-1]) == (0., 1.)
ncolors = len(controls) - 1
assert ncolors >= 2
s = ""
for i in range(ncolors-1):
if i == 0:
ifs = 'if (t < %.6f)' % (controls[i+1])
elif i == (ncolors-2):
ifs = 'else'
else:
ifs = 'else if (t < %.6f)' % (controls[i+1])
s += """%s {\n return $color_%d;\n} """ % (ifs, i)
return """vec4 colormap(float t) {\n%s\n}""" % s
# Mini GLSL template system for colors.
def _process_glsl_template(template, colors):
"""Replace $color_i by color #i in the GLSL template."""
for i in range(len(colors) - 1, -1, -1):
color = colors[i]
assert len(color) == 4
vec4_color = 'vec4(%.3f, %.3f, %.3f, %.3f)' % tuple(color)
template = template.replace('$color_%d' % i, vec4_color)
return template
class BaseColormap(object):
"""Class representing a colormap:
t \in [0, 1] --> rgba_color
Parameters
----------
colors : list of lists, tuples, or ndarrays
The control colors used by the colormap (shape = (ncolors, 4)).
Notes
-----
    Must be overridden. Child classes need to implement:
glsl_map : string
The GLSL function for the colormap. Use $color_0 to refer
to the first color in `colors`, and so on. These are vec4 vectors.
map(item) : function
Takes a (N, 1) vector of values in [0, 1], and returns a rgba array
of size (N, 4).
"""
# Control colors used by the colormap.
colors = None
# GLSL string with a function implementing the color map.
glsl_map = None
def __init__(self, colors=None):
# Ensure the colors are arrays.
if colors is not None:
self.colors = colors
if not isinstance(self.colors, ColorArray):
self.colors = ColorArray(self.colors)
        # Process the GLSL map function by replacing $color_i by the actual colors.
if len(self.colors) > 0:
self.glsl_map = _process_glsl_template(self.glsl_map,
self.colors.rgba)
def map(self, item):
"""Return a rgba array for the requested items.
        This function must be overridden by child classes.
This function doesn't need to implement argument checking on `item`.
It can always assume that `item` is a (N, 1) array of values between
0 and 1.
Parameters
----------
item : ndarray
An array of values in [0,1].
Returns
-------
rgba : ndarray
An array with rgba values, with one color per item. The shape
should be ``item.shape + (4,)``.
Notes
-----
Users are expected to use a colormap with ``__getitem__()`` rather
than ``map()`` (which implements a lower-level API).
"""
raise NotImplementedError()
def __getitem__(self, item):
if isinstance(item, tuple):
raise ValueError('ColorArray indexing is only allowed along '
'the first dimension.')
# Ensure item is either a scalar or a column vector.
item = _vector(item, type='column')
# Clip the values in [0, 1].
item = np.clip(item, 0., 1.)
colors = self.map(item)
return ColorArray(colors)
def __setitem__(self, item, value):
raise RuntimeError("It is not possible to set items to "
"BaseColormap instances.")
def _repr_html_(self):
n = 100
html = ("""
<style>
table.vispy_colormap {
height: 30px;
border: 0;
margin: 0;
padding: 0;
}
table.vispy_colormap td {
width: 3px;
border: 0;
margin: 0;
padding: 0;
}
</style>
<table class="vispy_colormap">
""" +
'\n'.join([(("""<td style="background-color: %s;"
title="%s"></td>""") % (color, color))
for color in self[np.linspace(0., 1., n)].hex]) +
"""
</table>
""")
return html
def _default_controls(ncolors):
"""Generate linearly spaced control points from a set of colors."""
return np.linspace(0., 1., ncolors)
# List the parameters of every supported interpolation mode.
_interpolation_info = {
'linear': {
'ncontrols': lambda ncolors: ncolors, # take ncolors as argument
'glsl_map': _glsl_mix, # take 'controls' as argument
'map': mix,
},
'zero': {
'ncontrols': lambda ncolors: (ncolors+1),
'glsl_map': _glsl_step,
'map': step,
}
}
class Colormap(BaseColormap):
"""A colormap defining several control colors and an interpolation scheme.
Parameters
----------
colors : list of colors | ColorArray
The list of control colors. If not a ``ColorArray``, a new
``ColorArray`` instance is created from this list. See the
documentation of ``ColorArray``.
controls : array-like
The list of control points for the given colors. It should be
        an increasing list of floating-point numbers between 0.0 and 1.0.
The first control point must be 0.0. The last control point must be
1.0. The number of control points depends on the interpolation scheme.
interpolation : str
The interpolation mode of the colormap. Default: 'linear'. Can also
be 'zero'.
If 'linear', ncontrols = ncolors (one color per control point).
If 'zero', ncontrols = ncolors+1 (one color per bin).
Examples
--------
Here is a basic example:
>>> from vispy.color import Colormap
>>> cm = Colormap(['r', 'g', 'b'])
>>> cm[0.], cm[0.5], cm[np.linspace(0., 1., 100)]
"""
def __init__(self, colors, controls=None, interpolation='linear'):
self.interpolation = interpolation
ncontrols = self._ncontrols(len(colors))
# Default controls.
if controls is None:
controls = _default_controls(ncontrols)
assert len(controls) == ncontrols
self._controls = np.array(controls, dtype=np.float32)
self.glsl_map = self._glsl_map_generator(self._controls)
super(Colormap, self).__init__(colors)
@property
def interpolation(self):
"""The interpolation mode of the colormap"""
return self._interpolation
@interpolation.setter
def interpolation(self, val):
if val not in _interpolation_info:
raise ValueError('The interpolation mode can only be one of: ' +
', '.join(sorted(_interpolation_info.keys())))
# Get the information of the interpolation mode.
info = _interpolation_info[val]
# Get the function that generates the GLSL map, as a function of the
# controls array.
self._glsl_map_generator = info['glsl_map']
# Number of controls as a function of the number of colors.
self._ncontrols = info['ncontrols']
# Python map function.
self._map_function = info['map']
self._interpolation = val
def map(self, x):
"""The Python mapping function from the [0,1] interval to a
list of rgba colors
Parameters
----------
x : array-like
The values to map.
Returns
-------
colors : list
List of rgba colors.
"""
return self._map_function(self.colors.rgba, x, self._controls)
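# Hedged usage sketch (comment only, not part of the original module): with
# interpolation='zero' a colormap takes ncolors + 1 control points, one bin per
# color, for example
#
#     cm = Colormap(['r', 'g', 'b'], controls=[0., 0.25, 0.5, 1.],
#                   interpolation='zero')
#     cm[0.10]   # falls in the first bin -> red
#     cm[0.75]   # falls in the last bin  -> blue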
class CubeHelixColormap(Colormap):
def __init__(self, start=0.5, rot=1, gamma=1.0, reverse=True, nlev=32,
minSat=1.2, maxSat=1.2, minLight=0., maxLight=1., **kwargs):
"""Cube helix colormap
A full implementation of Dave Green's "cubehelix" for Matplotlib.
Based on the FORTRAN 77 code provided in
D.A. Green, 2011, BASI, 39, 289.
http://adsabs.harvard.edu/abs/2011arXiv1108.5083G
User can adjust all parameters of the cubehelix algorithm.
This enables much greater flexibility in choosing color maps, while
always ensuring the color map scales in intensity from black
to white. A few simple examples:
Default color map settings produce the standard "cubehelix".
Create color map in only blues by setting rot=0 and start=0.
Create reverse (white to black) backwards through the rainbow once
by setting rot=1 and reverse=True.
Parameters
----------
start : scalar, optional
Sets the starting position in the color space. 0=blue, 1=red,
2=green. Defaults to 0.5.
rot : scalar, optional
The number of rotations through the rainbow. Can be positive
or negative, indicating direction of rainbow. Negative values
            correspond to Blue->Red direction. Defaults to 1.
gamma : scalar, optional
The gamma correction for intensity. Defaults to 1.0
reverse : boolean, optional
Set to True to reverse the color map. Will go from black to
white. Good for density plots where shade~density. Defaults to
            True.
nlev : scalar, optional
Defines the number of discrete levels to render colors at.
Defaults to 32.
sat : scalar, optional
The saturation intensity factor. Defaults to 1.2
NOTE: this was formerly known as "hue" parameter
minSat : scalar, optional
Sets the minimum-level saturation. Defaults to 1.2
maxSat : scalar, optional
Sets the maximum-level saturation. Defaults to 1.2
startHue : scalar, optional
Sets the starting color, ranging from [0, 360], as in
D3 version by @mbostock
NOTE: overrides values in start parameter
endHue : scalar, optional
Sets the ending color, ranging from [0, 360], as in
D3 version by @mbostock
NOTE: overrides values in rot parameter
minLight : scalar, optional
Sets the minimum lightness value. Defaults to 0.
maxLight : scalar, optional
Sets the maximum lightness value. Defaults to 1.
"""
super(CubeHelixColormap, self).__init__(
cubehelix(start=start, rot=rot, gamma=gamma, reverse=reverse,
nlev=nlev, minSat=minSat, maxSat=maxSat,
minLight=minLight, maxLight=maxLight, **kwargs))
class _Fire(BaseColormap):
colors = [(1.0, 1.0, 1.0, 1.0),
(1.0, 1.0, 0.0, 1.0),
(1.0, 0.0, 0.0, 1.0)]
glsl_map = """
vec4 fire(float t) {
return mix(mix($color_0, $color_1, t),
mix($color_1, $color_2, t*t), t);
}
"""
def map(self, t):
a, b, d = self.colors.rgba
c = _mix_simple(a, b, t)
e = _mix_simple(b, d, t**2)
return _mix_simple(c, e, t)
class _Grays(BaseColormap):
glsl_map = """
vec4 grays(float t) {
return vec4(t, t, t, 1.0);
}
"""
def map(self, t):
if isinstance(t, np.ndarray):
return np.hstack([t, t, t, np.ones(t.shape)]).astype(np.float32)
else:
return np.array([t, t, t, 1.0], dtype=np.float32)
class _Ice(BaseColormap):
glsl_map = """
vec4 ice(float t) {
return vec4(t, t, 1.0, 1.0);
}
"""
def map(self, t):
if isinstance(t, np.ndarray):
return np.hstack([t, t, np.ones(t.shape),
np.ones(t.shape)]).astype(np.float32)
else:
return np.array([t, t, 1.0, 1.0], dtype=np.float32)
class _Hot(BaseColormap):
colors = [(0., .33, .66, 1.0),
(.33, .66, 1., 1.0)]
glsl_map = """
vec4 hot(float t) {
return vec4(smoothstep($color_0.rgb, $color_1.rgb, vec3(t, t, t)),
1.0);
}
"""
def map(self, t):
rgba = self.colors.rgba
smoothed = smoothstep(rgba[0, :3], rgba[1, :3], t)
return np.hstack((smoothed, np.ones((len(t), 1))))
class _Winter(BaseColormap):
colors = [(0.0, 0.0, 1.0, 1.0),
(0.0, 1.0, 0.5, 1.0)]
glsl_map = """
vec4 winter(float t) {
return mix($color_0, $color_1, sqrt(t));
}
"""
def map(self, t):
return _mix_simple(self.colors.rgba[0],
self.colors.rgba[1],
np.sqrt(t))
class _SingleHue(Colormap):
"""A colormap which is solely defined by the given hue and value.
Given the color hue and value, this color map increases the saturation
of a color. The start color is almost white but still contains a hint of
the given color, and at the end the color is fully saturated.
Parameters
----------
hue : scalar, optional
The hue refers to a "true" color, without any shading or tinting.
Must be in the range [0, 360]. Defaults to 200 (blue).
saturation_range : array-like, optional
The saturation represents how "pure" a color is. Less saturation means
more white light mixed in the color. A fully saturated color means
the pure color defined by the hue. No saturation means completely
white. This colormap changes the saturation, and with this parameter
        you can specify the lower and upper bound. Default is [0.1, 0.8].
value : scalar, optional
The value defines the "brightness" of a color: a value of 0.0 means
completely black while a value of 1.0 means the color defined by the
hue without shading. Must be in the range [0, 1.0]. The default value
is 1.0.
Notes
-----
For more information about the hue values see the `wikipedia page`_.
.. _wikipedia page: https://en.wikipedia.org/wiki/Hue
"""
def __init__(self, hue=200, saturation_range=[0.1, 0.8], value=1.0):
colors = ColorArray([
(hue, saturation_range[0], value),
(hue, saturation_range[1], value)
], color_space='hsv')
super(_SingleHue, self).__init__(colors)
class _HSL(Colormap):
"""A colormap which is defined by n evenly spaced points in
a circular color space.
This means that we change the hue value while keeping the
saturation and value constant.
Parameters
---------
    ncolors : int, optional
The number of colors to generate.
hue_start : int, optional
The hue start value. Must be in the range [0, 360], the default is 0.
saturation : float, optional
The saturation component of the colors to generate. The default is
fully saturated (1.0). Must be in the range [0, 1.0].
value : float, optional
The value (brightness) component of the colors to generate. Must
be in the range [0, 1.0], and the default is 1.0
controls : array-like, optional
The list of control points for the colors to generate. It should be
        an increasing list of floating-point numbers between 0.0 and 1.0.
The first control point must be 0.0. The last control point must be
1.0. The number of control points depends on the interpolation scheme.
interpolation : str, optional
The interpolation mode of the colormap. Default: 'linear'. Can also
be 'zero'.
If 'linear', ncontrols = ncolors (one color per control point).
If 'zero', ncontrols = ncolors+1 (one color per bin).
"""
def __init__(self, ncolors=6, hue_start=0, saturation=1.0, value=1.0,
controls=None, interpolation='linear'):
hues = np.linspace(0, 360, ncolors + 1)[:-1]
hues += hue_start
hues %= 360
colors = ColorArray([(hue, saturation, value) for hue in hues],
color_space='hsv')
super(_HSL, self).__init__(colors, controls=controls,
interpolation=interpolation)
class _HUSL(Colormap):
"""A colormap which is defined by n evenly spaced points in
the HUSL hue space.
Parameters
---------
    ncolors : int, optional
The number of colors to generate.
hue_start : int, optional
The hue start value. Must be in the range [0, 360], the default is 0.
saturation : float, optional
The saturation component of the colors to generate. The default is
fully saturated (1.0). Must be in the range [0, 1.0].
value : float, optional
The value component of the colors to generate or "brightness". Must
be in the range [0, 1.0], and the default is 0.7.
controls : array-like, optional
The list of control points for the colors to generate. It should be
        an increasing list of floating-point numbers between 0.0 and 1.0.
The first control point must be 0.0. The last control point must be
1.0. The number of control points depends on the interpolation scheme.
interpolation : str, optional
The interpolation mode of the colormap. Default: 'linear'. Can also
be 'zero'.
If 'linear', ncontrols = ncolors (one color per control point).
If 'zero', ncontrols = ncolors+1 (one color per bin).
Notes
-----
For more information about HUSL colors see http://husl-colors.org
"""
def __init__(self, ncolors=6, hue_start=0, saturation=1.0, value=0.7,
controls=None, interpolation='linear'):
hues = np.linspace(0, 360, ncolors + 1)[:-1]
hues += hue_start
hues %= 360
saturation *= 99
value *= 99
colors = ColorArray(
[husl_to_rgb(hue, saturation, value) for hue in hues],
)
super(_HUSL, self).__init__(colors, controls=controls,
interpolation=interpolation)
class _Diverging(Colormap):
def __init__(self, h_pos=20, h_neg=250, saturation=1.0, value=0.7,
center="light"):
saturation *= 99
value *= 99
start = husl_to_rgb(h_neg, saturation, value)
mid = ((0.133, 0.133, 0.133) if center == "dark" else
(0.92, 0.92, 0.92))
end = husl_to_rgb(h_pos, saturation, value)
colors = ColorArray([start, mid, end])
super(_Diverging, self).__init__(colors)
# https://github.com/matplotlib/matplotlib/pull/4707/files#diff-893cf0348279e9f4570488a7a297ab1eR774
# Taken from original Viridis colormap data in matplotlib implementation
# Sampled 128 points from the raw data-set of 256 samples.
# Sub sampled to 128 points since 256 points causes VisPy to freeze.
# HACK: Ideally, all 256 points should be included, with VisPy generating
# a 1D texture lookup for ColorMap, rather than branching code.
_viridis_data = [[0.267004, 0.004874, 0.329415],
[0.268510, 0.009605, 0.335427],
[0.269944, 0.014625, 0.341379],
[0.271305, 0.019942, 0.347269],
[0.272594, 0.025563, 0.353093],
[0.273809, 0.031497, 0.358853],
[0.274952, 0.037752, 0.364543],
[0.276022, 0.044167, 0.370164],
[0.277018, 0.050344, 0.375715],
[0.277941, 0.056324, 0.381191],
[0.278791, 0.062145, 0.386592],
[0.279566, 0.067836, 0.391917],
[0.280267, 0.073417, 0.397163],
[0.280894, 0.078907, 0.402329],
[0.281446, 0.084320, 0.407414],
[0.281924, 0.089666, 0.412415],
[0.282327, 0.094955, 0.417331],
[0.282656, 0.100196, 0.422160],
[0.282910, 0.105393, 0.426902],
[0.283091, 0.110553, 0.431554],
[0.283197, 0.115680, 0.436115],
[0.283229, 0.120777, 0.440584],
[0.283187, 0.125848, 0.444960],
[0.283072, 0.130895, 0.449241],
[0.282884, 0.135920, 0.453427],
[0.282623, 0.140926, 0.457517],
[0.282290, 0.145912, 0.461510],
[0.281887, 0.150881, 0.465405],
[0.281412, 0.155834, 0.469201],
[0.280868, 0.160771, 0.472899],
[0.280255, 0.165693, 0.476498],
[0.279574, 0.170599, 0.479997],
[0.278826, 0.175490, 0.483397],
[0.278012, 0.180367, 0.486697],
[0.277134, 0.185228, 0.489898],
[0.276194, 0.190074, 0.493001],
[0.275191, 0.194905, 0.496005],
[0.274128, 0.199721, 0.498911],
[0.273006, 0.204520, 0.501721],
[0.271828, 0.209303, 0.504434],
[0.270595, 0.214069, 0.507052],
[0.269308, 0.218818, 0.509577],
[0.267968, 0.223549, 0.512008],
[0.266580, 0.228262, 0.514349],
[0.265145, 0.232956, 0.516599],
[0.263663, 0.237631, 0.518762],
[0.262138, 0.242286, 0.520837],
[0.260571, 0.246922, 0.522828],
[0.258965, 0.251537, 0.524736],
[0.257322, 0.256130, 0.526563],
[0.255645, 0.260703, 0.528312],
[0.253935, 0.265254, 0.529983],
[0.252194, 0.269783, 0.531579],
[0.250425, 0.274290, 0.533103],
[0.248629, 0.278775, 0.534556],
[0.246811, 0.283237, 0.535941],
[0.244972, 0.287675, 0.537260],
[0.243113, 0.292092, 0.538516],
[0.241237, 0.296485, 0.539709],
[0.239346, 0.300855, 0.540844],
[0.237441, 0.305202, 0.541921],
[0.235526, 0.309527, 0.542944],
[0.233603, 0.313828, 0.543914],
[0.231674, 0.318106, 0.544834],
[0.229739, 0.322361, 0.545706],
[0.227802, 0.326594, 0.546532],
[0.225863, 0.330805, 0.547314],
[0.223925, 0.334994, 0.548053],
[0.221989, 0.339161, 0.548752],
[0.220057, 0.343307, 0.549413],
[0.218130, 0.347432, 0.550038],
[0.216210, 0.351535, 0.550627],
[0.214298, 0.355619, 0.551184],
[0.212395, 0.359683, 0.551710],
[0.210503, 0.363727, 0.552206],
[0.208623, 0.367752, 0.552675],
[0.206756, 0.371758, 0.553117],
[0.204903, 0.375746, 0.553533],
[0.203063, 0.379716, 0.553925],
[0.201239, 0.383670, 0.554294],
[0.199430, 0.387607, 0.554642],
[0.197636, 0.391528, 0.554969],
[0.195860, 0.395433, 0.555276],
[0.194100, 0.399323, 0.555565],
[0.192357, 0.403199, 0.555836],
[0.190631, 0.407061, 0.556089],
[0.188923, 0.410910, 0.556326],
[0.187231, 0.414746, 0.556547],
[0.185556, 0.418570, 0.556753],
[0.183898, 0.422383, 0.556944],
[0.182256, 0.426184, 0.557120],
[0.180629, 0.429975, 0.557282],
[0.179019, 0.433756, 0.557430],
[0.177423, 0.437527, 0.557565],
[0.175841, 0.441290, 0.557685],
[0.174274, 0.445044, 0.557792],
[0.172719, 0.448791, 0.557885],
[0.171176, 0.452530, 0.557965],
[0.169646, 0.456262, 0.558030],
[0.168126, 0.459988, 0.558082],
[0.166617, 0.463708, 0.558119],
[0.165117, 0.467423, 0.558141],
[0.163625, 0.471133, 0.558148],
[0.162142, 0.474838, 0.558140],
[0.160665, 0.478540, 0.558115],
[0.159194, 0.482237, 0.558073],
[0.157729, 0.485932, 0.558013],
[0.156270, 0.489624, 0.557936],
[0.154815, 0.493313, 0.557840],
[0.153364, 0.497000, 0.557724],
[0.151918, 0.500685, 0.557587],
[0.150476, 0.504369, 0.557430],
[0.149039, 0.508051, 0.557250],
[0.147607, 0.511733, 0.557049],
[0.146180, 0.515413, 0.556823],
[0.144759, 0.519093, 0.556572],
[0.143343, 0.522773, 0.556295],
[0.141935, 0.526453, 0.555991],
[0.140536, 0.530132, 0.555659],
[0.139147, 0.533812, 0.555298],
[0.137770, 0.537492, 0.554906],
[0.136408, 0.541173, 0.554483],
[0.135066, 0.544853, 0.554029],
[0.133743, 0.548535, 0.553541],
[0.132444, 0.552216, 0.553018],
[0.131172, 0.555899, 0.552459],
[0.129933, 0.559582, 0.551864],
[0.128729, 0.563265, 0.551229],
[0.127568, 0.566949, 0.550556],
[0.126453, 0.570633, 0.549841],
[0.125394, 0.574318, 0.549086],
[0.124395, 0.578002, 0.548287],
[0.123463, 0.581687, 0.547445],
[0.122606, 0.585371, 0.546557],
[0.121831, 0.589055, 0.545623],
[0.121148, 0.592739, 0.544641],
[0.120565, 0.596422, 0.543611],
[0.120092, 0.600104, 0.542530],
[0.119738, 0.603785, 0.541400],
[0.119512, 0.607464, 0.540218],
[0.119423, 0.611141, 0.538982],
[0.119483, 0.614817, 0.537692],
[0.119699, 0.618490, 0.536347],
[0.120081, 0.622161, 0.534946],
[0.120638, 0.625828, 0.533488],
[0.121380, 0.629492, 0.531973],
[0.122312, 0.633153, 0.530398],
[0.123444, 0.636809, 0.528763],
[0.124780, 0.640461, 0.527068],
[0.126326, 0.644107, 0.525311],
[0.128087, 0.647749, 0.523491],
[0.130067, 0.651384, 0.521608],
[0.132268, 0.655014, 0.519661],
[0.134692, 0.658636, 0.517649],
[0.137339, 0.662252, 0.515571],
[0.140210, 0.665859, 0.513427],
[0.143303, 0.669459, 0.511215],
[0.146616, 0.673050, 0.508936],
[0.150148, 0.676631, 0.506589],
[0.153894, 0.680203, 0.504172],
[0.157851, 0.683765, 0.501686],
[0.162016, 0.687316, 0.499129],
[0.166383, 0.690856, 0.496502],
[0.170948, 0.694384, 0.493803],
[0.175707, 0.697900, 0.491033],
[0.180653, 0.701402, 0.488189],
[0.185783, 0.704891, 0.485273],
[0.191090, 0.708366, 0.482284],
[0.196571, 0.711827, 0.479221],
[0.202219, 0.715272, 0.476084],
[0.208030, 0.718701, 0.472873],
[0.214000, 0.722114, 0.469588],
[0.220124, 0.725509, 0.466226],
[0.226397, 0.728888, 0.462789],
[0.232815, 0.732247, 0.459277],
[0.239374, 0.735588, 0.455688],
[0.246070, 0.738910, 0.452024],
[0.252899, 0.742211, 0.448284],
[0.259857, 0.745492, 0.444467],
[0.266941, 0.748751, 0.440573],
[0.274149, 0.751988, 0.436601],
[0.281477, 0.755203, 0.432552],
[0.288921, 0.758394, 0.428426],
[0.296479, 0.761561, 0.424223],
[0.304148, 0.764704, 0.419943],
[0.311925, 0.767822, 0.415586],
[0.319809, 0.770914, 0.411152],
[0.327796, 0.773980, 0.406640],
[0.335885, 0.777018, 0.402049],
[0.344074, 0.780029, 0.397381],
[0.352360, 0.783011, 0.392636],
[0.360741, 0.785964, 0.387814],
[0.369214, 0.788888, 0.382914],
[0.377779, 0.791781, 0.377939],
[0.386433, 0.794644, 0.372886],
[0.395174, 0.797475, 0.367757],
[0.404001, 0.800275, 0.362552],
[0.412913, 0.803041, 0.357269],
[0.421908, 0.805774, 0.351910],
[0.430983, 0.808473, 0.346476],
[0.440137, 0.811138, 0.340967],
[0.449368, 0.813768, 0.335384],
[0.458674, 0.816363, 0.329727],
[0.468053, 0.818921, 0.323998],
[0.477504, 0.821444, 0.318195],
[0.487026, 0.823929, 0.312321],
[0.496615, 0.826376, 0.306377],
[0.506271, 0.828786, 0.300362],
[0.515992, 0.831158, 0.294279],
[0.525776, 0.833491, 0.288127],
[0.535621, 0.835785, 0.281908],
[0.545524, 0.838039, 0.275626],
[0.555484, 0.840254, 0.269281],
[0.565498, 0.842430, 0.262877],
[0.575563, 0.844566, 0.256415],
[0.585678, 0.846661, 0.249897],
[0.595839, 0.848717, 0.243329],
[0.606045, 0.850733, 0.236712],
[0.616293, 0.852709, 0.230052],
[0.626579, 0.854645, 0.223353],
[0.636902, 0.856542, 0.216620],
[0.647257, 0.858400, 0.209861],
[0.657642, 0.860219, 0.203082],
[0.668054, 0.861999, 0.196293],
[0.678489, 0.863742, 0.189503],
[0.688944, 0.865448, 0.182725],
[0.699415, 0.867117, 0.175971],
[0.709898, 0.868751, 0.169257],
[0.720391, 0.870350, 0.162603],
[0.730889, 0.871916, 0.156029],
[0.741388, 0.873449, 0.149561],
[0.751884, 0.874951, 0.143228],
[0.762373, 0.876424, 0.137064],
[0.772852, 0.877868, 0.131109],
[0.783315, 0.879285, 0.125405],
[0.793760, 0.880678, 0.120005],
[0.804182, 0.882046, 0.114965],
[0.814576, 0.883393, 0.110347],
[0.824940, 0.884720, 0.106217],
[0.835270, 0.886029, 0.102646],
[0.845561, 0.887322, 0.099702],
[0.855810, 0.888601, 0.097452],
[0.866013, 0.889868, 0.095953],
[0.876168, 0.891125, 0.095250],
[0.886271, 0.892374, 0.095374],
[0.896320, 0.893616, 0.096335],
[0.906311, 0.894855, 0.098125],
[0.916242, 0.896091, 0.100717],
[0.926106, 0.897330, 0.104071],
[0.935904, 0.898570, 0.108131],
[0.945636, 0.899815, 0.112838],
[0.955300, 0.901065, 0.118128],
[0.964894, 0.902323, 0.123941],
[0.974417, 0.903590, 0.130215],
[0.983868, 0.904867, 0.136897],
[0.993248, 0.906157, 0.143936]]
_colormaps = dict(
# Some colormap presets
autumn=Colormap([(1., 0., 0., 1.), (1., 1., 0., 1.)]),
blues=Colormap([(1., 1., 1., 1.), (0., 0., 1., 1.)]),
cool=Colormap([(0., 1., 1., 1.), (1., 0., 1., 1.)]),
greens=Colormap([(1., 1., 1., 1.), (0., 1., 0., 1.)]),
reds=Colormap([(1., 1., 1., 1.), (1., 0., 0., 1.)]),
spring=Colormap([(1., 0., 1., 1.), (1., 1., 0., 1.)]),
summer=Colormap([(0., .5, .4, 1.), (1., 1., .4, 1.)]),
fire=_Fire(),
grays=_Grays(),
hot=_Hot(),
ice=_Ice(),
winter=_Winter(),
light_blues=_SingleHue(),
orange=_SingleHue(hue=35),
viridis=Colormap(ColorArray(_viridis_data[::2])),
# Diverging presets
coolwarm=Colormap(ColorArray(
[
(226, 0.59, 0.92), (222, 0.44, 0.99), (218, 0.26, 0.97),
(30, 0.01, 0.87),
(20, 0.3, 0.96), (15, 0.5, 0.95), (8, 0.66, 0.86)
],
color_space="hsv"
)),
PuGr=_Diverging(145, 280, 0.85, 0.30),
GrBu=_Diverging(255, 133, 0.75, 0.6),
GrBu_d=_Diverging(255, 133, 0.75, 0.6, "dark"),
RdBu=_Diverging(220, 20, 0.75, 0.5),
# Configurable colormaps
cubehelix=CubeHelixColormap(),
single_hue=_SingleHue,
hsl=_HSL,
husl=_HUSL,
diverging=_Diverging
)
def get_colormap(name, *args, **kwargs):
"""Obtain a colormap
Some colormaps can have additional configuration parameters. Refer to
their corresponding documentation for more information.
Parameters
----------
name : str | Colormap
Colormap name. Can also be a Colormap for pass-through.
Examples
--------
>>> get_colormap('autumn')
>>> get_colormap('single_hue', hue=10)
"""
if isinstance(name, BaseColormap):
cmap = name
else:
if not isinstance(name, string_types):
raise TypeError('colormap must be a Colormap or string name')
if name not in _colormaps:
raise KeyError('colormap name %s not found' % name)
cmap = _colormaps[name]
if inspect.isclass(cmap):
cmap = cmap(*args, **kwargs)
return cmap
def get_colormaps():
"""Return the list of colormap names."""
return _colormaps.copy()
|
bsd-3-clause
|
nyuwireless/ns3-mmwave
|
src/core/examples/sample-rng-plot.py
|
188
|
1246
|
# -*- Mode:Python; -*-
# /*
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License version 2 as
# * published by the Free Software Foundation
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# */
# Demonstrate use of ns-3 as a random number generator integrated with
# plotting tools; adapted from Gustavo Carneiro's ns-3 tutorial
import numpy as np
import matplotlib.pyplot as plt
import ns.core
# mu, var = 100, 225
rng = ns.core.NormalVariable(100.0, 225.0)
x = [rng.GetValue() for t in range(10000)]
# the histogram of the data
n, bins, patches = plt.hist(x, 50, normed=1, facecolor='g', alpha=0.75)
plt.title('ns-3 histogram')
plt.text(60, .025, r'$\mu=100,\ \sigma=15$')
plt.axis([40, 160, 0, 0.03])
plt.grid(True)
plt.show()
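# Editor's sketch (not part of the original ns-3 example): without ns-3
# installed, roughly the same figure can be produced with numpy alone. Note
# that ns.core.NormalVariable takes (mean, variance), so variance 225 matches
# the sigma=15 annotation above; density=True replaces the older normed=1.
def numpy_equivalent_histogram(n_samples=10000):
    sigma = np.sqrt(225.0)
    samples = np.random.normal(loc=100.0, scale=sigma, size=n_samples)
    plt.hist(samples, 50, density=True, facecolor='g', alpha=0.75)
    plt.title('numpy histogram (mu=100, sigma=15)')
    plt.show()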
|
gpl-2.0
|
tensorflow/lingvo
|
lingvo/core/summary_utils.py
|
1
|
17022
|
# Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common utility functions for generating summaries."""
import re
import time
import lingvo.compat as tf
from lingvo.core import base_input_generator
from lingvo.core import cluster_factory
from lingvo.core import plot
from lingvo.core import py_utils
import numpy as np
def _ShouldAddSummary():
return cluster_factory.Current().add_summary
def scalar(*args, **kwargs): # pylint: disable=invalid-name
if _ShouldAddSummary():
return tf.summary.scalar(*args, **kwargs)
def scalar_input_stats(*args, **kwargs): # pylint: disable=invalid-name
collections = kwargs.pop('collections', []) + [
base_input_generator.INPUT_DATA_STATS_SUMMARIES_COLLECTION
]
return tf.summary.scalar(*args, **kwargs, collections=collections)
def histogram(*args, **kwargs): # pylint: disable=invalid-name
if _ShouldAddSummary():
return tf.summary.histogram(*args, **kwargs)
def image(*args, **kwargs): # pylint: disable=invalid-name
if _ShouldAddSummary():
return tf.summary.image(*args, **kwargs)
def text(*args, **kwargs): # pylint: disable=invalid-name
if _ShouldAddSummary():
return tf.summary.text(*args, **kwargs)
def scalar_v2(*args, **kwargs): # pylint: disable=invalid-name
if _ShouldAddSummary():
if len(args) <= 2 and 'step' not in kwargs:
kwargs['step'] = py_utils.GetGlobalStep()
tf.compat.v2.summary.scalar(*args, **kwargs)
def histogram_v2(*args, **kwargs): # pylint: disable=invalid-name
if _ShouldAddSummary():
if len(args) <= 2 and 'step' not in kwargs:
kwargs['step'] = py_utils.GetGlobalStep()
tf.compat.v2.summary.histogram(*args, **kwargs)
def image_v2(*args, **kwargs): # pylint: disable=invalid-name
if _ShouldAddSummary():
if len(args) <= 2 and 'step' not in kwargs:
kwargs['step'] = py_utils.GetGlobalStep()
tf.compat.v2.summary.image(*args, **kwargs)
def text_v2(*args, **kwargs): # pylint: disable=invalid-name
if _ShouldAddSummary():
if len(args) <= 2 and 'step' not in kwargs:
kwargs['step'] = py_utils.GetGlobalStep()
tf.compat.v2.summary.text(*args, **kwargs)
def SequenceLength(padding):
"""Computes the length of a sequence based on binary padding.
Args:
padding: A tensor of binary paddings shaped [batch, seqlen].
Returns:
    seq_lens: A tensor of shape [batch] containing the non-padded length of
    each sequence in the batch.
"""
seq_lens = tf.cast(tf.round(tf.reduce_sum(1 - padding, axis=1)), tf.int32)
# Get rid of any extra dimensions.
batch_size = tf.shape(padding)[0]
seq_lens = tf.reshape(seq_lens, [batch_size], name='seq_lens')
return seq_lens
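# Editor's sketch (not part of the original module): illustrates SequenceLength
# on a toy padding tensor; assumes eager execution when called.
def _SequenceLengthExample():  # pylint: disable=invalid-name
  # 1.0 marks padded positions and 0.0 real tokens, so the two rows below have
  # non-padded lengths 3 and 1 respectively.
  padding = tf.constant([[0., 0., 0., 1.],
                         [0., 1., 1., 1.]])
  return SequenceLength(padding)  # -> [3, 1]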
def TrimPaddingAndPlotSequence(fig, axes, seq_matrix, seq_len, **kwargs):
"""Trims the time axis of seq_matrix with shape (dim, time) and plots it.
For use as a plot function with MatplotlibFigureSummary.
Args:
fig: A matplotlib figure handle.
axes: A matplotlib axes handle.
seq_matrix: A 2D ndarray shaped (num_rows, time).
seq_len: Integer length to use to trim the time axis of seq_matrix.
**kwargs: Additional keyword args to pass to plot.AddImage.
"""
plot.AddImage(fig, axes, seq_matrix[:, :seq_len], **kwargs)
def TrimPaddingAndPlotAttention(fig,
axes,
atten_matrix,
src_len,
tgt_len,
transcript=None,
**kwargs):
"""Trims axes of atten_matrix with shape (tgt_time, src_time) and plots it.
For use as a plot function with MatplotlibFigureSummary.
Args:
fig: A matplotlib figure handle.
axes: A matplotlib axes handle.
atten_matrix: A 2D ndarray shaped (tgt_time, src_time).
src_len: Integer length to use to trim the src_time axis of atten_matrix.
tgt_len: Integer length to use to trim the tgt_time axis of atten_matrix.
transcript: transcript for the target sequence.
**kwargs: Additional keyword args to pass to plot.AddImage.
"""
plot.AddImage(
fig, axes, atten_matrix[:tgt_len, :src_len], clim=(0, 1), **kwargs)
if transcript is not None:
if isinstance(transcript, np.ndarray):
transcript = ' '.join(transcript[:src_len])
axes.set_xlabel(plot.ToUnicode(transcript), size='x-small', wrap=True)
def AddAttentionSummary(name,
attention_tensors,
src_paddings,
tgt_paddings,
transcripts=None,
max_outputs=3):
"""Adds an image summary showing the attention probability matrix and state.
Tensors are in sequence tensor format with the batch dimension in axis 1.
Args:
name: Summary name.
attention_tensors: A list of 3D tensors shaped [target_len, batch_size,
source_len] where attention[i, j, k] is the probability for the i-th
output attending to the k-th input for element j in the batch.
src_paddings: A tensor of binary paddings shaped [source_len, batch] for the
source sequence. Or a list of tensors of the same length as
attention_tensors with a separate paddings for each entry in
attention_tensors.
tgt_paddings: A tensor of binary paddings shaped [target_len, batch] for the
target sequence. Or a list of tensors of the same length as
attention_tensors with a separate paddings for each entry in
attention_tensors.
transcripts: Optional, transcripts shaped [batch, source_len] for the source
sequence.
max_outputs: Integer maximum number of elements of the batch to plot.
"""
def Transpose(paddings):
paddings = paddings if isinstance(paddings, list) else [paddings]
return [tf.transpose(p) for p in paddings]
AddAttentionSummaryBatchMajor(
name, [tf.transpose(a, [1, 0, 2]) for a in attention_tensors],
Transpose(src_paddings), Transpose(tgt_paddings), transcripts,
max_outputs)
def AddAttentionSummaryBatchMajor(name,
attention_tensors,
src_paddings,
tgt_paddings,
transcripts=None,
max_outputs=3):
"""Adds an image summary showing the attention probability matrix and state.
  Unlike AddAttentionSummary(), this function takes all tensors with the batch
  dimension in axis 0.
Args:
name: Summary name.
attention_tensors: A list of 3D tensors shaped [batch_size, target_len,
source_len] where attention[b, i, j] is the probability for the i-th
output attending to the j-th input for element b in the batch.
src_paddings: A tensor of binary paddings shaped [batch, source_len] for the
source sequence. Or a list of tensors of the same length as
attention_tensors with a separate paddings for each entry in
attention_tensors.
tgt_paddings: A tensor of binary paddings shaped [batch, target_len] for the
target sequence. Or a list of tensors of the same length as
attention_tensors with a separate paddings for each entry in
attention_tensors.
transcripts: Optional, transcripts shaped [batch, source_len] for the source
sequence.
max_outputs: Integer maximum number of elements of the batch to plot.
"""
def VerifyLen(paddings):
length = len(paddings) if isinstance(paddings, list) else 1
if length != 1 and length != len(attention_tensors):
raise ValueError('Bad length of paddings list {}'.format(length))
VerifyLen(src_paddings)
VerifyLen(tgt_paddings)
# Verify shapes.
for i, attention_tensor in enumerate(attention_tensors):
src, tgt = src_paddings, tgt_paddings
src = src[0 if len(src) == 1 else i] if isinstance(src, list) else src
tgt = tgt[0 if len(tgt) == 1 else i] if isinstance(tgt, list) else tgt
tgt_shape = py_utils.GetShape(tgt)
attention_tensors[i] = tf.identity(
py_utils.with_dependencies([
py_utils.assert_equal(
py_utils.GetShape(attention_tensor),
tgt_shape[:2] + [py_utils.GetShape(src)[1]] + tgt_shape[2:])
], attention_tensor),
re.sub(':.*$', '', GetTensorName(attention_tensor, name, i)))
if not _ShouldAddSummary():
return
def ToLengths(paddings):
paddings = paddings if isinstance(paddings, list) else [paddings]
return [SequenceLength(p) for p in paddings]
def Get(lengths, i):
return lengths[0 if len(lengths) == 1 else i]
src_lens = ToLengths(src_paddings)
tgt_lens = ToLengths(tgt_paddings)
with plot.MatplotlibFigureSummary(
name + '/Attention',
max_outputs=max_outputs,
gridspec_kwargs={'hspace': 0.3}) as fig:
for n, atten in enumerate(attention_tensors):
# Diagnostic metric that decreases as attention picks up.
max_entropy = tf.math.log(tf.cast(Get(src_lens, n), tf.float32))
max_entropy = tf.expand_dims(tf.expand_dims(max_entropy, -1), -1)
atten_normalized_entropy = -atten * tf.math.log(atten +
1e-10) / max_entropy
scalar(name + '/Attention/average_normalized_entropy/%d' % n,
tf.reduce_mean(atten_normalized_entropy))
args = [atten, Get(src_lens, n), Get(tgt_lens, n)]
if transcripts is not None and n == 0:
args.append(transcripts)
fig.AddSubplot(
args,
TrimPaddingAndPlotAttention,
title=GetTensorName(atten, name, n),
xlabel='Input',
ylabel='Output')
def AddNormSummary(name, vs_gs):
""""Returns and creates summary for norms of vs and their gradients gs.
Args:
name: A name string for summary.
vs_gs: A `.NestedMap` or a list of `.NestedMap` of (variable, gradient).
Returns:
norm of variables, and norm of gradients.
"""
flatten = py_utils.Flatten(vs_gs)
v_norm = tf.sqrt(py_utils.SumSquared([v for (v, _) in flatten]))
scalar('var_norm/%s' % name, v_norm)
g_norm = tf.sqrt(py_utils.SumSquared([g for (_, g) in flatten]))
scalar('grad_norm/%s' % name, g_norm)
return v_norm, g_norm
def CollectVarHistogram(vs_gs):
"""Adds histogram summaries for variables and gradients."""
for name, (var, grad) in vs_gs.FlattenItems():
name = py_utils.SanitizeScopeKey(name)
with tf.device(var.device), tf.name_scope(name + '/summary'):
if isinstance(grad, tf.IndexedSlices):
var = tf.gather(var, grad.indices)
grad = grad.values
if var.dtype.is_complex:
var = tf.abs(var)
grad = tf.abs(grad)
histogram('var_hist/' + name, var)
histogram('grad_hist/' + name, grad)
def PrepareSequenceForPlot(tensor, padding, name):
"""Prepares a sequence feature for plotting.
The sequence feature is transposed and channels are flattened.
Args:
tensor: A n-D Tensor of shape [batch, time, ...].
padding: A Tensor of shape [batch, time].
name: A string as the name of the reshaped Tensor, which will be used as the
subcaption for plotting.
Returns:
A tuple of:
reshaped_tensor: A 3-D Tensor of shape [batch, dim, time].
sequence_length: A 1-D Tensor of shape [batch].
"""
# Flatten any dimensions beyond the third into the third.
batch_size, max_len = py_utils.GetShape(tensor, 2)
plot_tensor = tf.reshape(tensor, [batch_size, max_len, -1])
plot_tensor = tf.transpose(plot_tensor, [0, 2, 1], name=name)
return (plot_tensor, SequenceLength(padding))
def PlotSequenceFeatures(plots, name, **kwargs):
"""Plots a stack of sequence features.
Args:
plots: A list of tuple (tensor, seq_len), as returned by
PrepareSequenceForPlot().
name: A string for the caption of the plot.
**kwargs: Keyword arguments passed to AddSubplot().
"""
if not _ShouldAddSummary():
return
with plot.MatplotlibFigureSummary(name, figsize=(8, len(plots) * 3.5)) as fig:
for i, (tensor, seq_len) in enumerate(plots):
fig.AddSubplot([tensor, seq_len],
TrimPaddingAndPlotSequence,
title=GetTensorName(tensor, name, i),
**kwargs)
class StatsCounter:
"""A single counter in TF."""
def __init__(self, name):
self._name = name
self._var = py_utils.CreateVariable(
name=name,
params=py_utils.WeightParams([], py_utils.WeightInit.Constant(0),
tf.int64),
trainable=False)
self._value = self._var.value() + 0 # Makes a copy.
def Value(self):
"""Returns the current counter value."""
return self._value
def IncBy(self, delta):
"""Increment the counter by delta and return the new value."""
# NOTE: We must ensure _value is computed (_var + 0) before
# updating _var with delta.
delta = tf.cast(delta, tf.int64)
with tf.control_dependencies([self._value]):
scalar(self._name, self._value)
return tf.identity(tf.assign_add(self._var, delta))
class StepRateTracker:
"""A class that tracks step/example rate."""
def __init__(self):
self._first_step = -1
self._time_steps = [] # History of (timestamp, global_step, total_examples)
def ComputeStepRate(self, current_steps, total_examples):
"""Computes the overall step rate."""
if self._time_steps:
total_examples += self._time_steps[-1][-1]
else:
self._first_step = current_steps
self._time_steps.append((time.time(), current_steps, total_examples))
    # Keeps a relatively long history to compute a smooth steps/second.
# Removes duplicate stats for step = 0 to get rid of the warm-up period.
# Scale up the amount of history used. The first few steps are generally
# much slower and can skew the statistic significantly otherwise.
if current_steps - self._first_step < 1000:
history = 100
elif current_steps - self._first_step < 10000:
history = 1000
else:
history = 10000
while (self._time_steps[-1][1] - self._time_steps[0][1] > history or
(len(self._time_steps) > 1 and
self._time_steps[0][1] == self._time_steps[1][1])):
del self._time_steps[0]
(t0, s0, e0), (t1, s1, e1) = self._time_steps[0], self._time_steps[-1]
rate = 0.0
example_rate = 0.0
if t1 > t0 + 1:
elapsed_secs = t1 - t0
rate = (s1 - s0) / elapsed_secs
example_rate = (e1 - e0) / elapsed_secs
tf.logging.info('Steps/second: %f, Examples/second: %f', rate, example_rate)
return rate, example_rate, total_examples
def ModelAnalysis(model):
"""Returns a text showing variable sizes and their total size."""
class Analyzer:
"""Helper class."""
def __init__(self):
self._seen_var = {}
self.total = 0
def __call__(self, v):
assert isinstance(v, tf.Variable)
# pylint: disable=protected-access
if not v.shape.is_fully_defined():
# Only Cudnn RNN params lack static shapes.
if hasattr(v, 'approx_size'):
size = v.approx_size
else:
return '%-20s %10s %s' % (v.shape, 'n/a', v._shared_name)
else:
size = v.shape.num_elements()
if v._shared_name not in self._seen_var:
self._seen_var[v._shared_name] = size
self.total += size
return '%-20s %10d %s' % (v.shape, size, v._shared_name)
analyzer = Analyzer()
output = '\n'
output += model.vars.Transform(analyzer).DebugString()
output += '\n'
output += '=' * 100
output += f'\ntotal #params: {analyzer.total:,}\n'
return output, analyzer.total
def GetTensorName(tensor, name_eager=None, i_eager=None):
"""Returns tensor name.
It is useful for compatibility with eager mode.
Args:
tensor: tensor
name_eager: additional string to append in eager mode
i_eager: additional index to append in eager mode
Returns:
tensor.name in session mode, or concatenation of name_eager, i_eager
in eager mode
"""
if not tf.executing_eagerly():
tensor_name = tensor.name
else:
if name_eager and i_eager:
tensor_name = f'[eager]_{name_eager}_{i_eager}'
elif name_eager:
tensor_name = f'[eager]_{name_eager}'
elif i_eager:
tensor_name = f'[eager]_{i_eager}'
else:
tensor_name = '[eager]'
return tensor_name
|
apache-2.0
|
martynjarvis/stb-tester
|
stbt-camera.d/stbt_camera_calibrate.py
|
1
|
19844
|
#!/usr/bin/python -u
# Encoding: utf-8
# pylint: disable=W0212
import math
import re
import readline
import subprocess
import sys
import time
from contextlib import contextmanager
from os.path import dirname
import cv2
import gi
import numpy
import _stbt.camera.chessboard as chessboard
import _stbt.core
import stbt
from _stbt import tv_driver
from _stbt.config import set_config, xdg_config_dir
gi.require_version("Gst", "1.0")
from gi.repository import Gst # isort:skip pylint: disable=E0611
COLOUR_SAMPLES = 50
videos = {}
#
# Geometric calibration
#
videos['chessboard'] = chessboard.VIDEO
arrows = list(u'←↙↓↘→↗↑↖')
def off_to_arrow(off):
u"""
>>> print off_to_arrow((1, 1))
↘
>>> print off_to_arrow((-1, 0))
←
"""
if numpy.linalg.norm(off) > 0.5:
angle = math.atan2(off[1], -off[0])
return arrows[int(angle / 2 / math.pi * len(arrows) + len(arrows) + 0.5)
% len(arrows)]
else:
return u'O'
# ANSI colour codes for printing progress.
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
def rate(r):
"""How good is the match on a scale of 0-2?"""
if r < 0.5:
return 2
elif r < 5:
return 1
else:
return 0
def print_error_map(outstream, ideal_points, measured_points):
oldx = 0.0
outstream.write(
BOLD + "Geometric Calibration Report:\n" + ENDC +
"\n"
" Legend:\n"
" " + OKGREEN + "O" + ENDC + " - Pixel perfect\n"
" " + WARNING + "↗" + ENDC + " - Off by up-to 5 pixels\n"
" " + FAIL + "↗" + ENDC + " - Off by more than 5 pixels\n"
"\n")
for ideal, measured in sorted(zip(ideal_points, measured_points),
key=lambda a: (-a[0][1], a[0][0])):
if ideal[0] < oldx:
outstream.write('\n')
off = ideal - measured
outstream.write(
(u"%s%s" % ([FAIL, WARNING, OKGREEN][rate(numpy.linalg.norm(off))],
off_to_arrow(off)))
.encode('utf-8'))
oldx = ideal[0]
outstream.write("\n" + ENDC)
def geometric_calibration(tv, device, interactive=True):
tv.show('chessboard')
sys.stdout.write("Performing Geometric Calibration\n")
chessboard_calibration()
if interactive:
while prompt_for_adjustment(device):
try:
chessboard_calibration()
except chessboard.NoChessboardError:
tv.show('chessboard')
chessboard_calibration()
def chessboard_calibration(timeout=10):
from _stbt.gst_utils import array_from_sample
undistorted_appsink = \
stbt._dut._display.source_pipeline.get_by_name('undistorted_appsink')
sys.stderr.write("Searching for chessboard\n")
endtime = time.time() + timeout
while time.time() < endtime:
sample = undistorted_appsink.emit('pull-sample')
try:
input_image = array_from_sample(sample)
params = chessboard.calculate_calibration_params(input_image)
break
except chessboard.NoChessboardError:
if time.time() > endtime:
raise
geometriccorrection = stbt._dut._display.source_pipeline.get_by_name(
'geometric_correction')
geometriccorrection_params = {
'camera-matrix': ('{fx} 0 {cx}'
' 0 {fy} {cy}'
' 0 0 1').format(**params),
'distortion-coefficients': '{k1} {k2} {p1} {p2} {k3}'.format(**params),
'inv-homography-matrix': (
'{ihm11} {ihm21} {ihm31} '
'{ihm12} {ihm22} {ihm32} '
'{ihm13} {ihm23} {ihm33}').format(**params),
}
for key, value in geometriccorrection_params.items():
geometriccorrection.set_property(key, value)
print_error_map(
sys.stderr,
*chessboard.find_corrected_corners(params, input_image))
set_config(
'global', 'geometriccorrection_params',
' '.join('%s="%s"' % v for v in geometriccorrection_params.items()))
#
# Colour Measurement
#
def qrc(data):
import cStringIO
import qrcode
import qrcode.image.svg
out = cStringIO.StringIO()
qrcode.make(data, image_factory=qrcode.image.svg.SvgPathImage).save(out)
qrsvg = out.getvalue()
return re.search('d="(.*?)"', qrsvg).group(1)
def generate_colours_video():
import random
template_svg = open(dirname(__file__) + '/colours.svg', 'r').read()
for _ in range(0, 10 * 60 * 8):
colour = '#%06x' % random.randint(0, 256 ** 3)
svg = template_svg.replace('#c0ffee', colour)
svg = svg.replace("m 0,0 26,0 0,26 -26,0 z", qrc(colour))
yield (svg, 1.0 / 8 * Gst.SECOND)
videos['colours2'] = ('image/svg', generate_colours_video)
class QRScanner(object):
def __init__(self):
import zbar
self.scanner = zbar.ImageScanner()
self.scanner.parse_config('enable')
def read_qr_codes(self, image):
import zbar
zimg = zbar.Image(image.shape[1], image.shape[0], 'Y800',
cv2.cvtColor(image, cv2.COLOR_BGR2GRAY).tostring())
self.scanner.scan(zimg)
return [s.data for s in zimg]
def analyse_colours_video(number=None):
"""RGB!"""
errors_in_a_row = 0
n = 0
qrscanner = QRScanner()
for frame, _ in stbt.frames():
if number is not None and n >= number:
return
n = n + 1
# The colour is written above and below the rectangle because we want
# to be sure that the top of the colour box is from the same frame as
# the bottom.
codes = qrscanner.read_qr_codes(frame)
if (len(codes) == 4 and re.match('#[0-9a-f]{6}', codes[0]) and
all(c == codes[0] for c in codes)):
colour_hex = codes[0]
desired = numpy.array((
int(colour_hex[1:3], 16),
int(colour_hex[3:5], 16),
int(colour_hex[5:7], 16)))
colour = cv2.mean(frame[240:480, 520:760])
colour = (colour[2], colour[1], colour[0])
yield (n, desired, colour)
errors_in_a_row = 0
else:
errors_in_a_row += 1
if errors_in_a_row > 50:
raise RuntimeError(
"Failed to find hexidecimal colour description")
def avg_colour(colours):
n = len(colours)
return (
sum([c[0] for c in colours]) / n,
sum([c[1] for c in colours]) / n,
sum([c[2] for c in colours]) / n)
example_v4l2_ctl_output = """\
brightness (int) : min=-64 max=64 step=1 default=15 value=15
contrast (int) : min=0 max=95 step=1 default=30 value=30
"""
def v4l2_ctls(device, data=None):
"""
>>> import pprint
>>> pprint.pprint(dict(v4l2_ctls(None, example_v4l2_ctl_output)))
{'brightness': {'default': '15',
'max': '64',
'min': '-64',
'step': '1',
'value': '15'},
'contrast': {'default': '30',
'max': '95',
'min': '0',
'step': '1',
'value': '30'}}
"""
if data is None:
data = subprocess.check_output(['v4l2-ctl', '-d', device, '-l'])
for line in data.split('\n'):
vals = line.strip().split()
if vals == []:
continue
yield (vals[0], dict([v.split('=', 2) for v in vals[3:]]))
def setup_tab_completion(completer):
next_ = [None]
generator = [None]
def readline_completer(text, state):
if state == 0:
generator[0] = iter(completer(text))
next_[0] = 0
assert state == next_[0]
next_[0] += 1
try:
return generator[0].next()
except StopIteration:
return None
readline.parse_and_bind("tab: complete")
readline.set_completer_delims("")
readline.set_completer(readline_completer)
def prompt_for_adjustment(device):
# Allow adjustment
subprocess.check_call(['v4l2-ctl', '-d', device, '-L'])
ctls = dict(v4l2_ctls(device))
def v4l_completer(text):
if text == '':
return ['yes', 'no', 'set']
if text.startswith('set '):
return ['set ' + x + ' '
for x in ctls.keys() if x.startswith(text[4:])]
if "set ".startswith(text.lower()):
return ["set "]
if 'yes'.startswith(text.lower()):
return ["yes"]
if 'no'.startswith(text.lower()):
return ["no"]
setup_tab_completion(v4l_completer)
cmd = raw_input("Happy? [Y/n/set] ").strip().lower()
if cmd.startswith('set'):
x = cmd.split(None, 2)
if len(x) != 3:
print "Didn't understand command %r" % x
else:
_, var, val = x
subprocess.check_call(
['v4l2-ctl', '-d', device, "-c", "%s=%s" % (var, val)])
set_config('global', 'v4l2_ctls', ','.join(
["%s=%s" % (c, a['value'])
for c, a in dict(v4l2_ctls(device)).items()]))
if cmd.startswith('y') or cmd == '':
return False # We're done
else:
return True # Continue looping
def pop_with_progress(iterator, total, width=20, stream=sys.stderr):
stream.write('\n')
for n, v in enumerate(iterator):
if n == total:
break
progress = (n * width) // total
stream.write(
'[%s] %8d / %d\r' % (
'#' * progress + ' ' * (width - progress), n, total))
yield v
stream.write('\r' + ' ' * (total + 28) + '\r')
def fit_fn(ideals, measureds):
"""
>>> f = fit_fn([120, 240, 150, 18, 200],
... [120, 240, 150, 18, 200])
>>> print f(0), f(56)
0.0 56.0
"""
from scipy.optimize import curve_fit # pylint: disable=E0611
from scipy.interpolate import interp1d # pylint: disable=E0611
POINTS = 5
xs = [n * 255.0 / (POINTS + 1) for n in range(0, POINTS + 2)]
def fn(x, ys):
return interp1d(xs, numpy.array([0] + ys + [255]))(x)
ys, _ = curve_fit( # pylint:disable=W0632
lambda x, *args: fn(x, list(args)), ideals, measureds, [0.0] * POINTS)
return interp1d(xs, numpy.array([0] + ys.tolist() + [255]))
@contextmanager
def colour_graph():
if not _can_show_graphs():
sys.stderr.write("Install matplotlib and scipy for graphical "
"assistance with colour calibration\n")
yield lambda: None
return
from matplotlib import pyplot
sys.stderr.write('Analysing colours...\n')
pyplot.ion()
ideals = [[], [], []]
measureds = [[], [], []]
pyplot.figure()
def update():
pyplot.cla()
pyplot.axis([0, 255, 0, 255])
pyplot.ylabel("Measured colour")
pyplot.xlabel("Ideal colour")
pyplot.grid()
for n, ideal, measured in pop_with_progress(
analyse_colours_video(), COLOUR_SAMPLES):
pyplot.draw()
for c in [0, 1, 2]:
ideals[c].append(ideal[c])
measureds[c].append(measured[c])
pyplot.plot([ideal[0]], [measured[0]], 'rx',
[ideal[1]], [measured[1]], 'gx',
[ideal[2]], [measured[2]], 'bx')
fits = [fit_fn(ideals[n], measureds[n]) for n in [0, 1, 2]]
pyplot.plot(range(0, 256), [fits[0](x) for x in range(0, 256)], 'r-',
range(0, 256), [fits[1](x) for x in range(0, 256)], 'g-',
range(0, 256), [fits[2](x) for x in range(0, 256)], 'b-')
pyplot.draw()
try:
yield update
finally:
pyplot.close()
def _can_show_graphs():
try:
# pylint: disable=W0612
from matplotlib import pyplot
from scipy.optimize import curve_fit # pylint: disable=E0611
from scipy.interpolate import interp1d # pylint: disable=E0611
return True
except ImportError:
sys.stderr.write("Install matplotlib and scipy for graphical "
"assistance with colour calibration\n")
return False
def adjust_levels(tv, device):
tv.show("colours2")
with colour_graph() as update_graph:
update_graph()
while prompt_for_adjustment(device):
update_graph()
#
# Uniform Illumination
#
FRAME_AVERAGE_COUNT = 16
videos['blank-white'] = (
'video/x-raw,format=BGR,width=1280,height=720',
lambda: [(bytearray([0xff, 0xff, 0xff]) * 1280 * 720, 60 * Gst.SECOND)])
videos['blank-black'] = (
'video/x-raw,format=BGR,width=1280,height=720',
lambda: [(bytearray([0, 0, 0]) * 1280 * 720, 60 * Gst.SECOND)])
def _create_reference_png(filename):
# Throw away some frames to let everything settle
pop_with_progress(stbt.frames(), 50)
average = None
for frame in pop_with_progress(stbt.frames(), FRAME_AVERAGE_COUNT):
if average is None:
average = numpy.zeros(shape=frame[0].shape, dtype=numpy.uint16)
average += frame[0]
average /= FRAME_AVERAGE_COUNT
cv2.imwrite(filename, numpy.array(average, dtype=numpy.uint8))
def await_blank(brightness):
for frame, _ in stbt.frames(10):
grayscale = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
min_, max_, _, _ = cv2.minMaxLoc(grayscale)
contrast = max_ - min_
if contrast < 100 and abs(numpy.median(frame) - brightness) < 100:
break
else:
sys.stderr.write(
"WARNING: Did not detect blank frame of brightness %i" % brightness)
def calibrate_illumination(tv):
img_dir = xdg_config_dir() + '/stbt/'
props = {
'white-reference-image': '%s/vignetting-reference-white.png' % img_dir,
'black-reference-image': '%s/vignetting-reference-black.png' % img_dir,
}
tv.show("blank-white")
await_blank(255)
_create_reference_png(props['white-reference-image'])
tv.show("blank-black")
await_blank(0)
_create_reference_png(props['black-reference-image'])
contraststretch = stbt._dut._display.source_pipeline.get_by_name(
'illumination_correction')
for k, v in reversed(props.items()):
contraststretch.set_property(k, v)
set_config(
'global', 'contraststretch_params',
' '.join(["%s=%s" % (k, v) for k, v in props.items()]))
#
# setup
#
uvcvideosrc = ('uvch264src device=%(v4l2_device)s name=src auto-start=true '
'rate-control=vbr initial-bitrate=5000000 '
'peak-bitrate=10000000 average-bitrate=5000000 '
'v4l2src0::extra-controls="ctrls, %(v4l2_ctls)s" src.vidsrc ! '
'video/x-h264,width=1920 ! h264parse')
v4l2videosrc = 'v4l2src device=%(v4l2_device)s extra-controls=%(v4l2_ctls)s'
def list_cameras():
gi.require_version('GUdev', '1.0')
from gi.repository import GUdev # pylint: disable=E0611
client = GUdev.Client.new(['video4linux/usb_device'])
devices = client.query_by_subsystem('video4linux')
for d in devices:
# Prefer to refer to a device by path. This means that you are
# referring to a particular USB port and is stable across reboots.
dev_files = d.get_device_file_symlinks()
path_dev_files = [x for x in dev_files if 'by-path' in x]
dev_file = (path_dev_files + [d.get_device_file])[0]
name = (d.get_property('ID_VENDOR_ENC').decode('string-escape') + ' ' +
d.get_property('ID_MODEL_ENC').decode('string-escape'))
if d.get_property('ID_USB_DRIVER') == 'uvcvideo':
source_pipeline = uvcvideosrc
else:
source_pipeline = v4l2videosrc
yield (name, dev_file, source_pipeline)
def setup(source_pipeline):
"""If we haven't got a configured camera offer a list of cameras you might
want to use. In the future it could be useful to allow the user to select
one from the list interactively."""
if (source_pipeline == '' or
stbt.get_config('global', 'v4l2_device', '') == ''):
sys.stderr.write(
            'No camera configured in stbt.conf. Please add parameters '
'"v4l2_device" and "source_pipeline" to section [global] of '
'stbt.conf.\n\n')
cameras = list(list_cameras())
if len(cameras) == 0:
sys.stderr.write("No Cameras Detected\n\n")
else:
sys.stderr.write("Detected cameras:\n\n")
for n, (name, dev_file, source_pipeline) in enumerate(cameras):
sys.stderr.write(
" %i. %s\n"
"\n"
" v4l2_device = %s\n"
" source_pipeline = %s\n\n"
% (n, name, dev_file, source_pipeline))
return None
return stbt.get_config('global', 'v4l2_device')
#
# main
#
defaults = {
'contraststretch_params': '',
'v4l2_ctls': (
'brightness=128,contrast=128,saturation=128,'
'white_balance_temperature_auto=0,white_balance_temperature=6500,'
'gain=60,backlight_compensation=0,exposure_auto=1,'
'exposure_absolute=152,focus_auto=0,focus_absolute=0,'
'power_line_frequency=1'),
'transformation_pipeline': (
'stbtgeometriccorrection name=geometric_correction '
' %(geometriccorrection_params)s '
' ! stbtcontraststretch name=illumination_correction '
' %(contraststretch_params)s '),
}
def parse_args(argv):
parser = _stbt.core.argparser()
tv_driver.add_argparse_argument(parser)
parser.add_argument(
'--noninteractive', action="store_false", dest="interactive",
help="Don't prompt, assume default answer to all questions")
parser.add_argument(
'--skip-geometric', action="store_true",
help="Don't perform geometric calibration")
parser.add_argument(
'--skip-illumination', action='store_true',
help="Don't perform uniform illumination calibration")
return parser.parse_args(argv[1:])
def main(argv):
args = parse_args(argv)
device = setup(args.source_pipeline)
if device is None:
return 1
if args.skip_geometric:
set_config('global', 'geometriccorrection_params', '')
for k, v in defaults.iteritems():
set_config('global', k, v)
# Need to re-parse arguments as the settings above may have affected the
# values we get out.
args = parse_args(argv)
transformation_pipeline = (
'tee name=raw_undistorted '
'raw_undistorted. ! queue leaky=upstream ! videoconvert ! '
' textoverlay text="Capture from camera" ! %s '
'raw_undistorted. ! queue ! appsink drop=true sync=false qos=false'
' max-buffers=1 caps="video/x-raw,format=BGR"'
' name=undistorted_appsink '
'raw_undistorted. ! queue leaky=upstream max-size-buffers=1 ! %s' %
(args.sink_pipeline,
stbt.get_config('global', 'transformation_pipeline')))
sink_pipeline = ('textoverlay text="After correction" ! ' +
args.sink_pipeline)
stbt.init_run(args.source_pipeline, sink_pipeline, 'none', False, False,
transformation_pipeline)
tv = tv_driver.create_from_args(args, videos)
if not args.skip_geometric:
geometric_calibration(tv, device, interactive=args.interactive)
if args.interactive:
adjust_levels(tv, device)
if not args.skip_illumination:
calibrate_illumination(tv)
if args.interactive:
raw_input("Calibration complete. Press <ENTER> to exit")
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
lgpl-2.1
|
gxxjjj/QuantEcon.py
|
quantecon/estspec.py
|
7
|
4856
|
"""
Filename: estspec.py
Authors: Thomas Sargent, John Stachurski
Functions for working with periodograms of scalar data.
"""
from __future__ import division, print_function
import numpy as np
from numpy.fft import fft
from pandas import ols, Series
def smooth(x, window_len=7, window='hanning'):
"""
Smooth the data in x using convolution with a window of requested
size and type.
Parameters
----------
x : array_like(float)
A flat NumPy array containing the data to smooth
window_len : scalar(int), optional
An odd integer giving the length of the window. Defaults to 7.
window : string
A string giving the window type. Possible values are 'flat',
'hanning', 'hamming', 'bartlett' or 'blackman'
Returns
-------
array_like(float)
The smoothed values
Notes
-----
Application of the smoothing window at the top and bottom of x is
done by reflecting x around these points to extend it sufficiently
in each direction.
"""
if len(x) < window_len:
raise ValueError("Input vector length must be >= window length.")
if window_len < 3:
raise ValueError("Window length must be at least 3.")
if not window_len % 2: # window_len is even
window_len += 1
print("Window length reset to {}".format(window_len))
windows = {'hanning': np.hanning,
'hamming': np.hamming,
'bartlett': np.bartlett,
'blackman': np.blackman,
'flat': np.ones # moving average
}
# === Reflect x around x[0] and x[-1] prior to convolution === #
k = int(window_len / 2)
xb = x[:k] # First k elements
xt = x[-k:] # Last k elements
s = np.concatenate((xb[::-1], x, xt[::-1]))
# === Select window values === #
if window in windows.keys():
w = windows[window](window_len)
else:
msg = "Unrecognized window type '{}'".format(window)
print(msg + " Defaulting to hanning")
w = windows['hanning'](window_len)
return np.convolve(w / w.sum(), s, mode='valid')
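# Editor's usage sketch (not part of the original module): smooth a white
# noise series with a Hanning window; the reflection-based padding described
# above keeps the output the same length as the input.
def _smooth_example():
    x = np.random.randn(200)
    x_smooth = smooth(x, window_len=9, window='hanning')
    assert len(x_smooth) == len(x)
    return x_smooth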
def periodogram(x, window=None, window_len=7):
"""
Computes the periodogram
.. math::
I(w) = (1 / n) | sum_{t=0}^{n-1} x_t e^{itw} |^2
    at the Fourier frequencies w_j := 2 pi j / n, j = 0, ..., n - 1,
    using the fast Fourier transform. Only the frequencies w_j in [0,
pi] and corresponding values I(w_j) are returned. If a window type
is given then smoothing is performed.
Parameters
----------
x : array_like(float)
A flat NumPy array containing the data to smooth
window_len : scalar(int), optional(default=7)
An odd integer giving the length of the window. Defaults to 7.
window : string
A string giving the window type. Possible values are 'flat',
'hanning', 'hamming', 'bartlett' or 'blackman'
Returns
-------
    w : array_like(float)
        Fourier frequencies at which periodogram is evaluated
    I_w : array_like(float)
        Values of periodogram at the Fourier frequencies
"""
n = len(x)
I_w = np.abs(fft(x))**2 / n
w = 2 * np.pi * np.arange(n) / n # Fourier frequencies
w, I_w = w[:int(n/2)+1], I_w[:int(n/2)+1] # Take only values on [0, pi]
if window:
I_w = smooth(I_w, window_len=window_len, window=window)
return w, I_w
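# Editor's usage sketch (not part of the original module): raw versus smoothed
# periodogram of a white noise sample; smoothing pulls the estimate towards
# the flat spectral density of the noise.
def _periodogram_example():
    x = np.random.randn(512)
    w, I_w = periodogram(x)                                      # raw
    w_s, I_w_s = periodogram(x, window='hamming', window_len=9)  # smoothed
    return (w, I_w), (w_s, I_w_s)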
def ar_periodogram(x, window='hanning', window_len=7):
"""
Compute periodogram from data x, using prewhitening, smoothing and
recoloring. The data is fitted to an AR(1) model for prewhitening,
and the residuals are used to compute a first-pass periodogram with
smoothing. The fitted coefficients are then used for recoloring.
Parameters
----------
x : array_like(float)
A flat NumPy array containing the data to smooth
window_len : scalar(int), optional
An odd integer giving the length of the window. Defaults to 7.
window : string
A string giving the window type. Possible values are 'flat',
'hanning', 'hamming', 'bartlett' or 'blackman'
Returns
-------
    w : array_like(float)
        Fourier frequencies at which periodogram is evaluated
    I_w : array_like(float)
        Values of periodogram at the Fourier frequencies
"""
# === run regression === #
x_current, x_lagged = x[1:], x[:-1] # x_t and x_{t-1}
x_current, x_lagged = Series(x_current), Series(x_lagged) # pandas series
results = ols(y=x_current, x=x_lagged, intercept=True, nw_lags=1)
e_hat = results.resid.values
phi = results.beta['x']
# === compute periodogram on residuals === #
w, I_w = periodogram(e_hat, window=window, window_len=window_len)
# === recolor and return === #
I_w = I_w / np.abs(1 - phi * np.exp(1j * w))**2
return w, I_w
|
bsd-3-clause
|
MiroK/lega
|
lega/sine_basis.py
|
1
|
10819
|
from __future__ import division
from sympy import sin, sqrt, pi, Number, Expr, Symbol, lambdify, symbols
from common import function, tensor_product
from lega.integration import Quad1d, Quad2d
from scipy.sparse import eye, diags
from math import pi as PI, sqrt as Sqrt
from sympy.mpmath import quad
from itertools import product
import numpy as np
SQRT_PI = Sqrt(pi)
def sine_basis(n, symbol='x'):
'''
    Functions sin(k*x), k = 1, 2, ..., n, normalized to have unit L^2 norm over [0, pi].
Note that (sin(k*x), k**2) are all the solutions of
-u`` = lambda u in (0, pi)
u(0) = u(pi) = 0
'''
x = Symbol(symbol)
return [sin(k*x)*sqrt(2/pi) for k in range(1, n+1)]
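# Editor's check (not part of the original module): each basis function has
# unit L^2 norm on [0, pi], which is what the sqrt(2/pi) factor above ensures.
def _check_unit_norm(n=3):
    from sympy import integrate, simplify
    x = Symbol('x')
    return [simplify(integrate(v**2, (x, 0, pi))) for v in sine_basis(n)]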
def sine_function(F):
'''
Return a linear combination of len(F_vec) sine basis functions with
coefficients given by F_vec.
'''
if len(F.shape) == 1:
basis = sine_basis(F.shape[0], 'x')
return function(basis, F)
elif len(F.shape) == 2:
basis_x = sine_basis(F.shape[0], 'x')
basis_y = sine_basis(F.shape[1], 'y')
basis = tensor_product([basis_x, basis_y])
# Collapse to coefs by row
F = F.flatten()
return function(basis, F)
else:
raise NotImplementedError
def mass_matrix(n):
'''inner(u, v) for u, v in sine_basis(n).'''
return eye(n)
def stiffness_matrix(n):
'''inner(u`, v`) for u, v in sine_basis(n).'''
return diags(np.arange(1, n+1)**2, 0, shape=(n, n))
def bending_matrix(n):
'''inner(u``, v``) for u, v in sine_basis(n).'''
return diags(np.arange(1, n+1)**4, 0, shape=(n, n))
# Suppose we have V_n = sine_basis(n) and for some function f we want to compute
# (f, v) for v in V_n, where (o, o) is the L^2 inner product over [0, pi].
# The idea is that if f is extended oddly to [0, 2*pi] then all the terms (f, v)
# can be computed at once by fft.
#
# The flow is eval -> (extend -> fft) -> (take only imag or results = sines)
def sine_points(N):
'''Points where the function is sampled for sine transformation'''
# 1d
if isinstance(N, int):
points = np.linspace(0, 2*PI, 2*N, endpoint=False)[:N]
return points
# 2d
elif hasattr(N, '__len__'):
assert len(N) == 2
# X and Y coordinates of the tensor product
X, Y = [sine_points(N[0]), sine_points(N[1])]
XY = np.array([list(xy) for xy in product(X, Y)])
X, Y = XY[:, 0], XY[:, 1]
return X, Y
def sine_eval(N, f):
'''
    Sample f at the N sine-transform points in [0, pi) (see sine_points), or at
    the cartesian product of these points in the 2d case.
'''
# Symbolic is evaluated in [0, PI]
assert isinstance(f, (Expr, Number))
# 1d
if isinstance(N, int):
points = sine_points(N)
# Numbers are special
if isinstance(f, Number):
return float(f)*np.ones(len(points))
x = Symbol('x')
flambda = lambdify(x, f, 'numpy')
f_values = flambda(points)
return f_values
# 2d
elif hasattr(N, '__len__'):
assert len(N) == 2
X, Y = sine_points(N)
# Numbers are special
if isinstance(f, Number):
return float(f)*np.ones(len(X)).reshape(N)
x, y = symbols('x, y')
flambda = lambdify([x, y], f, 'numpy')
f_values = flambda(X, Y)
return f_values.reshape(N)
def sine_fft(f_vec):
'''
    Get sine expansion coeffs of f sampled on [0, pi) and extended oddly to [0, 2*pi).
'''
# 1d
if f_vec.shape == (len(f_vec), ):
f_vec = np.r_[f_vec, f_vec[0], -f_vec[1:][::-1]]
F_vec = np.fft.rfft(f_vec)
# These are the coefficient values
n_points = len(f_vec)
F_vec[1:] *= -2./n_points/Sqrt(2/PI)
return F_vec.imag[1:]
#2d
elif len(f_vec.shape) == 2:
F_vec = np.zeros_like(f_vec)
# Do sine_fft on rows
for i, row in enumerate(f_vec):
F_vec[i, :] = sine_fft(row)
# Do sine_fft on cols
for j, col in enumerate(F_vec.T):
F_vec[:, j] = sine_fft(col)
return F_vec
def sine_ifft(F_vec):
'''Point values from coefficients'''
if F_vec.shape == (len(F_vec), ):
# Rescale
N = len(F_vec)
n_points = 2*len(F_vec)
F_vec /= -2./n_points/Sqrt(2/PI)
# Fake complex
F_vec = np.r_[0, F_vec]*1j
f_vec = np.fft.irfft(F_vec)
return f_vec[:N]
#2d
elif len(F_vec.shape) == 2:
f_vec = np.zeros_like(F_vec)
# Do sine_fft on rows
for i, row in enumerate(F_vec):
f_vec[i, :] = sine_ifft(row)
# Do sine_fft on cols
for j, col in enumerate(f_vec.T):
f_vec[:, j] = sine_ifft(col)
return f_vec
def load_vector(f, n, n_quad=0, n_fft=0):
'''(f, v) for v in sine basis(n).'''
# Compute the integral by numeric/symbolic integration
if n_fft == 0:
# 1d
if isinstance(n, int):
x = Symbol('x')
# Integration by sympy
if n_quad == 0:
quadrature = lambda v, f=f: quad(lambdify(x, f*v), [0, PI])
            # My custom quadrature with fixed degree
else:
Q1 = Quad1d(n_quad)
quadrature = lambda v, f=f: Q1(f*v, [0, PI])
return np.array(map(quadrature, sine_basis(n)), dtype=float)
# 2d
elif hasattr(n, '__len__'):
assert len(n) == 2, 'Only 2d'
# Basis in 2d is a tensor product of basis in each directions
basis_x = sine_basis(n[0], 'x')
basis_y = sine_basis(n[1], 'y')
basis = tensor_product([basis_x, basis_y])
x, y = symbols('x, y')
# Integration by sympy
if n_quad == 0:
quadrature = \
lambda v, f=f: quad(lambdify([x, y], f*v), [0, PI], [0, PI])
            # My custom quadrature with fixed degree
else:
Q2 = Quad2d(n_quad)
quadrature = lambda v, f=f: Q2(f*v, [0, PI], [0, PI])
return np.array(map(quadrature, basis), dtype=float).reshape(n)
# Integral by fft only approximate!
else:
# 1d
if isinstance(n, int):
# If f is constant this is the minimal requirement for sensible results
assert n_fft >= n
f_vec = sine_eval(n_fft, f)
F_vec = sine_fft(f_vec)[:n]
return F_vec
# 2d
elif hasattr(n, '__len__'):
assert len(n) == 2, 'Only 2d'
f_vec = sine_eval([n_fft, n_fft], f)
F_vec = sine_fft(f_vec)[:n[0], :n[1]]
return F_vec
# -----------------------------------------------------------------------------
if __name__ == '__main__':
from sympy import lambdify, Symbol, cos, exp, S
from sympy.plotting import plot3d
n = 8
basis = sine_basis(n)
domain = [0, pi.n()]
x = Symbol('x')
# Test bending matrix
mat_value = lambda u, v: quad(lambdify(x, u.diff(x, 2)*v.diff(x, 2)), domain)
mat = np.zeros((len(basis), len(basis)))
for i, u in enumerate(basis):
mat[i, i] = mat_value(u, u)
for j, v in enumerate(basis[i+1:], i+1):
mat[i, j] = mat_value(u, v)
mat[j, i] = mat[i, j]
B = bending_matrix(n)
assert B.shape == mat.shape
assert np.allclose(B.toarray(), mat)
# Test mass
mat_value = lambda u, v: quad(lambdify(x, u*v), domain)
mat = np.zeros((len(basis), len(basis)))
for i, u in enumerate(basis):
mat[i, i] = mat_value(u, u)
for j, v in enumerate(basis[i+1:], i+1):
mat[i, j] = mat_value(u, v)
mat[j, i] = mat[i, j]
B = mass_matrix(n)
assert B.shape == mat.shape
assert np.allclose(B.toarray(), mat)
# Test stiffness
mat_value = lambda u, v: quad(lambdify(x, u.diff(x, 1)*v.diff(x, 1)), domain)
mat = np.zeros((len(basis), len(basis)))
for i, u in enumerate(basis):
mat[i, i] = mat_value(u, u)
for j, v in enumerate(basis[i+1:], i+1):
mat[i, j] = mat_value(u, v)
mat[j, i] = mat[i, j]
B = stiffness_matrix(n)
assert B.shape == mat.shape
assert np.allclose(B.toarray(), mat)
# sine FFT 1d
f = S(1)
    # f = 1*sin(x) - 2*sin(2*x)
f_vec = sine_eval(N=1000, f=f)
F_vec = sine_fft(f_vec)
f_vec_ = sine_ifft(F_vec)
print 'fft(ifft(f) - f', np.linalg.norm(f_vec - f_vec_), f
import matplotlib.pyplot as plt
points = sine_points(len(f_vec))
plt.figure()
plt.plot(points, f_vec, 'x', label='one')
plt.plot(points, f_vec_, 'o', label='two')
plt.xlim((0, np.pi))
plt.legend()
# sine FFT 2d
y = Symbol('y')
h = x*(x-pi)*sin(x+y)*y**2*(y-pi)**2
f_vec = sine_eval(N=[100, 100], f=h)
F_vec = sine_fft(f_vec)
f_vec_ = sine_ifft(F_vec)
print 'fft(ifft(f) - f', np.linalg.norm(f_vec - f_vec_), h
X, Y = sine_points(f_vec.shape)
X = X.reshape(f_vec.shape)
Y = Y.reshape(f_vec.shape)
# print f_vec
fig, (ax0, ax1) = plt.subplots(1 ,2)
ax0.pcolor(X, Y, f_vec)
ax0.set_xlim((0, np.pi))
ax0.set_ylim((0, np.pi))
ax1.pcolor(X, Y, f_vec_)
ax1.set_xlim((0, np.pi))
ax1.set_ylim((0, np.pi))
plot3d(h, (x, 0, np.pi), (y, 0, np.pi))
plt.show()
# f = sin(x) + 7*sin(2*x) - sin(4*x) # Exact
# f = sin(x)*cos(2*pi*x)*exp(x**2)
# f = exp(x)*(sum(i*x**i for i in range(1, 4)))
load_exact = np.array([quad(lambdify(x, f*v), [0, PI]) for v in basis],
dtype=float)
b = load_vector(f, len(basis))
b_ = load_vector(f, len(basis), n_fft=2**14)
print '1d error', np.linalg.norm(b - b_)
b__ = load_vector(f, len(basis), n_quad=200)
print '1d error', np.linalg.norm(b - b__)
# y = Symbol('y')
# g = sin(x)*(y**2-1)
# print load_vector(g, [2, 2])
# # How many sines you need to get the n integrals in the load vector right
# N = n
# for k in range(1, 11):
# f_vec = sine_eval(N, g)
# load_num = sine_fft(f_vec)[:n]
#
# print N, np.linalg.norm(load_exact - load_num)
# N *= 2
# y = Symbol('y')
# f = sin(x)*sin(y)
# sine_eval(N=[4, 4], f=f)
x, y = symbols('x, y')
f = x*(x-pi)*y*(y-pi)*sin(x)
import time
start = time.time()
b = load_vector(f, [5, 5])
print 'QUAD sympy', time.time() - start
start = time.time()
b_ = load_vector(f, [5, 5], n_fft=64)
print 'FFT', time.time() - start
print '2d error', np.linalg.norm(b - b_)
start = time.time()
b__ = load_vector(f, [5, 5], n_quad=200)
print 'QUAD me', time.time() - start
print '2d error', np.linalg.norm(b - b__)
|
mit
|
rahuldhote/scikit-learn
|
examples/decomposition/plot_ica_vs_pca.py
|
306
|
3329
|
"""
==========================
FastICA on 2D point clouds
==========================
This example visually illustrates, in the feature space, a comparison of the
results from two different component analysis techniques:
:ref:`ICA` vs :ref:`PCA`.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian
process, 2 student T with a low number of degrees of freedom (top left
figure). We mix them to create observations (top right figure).
In this raw observation space, directions identified by PCA are
represented by orange vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
print(__doc__)
# Authors: Alexandre Gramfort, Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, FastICA
###############################################################################
# Generate sample data
rng = np.random.RandomState(42)
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.
# Mix data
A = np.array([[1, 1], [0, 2]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
pca = PCA()
S_pca_ = pca.fit(X).transform(X)
ica = FastICA(random_state=rng)
S_ica_ = ica.fit(X).transform(X) # Estimate the sources
S_ica_ /= S_ica_.std(axis=0)
###############################################################################
# Plot results
def plot_samples(S, axis_list=None):
plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', zorder=10,
color='steelblue', alpha=0.5)
if axis_list is not None:
colors = ['orange', 'red']
for color, axis in zip(colors, axis_list):
axis /= axis.std()
x_axis, y_axis = axis
# Trick to get legend to work
plt.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)
plt.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01, scale=6,
color=color)
plt.hlines(0, -3, 3)
plt.vlines(0, -3, 3)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.xlabel('x')
plt.ylabel('y')
plt.figure()
plt.subplot(2, 2, 1)
plot_samples(S / S.std())
plt.title('True Independent Sources')
axis_list = [pca.components_.T, ica.mixing_]
plt.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
legend = plt.legend(['PCA', 'ICA'], loc='upper right')
legend.set_zorder(100)
plt.title('Observations')
plt.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_, axis=0))
plt.title('PCA recovered signals')
plt.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
plt.title('ICA recovered signals')
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
plt.show()
|
bsd-3-clause
|
chenyyx/scikit-learn-doc-zh
|
examples/en/gaussian_process/plot_gpr_prior_posterior.py
|
36
|
2900
|
"""
==========================================================================
Illustration of prior and posterior Gaussian process for different kernels
==========================================================================
This example illustrates the prior and posterior of a GPR with different
kernels. Mean, standard deviation, and 10 samples are shown for both prior
and posterior.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import (RBF, Matern, RationalQuadratic,
ExpSineSquared, DotProduct,
ConstantKernel)
kernels = [1.0 * RBF(length_scale=1.0, length_scale_bounds=(1e-1, 10.0)),
1.0 * RationalQuadratic(length_scale=1.0, alpha=0.1),
1.0 * ExpSineSquared(length_scale=1.0, periodicity=3.0,
length_scale_bounds=(0.1, 10.0),
periodicity_bounds=(1.0, 10.0)),
ConstantKernel(0.1, (0.01, 10.0))
* (DotProduct(sigma_0=1.0, sigma_0_bounds=(0.0, 10.0)) ** 2),
1.0 * Matern(length_scale=1.0, length_scale_bounds=(1e-1, 10.0),
nu=1.5)]
for fig_index, kernel in enumerate(kernels):
# Specify Gaussian Process
gp = GaussianProcessRegressor(kernel=kernel)
# Plot prior
plt.figure(fig_index, figsize=(8, 8))
plt.subplot(2, 1, 1)
X_ = np.linspace(0, 5, 100)
y_mean, y_std = gp.predict(X_[:, np.newaxis], return_std=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - y_std, y_mean + y_std,
alpha=0.2, color='k')
y_samples = gp.sample_y(X_[:, np.newaxis], 10)
plt.plot(X_, y_samples, lw=1)
plt.xlim(0, 5)
plt.ylim(-3, 3)
plt.title("Prior (kernel: %s)" % kernel, fontsize=12)
# Generate data and fit GP
rng = np.random.RandomState(4)
X = rng.uniform(0, 5, 10)[:, np.newaxis]
y = np.sin((X[:, 0] - 2.5) ** 2)
gp.fit(X, y)
# Plot posterior
plt.subplot(2, 1, 2)
X_ = np.linspace(0, 5, 100)
y_mean, y_std = gp.predict(X_[:, np.newaxis], return_std=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - y_std, y_mean + y_std,
alpha=0.2, color='k')
y_samples = gp.sample_y(X_[:, np.newaxis], 10)
plt.plot(X_, y_samples, lw=1)
plt.scatter(X[:, 0], y, c='r', s=50, zorder=10, edgecolors=(0, 0, 0))
plt.xlim(0, 5)
plt.ylim(-3, 3)
plt.title("Posterior (kernel: %s)\n Log-Likelihood: %.3f"
% (gp.kernel_, gp.log_marginal_likelihood(gp.kernel_.theta)),
fontsize=12)
plt.tight_layout()
plt.show()
|
gpl-3.0
|
ssaeger/scikit-learn
|
examples/cluster/plot_kmeans_assumptions.py
|
270
|
2040
|
"""
====================================
Demonstration of k-means assumptions
====================================
This example is meant to illustrate situations where k-means will produce
unintuitive and possibly unexpected clusters. In the first three plots, the
input data does not conform to some implicit assumption that k-means makes and
undesirable clusters are produced as a result. In the last plot, k-means
returns intuitive clusters despite unevenly sized blobs.
"""
print(__doc__)
# Author: Phil Roth <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
plt.figure(figsize=(12, 12))
n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# Incorrect number of clusters
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect Number of Blobs")
# Anisotropically distributed data
transformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropicly Distributed Blobs")
# Different variance
X_varied, y_varied = make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal Variance")
# Unevenly sized blobs
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unevenly Sized Blobs")
plt.show()
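# Editor's note (illustrative sketch, not part of the original example): for
# the anisotropic and unequal-variance cases above, a Gaussian mixture with
# full covariance matrices is a common alternative to k-means, since it does
# not assume spherical, equally sized clusters.
from sklearn.mixture import GaussianMixture
gmm = GaussianMixture(n_components=3, covariance_type='full',
                      random_state=random_state).fit(X_aniso)
plt.figure()
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=gmm.predict(X_aniso))
plt.title("Anisotropically Distributed Blobs (Gaussian mixture)")
plt.show()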
|
bsd-3-clause
|
eggplantbren/StatisticalCompass
|
mkq.py
|
2
|
2289
|
import pandas as pd
questions = pd.read_csv('questions.csv')
print '''<!DOCTYPE html>
<meta charset="utf-8">
<style>
body {
font-family: "Raleway", sans-serif;
margin: auto;
position: relative;
width: 960px;
font-size: 1.2em;
}
</style>
<html>
<head>
<link href='http://fonts.googleapis.com/css?family=Raleway' rel='stylesheet' type='text/css'>
<title>Statistical Compass Questionnaire</title>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/2.1.3/jquery.min.js"></script>
</head>
<body>
<h1> Statistical Compass Questionnaire</h1>
<h2>Enter a response from -2 (strongly disagree) to +2 (strongly agree):</h2>
<form action="#" method="">
<div data-role="fieldcontain">
'''
nq = len(questions)  # number of questions (not used below; kept for reference)
# Emit one radio-button group per question; each input's value embeds the
# response value followed by the question's three axis weights.
for i in range(0, questions.shape[0]):
    st = "%f %f %f" % (questions.iloc[i, 1], questions.iloc[i, 2], questions.iloc[i, 3])
print questions.iloc[i, 0],'''
<br>
-2
<input class="calc" type="radio" name="radio%d" value="-2 %s">
-1
<input class="calc" type="radio" name="radio%d" value="-1 %s">
0
<input class="calc" type="radio" name="radio%d" value="0 %s">
1
<input class="calc" type="radio" name="radio%d" value="1 %s">
2
<input class="calc" type="radio" name="radio%d" value="2 %s">
</p>
'''%(i,st,i,st,i,st,i,st,i,st)
print '''
<input type="submit" name="sum" onclick="displayscore();">
</div>
</form>
<script>
var score = new Array(3);
for (i=1; i<=3; i++){score[i]=0};
function displayscore(){
console.log("here");
alert("Your position in 3D is "+ score);
}
function calcscore() {
$(".calc:checked").each(function() {
console.log($(this).val(), +$(this).val().split(" ")[0]);
for (i=1; i<=3; i++)
{
score[i] += +($(this).val().split(" ")[i])*parseInt($(this).val().split(" ")[0], 10);
console.log("here",score);
}
console.log(score[1]);
//$("input[name=sum]").val(score);
})};
$().ready(function() {
$(".calc").change(function() {
calcscore()
});
});
</script>
</body>
</html>
'''
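# Editor's note (illustrative sketch, not part of the original script): the
# embedded JavaScript accumulates, for each of the three axes, the sum of
# (response value) * (question weight on that axis). The same scoring can be
# expressed directly with numpy; `responses` below is a hypothetical vector of
# one answer in [-2, 2] per question.
import numpy as np
weights = questions.iloc[:, 1:4].values  # per-question axis weights (n x 3)
responses = np.zeros(len(questions))     # placeholder answers, one per question
score = responses.dot(weights)           # 3-component position vector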
|
mit
|
CNS-OIST/PyPe9
|
test/unittests/test_simulations/test_network.py
|
2
|
52184
|
#!/usr/bin/env python
from __future__ import division
from __future__ import print_function
from builtins import zip
from builtins import range
from itertools import groupby
from operator import itemgetter
import itertools
import numpy
import quantities as pq
import neo
from nineml.user import (
Projection, Network, DynamicsProperties,
Population, ComponentArray, EventConnectionGroup,
MultiDynamicsProperties,
Property, RandomDistributionProperties)
from nineml.user.projection import Connectivity
from nineml.abstraction import (
Parameter, Dynamics, Regime, On, OutputEvent, StateVariable,
StateAssignment, Constant, Alias)
from nineml.abstraction.ports import (
AnalogSendPort, AnalogReceivePort, AnalogReducePort, EventSendPort,
EventReceivePort)
from nineml.user import AnalogPortConnection, ConnectionRuleProperties
from nineml import units as un
from nineml.units import ms
from nineml.values import RandomDistributionValue
from pype9.simulate.common.cells import (
ConnectionPropertySet, MultiDynamicsWithSynapsesProperties,
SynapseProperties)
from pype9.simulate.common.network import Network as BasePype9Network
from pype9.simulate.neuron.network import Network as NeuronPype9Network
from pype9.simulate.neuron import Simulation as NeuronSimulation
import ninemlcatalog
import sys
argv = sys.argv[1:] # Save argv before it is clobbered by the NEST init.
import nest # @IgnorePep8
from pype9.simulate.nest.network import Network as NestPype9Network # @IgnorePep8
from pype9.simulate.nest import Simulation as NESTSimulation # @IgnorePep8
from pype9.utils.testing import ReferenceBrunel2000 # @IgnorePep8
import pype9.utils.logging.handlers.sysout # @IgnorePep8
try:
from matplotlib import pyplot as plt
except ImportError:
pass
if __name__ == '__main__':
# Import dummy test case
from pype9.utils.testing import DummyTestCase as TestCase # @UnusedImport @UnresolvedImport @IgnorePep8
else:
from unittest import TestCase # @Reimport
nest_bookkeeping = (
'element_type', 'global_id', 'local_id', 'receptor_types',
'thread_local_id', 'frozen', 'thread', 'model',
'archiver_length', 'recordables', 'parent', 'local', 'vp',
'tau_minus', 'tau_minus_triplet', 't_spike', 'origin', 'stop', 'start',
'V_min', 'synaptic_elements', 'needs_prelim_update', 'beta_Ca',
'tau_Ca', 'Ca', 'node_uses_wfr', 'supports_precise_spikes')
NEST_RNG_SEED = 12345
NEURON_RNG_SEED = 54321
class TestBrunel2000(TestCase):
translations = {
'tau_m': 'tau__cell', 'V_th': 'v_threshold__cell',
'E_L': 0.0, 'I_e': 0.0, 'C_m': None,
'V_reset': 'v_reset__cell', 'tau_syn_in': 'tau__psr__Inhibition',
'tau_syn_ex': 'tau__psr__Excitation',
't_ref': 'refractory_period__cell',
'V_m': None} # 'v__cell'}
timestep = 0.1
pop_names = ('Exc', 'Inh', 'Ext')
proj_names = ('Excitation', 'Inhibition', 'External')
conn_param_names = ['weight', 'delay']
record_params = {'Exc': {'nineml': ['v__cell',
'b__psr__Excitation',
'b__psr__Inhibition',
'b__psr__External'],
'reference': ['V_m']},
'Inh': {'nineml': ['v__cell',
'b__psr__Excitation',
'b__psr__Inhibition',
'b__psr__External'],
'reference': ['V_m']},
'Ext': {'nineml': [], 'reference': []}}
rate_percent_error = {'Exc': 7.5, 'Inh': 7.5, 'Ext': 2.5}
psth_percent_error = {'Exc': 100.0, 'Inh': 100.0, 'Ext': 100.0}
out_stdev_error = {('Exc', 'Exc'): 7.5, ('Exc', 'Inh'): 7.5,
('Inh', 'Exc'): 7.5, ('Inh', 'Inh'): 7.5,
('Ext', 'Exc'): 0.0, ('Ext', 'Inh'): 0.0}
def setUp(self):
self.simulations = {
'nest': NESTSimulation(
dt=self.timestep * un.ms, seed=NEST_RNG_SEED,
min_delay=ReferenceBrunel2000.min_delay,
max_delay=ReferenceBrunel2000.max_delay),
'neuron': NeuronSimulation(
dt=self.timestep * un.ms, seed=NEURON_RNG_SEED,
min_delay=ReferenceBrunel2000.min_delay,
max_delay=ReferenceBrunel2000.max_delay)}
def test_population_params(self, case='AI', order=10, **kwargs): # @UnusedVariable @IgnorePep8
with self.simulations['nest']:
nml = self._construct_nineml(case, order, 'nest')
ref = ReferenceBrunel2000(case, order)
for pop_name in ('Exc', 'Inh'):
params = {}
means = {}
stdevs = {}
for model_ver in ('nineml', 'reference'):
if model_ver == 'nineml':
inds = list(nml.component_array(pop_name).all_cells)
else:
inds = ref[pop_name]
param_names = [
n for n in list(nest.GetStatus([inds[0]])[0].keys())
if n not in nest_bookkeeping]
params[model_ver] = dict(
zip(param_names, zip(*nest.GetStatus(
inds, keys=param_names))))
means[model_ver] = {}
stdevs[model_ver] = {}
for param_name, values in params[model_ver].items():
vals = numpy.asarray(values)
try:
means[model_ver][param_name] = numpy.mean(vals)
except:
means[model_ver][param_name] = None
try:
stdevs[model_ver][param_name] = numpy.std(vals)
except:
stdevs[model_ver][param_name] = None
for stat_name, stat in (('mean', means),
('standard deviation', stdevs)):
for param_name in stat['reference']:
nml_param_name = self.translations.get(
param_name, param_name + '__cell')
if nml_param_name is not None: # Equivalent parameter
if isinstance(nml_param_name, (float, int)):
if stat_name == 'mean':
nineml_stat = nml_param_name
else:
nineml_stat = 0.0
else:
nineml_stat = stat['nineml'][nml_param_name]
reference_stat = stat['reference'][param_name]
self.assertAlmostEqual(
reference_stat, nineml_stat,
msg=("'{}' {} is not almost equal between "
"reference ({}) and nineml ({}) in '{}'"
.format(param_name, stat_name,
reference_stat, nineml_stat,
pop_name)))
else:
pass
def test_connection_degrees(self, case='AI', order=500, **kwargs): # @UnusedVariable @IgnorePep8
"""
Compares the in/out degree of all projections in the 9ML network with
the corresponding projections in the reference network
"""
with self.simulations['nest']:
nml = self._construct_nineml(case, order, 'nest')
ref = ReferenceBrunel2000(case, order)
for pop1_name, pop2_name in self.out_stdev_error:
in_degree = {}
out_degree = {}
for model_ver, pop1, pop2 in [
('nineml', nml.component_array(pop1_name).all_cells,
nml.component_array(pop2_name).all_cells),
('reference', numpy.asarray(ref[pop1_name]),
numpy.asarray(ref[pop2_name]))]:
conns = numpy.asarray(nest.GetConnections(list(pop1),
list(pop2)))
out_degree[model_ver] = numpy.array(
[numpy.count_nonzero(conns[:, 0] == i) for i in pop1])
in_degree[model_ver] = numpy.array(
[numpy.count_nonzero(conns[:, 1] == i) for i in pop2])
nineml_out_mean = numpy.mean(out_degree['nineml'])
ref_out_mean = numpy.mean(out_degree['reference'])
self.assertEqual(
nineml_out_mean, ref_out_mean,
"Mean out degree of '{}' to '{}' projection ({}) doesn't "
"match reference ({})".format(
pop1_name, pop2_name, nineml_out_mean, ref_out_mean))
nineml_in_mean = numpy.mean(in_degree['nineml'])
ref_in_mean = numpy.mean(in_degree['reference'])
self.assertEqual(
nineml_in_mean, ref_in_mean,
"Mean in degree of '{}' to '{}' projection ({}) doesn't "
"match reference ({})".format(
pop1_name, pop2_name, nineml_in_mean, ref_in_mean))
nineml_in_stdev = numpy.std(in_degree['nineml'])
ref_in_stdev = numpy.std(in_degree['reference'])
self.assertEqual(
nineml_in_stdev, ref_in_stdev,
"Std. of in degree of '{}' to '{}' projection ({}) doesn't"
" match reference ({})".format(
pop1_name, pop2_name, nineml_in_stdev, ref_in_stdev))
nineml_out_stdev = numpy.std(out_degree['nineml'])
ref_out_stdev = numpy.std(out_degree['reference'])
percent_error = abs(nineml_out_stdev /
ref_out_stdev - 1.0) * 100.0
self.assertLessEqual(
percent_error, self.out_stdev_error[(pop1_name,
pop2_name)],
"Std. of out degree of '{}' to '{}' projection ({}) "
"doesn't match reference ({}) within {}% ({}%)".format(
pop1_name, pop2_name, nineml_out_stdev, ref_out_stdev,
self.out_stdev_error[(pop1_name, pop2_name)],
percent_error))
def test_connection_params(self, case='AI', order=10, **kwargs): # @UnusedVariable @IgnorePep8
with self.simulations['nest']:
nml = self._construct_nineml(case, order, 'nest')
ref = ReferenceBrunel2000(case, order)
ref_conns = ref.projections
for conn_group in nml.connection_groups:
nml_conns = conn_group.nest_connections
nml_params = dict(zip(
self.conn_param_names, zip(
*nest.GetStatus(nml_conns, self.conn_param_names))))
# Since the weight is constant it is set as a parameter of the
# cell class not a connection parameter and it is scaled by
# exp because of the difference between the alpha synapse
# definition in the catalog and the nest/neuron synapses
nml_params['weight'] = nest.GetStatus(
list(conn_group.post.all_cells),
'weight__pls__{}'.format(conn_group.name)) / numpy.exp(1.0)
ref_params = dict(zip(
self.conn_param_names, zip(
*nest.GetStatus(ref_conns[conn_group.name],
self.conn_param_names))))
for attr in self.conn_param_names:
ref_mean = numpy.mean(ref_params[attr])
ref_stdev = numpy.std(ref_params[attr])
nml_mean = numpy.mean(nml_params[attr])
nml_stdev = numpy.std(nml_params[attr])
self.assertAlmostEqual(
ref_mean, nml_mean,
msg=("'{}' mean is not almost equal between "
"reference ({}) and nineml ({}) in '{}'"
.format(attr, ref_mean, nml_mean,
conn_group.name)))
self.assertAlmostEqual(
ref_stdev, nml_stdev,
msg=("'{}' mean is not almost equal between "
"reference ({}) and nineml ({}) in '{}'"
.format(attr, ref_stdev, nml_stdev,
conn_group.name)))
def test_sizes(self, case='AI', order=100, **kwargs): # @UnusedVariable @IgnorePep8
with self.simulations['nest']:
nml_network = self._construct_nineml(case, order, 'nest')
nml = dict((p.name, p.all_cells)
for p in nml_network.component_arrays)
ref = ReferenceBrunel2000(case, order)
# Test sizes of component arrays
for name in ('Exc', 'Inh'):
nml_size = len(nml[name])
ref_size = len(ref[name])
self.assertEqual(
nml_size, ref_size,
"Size of '{}' component array ({}) does not match "
"reference ({})".format(name, nml_size, ref_size))
ref_conns = ref.projections
for conn_group in nml_network.connection_groups:
nml_size = len(conn_group)
ref_size = len(ref_conns[conn_group.name])
self.assertEqual(
nml_size, ref_size,
"Number of connections in '{}' ({}) does not match "
"reference ({})".format(conn_group.name, nml_size,
ref_size))
def test_activity(self, case='AI', order=50, simtime=250.0, plot=False,
record_size=50, record_pops=('Exc', 'Inh', 'Ext'),
record_states=False, record_start=0.0, bin_width=4.0,
identical_input=False, identical_connections=False,
identical_initialisation=False, build_mode='force',
**kwargs):
if identical_input:
mean_isi = 1000.0 / ReferenceBrunel2000.parameters(case, order)[-1]
if isinstance(identical_input, int):
mean_isi *= identical_input
external_input = []
for _ in range(order * 5):
# Generate poisson spike trains using numpy
spike_times = numpy.cumsum(numpy.random.exponential(
mean_isi, int(numpy.floor(1.5 * simtime / mean_isi))))
# Trim spikes outside the simulation time
spike_times = spike_times[numpy.logical_and(
spike_times < simtime,
spike_times > ReferenceBrunel2000.min_delay)]
# Append a Neo SpikeTrain input to the external input
external_input.append(
neo.SpikeTrain(spike_times, units='ms',
t_stop=simtime * pq.ms))
else:
external_input = None
record_duration = simtime - record_start
# Construct 9ML network
with self.simulations['nest'] as sim:
nml_network = self._construct_nineml(
case, order, 'nest', external_input=external_input,
build_mode=build_mode, **kwargs)
nml = dict((p.name, list(p.all_cells))
for p in nml_network.component_arrays)
if identical_connections:
connections = {}
for p1_name, p2_name in itertools.product(*([('Exc', 'Inh')] *
2)):
p1 = list(nml_network.component_array(p1_name).all_cells)
p2 = list(nml_network.component_array(p2_name).all_cells)
conns = numpy.asarray(nest.GetConnections(p1, p2))
conns[:, 0] -= p1[0]
conns[:, 1] -= p2[0]
                    assert numpy.all(conns[:, 0:2] >= 0)  # source/target columns after offset
connections[(p1_name, p2_name)] = conns
else:
connections = None
if identical_initialisation == 'zero':
init_v = {}
for pname in ('Exc', 'Inh'):
pop = list(nml_network.component_array(pname).all_cells)
zeros = list(
numpy.zeros(len(nml_network.component_array(pname))))
nest.SetStatus(pop, 'v__cell', zeros)
init_v[pname] = zeros
elif identical_initialisation:
init_v = {}
for p_name in ('Exc', 'Inh'):
pop = list(nml_network.component_array(p_name).all_cells)
init_v[p_name] = nest.GetStatus(pop, 'v__cell')
else:
init_v = None
# Construct reference network
ref = ReferenceBrunel2000(
case, order, external_input=external_input,
connections=connections, init_v=init_v)
# Set up spike recorders for reference network
pops = {'nineml': nml, 'reference': ref}
spikes = {}
multi = {}
for model_ver in ('nineml', 'reference'):
spikes[model_ver] = {}
multi[model_ver] = {}
for pop_name in record_pops:
pop = numpy.asarray(pops[model_ver][pop_name], dtype=int)
record_inds = numpy.asarray(numpy.unique(numpy.floor(
numpy.arange(start=0, stop=len(pop),
step=len(pop) / record_size))), dtype=int)
spikes[model_ver][pop_name] = nest.Create("spike_detector")
nest.SetStatus(spikes[model_ver][pop_name],
[{"label": "brunel-py-" + pop_name,
"withtime": True, "withgid": True}])
nest.Connect(list(pop[record_inds]),
spikes[model_ver][pop_name],
syn_spec="excitatory")
if record_states:
# Set up voltage traces recorders for reference network
if self.record_params[pop_name][model_ver]:
multi[model_ver][pop_name] = nest.Create(
'multimeter',
params={
'record_from':
self.record_params[pop_name][model_ver]})
nest.Connect(multi[model_ver][pop_name],
list(pop[record_inds]))
# Simulate the network
sim.run(simtime * un.ms)
rates = {'reference': {}, 'nineml': {}}
psth = {'reference': {}, 'nineml': {}}
for model_ver in ('reference', 'nineml'):
for pop_name in record_pops:
events = nest.GetStatus(spikes[model_ver][pop_name],
"events")[0]
spike_times = numpy.asarray(events['times'])
senders = numpy.asarray(events['senders'])
inds = numpy.asarray(spike_times > record_start, dtype=bool)
spike_times = spike_times[inds]
senders = senders[inds]
rates[model_ver][pop_name] = (
1000.0 * len(spike_times) / record_duration)
psth[model_ver][pop_name] = (
numpy.histogram(
spike_times,
bins=int(numpy.floor(record_duration /
bin_width)))[0] /
bin_width)
if plot:
plt.figure()
plt.scatter(spike_times, senders)
plt.xlabel('Time (ms)')
plt.ylabel('Cell Indices')
plt.title("{} - {} Spikes".format(model_ver, pop_name))
plt.figure()
plt.hist(spike_times,
bins=int(
numpy.floor(record_duration / bin_width)))
plt.xlabel('Time (ms)')
plt.ylabel('Rate')
plt.title("{} - {} PSTH".format(model_ver, pop_name))
if record_states:
for param in self.record_params[pop_name][model_ver]:
events, interval = nest.GetStatus(
multi[model_ver][pop_name], ["events",
'interval'])[0]
sorted_vs = sorted(zip(events['senders'],
events['times'],
events[param]),
key=itemgetter(0))
plt.figure()
legend = []
for sender, group in groupby(sorted_vs,
key=itemgetter(0)):
_, t, v = list(zip(*group))
t = numpy.asarray(t)
v = numpy.asarray(v)
inds = t > record_start
plt.plot(t[inds] * interval, v[inds])
legend.append(sender)
plt.xlabel('Time (ms)')
plt.ylabel(param)
plt.title("{} - {} {}".format(model_ver, pop_name,
param))
plt.legend(legend)
for pop_name in record_pops:
if rates['reference'][pop_name]:
percent_rate_error = abs(
rates['nineml'][pop_name] /
rates['reference'][pop_name] - 1.0) * 100
elif not rates['nineml'][pop_name]:
percent_rate_error = 0.0
else:
percent_rate_error = float('inf')
self.assertLess(
percent_rate_error,
self.rate_percent_error[pop_name], msg=(
"Rate of '{}' ({}) doesn't match reference ({}) within {}%"
" ({}%)".format(pop_name, rates['nineml'][pop_name],
rates['reference'][pop_name],
self.rate_percent_error[pop_name],
percent_rate_error)))
if numpy.std(psth['reference'][pop_name]):
percent_psth_stdev_error = abs(
numpy.std(psth['nineml'][pop_name]) /
numpy.std(psth['reference'][pop_name]) - 1.0) * 100
elif not numpy.std(psth['nineml'][pop_name]):
percent_psth_stdev_error = 0.0
else:
percent_psth_stdev_error = float('inf')
self.assertLess(
percent_psth_stdev_error,
self.psth_percent_error[pop_name],
msg=(
"Std. Dev. of PSTH for '{}' ({}) doesn't match "
"reference ({}) within {}% ({}%)".format(
pop_name,
numpy.std(psth['nineml'][pop_name]) / bin_width,
numpy.std(psth['reference'][pop_name]) / bin_width,
self.psth_percent_error[pop_name],
percent_psth_stdev_error)))
if plot:
plt.show()
def test_activity_with_neuron(self, case='AI', order=10, simtime=100.0,
bin_width=4.0, simulators=['neuron', 'nest'],
record_states=True, plot=False,
build_mode='force', **kwargs): # @IgnorePep8 @UnusedVariable
data = {}
# Set up recorders for 9ML network
rates = {}
psth = {}
for simulator in simulators:
data[simulator] = {}
with self.simulations[simulator] as sim:
network = self._construct_nineml(case, order, simulator,
**kwargs)
for pop in network.component_arrays:
pop.record('spike_output')
if record_states and pop.name != 'Ext':
pop.record('v__cell')
sim.run(simtime * un.ms)
rates[simulator] = {}
psth[simulator] = {}
for pop in network.component_arrays:
block = data[simulator][pop.name] = pop.get_data()
segment = block.segments[0]
spiketrains = segment.spiketrains
spike_times = []
ids = []
for i, spiketrain in enumerate(spiketrains):
spike_times.extend(spiketrain)
ids.extend([i] * len(spiketrain))
rates[simulator][pop.name] = len(spike_times) / (simtime *
pq.ms)
psth[simulator][pop.name] = numpy.histogram(
spike_times,
bins=int(numpy.floor(simtime /
bin_width)))[0] / bin_width
if plot:
plt.figure()
plt.scatter(spike_times, ids)
plt.xlabel('Times (ms)')
plt.ylabel('Cell Indices')
plt.title("{} - {} Spikes".format(simulator, pop.name))
if record_states and pop.name != 'Ext':
traces = segment.analogsignalarrays
plt.figure()
legend = []
for trace in traces:
plt.plot(trace.times, trace)
legend.append(trace.name)
plt.xlabel('Time (ms)')
plt.ylabel('Membrane Voltage (mV)')
plt.title("{} - {} Membrane Voltage".format(
simulator, pop.name))
plt.legend(legend)
for pop in network.component_arrays:
if rates['nest'][pop.name]:
percent_rate_error = abs(
rates['neuron'][pop.name] /
rates['nest'][pop.name] - 1.0) * 100
elif not rates['neuron'][pop.name]:
percent_rate_error = 0.0
else:
percent_rate_error = float('inf')
self.assertLess(
percent_rate_error,
self.rate_percent_error[pop.name], msg=(
"Rate of NEURON '{}' ({}) doesn't match NEST ({}) within "
"{}% ({}%)".format(pop.name, rates['neuron'][pop.name],
rates['nest'][pop.name],
self.rate_percent_error[pop.name],
percent_rate_error)))
if numpy.std(psth['nest'][pop.name]):
percent_psth_stdev_error = abs(
numpy.std(psth['neuron'][pop.name]) /
numpy.std(psth['nest'][pop.name]) - 1.0) * 100
elif not numpy.std(psth['neuron'][pop.name]):
percent_psth_stdev_error = 0.0
else:
percent_psth_stdev_error = float('inf')
self.assertLess(
percent_psth_stdev_error,
self.psth_percent_error[pop.name],
msg=(
"Std. Dev. of PSTH for NEURON '{}' ({}) doesn't match "
"NEST ({}) within {}% ({}%)".format(
pop.name,
numpy.std(psth['neuron'][pop.name]) / bin_width,
numpy.std(psth['nest'][pop.name]) / bin_width,
self.psth_percent_error[pop.name],
percent_psth_stdev_error)))
if plot:
plt.show()
print("done")
def test_flatten(self, **kwargs): # @UnusedVariable
brunel_network = ninemlcatalog.load(
'network/Brunel2000/AI/').as_network('brunel_ai')
(component_arrays, connection_groups,
selections) = BasePype9Network._flatten_to_arrays_and_conns(
brunel_network)
self.assertEqual(len(component_arrays), 3)
self.assertEqual(len(connection_groups), 3)
self.assertEqual(len(selections), 1)
def _construct_nineml(self, case, order, simulator, external_input=None,
**kwargs):
model = ninemlcatalog.load('network/Brunel2000/' + case).as_network(
'Brunel_{}'.format(case))
model = model.clone()
scale = order / model.population('Inh').size
# rescale populations
for pop in model.populations:
pop.size = int(numpy.ceil(pop.size * scale))
for proj in (model.projection('Excitation'),
model.projection('Inhibition')):
props = proj.connectivity.rule_properties
number = props.property('number')
props.set(Property(
number.name,
int(numpy.ceil(float(number.value) * scale)) * un.unitless))
if simulator == 'nest':
NetworkClass = NestPype9Network
elif simulator == 'neuron':
NetworkClass = NeuronPype9Network
else:
assert False
network = NetworkClass(model, **kwargs)
if external_input is not None:
network.component_array('Ext').play('spike_input__cell',
external_input)
return network
class TestNetwork(TestCase):
delay = 1.5 * un.ms
def setUp(self):
self.all_to_all = ConnectionRuleProperties(
'all_to_all_props', ninemlcatalog.load('/connectionrule/AllToAll',
'AllToAll'))
def test_component_arrays_and_connection_groups(self, **kwargs): # @UnusedVariable @IgnorePep8
# =====================================================================
# Dynamics components
# =====================================================================
cell1_cls = Dynamics(
name='Cell',
state_variables=[
StateVariable('SV1', dimension=un.voltage)],
regimes=[
Regime(
'dSV1/dt = -SV1 / P1 + i_ext / P2',
transitions=[On('SV1 > P3', do=[OutputEvent('spike')])],
name='R1')],
analog_ports=[AnalogReducePort('i_ext', dimension=un.current,
operator='+'),
EventSendPort('spike')],
parameters=[Parameter('P1', dimension=un.time),
Parameter('P2', dimension=un.capacitance),
Parameter('P3', dimension=un.voltage)])
cell2_cls = Dynamics(
name='Cell',
state_variables=[
StateVariable('SV1', dimension=un.voltage)],
regimes=[
Regime(
'dSV1/dt = -SV1 ^ 2 / P1 + i_ext / P2',
transitions=[On('SV1 > P3', do=[OutputEvent('spike')]),
On('SV1 > P4',
do=[OutputEvent('double_spike')])],
name='R1')],
analog_ports=[AnalogReducePort('i_ext', dimension=un.current,
operator='+')],
parameters=[Parameter('P1', dimension=un.time * un.voltage),
Parameter('P2', dimension=un.capacitance),
Parameter('P3', dimension=un.voltage),
Parameter('P4', dimension=un.voltage)])
exc_cls = Dynamics(
name="Exc",
aliases=["i := SV1"],
regimes=[
Regime(
name="default",
time_derivatives=[
"dSV1/dt = SV1/tau"],
transitions=[
On('spike', do=["SV1 = SV1 + weight"]),
On('double_spike', do=['SV1 = SV1 + 2 * weight'])])],
state_variables=[
StateVariable('SV1', dimension=un.current),
],
analog_ports=[AnalogSendPort("i", dimension=un.current),
AnalogReceivePort("weight", dimension=un.current)],
parameters=[Parameter('tau', dimension=un.time)])
inh_cls = Dynamics(
name="Inh",
aliases=["i := SV1"],
regimes=[
Regime(
name="default",
time_derivatives=[
"dSV1/dt = SV1/tau"],
transitions=On('spike', do=["SV1 = SV1 - weight"]))],
state_variables=[
StateVariable('SV1', dimension=un.current),
],
analog_ports=[AnalogSendPort("i", dimension=un.current),
AnalogReceivePort("weight", dimension=un.current)],
parameters=[Parameter('tau', dimension=un.time)])
static_cls = Dynamics(
name="Static",
aliases=["fixed_weight := weight"],
regimes=[
Regime(name="default")],
analog_ports=[AnalogSendPort("fixed_weight",
dimension=un.current)],
parameters=[Parameter('weight', dimension=un.current)])
stdp_cls = Dynamics(
name="PartialStdpGuetig",
parameters=[
Parameter(name='tauLTP', dimension=un.time),
Parameter(name='aLTD', dimension=un.dimensionless),
Parameter(name='wmax', dimension=un.dimensionless),
Parameter(name='muLTP', dimension=un.dimensionless),
Parameter(name='tauLTD', dimension=un.time),
Parameter(name='aLTP', dimension=un.dimensionless)],
analog_ports=[
AnalogSendPort(dimension=un.dimensionless, name="wsyn"),
AnalogSendPort(dimension=un.current, name="wsyn_current")],
event_ports=[
EventReceivePort(name="incoming_spike")],
state_variables=[
StateVariable(name='tlast_post', dimension=un.time),
StateVariable(name='tlast_pre', dimension=un.time),
StateVariable(name='deltaw', dimension=un.dimensionless),
StateVariable(name='interval', dimension=un.time),
StateVariable(name='M', dimension=un.dimensionless),
StateVariable(name='P', dimension=un.dimensionless),
StateVariable(name='wsyn', dimension=un.dimensionless)],
constants=[Constant('ONE_NA', 1.0, un.nA)],
regimes=[
Regime(
name="sole",
transitions=On(
'incoming_spike',
to='sole',
do=[
StateAssignment('tlast_post', 't'),
StateAssignment('tlast_pre', 'tlast_pre'),
StateAssignment(
'deltaw',
'P*pow(wmax - wsyn, muLTP) * '
'exp(-interval/tauLTP) + deltaw'),
StateAssignment('interval', 't - tlast_pre'),
StateAssignment(
'M', 'M*exp((-t + tlast_post)/tauLTD) - aLTD'),
StateAssignment(
'P', 'P*exp((-t + tlast_pre)/tauLTP) + aLTP'),
StateAssignment('wsyn', 'deltaw + wsyn')]))],
aliases=[Alias('wsyn_current', 'wsyn * ONE_NA')])
exc = DynamicsProperties(
name="ExcProps",
definition=exc_cls, properties={'tau': 1 * ms})
inh = DynamicsProperties(
name="ExcProps",
definition=inh_cls, properties={'tau': 1 * ms})
random_weight = un.Quantity(RandomDistributionValue(
RandomDistributionProperties(
name="normal",
definition=ninemlcatalog.load(
'randomdistribution/Normal', 'NormalDistribution'),
properties={'mean': 1.0, 'variance': 0.25})), un.nA)
random_wmax = un.Quantity(RandomDistributionValue(
RandomDistributionProperties(
name="normal",
definition=ninemlcatalog.load(
'randomdistribution/Normal', 'NormalDistribution'),
properties={'mean': 2.0, 'variance': 0.5})))
static = DynamicsProperties(
name="StaticProps",
definition=static_cls,
properties={'weight': random_weight})
stdp = DynamicsProperties(name="StdpProps", definition=stdp_cls,
properties={'tauLTP': 10 * un.ms,
'aLTD': 1,
'wmax': random_wmax,
'muLTP': 3,
'tauLTD': 20 * un.ms,
'aLTP': 4})
cell1 = DynamicsProperties(
name="Pop1Props",
definition=cell1_cls,
properties={'P1': 10 * un.ms,
'P2': 100 * un.uF,
'P3': -50 * un.mV})
cell2 = DynamicsProperties(
name="Pop2Props",
definition=cell2_cls,
properties={'P1': 20 * un.ms * un.mV,
'P2': 50 * un.uF,
'P3': -40 * un.mV,
'P4': -20 * un.mV})
cell3 = DynamicsProperties(
name="Pop3Props",
definition=cell1_cls,
properties={'P1': 30 * un.ms,
'P2': 50 * un.pF,
'P3': -20 * un.mV})
# =====================================================================
# Populations and Projections
# =====================================================================
pop1 = Population(
name="Pop1",
size=10,
cell=cell1)
pop2 = Population(
name="Pop2",
size=15,
cell=cell2)
pop3 = Population(
name="Pop3",
size=20,
cell=cell3)
proj1 = Projection(
name="Proj1",
pre=pop1, post=pop2, response=inh, plasticity=static,
connection_rule_properties=self.all_to_all,
port_connections=[
('pre', 'spike', 'response', 'spike'),
('response', 'i', 'post', 'i_ext'),
('plasticity', 'fixed_weight', 'response', 'weight')],
delay=self.delay)
proj2 = Projection(
name="Proj2",
pre=pop2, post=pop1, response=exc, plasticity=static,
connection_rule_properties=self.all_to_all,
port_connections=[
('pre', 'spike', 'response', 'spike'),
('pre', 'double_spike', 'response', 'double_spike'),
('response', 'i', 'post', 'i_ext'),
('plasticity', 'fixed_weight', 'response', 'weight')],
delay=self.delay)
proj3 = Projection(
name="Proj3",
pre=pop3, post=pop2, response=exc, plasticity=stdp,
connection_rule_properties=self.all_to_all,
port_connections=[
('pre', 'spike', 'response', 'spike'),
('response', 'i', 'post', 'i_ext'),
('plasticity', 'wsyn_current', 'response', 'weight'),
('pre', 'spike', 'plasticity', 'incoming_spike')],
delay=self.delay)
proj4 = Projection(
name="Proj4",
pre=pop3, post=pop1, response=exc, plasticity=static,
connection_rule_properties=self.all_to_all,
port_connections=[
('pre', 'spike', 'response', 'spike'),
('response', 'i', 'post', 'i_ext'),
('plasticity', 'fixed_weight', 'response', 'weight')],
delay=self.delay)
# =====================================================================
# Construct the Network
# =====================================================================
network = Network(
name="Net",
populations=(pop1, pop2, pop3),
projections=(proj1, proj2, proj3, proj4))
# =====================================================================
# Create expected dynamics arrays
# =====================================================================
dyn_array1 = ComponentArray(
"Pop1", pop1.size,
MultiDynamicsWithSynapsesProperties(
"Pop1_cell",
MultiDynamicsProperties(
"Pop1_cell",
sub_components={
'cell': cell1,
'Proj2': MultiDynamicsProperties(
name='Proj2_syn',
sub_components={'psr': exc.clone(),
'pls': static.clone()},
port_connections=[
('pls', 'fixed_weight', 'psr', 'weight')],
port_exposures=[
('psr', 'i'),
('psr', 'spike'),
('psr', 'double_spike')]),
'Proj4': MultiDynamicsProperties(
name='Proj4_syn',
sub_components={'psr': exc.clone(),
'pls': static.clone()},
port_connections=[
('pls', 'fixed_weight', 'psr', 'weight')],
port_exposures=[
('psr', 'i'),
('psr', 'spike')])},
port_connections=[
('Proj2', 'i__psr', 'cell', 'i_ext'),
('Proj4', 'i__psr', 'cell', 'i_ext')],
port_exposures=[
('cell', 'spike'),
('Proj2', 'double_spike__psr'),
('Proj2', 'spike__psr'),
('Proj4', 'spike__psr')]),
connection_property_sets=[
ConnectionPropertySet(
'spike__psr__Proj2',
[Property('weight__pls__Proj2', random_weight)]),
ConnectionPropertySet(
'double_spike__psr__Proj2',
[Property('weight__pls__Proj2', random_weight)]),
ConnectionPropertySet(
'spike__psr__Proj4',
[Property('weight__pls__Proj4', random_weight)])]))
dyn_array2 = ComponentArray(
"Pop2", pop2.size,
MultiDynamicsWithSynapsesProperties(
"Pop2_cell",
MultiDynamicsProperties(
"Pop2_cell",
sub_components={
'cell': cell2,
'Proj1': MultiDynamicsProperties(
name='Proj1_syn',
sub_components={'psr': inh.clone(),
'pls': static.clone()},
port_connections=[
('pls', 'fixed_weight', 'psr', 'weight')],
port_exposures=[
('psr', 'i'),
('psr', 'spike')])},
port_connections=[
('Proj1', 'i__psr', 'cell', 'i_ext')],
port_exposures=[
('cell', 'spike'),
('cell', 'double_spike'),
('Proj1', 'spike__psr'),
('cell', 'i_ext')]),
connection_property_sets=[
ConnectionPropertySet(
'spike__psr__Proj1',
[Property('weight__pls__Proj1', random_weight)])],
synapse_propertiess=[
SynapseProperties(
name='Proj3',
dynamics_properties=MultiDynamicsProperties(
name='Proj3_syn',
sub_components={'psr': exc,
'pls': stdp},
port_connections=[
AnalogPortConnection(
'wsyn_current', 'weight',
sender_name='pls', receiver_name='psr')],
port_exposures=[('psr', 'spike'),
('pls', 'incoming_spike'),
('psr', 'i')]),
port_connections=[
AnalogPortConnection(
'i__psr__Proj3', 'i_ext__cell__reduce',
sender_role='synapse',
receiver_role='post')])]))
dyn_array3 = ComponentArray(
"Pop3", pop3.size,
MultiDynamicsWithSynapsesProperties(
"Pop3_cell",
MultiDynamicsProperties(
'Pop3_cell',
sub_components={'cell': cell3},
port_exposures=[('cell', 'spike'),
('cell', 'i_ext')],
port_connections=[])))
conn_group1 = EventConnectionGroup(
'Proj1', dyn_array1, dyn_array2, 'spike__cell',
'spike__psr__Proj1',
connectivity=Connectivity(self.all_to_all, pop1.size, pop2.size),
delay=self.delay)
conn_group2 = EventConnectionGroup(
'Proj2__pre__spike__synapse__spike__psr', dyn_array2,
dyn_array1, 'spike__cell',
'spike__psr__Proj2',
connectivity=Connectivity(self.all_to_all, pop2.size, pop1.size),
delay=self.delay)
conn_group3 = EventConnectionGroup(
'Proj2__pre__double_spike__synapse__double_spike__psr',
dyn_array2, dyn_array1, 'double_spike__cell',
'double_spike__psr__Proj2',
connectivity=Connectivity(self.all_to_all, pop2.size, pop1.size),
delay=self.delay)
conn_group4 = EventConnectionGroup(
'Proj3__pre__spike__synapse__spike__psr', dyn_array3,
dyn_array2, 'spike__cell',
'spike__psr__Proj3',
connectivity=Connectivity(self.all_to_all, pop3.size, pop2.size),
delay=self.delay)
conn_group5 = EventConnectionGroup(
'Proj3__pre__spike__synapse__incoming_spike__pls',
dyn_array3, dyn_array2,
'spike__cell',
'incoming_spike__pls__Proj3',
connectivity=Connectivity(self.all_to_all, pop3.size, pop2.size),
delay=self.delay)
conn_group6 = EventConnectionGroup(
'Proj4', dyn_array3, dyn_array1,
'spike__cell',
'spike__psr__Proj4',
connectivity=Connectivity(self.all_to_all, pop3.size, pop1.size),
delay=self.delay)
# =====================================================================
# Test equality between network automatically generated dynamics arrays
# and manually generated expected one
# =====================================================================
(component_arrays, connection_groups,
_) = BasePype9Network._flatten_to_arrays_and_conns(network)
self.assertEqual(
component_arrays['Pop1'], dyn_array1,
"Mismatch between generated and expected dynamics arrays:\n {}"
.format(component_arrays['Pop1'].find_mismatch(dyn_array1)))
self.assertEqual(
component_arrays['Pop2'], dyn_array2,
"Mismatch between generated and expected dynamics arrays:\n {}"
.format(component_arrays['Pop2'].find_mismatch(dyn_array2)))
self.assertEqual(
component_arrays['Pop3'], dyn_array3,
"Mismatch between generated and expected dynamics arrays:\n {}"
.format(component_arrays['Pop3'].find_mismatch(dyn_array3)))
# =====================================================================
# Test equality between network automatically generated connection
# groups and manually generated expected ones
# =====================================================================
self.assertEqual(
connection_groups['Proj1'], conn_group1,
"Mismatch between generated and expected connection groups:\n {}"
.format(
connection_groups['Proj1'].find_mismatch(conn_group1)))
self.assertEqual(
connection_groups['Proj2__pre__spike__synapse__spike__psr'],
conn_group2,
"Mismatch between generated and expected connection groups:\n {}"
.format(
connection_groups['Proj2__pre__spike__synapse__spike__psr']
.find_mismatch(conn_group2)))
self.assertEqual(
connection_groups[
'Proj2__pre__double_spike__synapse__double_spike__psr'],
conn_group3,
"Mismatch between generated and expected connection groups:\n {}"
.format(
connection_groups[
'Proj2__pre__double_spike__synapse__double_spike__psr']
.find_mismatch(conn_group3)))
self.assertEqual(
connection_groups[
'Proj3__pre__spike__synapse__spike__psr'],
conn_group4,
"Mismatch between generated and expected connection groups:\n {}"
.format(
connection_groups[
'Proj3__pre__spike__synapse__spike__psr']
.find_mismatch(conn_group4)))
self.assertEqual(
connection_groups[
'Proj3__pre__spike__synapse__incoming_spike__pls'],
conn_group5,
"Mismatch between generated and expected connection groups:\n {}"
.format(
connection_groups[
'Proj3__pre__spike__synapse__incoming_spike__pls']
.find_mismatch(conn_group5)))
self.assertEqual(
connection_groups['Proj4'], conn_group6,
"Mismatch between generated and expected connection groups:\n {}"
.format(
connection_groups['Proj4'] .find_mismatch(conn_group6)))
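# Editor's note (illustrative sketch, not part of the original test module):
# the activity tests above repeatedly compare a measured quantity against a
# reference value using a relative percent error with a guard for zero
# references. The recurring pattern, extracted as a hypothetical helper:
def _percent_error(value, reference):
    """Relative error in percent; 0.0 if both are zero, inf if only ref is zero."""
    if reference:
        return abs(value / reference - 1.0) * 100.0
    if not value:
        return 0.0
    return float('inf')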
|
mit
|
materialsproject/pymatgen
|
pymatgen/analysis/magnetism/heisenberg.py
|
1
|
37328
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements a simple algorithm for extracting nearest neighbor
exchange parameters by mapping low energy magnetic orderings to a Heisenberg
model.
"""
import copy
import logging
import sys
from ast import literal_eval
import numpy as np
import pandas as pd
from monty.json import MSONable, jsanitize
from monty.serialization import dumpfn
from pymatgen.core.structure import Structure
from pymatgen.analysis.graphs import StructureGraph
from pymatgen.analysis.local_env import MinimumDistanceNN
from pymatgen.analysis.magnetism import CollinearMagneticStructureAnalyzer, Ordering
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
__author__ = "ncfrey"
__version__ = "0.1"
__maintainer__ = "Nathan C. Frey"
__email__ = "[email protected]"
__status__ = "Development"
__date__ = "June 2019"
class HeisenbergMapper:
"""
Class to compute exchange parameters from low energy magnetic orderings.
"""
def __init__(self, ordered_structures, energies, cutoff=0.0, tol=0.02):
"""
Exchange parameters are computed by mapping to a classical Heisenberg
model. Strategy is the scheme for generating neighbors. Currently only
MinimumDistanceNN is implemented.
n+1 unique orderings are required to compute n exchange
parameters.
First run a MagneticOrderingsWF to obtain low energy collinear magnetic
orderings and find the magnetic ground state. Then enumerate magnetic
states with the ground state as the input structure, find the subset
of supercells that map to the ground state, and do static calculations
for these orderings.
Args:
ordered_structures (list): Structure objects with magmoms.
energies (list): Total energies of each relaxed magnetic structure.
cutoff (float): Cutoff in Angstrom for nearest neighbor search.
Defaults to 0 (only NN, no NNN, etc.)
tol (float): Tolerance (in Angstrom) on nearest neighbor distances
being equal.
Parameters:
strategy (object): Class from pymatgen.analysis.local_env for
constructing graphs.
sgraphs (list): StructureGraph objects.
unique_site_ids (dict): Maps each site to its unique numerical
identifier.
wyckoff_ids (dict): Maps unique numerical identifier to wyckoff
position.
            nn_interactions (dict): {i: j} pairs of NN interactions
between unique sites.
dists (dict): NN, NNN, and NNNN interaction distances
ex_mat (DataFrame): Invertible Heisenberg Hamiltonian for each
graph.
ex_params (dict): Exchange parameter values (meV/atom)
"""
# Save original copies of inputs
self.ordered_structures_ = ordered_structures
self.energies_ = energies
# Sanitize inputs and optionally order them by energy / magnetic moments
hs = HeisenbergScreener(ordered_structures, energies, screen=False)
ordered_structures = hs.screened_structures
energies = hs.screened_energies
self.ordered_structures = ordered_structures
self.energies = energies
self.cutoff = cutoff
self.tol = tol
# Get graph representations
self.sgraphs = self._get_graphs(cutoff, ordered_structures)
# Get unique site ids and wyckoff symbols
self.unique_site_ids, self.wyckoff_ids = self._get_unique_sites(ordered_structures[0])
# These attributes are set by internal methods
self.nn_interactions = None
self.dists = None
self.ex_mat = None
self.ex_params = None
# Check how many commensurate graphs we found
if len(self.sgraphs) < 2:
print("We need at least 2 unique orderings.")
sys.exit(1)
else: # Set attributes
self._get_nn_dict()
self._get_exchange_df()
@staticmethod
def _get_graphs(cutoff, ordered_structures):
"""
Generate graph representations of magnetic structures with nearest
neighbor bonds. Right now this only works for MinimumDistanceNN.
Args:
cutoff (float): Cutoff in Angstrom for nearest neighbor search.
ordered_structures (list): Structure objects.
Returns:
sgraphs (list): StructureGraph objects.
"""
# Strategy for finding neighbors
if cutoff:
strategy = MinimumDistanceNN(cutoff=cutoff, get_all_sites=True)
else:
strategy = MinimumDistanceNN() # only NN
# Generate structure graphs
sgraphs = [StructureGraph.with_local_env_strategy(s, strategy=strategy) for s in ordered_structures]
return sgraphs
@staticmethod
def _get_unique_sites(structure):
"""
Get dict that maps site indices to unique identifiers.
Args:
structure (Structure): ground state Structure object.
Returns:
unique_site_ids (dict): maps tuples of equivalent site indices to a
unique int identifier
wyckoff_ids (dict): maps tuples of equivalent site indices to their
wyckoff symbols
"""
# Get a nonmagnetic representation of the supercell geometry
s0 = CollinearMagneticStructureAnalyzer(
structure, make_primitive=False, threshold=0.0
).get_nonmagnetic_structure(make_primitive=False)
# Get unique sites and wyckoff positions
if "wyckoff" in s0.site_properties:
s0.remove_site_property("wyckoff")
symm_s0 = SpacegroupAnalyzer(s0).get_symmetrized_structure()
wyckoff = ["n/a"] * len(symm_s0)
equivalent_indices = symm_s0.equivalent_indices
wyckoff_symbols = symm_s0.wyckoff_symbols
# Construct dictionaries that map sites to numerical and wyckoff
# identifiers
unique_site_ids = {}
wyckoff_ids = {}
i = 0
for indices, symbol in zip(equivalent_indices, wyckoff_symbols):
unique_site_ids[tuple(indices)] = i
wyckoff_ids[i] = symbol
i += 1
for index in indices:
wyckoff[index] = symbol
return unique_site_ids, wyckoff_ids
def _get_nn_dict(self):
"""Get dict of unique nearest neighbor interactions.
Returns:
None: (sets self.nn_interactions and self.dists instance variables)
"""
tol = self.tol # tolerance on NN distances
sgraph = self.sgraphs[0]
unique_site_ids = self.unique_site_ids
nn_dict = {}
nnn_dict = {}
nnnn_dict = {}
all_dists = []
# Loop over unique sites and get neighbor distances up to NNNN
for k in unique_site_ids: # pylint: disable=C0206
i = k[0]
i_key = unique_site_ids[k]
connected_sites = sgraph.get_connected_sites(i)
dists = [round(cs[-1], 2) for cs in connected_sites] # i<->j distances
dists = sorted(list(set(dists))) # NN, NNN, NNNN, etc.
dists = dists[:3] # keep up to NNNN
all_dists += dists
# Keep only up to NNNN and call dists equal if they are within tol
all_dists = sorted(list(set(all_dists)))
rm_list = []
for idx, d in enumerate(all_dists[:-1]):
if abs(d - all_dists[idx + 1]) < tol:
rm_list.append(idx + 1)
all_dists = [d for idx, d in enumerate(all_dists) if idx not in rm_list]
if len(all_dists) < 3: # pad with zeros
all_dists += [0.0] * (3 - len(all_dists))
all_dists = all_dists[:3]
labels = ["nn", "nnn", "nnnn"]
dists = dict(zip(labels, all_dists))
# Get dictionary keys for interactions
for k in unique_site_ids: # pylint: disable=C0206
i = k[0]
i_key = unique_site_ids[k]
connected_sites = sgraph.get_connected_sites(i)
# Loop over sites and determine unique NN, NNN, etc. interactions
for cs in connected_sites:
dist = round(cs[-1], 2) # i_j distance
j = cs[2] # j index
for key, value in unique_site_ids.items():
if j in key:
j_key = value
if abs(dist - dists["nn"]) <= tol:
nn_dict[i_key] = j_key
elif abs(dist - dists["nnn"]) <= tol:
nnn_dict[i_key] = j_key
elif abs(dist - dists["nnnn"]) <= tol:
nnnn_dict[i_key] = j_key
nn_interactions = {"nn": nn_dict, "nnn": nnn_dict, "nnnn": nnnn_dict}
self.dists = dists
self.nn_interactions = nn_interactions
def _get_exchange_df(self):
"""
Loop over all sites in a graph and count the number and types of
nearest neighbor interactions, computing +-|S_i . S_j| to construct
a Heisenberg Hamiltonian for each graph.
Returns:
None: (sets self.ex_mat instance variable)
TODO:
* Deal with large variance in |S| across configs
"""
sgraphs = self.sgraphs
tol = self.tol
unique_site_ids = self.unique_site_ids
nn_interactions = self.nn_interactions
dists = self.dists
        # Get |site magmoms| from FM ordering so that S_i and S_j are consistent?
        # Large variations in |S| across configurations can throw this off
# fm_struct = self.get_low_energy_orderings()[0]
# Total energy and nonmagnetic energy contribution
columns = ["E", "E0"]
# Get labels of unique NN interactions
for k0, v0 in nn_interactions.items():
for i, j in v0.items(): # i and j indices
c = str(i) + "-" + str(j) + "-" + str(k0)
c_rev = str(j) + "-" + str(i) + "-" + str(k0)
if c not in columns and c_rev not in columns:
columns.append(c)
num_sgraphs = len(sgraphs)
# Keep n interactions (not counting 'E') for n+1 structure graphs
columns = columns[: num_sgraphs + 1]
num_nn_j = len(columns) - 1 # ignore total energy
j_columns = [name for name in columns if name not in ["E", "E0"]]
ex_mat_empty = pd.DataFrame(columns=columns)
ex_mat = ex_mat_empty.copy()
if len(j_columns) < 2:
self.ex_mat = ex_mat # Only <J> can be calculated here
else:
sgraphs_copy = copy.deepcopy(sgraphs)
sgraph_index = 0
# Loop over all sites in each graph and compute |S_i . S_j|
# for n+1 unique graphs to compute n exchange params
for graph in sgraphs:
sgraph = sgraphs_copy.pop(0)
ex_row = pd.DataFrame(np.zeros((1, num_nn_j + 1)), index=[sgraph_index], columns=columns)
for i, node in enumerate(sgraph.graph.nodes):
# s_i_sign = np.sign(sgraph.structure.site_properties['magmom'][i])
s_i = sgraph.structure.site_properties["magmom"][i]
for k, v in unique_site_ids.items():
if i in k:
i_index = v
# Get all connections for ith site and compute |S_i . S_j|
connections = sgraph.get_connected_sites(i)
# dists = [round(cs[-1], 2) for cs in connections] # i<->j distances
# dists = sorted(list(set(dists))) # NN, NNN, NNNN, etc.
for j, connection in enumerate(connections):
j_site = connection[2]
dist = round(connection[-1], 2) # i_j distance
# s_j_sign = np.sign(sgraph.structure.site_properties['magmom'][j_site])
s_j = sgraph.structure.site_properties["magmom"][j_site]
for k, v in unique_site_ids.items():
if j_site in k:
j_index = v
# Determine order of connection
if abs(dist - dists["nn"]) <= tol:
order = "-nn"
elif abs(dist - dists["nnn"]) <= tol:
order = "-nnn"
elif abs(dist - dists["nnnn"]) <= tol:
order = "-nnnn"
j_ij = str(i_index) + "-" + str(j_index) + order
j_ji = str(j_index) + "-" + str(i_index) + order
if j_ij in ex_mat.columns:
ex_row.at[sgraph_index, j_ij] -= s_i * s_j
elif j_ji in ex_mat.columns:
ex_row.at[sgraph_index, j_ji] -= s_i * s_j
# Ignore the row if it is a duplicate to avoid singular matrix
if ex_mat.append(ex_row)[j_columns].equals(
ex_mat.append(ex_row)[j_columns].drop_duplicates(keep="first")
):
e_index = self.ordered_structures.index(sgraph.structure)
ex_row.at[sgraph_index, "E"] = self.energies[e_index]
sgraph_index += 1
ex_mat = ex_mat.append(ex_row)
# if sgraph_index == num_nn_j: # check for zero columns
# zeros = [b for b in (ex_mat[j_columns] == 0).all(axis=0)]
# if True in zeros:
# sgraph_index -= 1 # keep looking
ex_mat[j_columns] = ex_mat[j_columns].div(2.0) # 1/2 factor in Heisenberg Hamiltonian
ex_mat[["E0"]] = 1 # Nonmagnetic contribution
# Check for singularities and delete columns with all zeros
zeros = list((ex_mat == 0).all(axis=0))
if True in zeros:
c = ex_mat.columns[zeros.index(True)]
ex_mat = ex_mat.drop(columns=[c], axis=1)
# ex_mat = ex_mat.drop(ex_mat.tail(len_zeros).index)
# Force ex_mat to be square
ex_mat = ex_mat[: ex_mat.shape[1] - 1]
self.ex_mat = ex_mat
def get_exchange(self):
"""
Take Heisenberg Hamiltonian and corresponding energy for each row and
solve for the exchange parameters.
Returns:
ex_params (dict): Exchange parameter values (meV/atom).
"""
ex_mat = self.ex_mat
# Solve the matrix equation for J_ij values
E = ex_mat[["E"]]
j_names = [j for j in ex_mat.columns if j not in ["E"]]
# Only 1 NN interaction
if len(j_names) < 3:
# Estimate exchange by J ~ E_AFM - E_FM
j_avg = self.estimate_exchange()
ex_params = {"<J>": j_avg}
self.ex_params = ex_params
return ex_params
# Solve eigenvalue problem for more than 1 NN interaction
H = ex_mat.loc[:, ex_mat.columns != "E"].values
H_inv = np.linalg.inv(H)
j_ij = np.dot(H_inv, E)
# Convert J_ij to meV
j_ij[1:] *= 1000 # J_ij in meV
j_ij = j_ij.tolist()
ex_params = {j_name: j[0] for j_name, j in zip(j_names, j_ij)}
self.ex_params = ex_params
return ex_params
def get_low_energy_orderings(self):
"""
Find lowest energy FM and AFM orderings to compute E_AFM - E_FM.
Returns:
fm_struct (Structure): fm structure with 'magmom' site property
afm_struct (Structure): afm structure with 'magmom' site property
fm_e (float): fm energy
afm_e (float): afm energy
"""
fm_struct, afm_struct = None, None
mag_min = np.inf
mag_max = 0.001
fm_e_min = 0
afm_e_min = 0
# epas = [e / len(s) for (e, s) in zip(self.energies, self.ordered_structures)]
for s, e in zip(self.ordered_structures, self.energies):
ordering = CollinearMagneticStructureAnalyzer(s, threshold=0.0, make_primitive=False).ordering
magmoms = s.site_properties["magmom"]
# Try to find matching orderings first
if ordering == Ordering.FM and e < fm_e_min:
fm_struct = s
mag_max = abs(sum(magmoms))
fm_e = e
fm_e_min = e
if ordering == Ordering.AFM and e < afm_e_min:
afm_struct = s
afm_e = e
mag_min = abs(sum(magmoms))
afm_e_min = e
# Brute force search for closest thing to FM and AFM
if not fm_struct or not afm_struct:
for s, e in zip(self.ordered_structures, self.energies):
magmoms = s.site_properties["magmom"]
if abs(sum(magmoms)) > mag_max: # FM ground state
fm_struct = s
fm_e = e
mag_max = abs(sum(magmoms))
# AFM ground state
if abs(sum(magmoms)) < mag_min:
afm_struct = s
afm_e = e
mag_min = abs(sum(magmoms))
afm_e_min = e
elif abs(sum(magmoms)) == 0 and mag_min == 0:
if e < afm_e_min:
afm_struct = s
afm_e = e
afm_e_min = e
# Convert to magnetic structures with 'magmom' site property
fm_struct = CollinearMagneticStructureAnalyzer(
fm_struct, make_primitive=False, threshold=0.0
).get_structure_with_only_magnetic_atoms(make_primitive=False)
afm_struct = CollinearMagneticStructureAnalyzer(
afm_struct, make_primitive=False, threshold=0.0
).get_structure_with_only_magnetic_atoms(make_primitive=False)
return fm_struct, afm_struct, fm_e, afm_e
def estimate_exchange(self, fm_struct=None, afm_struct=None, fm_e=None, afm_e=None):
"""
Estimate <J> for a structure based on low energy FM and AFM orderings.
Args:
fm_struct (Structure): fm structure with 'magmom' site property
afm_struct (Structure): afm structure with 'magmom' site property
fm_e (float): fm energy/atom
afm_e (float): afm energy/atom
Returns:
j_avg (float): Average exchange parameter (meV/atom)
"""
# Get low energy orderings if not supplied
if any(arg is None for arg in [fm_struct, afm_struct, fm_e, afm_e]):
fm_struct, afm_struct, fm_e, afm_e = self.get_low_energy_orderings()
magmoms = fm_struct.site_properties["magmom"]
# Normalize energies by number of magnetic ions
# fm_e = fm_e / len(magmoms)
# afm_e = afm_e / len(afm_magmoms)
m_avg = np.mean([np.sqrt(m ** 2) for m in magmoms])
        # If m_avg for the FM config is < 1 we won't get sensible results.
if m_avg < 1:
iamthedanger = """
Local magnetic moments are small (< 1 muB / atom). The
exchange parameters may be wrong, but <J> and the mean
field critical temperature estimate may be OK.
"""
logging.warning(iamthedanger)
delta_e = afm_e - fm_e # J > 0 -> FM
j_avg = delta_e / (m_avg ** 2) # eV / magnetic ion
j_avg *= 1000 # meV / ion
return j_avg
def get_mft_temperature(self, j_avg):
"""
Crude mean field estimate of critical temperature based on <J> for
one sublattice, or solving the coupled equations for a multisublattice
material.
Args:
            j_avg (float): Average exchange parameter (meV/atom)
Returns:
mft_t (float): Critical temperature (K)
"""
num_sublattices = len(self.unique_site_ids)
k_boltzmann = 0.0861733 # meV/K
# Only 1 magnetic sublattice
if num_sublattices == 1:
mft_t = 2 * abs(j_avg) / 3 / k_boltzmann
else: # multiple magnetic sublattices
omega = np.zeros((num_sublattices, num_sublattices))
ex_params = self.ex_params
ex_params = {k: v for (k, v) in ex_params.items() if k != "E0"} # ignore E0
for k in ex_params:
# split into i, j unique site identifiers
sites = k.split("-")
sites = [int(num) for num in sites[:2]] # cut 'nn' identifier
i, j = sites[0], sites[1]
omega[i, j] += ex_params[k]
omega[j, i] += ex_params[k]
omega = omega * 2 / 3 / k_boltzmann
eigenvals, eigenvecs = np.linalg.eig(omega)
mft_t = max(eigenvals)
if mft_t > 1500: # Not sensible!
stayoutofmyterritory = """
This mean field estimate is too high! Probably
the true low energy orderings were not given as inputs.
"""
logging.warning(stayoutofmyterritory)
return mft_t
def get_interaction_graph(self, filename=None):
"""
Get a StructureGraph with edges and weights that correspond to exchange
interactions and J_ij values, respectively.
Args:
filename (str): if not None, save interaction graph to filename.
Returns:
igraph (StructureGraph): Exchange interaction graph.
"""
structure = self.ordered_structures[0]
sgraph = self.sgraphs[0]
igraph = StructureGraph.with_empty_graph(
structure, edge_weight_name="exchange_constant", edge_weight_units="meV"
)
if "<J>" in self.ex_params: # Only <J> is available
warning_msg = """
Only <J> is available. The interaction graph will not tell
you much.
"""
logging.warning(warning_msg)
# J_ij exchange interaction matrix
for i, node in enumerate(sgraph.graph.nodes):
connections = sgraph.get_connected_sites(i)
for c in connections:
jimage = c[1] # relative integer coordinates of atom j
j = c[2] # index of neighbor
dist = c[-1] # i <-> j distance
j_exc = self._get_j_exc(i, j, dist)
igraph.add_edge(i, j, to_jimage=jimage, weight=j_exc, warn_duplicates=False)
# Save to a json file if desired
if filename:
if filename.endswith(".json"):
dumpfn(igraph, filename)
else:
filename += ".json"
dumpfn(igraph, filename)
return igraph
def _get_j_exc(self, i, j, dist):
"""
Convenience method for looking up exchange parameter between two sites.
Args:
i (int): index of ith site
j (int): index of jth site
dist (float): distance (Angstrom) between sites
                (rounded to 2 decimal places, matching self.dists)
Returns:
j_exc (float): Exchange parameter in meV
"""
# Get unique site identifiers
for k, v in self.unique_site_ids.items():
if i in k:
i_index = v
if j in k:
j_index = v
order = ""
# Determine order of interaction
if abs(dist - self.dists["nn"]) <= self.tol:
order = "-nn"
elif abs(dist - self.dists["nnn"]) <= self.tol:
order = "-nnn"
elif abs(dist - self.dists["nnnn"]) <= self.tol:
order = "-nnnn"
j_ij = str(i_index) + "-" + str(j_index) + order
j_ji = str(j_index) + "-" + str(i_index) + order
if j_ij in self.ex_params:
j_exc = self.ex_params[j_ij]
elif j_ji in self.ex_params:
j_exc = self.ex_params[j_ji]
else:
j_exc = 0
# Check if only averaged NN <J> values are available
if "<J>" in self.ex_params and order == "-nn":
j_exc = self.ex_params["<J>"]
return j_exc
def get_heisenberg_model(self):
"""Save results of mapping to a HeisenbergModel object.
Returns:
hmodel (HeisenbergModel): MSONable object.
"""
# Original formula unit with nonmagnetic ions
hm_formula = str(self.ordered_structures_[0].composition.reduced_formula)
hm_structures = self.ordered_structures
hm_energies = self.energies
hm_cutoff = self.cutoff
hm_tol = self.tol
hm_sgraphs = self.sgraphs
hm_usi = self.unique_site_ids
hm_wids = self.wyckoff_ids
hm_nni = self.nn_interactions
hm_d = self.dists
# Exchange matrix DataFrame in json format
hm_em = self.ex_mat.to_json()
hm_ep = self.get_exchange()
hm_javg = self.estimate_exchange()
hm_igraph = self.get_interaction_graph()
hmodel = HeisenbergModel(
hm_formula,
hm_structures,
hm_energies,
hm_cutoff,
hm_tol,
hm_sgraphs,
hm_usi,
hm_wids,
hm_nni,
hm_d,
hm_em,
hm_ep,
hm_javg,
hm_igraph,
)
return hmodel
class HeisenbergScreener:
"""
Class to clean and screen magnetic orderings.
"""
def __init__(self, structures, energies, screen=False):
"""
This class pre-processes magnetic orderings and energies for
HeisenbergMapper. It prioritizes low-energy orderings with large and
localized magnetic moments.
Args:
structures (list): Structure objects with magnetic moments.
energies (list): Energies/atom of magnetic orderings.
screen (bool): Try to screen out high energy and low-spin configurations.
Attributes:
screened_structures (list): Sorted structures.
screened_energies (list): Sorted energies.
"""
# Cleanup
structures, energies = self._do_cleanup(structures, energies)
n_structures = len(structures)
# If there are more than 2 structures, we want to perform a
# screening to prioritize well-behaved orderings
if screen and n_structures > 2:
structures, energies = self._do_screen(structures, energies)
self.screened_structures = structures
self.screened_energies = energies
@staticmethod
def _do_cleanup(structures, energies):
"""Sanitize input structures and energies.
Takes magnetic structures and performs the following operations
- Erases nonmagnetic ions and gives all ions ['magmom'] site prop
- Converts total energies -> energy / magnetic ion
- Checks for duplicate/degenerate orderings
- Sorts by energy
Args:
structures (list): Structure objects with magmoms.
energies (list): Corresponding energies.
Returns:
ordered_structures (list): Sanitized structures.
ordered_energies (list): Sorted energies.
"""
# Get only magnetic ions & give all structures site_properties['magmom']
# zero threshold so that magnetic ions with small moments
# are preserved
ordered_structures = [
CollinearMagneticStructureAnalyzer(
s, make_primitive=False, threshold=0.0
).get_structure_with_only_magnetic_atoms(make_primitive=False)
for s in structures
]
# Convert to energies / magnetic ion
energies = [e / len(s) for (e, s) in zip(energies, ordered_structures)]
# Check for duplicate / degenerate states (sometimes different initial
# configs relax to the same state)
remove_list = []
for i, e in enumerate(energies):
e_tol = 6 # 10^-6 eV/atom tol on energies
e = round(e, e_tol)
if i not in remove_list:
for i_check, e_check in enumerate(energies):
e_check = round(e_check, e_tol)
if i != i_check and i_check not in remove_list and e == e_check:
remove_list.append(i_check)
# Also discard structures with small |magmoms| < 0.1 uB
# xx - get rid of these or just bury them in the list?
# for i, s in enumerate(ordered_structures):
# magmoms = s.site_properties['magmom']
# if i not in remove_list:
# if any(abs(m) < 0.1 for m in magmoms):
# remove_list.append(i)
# Remove duplicates
if len(remove_list):
ordered_structures = [s for i, s in enumerate(ordered_structures) if i not in remove_list]
energies = [e for i, e in enumerate(energies) if i not in remove_list]
# Sort by energy if not already sorted
ordered_structures = [s for _, s in sorted(zip(energies, ordered_structures), reverse=False)]
ordered_energies = sorted(energies, reverse=False)
return ordered_structures, ordered_energies
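    # Sketch of the degeneracy check above (hypothetical numbers): energies of
    # [-0.1234567, -0.1234569, -0.2] eV/magnetic ion round, at the 10^-6
    # tolerance, to [-0.123457, -0.123457, -0.2], so the second ordering is
    # discarded as a duplicate of the first before sorting.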
@staticmethod
def _do_screen(structures, energies):
"""Screen and sort magnetic orderings based on some criteria.
        Prioritize low-energy orderings and large, localized magmoms. _do_cleanup() should be run first to sanitize inputs.
Args:
structures (list): At least three structure objects.
energies (list): Energies.
Returns:
screened_structures (list): Sorted structures.
screened_energies (list): Sorted energies.
"""
magmoms = [s.site_properties["magmom"] for s in structures]
n_below_1ub = [len([m for m in ms if abs(m) < 1]) for ms in magmoms]
df = pd.DataFrame(
{
"structure": structures,
"energy": energies,
"magmoms": magmoms,
"n_below_1ub": n_below_1ub,
}
)
# keep the ground and first excited state fixed to capture the
# low-energy spectrum
index = list(df.index)[2:]
df_high_energy = df.iloc[2:]
# Prioritize structures with fewer magmoms < 1 uB
df_high_energy = df_high_energy.sort_values(by="n_below_1ub")
index = [0, 1] + list(df_high_energy.index)
# sort
df = df.reindex(index)
screened_structures = list(df["structure"].values)
screened_energies = list(df["energy"].values)
return screened_structures, screened_energies
class HeisenbergModel(MSONable):
"""
Store a Heisenberg model fit to low-energy magnetic orderings.
Intended to be generated by HeisenbergMapper.get_heisenberg_model().
"""
def __init__(
self,
formula=None,
structures=None,
energies=None,
cutoff=None,
tol=None,
sgraphs=None,
unique_site_ids=None,
wyckoff_ids=None,
nn_interactions=None,
dists=None,
ex_mat=None,
ex_params=None,
javg=None,
igraph=None,
):
"""
Args:
formula (str): Reduced formula of compound.
structures (list): Structure objects with magmoms.
energies (list): Energies of each relaxed magnetic structure.
cutoff (float): Cutoff in Angstrom for nearest neighbor search.
tol (float): Tolerance (in Angstrom) on nearest neighbor distances being equal.
sgraphs (list): StructureGraph objects.
unique_site_ids (dict): Maps each site to its unique numerical
identifier.
wyckoff_ids (dict): Maps unique numerical identifier to wyckoff
position.
            nn_interactions (dict): {i: j} pairs of NN interactions
between unique sites.
dists (dict): NN, NNN, and NNNN interaction distances
ex_mat (DataFrame): Invertible Heisenberg Hamiltonian for each
graph.
ex_params (dict): Exchange parameter values (meV/atom).
javg (float): <J> exchange param (meV/atom).
igraph (StructureGraph): Exchange interaction graph.
"""
self.formula = formula
self.structures = structures
self.energies = energies
self.cutoff = cutoff
self.tol = tol
self.sgraphs = sgraphs
self.unique_site_ids = unique_site_ids
self.wyckoff_ids = wyckoff_ids
self.nn_interactions = nn_interactions
self.dists = dists
self.ex_mat = ex_mat
self.ex_params = ex_params
self.javg = javg
self.igraph = igraph
def as_dict(self):
"""
Because some dicts have tuple keys, some sanitization is required for json compatibility.
"""
d = {}
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
d["@version"] = __version__
d["formula"] = self.formula
d["structures"] = [s.as_dict() for s in self.structures]
d["energies"] = self.energies
d["cutoff"] = self.cutoff
d["tol"] = self.tol
d["sgraphs"] = [sgraph.as_dict() for sgraph in self.sgraphs]
d["dists"] = self.dists
d["ex_params"] = self.ex_params
d["javg"] = self.javg
d["igraph"] = self.igraph.as_dict()
# Sanitize tuple & int keys
d["ex_mat"] = jsanitize(self.ex_mat)
d["nn_interactions"] = jsanitize(self.nn_interactions)
d["unique_site_ids"] = jsanitize(self.unique_site_ids)
d["wyckoff_ids"] = jsanitize(self.wyckoff_ids)
return d
@classmethod
def from_dict(cls, d):
"""Create a HeisenbergModel from a dict."""
# Reconstitute the site ids
usids = {}
wids = {}
nnis = {}
for k, v in d["nn_interactions"].items():
nn_dict = {}
for k1, v1 in v.items():
key = literal_eval(k1)
nn_dict[key] = v1
nnis[k] = nn_dict
for k, v in d["unique_site_ids"].items():
key = literal_eval(k)
if isinstance(key, int):
usids[tuple([key])] = v
elif isinstance(key, tuple):
usids[key] = v
for k, v in d["wyckoff_ids"].items():
key = literal_eval(k)
wids[key] = v
# Reconstitute the structure and graph objects
structures = []
sgraphs = []
for v in d["structures"]:
structures.append(Structure.from_dict(v))
for v in d["sgraphs"]:
sgraphs.append(StructureGraph.from_dict(v))
# Interaction graph
igraph = StructureGraph.from_dict(d["igraph"])
# Reconstitute the exchange matrix DataFrame
try:
ex_mat = eval(d["ex_mat"])
ex_mat = pd.DataFrame.from_dict(ex_mat)
except SyntaxError: # if ex_mat is empty
ex_mat = pd.DataFrame(columns=["E", "E0"])
hmodel = HeisenbergModel(
formula=d["formula"],
structures=structures,
energies=d["energies"],
cutoff=d["cutoff"],
tol=d["tol"],
sgraphs=sgraphs,
unique_site_ids=usids,
wyckoff_ids=wids,
nn_interactions=nnis,
dists=d["dists"],
ex_mat=ex_mat,
ex_params=d["ex_params"],
javg=d["javg"],
igraph=igraph,
)
return hmodel
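    # Note on serialization (usage sketch, not from the original source):
    # HeisenbergModel.from_dict(hmodel.as_dict()) is intended to round-trip;
    # tuple keys are stringified by jsanitize() in as_dict() and rebuilt here
    # with literal_eval().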
def _get_j_exc(self, i, j, dist):
"""
Convenience method for looking up exchange parameter between two sites.
Args:
i (int): index of ith site
j (int): index of jth site
dist (float): distance (Angstrom) between sites +- tol
Returns:
j_exc (float): Exchange parameter in meV
"""
# Get unique site identifiers
for k in self.unique_site_ids.keys():
if i in k:
i_index = self.unique_site_ids[k]
if j in k:
j_index = self.unique_site_ids[k]
order = ""
# Determine order of interaction
if abs(dist - self.dists["nn"]) <= self.tol:
order = "-nn"
elif abs(dist - self.dists["nnn"]) <= self.tol:
order = "-nnn"
elif abs(dist - self.dists["nnnn"]) <= self.tol:
order = "-nnnn"
j_ij = str(i_index) + "-" + str(j_index) + order
j_ji = str(j_index) + "-" + str(i_index) + order
if j_ij in self.ex_params:
j_exc = self.ex_params[j_ij]
elif j_ji in self.ex_params:
j_exc = self.ex_params[j_ji]
else:
j_exc = 0
# Check if only averaged NN <J> values are available
if "<J>" in self.ex_params and order == "-nn":
j_exc = self.ex_params["<J>"]
return j_exc
|
mit
|
gfyoung/pandas
|
pandas/tests/indexes/period/test_period_range.py
|
4
|
3954
|
import numpy as np
import pytest
from pandas import NaT, Period, PeriodIndex, date_range, period_range
import pandas._testing as tm
class TestPeriodRange:
@pytest.mark.parametrize("freq", ["D", "W", "M", "Q", "A"])
def test_construction_from_string(self, freq):
# non-empty
expected = date_range(
start="2017-01-01", periods=5, freq=freq, name="foo"
).to_period()
start, end = str(expected[0]), str(expected[-1])
result = period_range(start=start, end=end, freq=freq, name="foo")
tm.assert_index_equal(result, expected)
result = period_range(start=start, periods=5, freq=freq, name="foo")
tm.assert_index_equal(result, expected)
result = period_range(end=end, periods=5, freq=freq, name="foo")
tm.assert_index_equal(result, expected)
# empty
expected = PeriodIndex([], freq=freq, name="foo")
result = period_range(start=start, periods=0, freq=freq, name="foo")
tm.assert_index_equal(result, expected)
result = period_range(end=end, periods=0, freq=freq, name="foo")
tm.assert_index_equal(result, expected)
result = period_range(start=end, end=start, freq=freq, name="foo")
tm.assert_index_equal(result, expected)
def test_construction_from_period(self):
# upsampling
start, end = Period("2017Q1", freq="Q"), Period("2018Q1", freq="Q")
expected = date_range(
start="2017-03-31", end="2018-03-31", freq="M", name="foo"
).to_period()
result = period_range(start=start, end=end, freq="M", name="foo")
tm.assert_index_equal(result, expected)
# downsampling
start, end = Period("2017-1", freq="M"), Period("2019-12", freq="M")
expected = date_range(
start="2017-01-31", end="2019-12-31", freq="Q", name="foo"
).to_period()
result = period_range(start=start, end=end, freq="Q", name="foo")
tm.assert_index_equal(result, expected)
# test for issue # 21793
start, end = Period("2017Q1", freq="Q"), Period("2018Q1", freq="Q")
idx = period_range(start=start, end=end, freq="Q", name="foo")
result = idx == idx.values
expected = np.array([True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
# empty
expected = PeriodIndex([], freq="W", name="foo")
result = period_range(start=start, periods=0, freq="W", name="foo")
tm.assert_index_equal(result, expected)
result = period_range(end=end, periods=0, freq="W", name="foo")
tm.assert_index_equal(result, expected)
result = period_range(start=end, end=start, freq="W", name="foo")
tm.assert_index_equal(result, expected)
def test_errors(self):
# not enough params
msg = (
"Of the three parameters: start, end, and periods, "
"exactly two must be specified"
)
with pytest.raises(ValueError, match=msg):
period_range(start="2017Q1")
with pytest.raises(ValueError, match=msg):
period_range(end="2017Q1")
with pytest.raises(ValueError, match=msg):
period_range(periods=5)
with pytest.raises(ValueError, match=msg):
period_range()
# too many params
with pytest.raises(ValueError, match=msg):
period_range(start="2017Q1", end="2018Q1", periods=8, freq="Q")
# start/end NaT
msg = "start and end must not be NaT"
with pytest.raises(ValueError, match=msg):
period_range(start=NaT, end="2018Q1")
with pytest.raises(ValueError, match=msg):
period_range(start="2017Q1", end=NaT)
# invalid periods param
msg = "periods must be a number, got foo"
with pytest.raises(TypeError, match=msg):
period_range(start="2017Q1", periods="foo")
|
bsd-3-clause
|
rdhyee/PyTables
|
c-blosc/bench/plot-speeds.py
|
11
|
6852
|
"""Script for plotting the results of the 'suite' benchmark.
Invoke without parameters for usage hints.
:Author: Francesc Alted
:Date: 2010-06-01
"""
import matplotlib as mpl
from pylab import *
KB_ = 1024
MB_ = 1024*KB_
GB_ = 1024*MB_
NCHUNKS = 128 # keep in sync with bench.c
linewidth=2
#markers= ['+', ',', 'o', '.', 's', 'v', 'x', '>', '<', '^']
#markers= [ 'x', '+', 'o', 's', 'v', '^', '>', '<', ]
markers= [ 's', 'o', 'v', '^', '+', 'x', '>', '<', '.', ',' ]
markersize = 8
def get_values(filename):
f = open(filename)
values = {"memcpyw": [], "memcpyr": []}
for line in f:
if line.startswith('-->'):
tmp = line.split('-->')[1]
nthreads, size, elsize, sbits, codec = [i for i in tmp.split(', ')]
nthreads, size, elsize, sbits = map(int, (nthreads, size, elsize, sbits))
values["size"] = size * NCHUNKS / MB_;
values["elsize"] = elsize;
values["sbits"] = sbits;
values["codec"] = codec
# New run for nthreads
(ratios, speedsw, speedsr) = ([], [], [])
# Add a new entry for (ratios, speedw, speedr)
values[nthreads] = (ratios, speedsw, speedsr)
#print "-->", nthreads, size, elsize, sbits
elif line.startswith('memcpy(write):'):
tmp = line.split(',')[1]
memcpyw = float(tmp.split(' ')[1])
values["memcpyw"].append(memcpyw)
elif line.startswith('memcpy(read):'):
tmp = line.split(',')[1]
memcpyr = float(tmp.split(' ')[1])
values["memcpyr"].append(memcpyr)
elif line.startswith('comp(write):'):
tmp = line.split(',')[1]
speedw = float(tmp.split(' ')[1])
ratio = float(line.split(':')[-1])
speedsw.append(speedw)
ratios.append(ratio)
elif line.startswith('decomp(read):'):
tmp = line.split(',')[1]
speedr = float(tmp.split(' ')[1])
speedsr.append(speedr)
if "OK" not in line:
print "WARNING! OK not found in decomp line!"
f.close()
return nthreads, values
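# Layout of the returned values dict (illustrative, for a typical 'suite' run):
# {"size": <MB per run>, "elsize": <element size>, "sbits": <shuffle bits>,
#  "codec": <codec name>, "memcpyw": [...], "memcpyr": [...],
#  1: ([ratios], [write speeds], [read speeds]), 2: (...), ...}
# where the integer keys are the thread counts seen in the benchmark output.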
def show_plot(plots, yaxis, legends, gtitle, xmax=None):
    xlabel('Compression ratio')
ylabel('Speed (MB/s)')
title(gtitle)
xlim(0, xmax)
#ylim(0, 10000)
ylim(0, None)
grid(True)
# legends = [f[f.find('-'):f.index('.out')] for f in filenames]
# legends = [l.replace('-', ' ') for l in legends]
#legend([p[0] for p in plots], legends, loc = "upper left")
legend([p[0] for p in plots
if not isinstance(p, mpl.lines.Line2D)],
legends, loc = "best")
#subplots_adjust(bottom=0.2, top=None, wspace=0.2, hspace=0.2)
if outfile:
print "Saving plot to:", outfile
savefig(outfile, dpi=64)
else:
show()
if __name__ == '__main__':
from optparse import OptionParser
usage = "usage: %prog [-r] [-o outfile] [-t title ] [-d|-c] filename"
compress_title = 'Compression speed'
decompress_title = 'Decompression speed'
yaxis = 'No axis name'
parser = OptionParser(usage=usage)
parser.add_option('-o',
'--outfile',
dest='outfile',
help=('filename for output (many extensions '
'supported, e.g. .png, .jpg, .pdf)'))
parser.add_option('-t',
'--title',
dest='title',
help='title of the plot',)
parser.add_option('-l',
'--limit',
dest='limit',
help='expression to limit number of threads shown',)
parser.add_option('-x',
'--xmax',
dest='xmax',
help='limit the x-axis',
default=None)
parser.add_option('-r', '--report', action='store_true',
dest='report',
help='generate file for reporting ',
default=False)
parser.add_option('-d', '--decompress', action='store_true',
dest='dspeed',
help='plot decompression data',
default=False)
parser.add_option('-c', '--compress', action='store_true',
dest='cspeed',
help='plot compression data',
default=False)
(options, args) = parser.parse_args()
if len(args) == 0:
parser.error("No input arguments")
elif len(args) > 1:
parser.error("Too many input arguments")
else:
pass
if options.report and options.outfile:
parser.error("Can only select one of [-r, -o]")
if options.dspeed and options.cspeed:
parser.error("Can only select one of [-d, -c]")
elif options.cspeed:
options.dspeed = False
plot_title = compress_title
else: # either neither or dspeed
options.dspeed = True
plot_title = decompress_title
filename = args[0]
cspeed = options.cspeed
dspeed = options.dspeed
if options.outfile:
outfile = options.outfile
elif options.report:
if cspeed:
outfile = filename[:filename.rindex('.')] + '-compr.png'
else:
outfile = filename[:filename.rindex('.')] + '-decompr.png'
else:
outfile = None
plots = []
legends = []
nthreads, values = get_values(filename)
#print "Values:", values
if options.limit:
thread_range = eval(options.limit)
else:
thread_range = range(1, nthreads+1)
if options.title:
plot_title = options.title
else:
plot_title += " (%(size).1f MB, %(elsize)d bytes, %(sbits)d bits), %(codec)s" % values
gtitle = plot_title
for nt in thread_range:
#print "Values for %s threads --> %s" % (nt, values[nt])
(ratios, speedw, speedr) = values[nt]
if cspeed:
speed = speedw
else:
speed = speedr
#plot_ = semilogx(ratios, speed, linewidth=2)
plot_ = plot(ratios, speed, linewidth=2)
plots.append(plot_)
nmarker = nt
if nt >= len(markers):
nmarker = nt%len(markers)
setp(plot_, marker=markers[nmarker], markersize=markersize,
linewidth=linewidth)
legends.append("%d threads" % nt)
# Add memcpy lines
if cspeed:
mean = np.mean(values["memcpyw"])
message = "memcpy (write to memory)"
else:
mean = np.mean(values["memcpyr"])
message = "memcpy (read from memory)"
plot_ = axhline(mean, linewidth=3, linestyle='-.', color='black')
text(1.0, mean+50, message)
plots.append(plot_)
show_plot(plots, yaxis, legends, gtitle, xmax=int(options.xmax) if
options.xmax else None)
|
bsd-3-clause
|
jmikko/EasyMKL
|
Python/toytest_komd.py
|
1
|
2071
|
"""
@author: Michele Donini
@email: [email protected]
Toy test of the algorithm komd.py.
"""
# Test:
import sys
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics import roc_auc_score
from sklearn.datasets import make_classification
from komd import KOMD
from cvxopt import matrix
import numpy as np
import matplotlib.pyplot as plt
# Binary classification problem
random_state = np.random.RandomState(0)
X, Y = make_classification(n_samples=1000,
n_features=2,
n_informative=2,
n_redundant=0,
n_repeated=0,
n_classes=2,
n_clusters_per_class=2,
weights=None,
flip_y=0.0,
class_sep=1.0,
hypercube=True,
shift=0.0,
scale=1.0,
shuffle=True,
random_state=random_state)
X = matrix(X)
Y = matrix([1.0 if y>0 else -1.0 for y in Y])
# Train & Test:
pertr = 90
idtrain = list(range(0, len(Y) * pertr // 100))
idtest = list(range(len(Y) * pertr // 100, len(Y)))
Ytr = Y[idtrain]
Yte = Y[idtest]
# Settings
ktype = 'rbf' # type of kernel
gamma = 10.0**-1 # RBF parameter
l = 0.1 # lambda of KOMD
# KOMD
classifier = KOMD(lam=l, Kf = ktype, rbf_gamma = gamma)
y_score = classifier.fit(X[idtrain,:], Ytr).decision_function(X[idtest,:])
print('AUC test:', roc_auc_score(np.array(Yte), np.array(y_score)))
# Images, only if the X.size[1]==2 (2 dimensional datasets):
PLOT_THE_CLASS = True
if PLOT_THE_CLASS and X.size[1] == 2:
ranktestnorm = [ (2 * (r - np.min(y_score))) / (np.max(y_score) - np.min(y_score)) - 1.0 for r in y_score]
plt.figure(1)
plt.scatter(X[idtrain, 0], X[idtrain, 1], marker='*', s = 140, c=Ytr, cmap='spring')
plt.scatter(X[idtest, 0], X[idtest, 1], marker='o', s = 180, c=ranktestnorm, cmap='spring')
plt.colorbar()
plt.show()
|
gpl-3.0
|
Aieener/HRE
|
2d.py
|
2
|
1779
|
#Author: Yuding Ai
#2015-July-15
#Visualize 2D hard rod
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import numpy as np
fig = plt.figure()
ax = fig.add_subplot(111,aspect='equal')
# ================================ Draw Ver Rods ===========================
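# Each plot file is read twice: a first pass only counts lines so fixed-size
# numpy arrays can be allocated, a second pass fills in the (x, y) anchors, and
# a 1-unit-wide, 8-unit-tall rectangle is then drawn at every anchor (the
# horizontal-rod block below does the same with width 8 and height 1).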
a = 0
with open("2dplotv.txt", "r") as file:
for line in file:
a= a+1
xpos = np.zeros(a)
ypos = np.zeros(a)
i = 0
with open("2dplotv.txt", "r") as file:
for line in file:
words = line.split()
wx = words[0]
wy = words[1]
xpos[i] = wx
ypos[i] = wy
i = i+1
dx = np.ones(a)
dy = np.ones(a)
for y in range(0,a):
dy[y] = 8 # length
if a != 0:
ax.add_patch(
patches.Rectangle(
(xpos[y], ypos[y]),
dx[y],
dy[y],
facecolor="red",
linewidth=0.3
)
)
# ================================ Draw Hor Rods ===========================
a = 0
with open("2dploth.txt", "r") as file:
for line in file:
a= a+1
xpos = np.zeros(a)
ypos = np.zeros(a)
i = 0
with open("2dploth.txt", "r") as file:
for line in file:
words = line.split()
wx = words[0]
wy = words[1]
xpos[i] = wx
ypos[i] = wy
i = i+1
dx = np.ones(a)
dy = np.ones(a)
for y in range(0,a):
dx[y] = 8 # length
if a != 0:
ax.add_patch(
patches.Rectangle(
(xpos[y], ypos[y]),
dx[y],
dy[y],
facecolor="blue",
linewidth=0.3
)
)
plt.axis('equal')
plt.grid()
fig.savefig('2dplot.png', dpi=300, bbox_inches='tight')
# plt.show()
|
mit
|
GGoussar/scikit-image
|
doc/examples/color_exposure/plot_equalize.py
|
7
|
3034
|
"""
======================
Histogram Equalization
======================
This examples enhances an image with low contrast, using a method called
*histogram equalization*, which "spreads out the most frequent intensity
values" in an image [1]_. The equalized image has a roughly linear cumulative
distribution function.
While histogram equalization has the advantage that it requires no parameters,
it sometimes yields unnatural looking images. An alternative method is
*contrast stretching*, where the image is rescaled to include all intensities
that fall within the 2nd and 98th percentiles [2]_.
.. [1] http://en.wikipedia.org/wiki/Histogram_equalization
.. [2] http://homepages.inf.ed.ac.uk/rbf/HIPR2/stretch.htm
"""
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from skimage import data, img_as_float
from skimage import exposure
matplotlib.rcParams['font.size'] = 8
def plot_img_and_hist(img, axes, bins=256):
"""Plot an image along with its histogram and cumulative histogram.
"""
img = img_as_float(img)
ax_img, ax_hist = axes
ax_cdf = ax_hist.twinx()
# Display image
ax_img.imshow(img, cmap=plt.cm.gray)
ax_img.set_axis_off()
ax_img.set_adjustable('box-forced')
# Display histogram
ax_hist.hist(img.ravel(), bins=bins, histtype='step', color='black')
ax_hist.ticklabel_format(axis='y', style='scientific', scilimits=(0, 0))
ax_hist.set_xlabel('Pixel intensity')
ax_hist.set_xlim(0, 1)
ax_hist.set_yticks([])
# Display cumulative distribution
img_cdf, bins = exposure.cumulative_distribution(img, bins)
ax_cdf.plot(bins, img_cdf, 'r')
ax_cdf.set_yticks([])
return ax_img, ax_hist, ax_cdf
# Load an example image
img = data.moon()
# Contrast stretching
p2, p98 = np.percentile(img, (2, 98))
img_rescale = exposure.rescale_intensity(img, in_range=(p2, p98))
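# Illustrative reading of the call above: rescale_intensity linearly maps the
# [p2, p98] intensity window onto the full output range, saturating the darkest
# and brightest ~2% of pixels, as described in the docstring at the top.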
# Equalization
img_eq = exposure.equalize_hist(img)
# Adaptive Equalization
img_adapteq = exposure.equalize_adapthist(img, clip_limit=0.03)
# Display results
fig = plt.figure(figsize=(8, 5))
axes = np.zeros((2, 4), dtype=object)
axes[0, 0] = fig.add_subplot(2, 4, 1)
for i in range(1, 4):
axes[0, i] = fig.add_subplot(2, 4, 1+i, sharex=axes[0,0], sharey=axes[0,0])
for i in range(0, 4):
axes[1, i] = fig.add_subplot(2, 4, 5+i)
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img, axes[:, 0])
ax_img.set_title('Low contrast image')
y_min, y_max = ax_hist.get_ylim()
ax_hist.set_ylabel('Number of pixels')
ax_hist.set_yticks(np.linspace(0, y_max, 5))
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_rescale, axes[:, 1])
ax_img.set_title('Contrast stretching')
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_eq, axes[:, 2])
ax_img.set_title('Histogram equalization')
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_adapteq, axes[:, 3])
ax_img.set_title('Adaptive equalization')
ax_cdf.set_ylabel('Fraction of total intensity')
ax_cdf.set_yticks(np.linspace(0, 1, 5))
# prevent overlap of y-axis labels
fig.tight_layout()
plt.show()
|
bsd-3-clause
|
ephes/scikit-learn
|
sklearn/lda.py
|
72
|
17751
|
"""
Linear Discriminant Analysis (LDA)
"""
# Authors: Clemens Brunner
# Martin Billinger
# Matthieu Perrot
# Mathieu Blondel
# License: BSD 3-Clause
from __future__ import print_function
import warnings
import numpy as np
from scipy import linalg
from .externals.six import string_types
from .base import BaseEstimator, TransformerMixin
from .linear_model.base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
from .preprocessing import StandardScaler
__all__ = ['LDA']
def _cov(X, shrinkage=None):
"""Estimate covariance matrix (using optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None or 'empirical': no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
s : array, shape (n_features, n_features)
Estimated covariance matrix.
"""
shrinkage = "empirical" if shrinkage is None else shrinkage
if isinstance(shrinkage, string_types):
if shrinkage == 'auto':
sc = StandardScaler() # standardize features
X = sc.fit_transform(X)
s = ledoit_wolf(X)[0]
s = sc.std_[:, np.newaxis] * s * sc.std_[np.newaxis, :] # rescale
elif shrinkage == 'empirical':
s = empirical_covariance(X)
else:
raise ValueError('unknown shrinkage parameter')
elif isinstance(shrinkage, float) or isinstance(shrinkage, int):
if shrinkage < 0 or shrinkage > 1:
raise ValueError('shrinkage parameter must be between 0 and 1')
s = shrunk_covariance(empirical_covariance(X), shrinkage)
else:
        raise TypeError('shrinkage must be of string or float type')
return s
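# Illustrative calls (not part of the original module):
#   _cov(X)          -> plain empirical covariance
#   _cov(X, 'auto')  -> Ledoit-Wolf shrinkage on standardized features
#   _cov(X, 0.1)     -> shrunk_covariance with a fixed coefficient of 0.1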
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
    means : array-like, shape (n_classes, n_features)
Class means.
"""
means = []
classes = np.unique(y)
for group in classes:
Xg = X[y == group, :]
means.append(Xg.mean(0))
return np.asarray(means)
def _class_cov(X, y, priors=None, shrinkage=None):
"""Compute class covariance matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
priors : array-like, shape (n_classes,)
Class priors.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
cov : array-like, shape (n_features, n_features)
Class covariance matrix.
"""
classes = np.unique(y)
covs = []
for group in classes:
Xg = X[y == group, :]
covs.append(np.atleast_2d(_cov(Xg, shrinkage)))
return np.average(covs, axis=0, weights=priors)
class LDA(BaseEstimator, LinearClassifierMixin, TransformerMixin):
"""Linear Discriminant Analysis (LDA).
A classifier with a linear decision boundary, generated by fitting class
conditional densities to the data and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that all classes
share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality of the input
by projecting it to the most discriminative directions.
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
solver : string, optional
Solver to use, possible values:
- 'svd': Singular value decomposition (default). Does not compute the
covariance matrix, therefore this solver is recommended for
data with a large number of features.
- 'lsqr': Least squares solution, can be combined with shrinkage.
- 'eigen': Eigenvalue decomposition, can be combined with shrinkage.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
priors : array, optional, shape (n_classes,)
Class priors.
n_components : int, optional
Number of components (< n_classes - 1) for dimensionality reduction.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
tol : float, optional
Threshold used for rank estimation in SVD solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
    intercept_ : array, shape (n_classes,)
Intercept term.
covariance_ : array-like, shape (n_features, n_features)
Covariance matrix (shared by all classes).
means_ : array-like, shape (n_classes, n_features)
Class means.
priors_ : array-like, shape (n_classes,)
Class priors (sum to 1).
scalings_ : array-like, shape (rank, n_classes - 1)
Scaling of the features in the space spanned by the class centroids.
xbar_ : array-like, shape (n_features,)
Overall mean.
classes_ : array-like, shape (n_classes,)
Unique class labels.
See also
--------
sklearn.qda.QDA: Quadratic discriminant analysis
Notes
-----
The default solver is 'svd'. It can perform both classification and
transform, and it does not rely on the calculation of the covariance
matrix. This can be an advantage in situations where the number of features
is large. However, the 'svd' solver cannot be used with shrinkage.
The 'lsqr' solver is an efficient algorithm that only works for
classification. It supports shrinkage.
The 'eigen' solver is based on the optimization of the between class
scatter to within class scatter ratio. It can be used for both
classification and transform, and it supports shrinkage. However, the
'eigen' solver needs to compute the covariance matrix, so it might not be
suitable for situations with a high number of features.
Examples
--------
>>> import numpy as np
>>> from sklearn.lda import LDA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LDA()
>>> clf.fit(X, y)
LDA(n_components=None, priors=None, shrinkage=None, solver='svd',
store_covariance=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
def __init__(self, solver='svd', shrinkage=None, priors=None,
n_components=None, store_covariance=False, tol=1e-4):
self.solver = solver
self.shrinkage = shrinkage
self.priors = priors
self.n_components = n_components
self.store_covariance = store_covariance # used only in svd solver
self.tol = tol # used only in svd solver
def _solve_lsqr(self, X, y, shrinkage):
"""Least squares solver.
The least squares solver computes a straightforward solution of the
optimal decision rule based directly on the discriminant functions. It
can only be used for classification (with optional shrinkage), because
estimation of eigenvectors is not performed. Therefore, dimensionality
reduction with the transform is not supported.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_classes)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Notes
-----
This solver is based on [1]_, section 2.6.2, pp. 39-41.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
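    # Closed form used above (sketch): the rows of coef_ are (Sigma^-1 mu_k)^T,
    # obtained with lstsq rather than an explicit inverse, and
    #   intercept_k = -0.5 * mu_k^T Sigma^-1 mu_k + log(prior_k),
    # i.e. the standard LDA discriminant for a shared covariance Sigma.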
def _solve_eigen(self, X, y, shrinkage):
"""Eigenvalue solver.
The eigenvalue solver computes the optimal solution of the Rayleigh
coefficient (basically the ratio of between class scatter to within
class scatter). This solver supports both classification and
dimensionality reduction (with optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage constant.
Notes
-----
This solver is based on [1]_, section 3.8.3, pp. 121-124.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
Sw = self.covariance_ # within scatter
St = _cov(X, shrinkage) # total scatter
Sb = St - Sw # between scatter
evals, evecs = linalg.eigh(Sb, Sw)
evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
# evecs /= np.linalg.norm(evecs, axis=0) # doesn't work with numpy 1.6
evecs /= np.apply_along_axis(np.linalg.norm, 0, evecs)
self.scalings_ = evecs
self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
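    # linalg.eigh(Sb, Sw) above solves the generalized eigenproblem
    # Sb v = lambda Sw v; keeping eigenvectors in decreasing eigenvalue order
    # maximizes the between-class to within-class scatter ratio along the
    # leading discriminant directions.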
def _solve_svd(self, X, y, store_covariance=False, tol=1.0e-4):
"""SVD solver.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
tol : float, optional
Threshold used for rank estimation.
"""
n_samples, n_features = X.shape
n_classes = len(self.classes_)
self.means_ = _class_means(X, y)
if store_covariance:
self.covariance_ = _class_cov(X, y, self.priors_)
Xc = []
for idx, group in enumerate(self.classes_):
Xg = X[y == group, :]
Xc.append(Xg - self.means_[idx])
self.xbar_ = np.dot(self.priors_, self.means_)
Xc = np.concatenate(Xc, axis=0)
        # 1) within-class (univariate) scaling by the within-class std-dev
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = 1. / (n_samples - n_classes)
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc / std)
        # SVD of centered (within-class scaled) data
U, S, V = linalg.svd(X, full_matrices=False)
rank = np.sum(S > tol)
if rank < n_features:
warnings.warn("Variables are collinear.")
# Scaling of within covariance is: V' 1/S
scalings = (V[:rank] / std).T / S[:rank]
# 3) Between variance scaling
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(self.means_ - self.xbar_).T).T, scalings)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use SVD to find projection in the space spanned by the
# (n_classes) centers
_, S, V = linalg.svd(X, full_matrices=0)
rank = np.sum(S > tol * S[0])
self.scalings_ = np.dot(scalings, V.T[:, :rank])
coef = np.dot(self.means_ - self.xbar_, self.scalings_)
self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1)
+ np.log(self.priors_))
self.coef_ = np.dot(coef, self.scalings_.T)
self.intercept_ -= np.dot(self.xbar_, self.coef_.T)
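    # Summary of the SVD route implemented above: (1) whiten the pooled
    # within-class data via per-feature std and an SVD, (2) project the
    # weighted, centered class means into that whitened space, (3) a second
    # SVD of those projected means yields the discriminant directions kept in
    # scalings_.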
def fit(self, X, y, store_covariance=False, tol=1.0e-4):
"""Fit LDA model according to the given training data and parameters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array, shape (n_samples,)
Target values.
"""
if store_covariance:
warnings.warn("'store_covariance' was moved to the __init__()"
"method in version 0.16 and will be removed from"
"fit() in version 0.18.", DeprecationWarning)
else:
store_covariance = self.store_covariance
if tol != 1.0e-4:
warnings.warn("'tol' was moved to __init__() method in version"
" 0.16 and will be removed from fit() in 0.18",
DeprecationWarning)
self.tol = tol
X, y = check_X_y(X, y)
self.classes_ = unique_labels(y)
if self.priors is None: # estimate priors from sample
_, y_t = np.unique(y, return_inverse=True) # non-negative ints
self.priors_ = bincount(y_t) / float(len(y))
else:
self.priors_ = self.priors
if self.solver == 'svd':
if self.shrinkage is not None:
raise NotImplementedError('shrinkage not supported')
self._solve_svd(X, y, store_covariance=store_covariance, tol=tol)
elif self.solver == 'lsqr':
self._solve_lsqr(X, y, shrinkage=self.shrinkage)
elif self.solver == 'eigen':
self._solve_eigen(X, y, shrinkage=self.shrinkage)
else:
raise ValueError("unknown solver {} (valid solvers are 'svd', "
"'lsqr', and 'eigen').".format(self.solver))
if self.classes_.size == 2: # treat binary case as a special case
self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2)
self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
ndmin=1)
return self
def transform(self, X):
"""Project data to maximize class separation.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data.
"""
if self.solver == 'lsqr':
raise NotImplementedError("transform not implemented for 'lsqr' "
"solver (use 'svd' or 'eigen').")
check_is_fitted(self, ['xbar_', 'scalings_'], all_or_any=any)
X = check_array(X)
if self.solver == 'svd':
X_new = np.dot(X - self.xbar_, self.scalings_)
elif self.solver == 'eigen':
X_new = np.dot(X, self.scalings_)
n_components = X.shape[1] if self.n_components is None \
else self.n_components
return X_new[:, :n_components]
def predict_proba(self, X):
"""Estimate probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated probabilities.
"""
prob = self.decision_function(X)
prob *= -1
np.exp(prob, prob)
prob += 1
np.reciprocal(prob, prob)
if len(self.classes_) == 2: # binary case
return np.column_stack([1 - prob, prob])
else:
# OvR normalization, like LibLinear's predict_probability
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
return prob
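    # The in-place block above applies a logistic sigmoid to the decision
    # scores, prob = 1 / (1 + exp(-score)); with more than two classes the
    # columns are then renormalized one-vs-rest so each row sums to 1.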
def predict_log_proba(self, X):
"""Estimate log probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated log probabilities.
"""
return np.log(self.predict_proba(X))
|
bsd-3-clause
|
strubell/nlp-class-proj
|
analysis.py
|
1
|
4594
|
# Analyze the POS and NER results, look for correlations between them
import numpy as np
import matplotlib.pyplot as plt
import operator
class Token:
def __init__(self, word, postag, gold_postag, nertag, gold_nertag):
self.word = word
self.postag = postag
self.gold_postag = gold_postag
self.nertag = nertag
self.gold_nertag = gold_nertag
class Sentence:
def __init__(self, tokens):
self.tokens = tokens
class Document:
def __init__(self, sentences):
self.sentences = sentences
self.ner_incorrect = []
self.pos_incorrect = []
self.ner_incorrect_pos_correct = []
self.ner_correct_pos_incorrect = []
self.both_incorrect = []
def read_output_file(filename):
f = open(filename, 'r')
sentences = []
tokens = []
for line in f:
line = line.strip()
if line == "":
sentences.append(Sentence(tokens))
tokens = []
else:
# line = line.replace("(", "")
# line = line.replace(")", "")
spline = line.split('~*~')
# print(spline)
tokens.append(Token(spline[0], spline[1], spline[2], spline[3], spline[4]))
return Document(sentences)
def collate_errors(doc):
# Go through each of the sentence and mark the type of error that occurs
for s in doc.sentences:
for t in s.tokens:
pos_c = t.postag == t.gold_postag
ner_c = t.nertag == t.gold_nertag
if not ner_c:
doc.ner_incorrect.append(t)
if not pos_c:
doc.pos_incorrect.append(t)
if ner_c and not pos_c:
doc.ner_correct_pos_incorrect.append(t)
if not ner_c and pos_c:
doc.ner_incorrect_pos_correct.append(t)
if not ner_c and not pos_c:
doc.both_incorrect.append(t)
def report_errors_4key(errors):
report = {}
for e in errors:
key = "{0} {1} {2} {3}".format(e.postag, e.gold_postag, e.nertag, e.gold_nertag)
# key = "{2} {3}".format(e.postag, e.gold_postag, e.nertag, e.gold_nertag)
# key = "{0} {1}".format(e.postag, e.gold_postag, e.nertag, e.gold_nertag)
if key not in report:
report[key] = 0
report[key] += 1
    sorted_report = sorted(report.items(), key=operator.itemgetter(1))
sorted_report.reverse()
return sorted_report
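# Example of a 4-part key produced above (hypothetical tags):
# "NNP NN B-PER O" = predicted POS, gold POS, predicted NER, gold NER,
# mapped to a count of tokens with exactly that error combination.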
def report_errors_nerkey(errors):
report = {}
for e in errors:
# key = "{0} {1} {2} {3}".format(e.postag, e.gold_postag, e.nertag, e.gold_nertag)
key = "{2} {3}".format(e.postag, e.gold_postag, e.nertag, e.gold_nertag)
# key = "{0} {1}".format(e.postag, e.gold_postag, e.nertag, e.gold_nertag)
if key not in report:
report[key] = 0
report[key] += 1
    sorted_report = sorted(report.items(), key=operator.itemgetter(1))
sorted_report.reverse()
return sorted_report
def report_errors_poskey(errors):
report = {}
for e in errors:
# key = "{0} {1} {2} {3}".format(e.postag, e.gold_postag, e.nertag, e.gold_nertag)
# key = "{2} {3}".format(e.postag, e.gold_postag, e.nertag, e.gold_nertag)
key = "{0} {1}".format(e.postag, e.gold_postag, e.nertag, e.gold_nertag)
if key not in report:
report[key] = 0
report[key] += 1
    sorted_report = sorted(report.items(), key=operator.itemgetter(1))
sorted_report.reverse()
return sorted_report
def write_report(filename, report):
    with open(filename, 'w') as f:
        for tok in report:
            f.write("\t".join(map(str, tok)) + "\n")
doc = read_output_file("ner-tag-output.txt")
collate_errors(doc)
write_report("both_incorrect.txt", report_errors_4key(doc.both_incorrect))
write_report("ner_incorrect.txt", report_errors_4key(doc.ner_incorrect))
write_report("pos_incorrect.txt", report_errors_4key(doc.pos_incorrect))
write_report("ner_correct_pos_incorrect.txt", report_errors_4key(doc.ner_correct_pos_incorrect))
write_report("ner_incorrect_pos_correct.txt", report_errors_4key(doc.ner_incorrect_pos_correct))
write_report("both_incorrect_nerkey.txt", report_errors_nerkey(doc.both_incorrect))
write_report("ner_incorrect_nerkey.txt", report_errors_nerkey(doc.ner_incorrect))
write_report("pos_incorrect_nerkey.txt", report_errors_nerkey(doc.pos_incorrect))
write_report("ner_correct_pos_incorrect_nerkey.txt", report_errors_nerkey(doc.ner_correct_pos_incorrect))
write_report("ner_incorrect_pos_correct_nerkey.txt", report_errors_nerkey(doc.ner_incorrect_pos_correct))
write_report("both_incorrect_poskey.txt", report_errors_poskey(doc.both_incorrect))
write_report("ner_incorrect_poskey.txt", report_errors_poskey(doc.ner_incorrect))
write_report("pos_incorrect_poskey.txt", report_errors_poskey(doc.pos_incorrect))
write_report("ner_correct_pos_incorrect_poskey.txt", report_errors_poskey(doc.ner_correct_pos_incorrect))
write_report("ner_incorrect_pos_correct_poskey.txt", report_errors_poskey(doc.ner_incorrect_pos_correct))
|
apache-2.0
|
nmartensen/pandas
|
pandas/tests/io/formats/test_style.py
|
4
|
39005
|
import copy
import textwrap
import re
import pytest
import numpy as np
import pandas as pd
from pandas import DataFrame
import pandas.util.testing as tm
jinja2 = pytest.importorskip('jinja2')
from pandas.io.formats.style import Styler, _get_level_lengths # noqa
class TestStyler(object):
def setup_method(self, method):
np.random.seed(24)
self.s = DataFrame({'A': np.random.permutation(range(6))})
self.df = DataFrame({'A': [0, 1], 'B': np.random.randn(2)})
self.f = lambda x: x
self.g = lambda x: x
def h(x, foo='bar'):
return pd.Series(['color: %s' % foo], index=x.index, name=x.name)
self.h = h
self.styler = Styler(self.df)
self.attrs = pd.DataFrame({'A': ['color: red', 'color: blue']})
self.dataframes = [
self.df,
pd.DataFrame({'f': [1., 2.], 'o': ['a', 'b'],
'c': pd.Categorical(['a', 'b'])})
]
def test_init_non_pandas(self):
with pytest.raises(TypeError):
Styler([1, 2, 3])
def test_init_series(self):
result = Styler(pd.Series([1, 2]))
assert result.data.ndim == 2
def test_repr_html_ok(self):
self.styler._repr_html_()
def test_update_ctx(self):
self.styler._update_ctx(self.attrs)
expected = {(0, 0): ['color: red'],
(1, 0): ['color: blue']}
assert self.styler.ctx == expected
def test_update_ctx_flatten_multi(self):
attrs = DataFrame({"A": ['color: red; foo: bar',
'color: blue; foo: baz']})
self.styler._update_ctx(attrs)
expected = {(0, 0): ['color: red', ' foo: bar'],
(1, 0): ['color: blue', ' foo: baz']}
assert self.styler.ctx == expected
    def test_update_ctx_flatten_multi_trailing_semi(self):
attrs = DataFrame({"A": ['color: red; foo: bar;',
'color: blue; foo: baz;']})
self.styler._update_ctx(attrs)
expected = {(0, 0): ['color: red', ' foo: bar'],
(1, 0): ['color: blue', ' foo: baz']}
assert self.styler.ctx == expected
def test_copy(self):
s2 = copy.copy(self.styler)
assert self.styler is not s2
assert self.styler.ctx is s2.ctx # shallow
assert self.styler._todo is s2._todo
self.styler._update_ctx(self.attrs)
self.styler.highlight_max()
assert self.styler.ctx == s2.ctx
assert self.styler._todo == s2._todo
def test_deepcopy(self):
s2 = copy.deepcopy(self.styler)
assert self.styler is not s2
assert self.styler.ctx is not s2.ctx
assert self.styler._todo is not s2._todo
self.styler._update_ctx(self.attrs)
self.styler.highlight_max()
assert self.styler.ctx != s2.ctx
assert s2._todo == []
assert self.styler._todo != s2._todo
def test_clear(self):
s = self.df.style.highlight_max()._compute()
assert len(s.ctx) > 0
assert len(s._todo) > 0
s.clear()
assert len(s.ctx) == 0
assert len(s._todo) == 0
def test_render(self):
df = pd.DataFrame({"A": [0, 1]})
style = lambda x: pd.Series(["color: red", "color: blue"], name=x.name)
s = Styler(df, uuid='AB').apply(style)
s.render()
# it worked?
def test_render_empty_dfs(self):
empty_df = DataFrame()
es = Styler(empty_df)
es.render()
# An index but no columns
DataFrame(columns=['a']).style.render()
# A column but no index
DataFrame(index=['a']).style.render()
# No IndexError raised?
def test_render_double(self):
df = pd.DataFrame({"A": [0, 1]})
style = lambda x: pd.Series(["color: red; border: 1px",
"color: blue; border: 2px"], name=x.name)
s = Styler(df, uuid='AB').apply(style)
s.render()
# it worked?
def test_set_properties(self):
df = pd.DataFrame({"A": [0, 1]})
result = df.style.set_properties(color='white',
size='10px')._compute().ctx
# order is deterministic
v = ["color: white", "size: 10px"]
expected = {(0, 0): v, (1, 0): v}
assert result.keys() == expected.keys()
for v1, v2 in zip(result.values(), expected.values()):
assert sorted(v1) == sorted(v2)
def test_set_properties_subset(self):
df = pd.DataFrame({'A': [0, 1]})
result = df.style.set_properties(subset=pd.IndexSlice[0, 'A'],
color='white')._compute().ctx
expected = {(0, 0): ['color: white']}
assert result == expected
def test_empty_index_name_doesnt_display(self):
# https://github.com/pandas-dev/pandas/pull/12090#issuecomment-180695902
df = pd.DataFrame({'A': [1, 2], 'B': [3, 4], 'C': [5, 6]})
result = df.style._translate()
expected = [[{'class': 'blank level0', 'type': 'th', 'value': '',
'is_visible': True, 'display_value': ''},
{'class': 'col_heading level0 col0',
'display_value': 'A',
'type': 'th',
'value': 'A',
'is_visible': True,
},
{'class': 'col_heading level0 col1',
'display_value': 'B',
'type': 'th',
'value': 'B',
'is_visible': True,
},
{'class': 'col_heading level0 col2',
'display_value': 'C',
'type': 'th',
'value': 'C',
'is_visible': True,
}]]
assert result['head'] == expected
def test_index_name(self):
# https://github.com/pandas-dev/pandas/issues/11655
df = pd.DataFrame({'A': [1, 2], 'B': [3, 4], 'C': [5, 6]})
result = df.set_index('A').style._translate()
expected = [[{'class': 'blank level0', 'type': 'th', 'value': '',
'display_value': '', 'is_visible': True},
{'class': 'col_heading level0 col0', 'type': 'th',
'value': 'B', 'display_value': 'B', 'is_visible': True},
{'class': 'col_heading level0 col1', 'type': 'th',
'value': 'C', 'display_value': 'C', 'is_visible': True}],
[{'class': 'index_name level0', 'type': 'th',
'value': 'A'},
{'class': 'blank', 'type': 'th', 'value': ''},
{'class': 'blank', 'type': 'th', 'value': ''}]]
assert result['head'] == expected
def test_multiindex_name(self):
# https://github.com/pandas-dev/pandas/issues/11655
df = pd.DataFrame({'A': [1, 2], 'B': [3, 4], 'C': [5, 6]})
result = df.set_index(['A', 'B']).style._translate()
expected = [[
{'class': 'blank', 'type': 'th', 'value': '',
'display_value': '', 'is_visible': True},
{'class': 'blank level0', 'type': 'th', 'value': '',
'display_value': '', 'is_visible': True},
{'class': 'col_heading level0 col0', 'type': 'th',
'value': 'C', 'display_value': 'C', 'is_visible': True}],
[{'class': 'index_name level0', 'type': 'th',
'value': 'A'},
{'class': 'index_name level1', 'type': 'th',
'value': 'B'},
{'class': 'blank', 'type': 'th', 'value': ''}]]
assert result['head'] == expected
def test_numeric_columns(self):
# https://github.com/pandas-dev/pandas/issues/12125
# smoke test for _translate
df = pd.DataFrame({0: [1, 2, 3]})
df.style._translate()
def test_apply_axis(self):
df = pd.DataFrame({'A': [0, 0], 'B': [1, 1]})
f = lambda x: ['val: %s' % x.max() for v in x]
result = df.style.apply(f, axis=1)
assert len(result._todo) == 1
assert len(result.ctx) == 0
result._compute()
expected = {(0, 0): ['val: 1'], (0, 1): ['val: 1'],
(1, 0): ['val: 1'], (1, 1): ['val: 1']}
assert result.ctx == expected
result = df.style.apply(f, axis=0)
expected = {(0, 0): ['val: 0'], (0, 1): ['val: 1'],
(1, 0): ['val: 0'], (1, 1): ['val: 1']}
result._compute()
assert result.ctx == expected
result = df.style.apply(f) # default
result._compute()
assert result.ctx == expected
def test_apply_subset(self):
axes = [0, 1]
slices = [pd.IndexSlice[:], pd.IndexSlice[:, ['A']],
pd.IndexSlice[[1], :], pd.IndexSlice[[1], ['A']],
pd.IndexSlice[:2, ['A', 'B']]]
for ax in axes:
for slice_ in slices:
result = self.df.style.apply(self.h, axis=ax, subset=slice_,
foo='baz')._compute().ctx
expected = dict(((r, c), ['color: baz'])
for r, row in enumerate(self.df.index)
for c, col in enumerate(self.df.columns)
if row in self.df.loc[slice_].index and
col in self.df.loc[slice_].columns)
assert result == expected
def test_applymap_subset(self):
def f(x):
return 'foo: bar'
slices = [pd.IndexSlice[:], pd.IndexSlice[:, ['A']],
pd.IndexSlice[[1], :], pd.IndexSlice[[1], ['A']],
pd.IndexSlice[:2, ['A', 'B']]]
for slice_ in slices:
result = self.df.style.applymap(f, subset=slice_)._compute().ctx
expected = dict(((r, c), ['foo: bar'])
for r, row in enumerate(self.df.index)
for c, col in enumerate(self.df.columns)
if row in self.df.loc[slice_].index and
col in self.df.loc[slice_].columns)
assert result == expected
def test_where_with_one_style(self):
# GH 17474
def f(x):
return x > 0.5
style1 = 'foo: bar'
result = self.df.style.where(f, style1)._compute().ctx
expected = dict(((r, c),
[style1 if f(self.df.loc[row, col]) else ''])
for r, row in enumerate(self.df.index)
for c, col in enumerate(self.df.columns))
assert result == expected
def test_where_subset(self):
# GH 17474
def f(x):
return x > 0.5
style1 = 'foo: bar'
style2 = 'baz: foo'
slices = [pd.IndexSlice[:], pd.IndexSlice[:, ['A']],
pd.IndexSlice[[1], :], pd.IndexSlice[[1], ['A']],
pd.IndexSlice[:2, ['A', 'B']]]
for slice_ in slices:
result = self.df.style.where(f, style1, style2,
subset=slice_)._compute().ctx
expected = dict(((r, c),
[style1 if f(self.df.loc[row, col]) else style2])
for r, row in enumerate(self.df.index)
for c, col in enumerate(self.df.columns)
if row in self.df.loc[slice_].index and
col in self.df.loc[slice_].columns)
assert result == expected
def test_where_subset_compare_with_applymap(self):
# GH 17474
def f(x):
return x > 0.5
style1 = 'foo: bar'
style2 = 'baz: foo'
def g(x):
return style1 if f(x) else style2
slices = [pd.IndexSlice[:], pd.IndexSlice[:, ['A']],
pd.IndexSlice[[1], :], pd.IndexSlice[[1], ['A']],
pd.IndexSlice[:2, ['A', 'B']]]
for slice_ in slices:
result = self.df.style.where(f, style1, style2,
subset=slice_)._compute().ctx
expected = self.df.style.applymap(g, subset=slice_)._compute().ctx
assert result == expected
def test_empty(self):
df = pd.DataFrame({'A': [1, 0]})
s = df.style
s.ctx = {(0, 0): ['color: red'],
(1, 0): ['']}
result = s._translate()['cellstyle']
expected = [{'props': [['color', ' red']], 'selector': 'row0_col0'},
{'props': [['', '']], 'selector': 'row1_col0'}]
assert result == expected
def test_bar_align_left(self):
df = pd.DataFrame({'A': [0, 1, 2]})
result = df.style.bar()._compute().ctx
expected = {
(0, 0): ['width: 10em', ' height: 80%'],
(1, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient('
'90deg,#d65f5f 50.0%, transparent 0%)'],
(2, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient('
'90deg,#d65f5f 100.0%, transparent 0%)']
}
assert result == expected
result = df.style.bar(color='red', width=50)._compute().ctx
expected = {
(0, 0): ['width: 10em', ' height: 80%'],
(1, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient('
'90deg,red 25.0%, transparent 0%)'],
(2, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient('
'90deg,red 50.0%, transparent 0%)']
}
assert result == expected
df['C'] = ['a'] * len(df)
result = df.style.bar(color='red', width=50)._compute().ctx
assert result == expected
df['C'] = df['C'].astype('category')
result = df.style.bar(color='red', width=50)._compute().ctx
assert result == expected
def test_bar_align_left_0points(self):
df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
result = df.style.bar()._compute().ctx
expected = {(0, 0): ['width: 10em', ' height: 80%'],
(0, 1): ['width: 10em', ' height: 80%'],
(0, 2): ['width: 10em', ' height: 80%'],
(1, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 50.0%,'
' transparent 0%)'],
(1, 1): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 50.0%,'
' transparent 0%)'],
(1, 2): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 50.0%,'
' transparent 0%)'],
(2, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 100.0%'
', transparent 0%)'],
(2, 1): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 100.0%'
', transparent 0%)'],
(2, 2): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 100.0%'
', transparent 0%)']}
assert result == expected
result = df.style.bar(axis=1)._compute().ctx
expected = {(0, 0): ['width: 10em', ' height: 80%'],
(0, 1): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 50.0%,'
' transparent 0%)'],
(0, 2): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 100.0%'
', transparent 0%)'],
(1, 0): ['width: 10em', ' height: 80%'],
(1, 1): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 50.0%'
', transparent 0%)'],
(1, 2): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 100.0%'
', transparent 0%)'],
(2, 0): ['width: 10em', ' height: 80%'],
(2, 1): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 50.0%'
', transparent 0%)'],
(2, 2): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 100.0%'
', transparent 0%)']}
assert result == expected
def test_bar_align_mid_pos_and_neg(self):
df = pd.DataFrame({'A': [-10, 0, 20, 90]})
result = df.style.bar(align='mid', color=[
'#d65f5f', '#5fba7d'])._compute().ctx
expected = {(0, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 0.0%, #d65f5f 0.0%, '
'#d65f5f 10.0%, transparent 10.0%)'],
(1, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 10.0%, '
'#d65f5f 10.0%, #d65f5f 10.0%, '
'transparent 10.0%)'],
(2, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 10.0%, #5fba7d 10.0%'
', #5fba7d 30.0%, transparent 30.0%)'],
(3, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 10.0%, '
'#5fba7d 10.0%, #5fba7d 100.0%, '
'transparent 100.0%)']}
assert result == expected
def test_bar_align_mid_all_pos(self):
df = pd.DataFrame({'A': [10, 20, 50, 100]})
result = df.style.bar(align='mid', color=[
'#d65f5f', '#5fba7d'])._compute().ctx
expected = {(0, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 0.0%, #5fba7d 0.0%, '
'#5fba7d 10.0%, transparent 10.0%)'],
(1, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 0.0%, #5fba7d 0.0%, '
'#5fba7d 20.0%, transparent 20.0%)'],
(2, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 0.0%, #5fba7d 0.0%, '
'#5fba7d 50.0%, transparent 50.0%)'],
(3, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 0.0%, #5fba7d 0.0%, '
'#5fba7d 100.0%, transparent 100.0%)']}
assert result == expected
def test_bar_align_mid_all_neg(self):
df = pd.DataFrame({'A': [-100, -60, -30, -20]})
result = df.style.bar(align='mid', color=[
'#d65f5f', '#5fba7d'])._compute().ctx
expected = {(0, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 0.0%, '
'#d65f5f 0.0%, #d65f5f 100.0%, '
'transparent 100.0%)'],
(1, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 40.0%, '
'#d65f5f 40.0%, #d65f5f 100.0%, '
'transparent 100.0%)'],
(2, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 70.0%, '
'#d65f5f 70.0%, #d65f5f 100.0%, '
'transparent 100.0%)'],
(3, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 80.0%, '
'#d65f5f 80.0%, #d65f5f 100.0%, '
'transparent 100.0%)']}
assert result == expected
def test_bar_align_zero_pos_and_neg(self):
# See https://github.com/pandas-dev/pandas/pull/14757
df = pd.DataFrame({'A': [-10, 0, 20, 90]})
result = df.style.bar(align='zero', color=[
'#d65f5f', '#5fba7d'], width=90)._compute().ctx
expected = {(0, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 45.0%, '
'#d65f5f 45.0%, #d65f5f 50%, '
'transparent 50%)'],
(1, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 50%, '
'#5fba7d 50%, #5fba7d 50.0%, '
'transparent 50.0%)'],
(2, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 50%, #5fba7d 50%, '
'#5fba7d 60.0%, transparent 60.0%)'],
(3, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 50%, #5fba7d 50%, '
'#5fba7d 95.0%, transparent 95.0%)']}
assert result == expected
def test_bar_bad_align_raises(self):
df = pd.DataFrame({'A': [-100, -60, -30, -20]})
with pytest.raises(ValueError):
df.style.bar(align='poorly', color=['#d65f5f', '#5fba7d'])
def test_highlight_null(self, null_color='red'):
df = pd.DataFrame({'A': [0, np.nan]})
result = df.style.highlight_null()._compute().ctx
expected = {(0, 0): [''],
(1, 0): ['background-color: red']}
assert result == expected
def test_nonunique_raises(self):
df = pd.DataFrame([[1, 2]], columns=['A', 'A'])
with pytest.raises(ValueError):
df.style
with pytest.raises(ValueError):
Styler(df)
def test_caption(self):
styler = Styler(self.df, caption='foo')
result = styler.render()
assert all(['caption' in result, 'foo' in result])
styler = self.df.style
result = styler.set_caption('baz')
assert styler is result
assert styler.caption == 'baz'
def test_uuid(self):
styler = Styler(self.df, uuid='abc123')
result = styler.render()
assert 'abc123' in result
styler = self.df.style
result = styler.set_uuid('aaa')
assert result is styler
assert result.uuid == 'aaa'
def test_unique_id(self):
# See https://github.com/pandas-dev/pandas/issues/16780
df = pd.DataFrame({'a': [1, 3, 5, 6], 'b': [2, 4, 12, 21]})
result = df.style.render(uuid='test')
assert 'test' in result
ids = re.findall('id="(.*?)"', result)
assert np.unique(ids).size == len(ids)
def test_table_styles(self):
style = [{'selector': 'th', 'props': [('foo', 'bar')]}]
styler = Styler(self.df, table_styles=style)
result = ' '.join(styler.render().split())
assert 'th { foo: bar; }' in result
styler = self.df.style
result = styler.set_table_styles(style)
assert styler is result
assert styler.table_styles == style
def test_table_attributes(self):
attributes = 'class="foo" data-bar'
styler = Styler(self.df, table_attributes=attributes)
result = styler.render()
assert 'class="foo" data-bar' in result
result = self.df.style.set_table_attributes(attributes).render()
assert 'class="foo" data-bar' in result
def test_precision(self):
with pd.option_context('display.precision', 10):
s = Styler(self.df)
assert s.precision == 10
s = Styler(self.df, precision=2)
assert s.precision == 2
s2 = s.set_precision(4)
assert s is s2
assert s.precision == 4
def test_apply_none(self):
def f(x):
return pd.DataFrame(np.where(x == x.max(), 'color: red', ''),
index=x.index, columns=x.columns)
result = (pd.DataFrame([[1, 2], [3, 4]])
.style.apply(f, axis=None)._compute().ctx)
assert result[(1, 1)] == ['color: red']
def test_trim(self):
result = self.df.style.render() # trim=True
assert result.count('#') == 0
result = self.df.style.highlight_max().render()
assert result.count('#') == len(self.df.columns)
def test_highlight_max(self):
df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
# max(df) = min(-df)
for max_ in [True, False]:
if max_:
attr = 'highlight_max'
else:
df = -df
attr = 'highlight_min'
result = getattr(df.style, attr)()._compute().ctx
assert result[(1, 1)] == ['background-color: yellow']
result = getattr(df.style, attr)(color='green')._compute().ctx
assert result[(1, 1)] == ['background-color: green']
result = getattr(df.style, attr)(subset='A')._compute().ctx
assert result[(1, 0)] == ['background-color: yellow']
result = getattr(df.style, attr)(axis=0)._compute().ctx
expected = {(1, 0): ['background-color: yellow'],
(1, 1): ['background-color: yellow'],
(0, 1): [''], (0, 0): ['']}
assert result == expected
result = getattr(df.style, attr)(axis=1)._compute().ctx
expected = {(0, 1): ['background-color: yellow'],
(1, 1): ['background-color: yellow'],
(0, 0): [''], (1, 0): ['']}
assert result == expected
# separate since we can't negate the strs
df['C'] = ['a', 'b']
result = df.style.highlight_max()._compute().ctx
expected = {(1, 1): ['background-color: yellow']}
result = df.style.highlight_min()._compute().ctx
expected = {(0, 0): ['background-color: yellow']}
def test_export(self):
f = lambda x: 'color: red' if x > 0 else 'color: blue'
g = lambda x, y, z: 'color: %s' if x > 0 else 'color: %s' % z
style1 = self.styler
style1.applymap(f)\
.applymap(g, y='a', z='b')\
.highlight_max()
result = style1.export()
style2 = self.df.style
style2.use(result)
assert style1._todo == style2._todo
style2.render()
def test_display_format(self):
df = pd.DataFrame(np.random.random(size=(2, 2)))
ctx = df.style.format("{:0.1f}")._translate()
assert all(['display_value' in c for c in row] for row in ctx['body'])
assert (all([len(c['display_value']) <= 3 for c in row[1:]]
for row in ctx['body']))
assert len(ctx['body'][0][1]['display_value'].lstrip('-')) <= 3
def test_display_format_raises(self):
df = pd.DataFrame(np.random.randn(2, 2))
with pytest.raises(TypeError):
df.style.format(5)
with pytest.raises(TypeError):
df.style.format(True)
def test_display_subset(self):
df = pd.DataFrame([[.1234, .1234], [1.1234, 1.1234]],
columns=['a', 'b'])
ctx = df.style.format({"a": "{:0.1f}", "b": "{0:.2%}"},
subset=pd.IndexSlice[0, :])._translate()
expected = '0.1'
assert ctx['body'][0][1]['display_value'] == expected
assert ctx['body'][1][1]['display_value'] == '1.1234'
assert ctx['body'][0][2]['display_value'] == '12.34%'
raw_11 = '1.1234'
ctx = df.style.format("{:0.1f}",
subset=pd.IndexSlice[0, :])._translate()
assert ctx['body'][0][1]['display_value'] == expected
assert ctx['body'][1][1]['display_value'] == raw_11
ctx = df.style.format("{:0.1f}",
subset=pd.IndexSlice[0, :])._translate()
assert ctx['body'][0][1]['display_value'] == expected
assert ctx['body'][1][1]['display_value'] == raw_11
ctx = df.style.format("{:0.1f}",
subset=pd.IndexSlice['a'])._translate()
assert ctx['body'][0][1]['display_value'] == expected
assert ctx['body'][0][2]['display_value'] == '0.1234'
ctx = df.style.format("{:0.1f}",
subset=pd.IndexSlice[0, 'a'])._translate()
assert ctx['body'][0][1]['display_value'] == expected
assert ctx['body'][1][1]['display_value'] == raw_11
ctx = df.style.format("{:0.1f}",
subset=pd.IndexSlice[[0, 1], ['a']])._translate()
assert ctx['body'][0][1]['display_value'] == expected
assert ctx['body'][1][1]['display_value'] == '1.1'
assert ctx['body'][0][2]['display_value'] == '0.1234'
assert ctx['body'][1][2]['display_value'] == '1.1234'
def test_display_dict(self):
df = pd.DataFrame([[.1234, .1234], [1.1234, 1.1234]],
columns=['a', 'b'])
ctx = df.style.format({"a": "{:0.1f}", "b": "{0:.2%}"})._translate()
assert ctx['body'][0][1]['display_value'] == '0.1'
assert ctx['body'][0][2]['display_value'] == '12.34%'
df['c'] = ['aaa', 'bbb']
ctx = df.style.format({"a": "{:0.1f}", "c": str.upper})._translate()
assert ctx['body'][0][1]['display_value'] == '0.1'
assert ctx['body'][0][3]['display_value'] == 'AAA'
def test_bad_apply_shape(self):
df = pd.DataFrame([[1, 2], [3, 4]])
with pytest.raises(ValueError):
df.style._apply(lambda x: 'x', subset=pd.IndexSlice[[0, 1], :])
with pytest.raises(ValueError):
df.style._apply(lambda x: [''], subset=pd.IndexSlice[[0, 1], :])
with pytest.raises(ValueError):
df.style._apply(lambda x: ['', '', '', ''])
with pytest.raises(ValueError):
df.style._apply(lambda x: ['', '', ''], subset=1)
with pytest.raises(ValueError):
df.style._apply(lambda x: ['', '', ''], axis=1)
def test_apply_bad_return(self):
def f(x):
return ''
df = pd.DataFrame([[1, 2], [3, 4]])
with pytest.raises(TypeError):
df.style._apply(f, axis=None)
def test_apply_bad_labels(self):
def f(x):
return pd.DataFrame(index=[1, 2], columns=['a', 'b'])
df = pd.DataFrame([[1, 2], [3, 4]])
with pytest.raises(ValueError):
df.style._apply(f, axis=None)
def test_get_level_lengths(self):
index = pd.MultiIndex.from_product([['a', 'b'], [0, 1, 2]])
expected = {(0, 0): 3, (0, 3): 3, (1, 0): 1, (1, 1): 1, (1, 2): 1,
(1, 3): 1, (1, 4): 1, (1, 5): 1}
result = _get_level_lengths(index)
tm.assert_dict_equal(result, expected)
def test_get_level_lengths_un_sorted(self):
index = pd.MultiIndex.from_arrays([
[1, 1, 2, 1],
['a', 'b', 'b', 'd']
])
expected = {(0, 0): 2, (0, 2): 1, (0, 3): 1,
(1, 0): 1, (1, 1): 1, (1, 2): 1, (1, 3): 1}
result = _get_level_lengths(index)
tm.assert_dict_equal(result, expected)
def test_mi_sparse(self):
df = pd.DataFrame({'A': [1, 2]},
index=pd.MultiIndex.from_arrays([['a', 'a'],
[0, 1]]))
result = df.style._translate()
body_0 = result['body'][0][0]
expected_0 = {
"value": "a", "display_value": "a", "is_visible": True,
"type": "th", "attributes": ["rowspan=2"],
"class": "row_heading level0 row0", "id": "level0_row0"
}
tm.assert_dict_equal(body_0, expected_0)
body_1 = result['body'][0][1]
expected_1 = {
"value": 0, "display_value": 0, "is_visible": True,
"type": "th", "class": "row_heading level1 row0",
"id": "level1_row0"
}
tm.assert_dict_equal(body_1, expected_1)
body_10 = result['body'][1][0]
expected_10 = {
"value": 'a', "display_value": 'a', "is_visible": False,
"type": "th", "class": "row_heading level0 row1",
"id": "level0_row1"
}
tm.assert_dict_equal(body_10, expected_10)
head = result['head'][0]
expected = [
{'type': 'th', 'class': 'blank', 'value': '',
'is_visible': True, "display_value": ''},
{'type': 'th', 'class': 'blank level0', 'value': '',
'is_visible': True, 'display_value': ''},
{'type': 'th', 'class': 'col_heading level0 col0', 'value': 'A',
'is_visible': True, 'display_value': 'A'}]
assert head == expected
def test_mi_sparse_disabled(self):
with pd.option_context('display.multi_sparse', False):
df = pd.DataFrame({'A': [1, 2]},
index=pd.MultiIndex.from_arrays([['a', 'a'],
[0, 1]]))
result = df.style._translate()
body = result['body']
for row in body:
assert 'attributes' not in row[0]
def test_mi_sparse_index_names(self):
df = pd.DataFrame({'A': [1, 2]}, index=pd.MultiIndex.from_arrays(
[['a', 'a'], [0, 1]],
names=['idx_level_0', 'idx_level_1'])
)
result = df.style._translate()
head = result['head'][1]
expected = [{
'class': 'index_name level0', 'value': 'idx_level_0',
'type': 'th'},
{'class': 'index_name level1', 'value': 'idx_level_1',
'type': 'th'},
{'class': 'blank', 'value': '', 'type': 'th'}]
assert head == expected
def test_mi_sparse_column_names(self):
df = pd.DataFrame(
np.arange(16).reshape(4, 4),
index=pd.MultiIndex.from_arrays(
[['a', 'a', 'b', 'a'], [0, 1, 1, 2]],
names=['idx_level_0', 'idx_level_1']),
columns=pd.MultiIndex.from_arrays(
[['C1', 'C1', 'C2', 'C2'], [1, 0, 1, 0]],
names=['col_0', 'col_1']
)
)
result = df.style._translate()
head = result['head'][1]
expected = [
{'class': 'blank', 'value': '', 'display_value': '',
'type': 'th', 'is_visible': True},
{'class': 'index_name level1', 'value': 'col_1',
'display_value': 'col_1', 'is_visible': True, 'type': 'th'},
{'class': 'col_heading level1 col0',
'display_value': 1,
'is_visible': True,
'type': 'th',
'value': 1},
{'class': 'col_heading level1 col1',
'display_value': 0,
'is_visible': True,
'type': 'th',
'value': 0},
{'class': 'col_heading level1 col2',
'display_value': 1,
'is_visible': True,
'type': 'th',
'value': 1},
{'class': 'col_heading level1 col3',
'display_value': 0,
'is_visible': True,
'type': 'th',
'value': 0},
]
assert head == expected
class TestStylerMatplotlibDep(object):
def test_background_gradient(self):
tm._skip_if_no_mpl()
df = pd.DataFrame([[1, 2], [2, 4]], columns=['A', 'B'])
for c_map in [None, 'YlOrRd']:
result = df.style.background_gradient(cmap=c_map)._compute().ctx
assert all("#" in x[0] for x in result.values())
assert result[(0, 0)] == result[(0, 1)]
assert result[(1, 0)] == result[(1, 1)]
result = df.style.background_gradient(
subset=pd.IndexSlice[1, 'A'])._compute().ctx
assert result[(1, 0)] == ['background-color: #fff7fb']
def test_block_names():
# catch accidental removal of a block
expected = {
'before_style', 'style', 'table_styles', 'before_cellstyle',
'cellstyle', 'before_table', 'table', 'caption', 'thead', 'tbody',
'after_table', 'before_head_rows', 'head_tr', 'after_head_rows',
'before_rows', 'tr', 'after_rows',
}
result = set(Styler.template.blocks)
assert result == expected
def test_from_custom_template(tmpdir):
p = tmpdir.mkdir("templates").join("myhtml.tpl")
p.write(textwrap.dedent("""\
{% extends "html.tpl" %}
{% block table %}
<h1>{{ table_title|default("My Table") }}</h1>
{{ super() }}
{% endblock table %}"""))
result = Styler.from_custom_template(str(tmpdir.join('templates')),
'myhtml.tpl')
assert issubclass(result, Styler)
assert result.env is not Styler.env
assert result.template is not Styler.template
styler = result(pd.DataFrame({"A": [1, 2]}))
assert styler.render()
def test_shim():
# https://github.com/pandas-dev/pandas/pull/16059
# Remove in 0.21
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
from pandas.formats.style import Styler as _styler # noqa
|
bsd-3-clause
|
cloud-fan/spark
|
python/pyspark/sql/pandas/map_ops.py
|
23
|
3806
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from pyspark.rdd import PythonEvalType
class PandasMapOpsMixin(object):
"""
Mix-in for pandas map operations. Currently, only :class:`DataFrame`
can use this class.
"""
def mapInPandas(self, func, schema):
"""
Maps an iterator of batches in the current :class:`DataFrame` using a Python native
function that takes and outputs a pandas DataFrame, and returns the result as a
:class:`DataFrame`.
The function should take an iterator of `pandas.DataFrame`\\s and return
another iterator of `pandas.DataFrame`\\s. All columns are passed
together as an iterator of `pandas.DataFrame`\\s to the function and the
returned iterator of `pandas.DataFrame`\\s are combined as a :class:`DataFrame`.
Each `pandas.DataFrame` size can be controlled by
`spark.sql.execution.arrow.maxRecordsPerBatch`.
.. versionadded:: 3.0.0
Parameters
----------
func : function
a Python native function that takes an iterator of `pandas.DataFrame`\\s, and
outputs an iterator of `pandas.DataFrame`\\s.
schema : :class:`pyspark.sql.types.DataType` or str
the return type of the `func` in PySpark. The value can be either a
:class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
Examples
--------
>>> from pyspark.sql.functions import pandas_udf
>>> df = spark.createDataFrame([(1, 21), (2, 30)], ("id", "age"))
>>> def filter_func(iterator):
... for pdf in iterator:
... yield pdf[pdf.id == 1]
>>> df.mapInPandas(filter_func, df.schema).show() # doctest: +SKIP
+---+---+
| id|age|
+---+---+
| 1| 21|
+---+---+
Notes
-----
This API is experimental
See Also
--------
pyspark.sql.functions.pandas_udf
"""
from pyspark.sql import DataFrame
from pyspark.sql.pandas.functions import pandas_udf
assert isinstance(self, DataFrame)
udf = pandas_udf(
func, returnType=schema, functionType=PythonEvalType.SQL_MAP_PANDAS_ITER_UDF)
udf_column = udf(*[self[col] for col in self.columns])
jdf = self._jdf.mapInPandas(udf_column._jc.expr())
return DataFrame(jdf, self.sql_ctx)
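# --- Editorial sketch (hedged; not part of the original module) ---
# The per-batch size mentioned in the docstring is an Arrow setting; a rough example
# of tuning it and applying a batch-wise transform (assumes a running SparkSession
# ``spark`` and the ``df`` from the doctest above):
#
#     spark.conf.set("spark.sql.execution.arrow.maxRecordsPerBatch", 5000)
#
#     def add_year_of_birth(iterator):
#         for pdf in iterator:                  # pdf is one pandas.DataFrame batch
#             pdf["yob"] = 2020 - pdf["age"]    # hypothetical derived column
#             yield pdf
#
#     df.mapInPandas(add_year_of_birth, "id long, age long, yob long").show()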
def _test():
import doctest
from pyspark.sql import SparkSession
import pyspark.sql.pandas.map_ops
globs = pyspark.sql.pandas.map_ops.__dict__.copy()
spark = SparkSession.builder\
.master("local[4]")\
.appName("sql.pandas.map_ops tests")\
.getOrCreate()
globs['spark'] = spark
(failure_count, test_count) = doctest.testmod(
pyspark.sql.pandas.map_ops, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
apache-2.0
|
youdar/work
|
work/MTRIX/Look_at_tested_files.py
|
1
|
10301
|
from __future__ import division
import numpy as np
from numpy.random import rand
import matplotlib.pyplot as plt
import cPickle as pickle
import pylab as plb
import os, sys
def get_data():
tested_files = open('Collect_tested_files',"r").read().splitlines()
tested_files = [x.split('::') for x in tested_files]
tested_files = [[x[0],float(x[1]),x[2]] for x in tested_files]
r_factors_only = [x[1] for x in tested_files]
return tested_files,r_factors_only
def plot_data(data,data_labels):
def onpick3(event):
ind = event.ind
i = ind[0]
print '{0} R-Work: {1:.3f}'.format(data_labels[i],data[i])
#print 'onpick3 scatter:', ind, npy.take(x, ind), npy.take(y, ind)
gr = 1.61803398875
h = 10
w = gr*h
d = 0.03
fig = plt.figure(figsize=(w,h))
plt.subplots_adjust(left=d, right=1-d, top=1-d, bottom=d)
ax1 = fig.add_subplot(111)
#col = ax1.scatter(z, y, 100*s, c, picker=True)
l = len(data)
x = xrange(1,l+1)
s = 0.5 + abs(rand(l) - 1)
c = rand(l)
col = ax1.scatter(x, data, 100*s, c, picker=True)
#fig.savefig('pscoll.eps')
fig.canvas.mpl_connect('pick_event',onpick3)
fig.set_size_inches(w,h)
ax1.set_ylim([0,max(data)+0.2])
ax1.set_xlim([0,l+1])
plt.show()
def plot_data2(data):
gr = 1.61803398875
h = 10
w = gr*h
d = 0.03
fig = plt.figure(figsize=(w,h))
plt.subplots_adjust(left=d, right=1-d, top=1-d, bottom=d)
ax1 = fig.add_subplot(111)
#col = ax1.scatter(z, y, 100*s, c, picker=True)
data.sort()
l = len(data)
x = xrange(1,l+1)
s = 0.5
c = rand(l)
#col = ax1.scatter(x,data)
col = ax1.scatter(x,data, 100*s, c)
#fig.savefig('pscoll.eps')
#fig.set_size_inches(w,h)
#ax1.set_ylim([0,max(data)+0.2])
#ax1.set_xlim([0,l+1])
#plt.show()
plt.figure(2)
plt.hist(data,bins=100)
plt.title('Compare R-work, (reported - calculated)/calculated')
#plt.title('Compare R-work, (reconstructed - expected)/expected')
plt.xlim([-1,1])
plt.show()
def plot_data3(data):
gr = 1.61803398875
h = 10
data.sort()
x = [r[0] for r in data]
y = [r[1] for r in data]
l = len(data)
p = plt.plot(x,y,'.')
plt.xlabel('R-work ratio, (reconstructed - expected)/expected')
plt.ylabel('Expected R-work')
plt.show()
def plot_data4(data_NCS,data_ASU,data_issues,plt_lim=0.7):
#gr = 1.61803398875
gr = 1
h = 10
w = gr*h
d = 0.08
fig = plt.figure(figsize=(w,h))
plt.subplots_adjust(left=d, right=1-d, top=1-d, bottom=d)
ax1 = fig.add_subplot(111)
#col = ax1.scatter(z, y, 100*s, c, picker=True)
data_NCS.sort()
data_ASU.sort()
#l = len(data)
#x = xrange(1,l+1)
#s = [1000*rec[1] for rec in data]
s = 40
#c = rand(l)
c_ncs = 'm'
#c_ncs = 'b'
c_asu = 'y'
c_issues = 'black'
x_ncs = [rec[0] for rec in data_NCS]
y_ncs = [rec[1] for rec in data_NCS]
x_asu = [rec[0] for rec in data_ASU]
y_asu = [rec[1] for rec in data_ASU]
x_issues = [rec[0] for rec in data_issues]
y_issues = [rec[1] for rec in data_issues]
#col = ax1.scatter(x,data)
p1 = ax1.scatter(x_asu,y_asu, s, c_asu, linewidths=0)
p2 = ax1.scatter(x_ncs,y_ncs, s, c_ncs, linewidths=0)
p3 = ax1.scatter(x_issues,y_issues,s+100, c_issues, linewidths=0, marker='*')
#fig.savefig('pscoll.eps')
#fig.set_size_inches(w,h)
#ax1.set_ylim([0,max(data)+0.2])
#ax1.set_xlim([0,l+1])
plt.xlim([0,plt_lim])
plt.ylim([0,plt_lim])
#
plt.xlabel('R-work calculated from PDB file',fontsize=18)
plt.ylabel('R-work calculated from reconstructed PDB file',fontsize=18)
plt.legend([p1,p2,p3],['From PDB','Reconstructed','R-work discrepancies'])
#
#plt.xlabel('R-work calculated from PDB file - R-work reported',fontsize=18)
#plt.ylabel('R-work calculated from reconstructed PDB file - R-work reported',fontsize=18)
#plt.legend([p1,p2],['From PDB','Reconstructed'])
#
#plt.xlabel('R-work from PDB file REMARKS',fontsize=18)
#plt.ylabel('Calculated R-work ',fontsize=18)
#plt.legend([p1,p2],['Reported in PDB','Reconstructed'])
#
plt.show()
def hist_data(data):
plt.figure(2)
plt.hist(data,bins=20)
plt.show()
def get_sets(data_files):
all_files = {x[0] for x in data_files}
equal_r = {x[0] for x in data_files if x[1]==x[2]}
return all_files,equal_r
def show_data_summeries(data_files,all_files,equal_r,
not_included_mtrix,files_with_bad_MTRIX,
files_with_good_MTRIX):
first_or_second = [1*(x[1]<x[2]) for x in data_files if x[1]!=x[2]]
not_equal_r = all_files - equal_r
r01 = {x[0] for x in data_files if min(x[1], x[2]) <= 0.1}
r02 = {x[0] for x in data_files if min(x[1], x[2]) > 0.1 and min(x[1], x[2]) <= 0.2 }
r03 = {x[0] for x in data_files if min(x[1], x[2]) > 0.2 and min(x[1], x[2]) <= 0.3 }
rrest = {x[0] for x in data_files if min(x[1], x[2]) > 0.3}
print 'Number of files where the reconstructed R-value differs from the pdb one'
print len(not_equal_r)
print 'Number of files where the reconstructed R-value is smaller'
print sum(first_or_second)
print 'MTRIX transforms not included in the pdb file data'
print '*'*60
print ' value <= 0.1 {}'.format(len(not_included_mtrix & r01))
print '0.1 < value <= 0.2 {}'.format(len(not_included_mtrix & r02))
print '0.2 < value <= 0.3 {}'.format(len(not_included_mtrix & r03))
print ' value > 0.3 {}'.format(len(not_included_mtrix & rrest))
print 'Files with bad rotation MTRIX '
print '*'*60
print ' value <= 0.1 {}'.format(len(files_with_bad_MTRIX & r01))
print '0.1 < value <= 0.2 {}'.format(len(files_with_bad_MTRIX & r02))
print '0.2 < value <= 0.3 {}'.format(len(files_with_bad_MTRIX & r03))
print ' value > 0.3 {}'.format(len(files_with_bad_MTRIX & rrest))
print 'Files with good rotation MTRIX '
print '*'*60
print ' value <= 0.1 {}'.format(len(files_with_good_MTRIX & r01))
print '0.1 < value <= 0.2 {}'.format(len(files_with_good_MTRIX & r02))
print '0.2 < value <= 0.3 {}'.format(len(files_with_good_MTRIX & r03))
print ' value > 0.3 {}'.format(len(files_with_good_MTRIX & rrest))
print '# files_with_good_MTRIX: {}'.format(len(files_with_good_MTRIX))
print '*'*60
a = not_equal_r & not_included_mtrix
b = not_equal_r - a
c = not_included_mtrix - a
print len(a)
print len(b)
print len(c)
print '*'*60
if __name__=='__main__':
# locate the directory containing the log files
osType = sys.platform
if osType.startswith('win'):
directory_path = r'c:\Phenix\Dev\Work\work\MTRIX\Data'
else:
directory_path = '/net/cci-filer2/raid1/home/youval/Work/work/MTRIX/Data'
# convert the path to python format
directory_path = os.path.realpath(directory_path)
os.chdir(directory_path)
print os.getcwd()
data_files = pickle.load(open('Collect_tested_files','r'))
# data_files is a list of records [['3kk6', 0.207, 0.312, 0.312, 'OK'],...
# ['3kk6', 0.207, 0.312, 0.312, 'OK'] : [pdb_file_name,r_work_expected,r_work_model_pdb,r_work_model_reconstructed,processing_msg]
problem_files = pickle.load(open('files_with_problems','r'))
files_with_good_MTRIX = set(pickle.load(open('files_with_good_MTRIX','r')))
files_with_bad_MTRIX = set(pickle.load(open('files_with_bad_MTRIX','r')))
not_included_mtrix = set(open('mtrix_not_included_in_pdb.txt', 'r').read().splitlines())
# plot best data
#best_data = [min(x[1:]) for x in data_files]
#data_labels = [x[0] for x in data_files]
# create sets
#all_files,equal_r = get_sets(data_files)
#show_data_summeries(data_files,all_files,equal_r,
#not_included_mtrix,files_with_bad_MTRIX,
#files_with_good_MTRIX)
# records in data_files
# file_name::r_work_expected::r_work_model_pdb::r_work_model::msg
#x[0]: file_name: 4 character PDB file name
#x[1]: r_work_expected: r_work from pdb file
#x[2]: r_work_model_pdb: r_work calculated from pdb
#x[3]: r_work_model:r_work calculated for complete ASU
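# Editorial example (hedged, matching the record layout described above):
#     rec = data_files[0]                     # e.g. ['3kk6', 0.207, 0.312, 0.312, 'OK']
#     pdb_id, r_expected, r_pdb, r_asu, msg = rec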
#plot_data(best_data, data_labels)
#hist_data(best_data)
# plot (R_reconstructed-R_expected)/R_expected
#data = [(x[1]-x[3])/x[3] for x in data_files]
#data = [(x[3]-x[2]) for x in data_files ]
#plot_data2(data)
#
# separate between the files with complete ASU and a single NCS
NCS = []
ASU = []
for x in data_files:
if x[0] in not_included_mtrix:
NCS.append(x)
else:
ASU.append(x)
#
data_NCS = [[x[2],x[3]] for x in NCS]
data_ASU = [[x[2],x[3]] for x in ASU]
data_issues = [[x[2],x[3]] for x in data_files if (abs((x[1]-x[3])/x[1])>0.5)]
plot_data4(data_NCS,data_ASU,data_issues,plt_lim=0.7)
#
#data_NCS = [[x[2]-x[1],x[3]-x[1]] for x in NCS]
#data_ASU = [[x[2]-x[1],x[3]-x[1]] for x in ASU]
#data_issues = []
#plot_data4(data_NCS,data_ASU,data_issues,plt_lim=0.4)
#
#data_NCS = [[x[1],x[3]] for x in NCS]
#data_ASU = [[x[1],x[3]] for x in ASU]
#data_issues = []
#plot_data4(data_NCS,data_ASU,data_issues,plt_lim=0.65)
#
#data = [[(x[1]-x[2])/x[2], x[2:]] for x in data_files]
#plot_data3(data)
#
#data = [[(x[1]-x[2])/x[2],x[0],x[-1]] for x in data_files]
#data.sort()
#print data[-20:]
#print [x[1] for x in data[:20]]
#
print '*'*60
print 'total files in plot: {}'.format(len(data_files))
print 'number of files with >50% R-work difference from published: {}'.format(len(data_issues))
print 'Out of those, {} are from files with NCS'.format(len([x for x in NCS if (abs((x[1]-x[3])/x[1])>0.5)]))
print '*'*60
# collect interesting files
file_list = [x for x in data_files if x[3]-x[2] > 0.0]
#file_list = [x[0] for x in data_files if (x[2] == x[3])]
print len(file_list)
for x in file_list:
print x
#
print '*'*60
tmp = [x for x in NCS if (abs((x[1]-x[3])/x[1])>0.5)]
print 'number of files with r-work value issues: {}'.format(len(tmp))
for x in tmp:
print x
print 'done'
|
mit
|
bzero/arctic
|
howtos/201507_demo_pydata.py
|
3
|
3403
|
################################################
# Getting started
################################################
# Install Arctic
# pip install git+https://github.com/manahl/arctic.git
# That's it(!)
# Run MongoDB - https://www.mongodb.org/downloads
# $ mkdir /tmp/pydata-demo
# $ mongod --dbpath /tmp/pydata-demo
from datetime import datetime
import time
import ystockquote
from arctic import Arctic
import collections
import pandas
import pandas as pd
import pprint
################################################
# Loading data
################################################
def get_stock_history(ticker, start_date, end_date):
data = ystockquote.get_historical_prices(ticker, start_date, end_date)
df = pandas.DataFrame(collections.OrderedDict(sorted(data.items()))).T
df = df.convert_objects(convert_numeric=True)
return df
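# Editorial note (hedged): DataFrame.convert_objects was removed in later pandas
# releases; on modern pandas an equivalent conversion would be, roughly,
#     df = df.apply(pd.to_numeric, errors='coerce')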
################################################
# VersionStore: Storing and updating stock data
################################################
arctic = Arctic('localhost')
# Create a VersionStore library
arctic.delete_library('jblackburn.stocks')
arctic.initialize_library('jblackburn.stocks')
arctic.list_libraries()
stocks = arctic['jblackburn.stocks']
# get some prices
aapl = get_stock_history('aapl', '2015-01-01', '2015-02-01')
aapl
# store them in the library
stocks.write('aapl', aapl, metadata={'source': 'YAHOO'})
stocks.read('aapl').data['Adj Close'].plot()
stocks.read('aapl').metadata
stocks.read('aapl').version
# Append some more prices - imagine doing this once per period
aapl = get_stock_history('aapl', '2015-02-01', '2015-03-01')
stocks.append('aapl', aapl)
stocks.read('aapl').data
# Reading different versions of the symbol
stocks.list_symbols()
stocks.list_versions('aapl')
# Read the different versions separately
stocks.read('aapl', as_of=1).data.ix[-1]
stocks.read('aapl', as_of=2).data.ix[-1]
# And we can snapshot all items in the library
stocks.snapshot('snap')
stocks.read('aapl', as_of='snap').data.ix[-1]
#################################
# Dealing with lots of data
#################################
#NYSE library
lib = arctic['nyse']
def load_all_stock_history_NYSE():
# Data downloaded from BBG Open Symbology:
#
nyse = pd.read_csv('/users/is/jblackburn/git/arctic/howtos/nyse.csv')
stocks = [x.split('/')[0] for x in nyse['Ticker']]
print len(stocks), " symbols"
for i, stock in enumerate(stocks):
try:
now = datetime.now()
data = get_stock_history(stock, '1980-01-01', '2015-07-07')
lib.write(stock, data)
print "loaded data for: ", stock, datetime.now() - now
except Exception as e:
print "Failed for ", stock, str(e)
# load_all_stock_history_NYSE()
print len(lib.list_symbols()), " NYSE symbols loaded"
def read_all_data_from_lib(lib):
start = time.time()
rows_read = 0
for s in lib.list_symbols():
rows_read += len(lib.read(s).data)
print "Symbols: %s Rows: %s Time: %s Rows/s: %s" % (len(lib.list_symbols()),
rows_read,
(time.time() - start),
rows_read / (time.time() - start))
read_all_data_from_lib(lib)
# Symbols: 1315 Rows: 11460225 Rows/s: 2,209,909
|
lgpl-2.1
|
treverhines/PSGI
|
psgi/plot_fit.py
|
1
|
10212
|
#!/usr/bin/env python
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
from myplot.basemap import Basemap
from myplot.quiver import Quiver
import misc
import matplotlib
matplotlib.quiver.Quiver = Quiver # for error ellipses
def quiver_args(position,disp_array,cov_array=None,mask=None):
N = len(position)
if mask is None:
mask = np.zeros(N,dtype=bool)
x = position[:,0]
y = position[:,1]
u = disp_array[:,0]
u[mask] = 0.0
v = disp_array[:,1]
v[mask] = 0.0
if cov_array is not None:
var_u = cov_array[:,0]
var_v = cov_array[:,1]
#cov_uv = cov_array[:,0,1]
cov_uv = 0*var_u
var_u[mask] = 1e-8
var_v[mask] = 1e-8
cov_uv[mask] = 1e-8
sigma_u = np.sqrt(var_u)
sigma_v = np.sqrt(var_v)
rho = cov_uv/(sigma_u*sigma_v)
return (x,y,u,v,(sigma_u,sigma_v,rho))
else:
return (x,y,u,v)
def create_default_basemap(lat_lst,lon_lst):
'''
creates a basemap that bounds lat_lst and lon_lst
'''
lon_buff = (max(lon_lst) - min(lon_lst))/10.0
lat_buff = (max(lat_lst) - min(lat_lst))/10.0
if lon_buff < 0.5:
lon_buff = 0.5
if lat_buff < 0.5:
lat_buff = 0.5
llcrnrlon = min(lon_lst) - lon_buff
llcrnrlat = min(lat_lst) - lat_buff
urcrnrlon = max(lon_lst) + lon_buff
urcrnrlat = max(lat_lst) + lat_buff
lon_0 = (llcrnrlon + urcrnrlon)/2.0
lat_0 = (llcrnrlat + urcrnrlat)/2.0
return Basemap(projection='tmerc',
lon_0 = lon_0,
lat_0 = lat_0,
llcrnrlon = llcrnrlon,
llcrnrlat = llcrnrlat,
urcrnrlon = urcrnrlon,
urcrnrlat = urcrnrlat,
resolution = 'h')
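# Editorial usage sketch (hedged): given the station coordinates handled in view(),
#     bmap = create_default_basemap(lat, lon)
#     x, y = bmap(lon, lat)    # project lon/lat into map (x, y) coordinates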
def view(data_list,
name_list=None,
draw_map=True,
quiver_scale=0.00001,
scale_length=1.0):
# data list is a list of dictionary-like objects with keys: mean,
# covariance, mask, position, time
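# Editorial sketch (hedged, inferred from the indexing below) of one such entry:
#     data = {'time': times,                    # shape (Nt,)
#             'position': lonlat,               # shape (Nx, 2): lon, lat columns
#             'name': station_names,            # shape (Nx,)
#             'mean': disp, 'covariance': cov,  # shape (Nt, Nx, 3): E, N, U components
#             'mask': missing}                  # shape (Nt, Nx), boolean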
mask_list = []
mean_list = []
cov_list = []
if name_list is None:
name_list = ['displacement %s' % i for i in range(len(data_list))]
for data in data_list:
mask_list += [data['mask']]
mean_list += [data['mean']]
cov_list += [data['covariance']]
times = data_list[0]['time'][:]
lon = data_list[0]['position'][:,0]
lat = data_list[0]['position'][:,1]
station_names = data_list[0]['name'][:]
_view(mean_list,
cov_list,
times,
station_names,
lon,
lat,
mask_list,
disp_type=name_list,
draw_map=draw_map,
quiver_scale=quiver_scale,
scale_length=scale_length)
def _view(displacement_list,
covariance_list,
times,
station_names,
lon,
lat,
mask,
disp_type=None,
colors=None,
draw_map=False,
scale_length=1.0,
quiver_scale=0.0001,
map_resolution=200,
artists=None):
#times -= 2010.0
N = len(displacement_list)
Nx = len(lon)
if disp_type is None:
disp_type = ['']*N
if colors is None:
colors = ['k','b','r','g','m']
if artists is None:
artists = []
# setup background of main figure
sub_fig = plt.figure('Time Series',figsize=(9.0,6.6))
main_fig = plt.figure('Map View',figsize=(10,11.78))
slider_ax = main_fig.add_axes([0.08,0.88,0.76,0.04])
sub_ax1 = sub_fig.add_subplot(311)
sub_ax2 = sub_fig.add_subplot(312)
sub_ax3 = sub_fig.add_subplot(313)
main_ax = main_fig.add_axes([0.08,0.08,0.76,0.76])
time_slider = Slider(slider_ax,'time',
min(times),max(times),
valinit=min(times),
color='black')
time = min(times)
time_idx = np.argmin(abs(times - time))
if draw_map is True:
basemap = create_default_basemap(lat,lon)
position = basemap(lon,lat)
position = np.array(position).transpose()
main_ax.patch.set_facecolor([0.0,0.0,1.0,0.2])
basemap.drawtopography(ax=main_ax,vmin=-6000,vmax=4000,
alpha=1.0,resolution=map_resolution,zorder=0)
basemap.drawcoastlines(ax=main_ax,linewidth=1.5,zorder=1)
basemap.drawcountries(ax=main_ax,linewidth=1.5,zorder=1)
basemap.drawstates(ax=main_ax,linewidth=1,zorder=1)
basemap.drawrivers(ax=main_ax,linewidth=1,zorder=1)
basemap.drawmeridians(np.arange(np.floor(basemap.llcrnrlon),
np.ceil(basemap.urcrnrlon),1.0),
labels=[0,0,0,1],dashes=[2,2],
ax=main_ax,zorder=1)
basemap.drawparallels(np.arange(np.floor(basemap.llcrnrlat),
np.ceil(basemap.urcrnrlat),1.0),
labels=[1,0,0,0],dashes=[2,2],
ax=main_ax,zorder=1)
basemap.drawmapscale(units='km',
lat=basemap.latmin+(basemap.latmax-basemap.latmin)/10.0,
lon=basemap.lonmax-(basemap.lonmax-basemap.lonmin)/5.0,
fontsize=16,
lon0=(basemap.lonmin+basemap.lonmax)/2.0,
lat0=(basemap.latmin+basemap.latmax)/2.0,
barstyle='fancy',ax=main_ax,
length=100,zorder=10)
else:
position = np.array([lon,lat]).transpose()
main_ax.set_aspect('equal')
station_point_lst = []
station_point_label_lst = []
for sid in range(Nx):
loni = lon[sid]
lati = lat[sid]
x,y = position[sid,:]
#x,y = basemap(loni,lati)
station_point = main_ax.plot(x,y,'ko',markersize=3,picker=8,zorder=2)
station_point_label_lst += [station_point[0].get_label()]
station_point_lst += station_point
station_point_label_lst = np.array(station_point_label_lst,dtype=str)
Q_lst = []
for idx in range(N):
if covariance_list[idx] is not None:
args = quiver_args(position,
displacement_list[idx][time_idx,:,:],
covariance_list[idx][time_idx,:,:],
mask[idx][time_idx,:])
Q_lst += [main_ax.quiver(args[0],args[1],args[2],args[3],sigma=args[4],
scale_units='xy',
angles='xy',
width=0.004,
scale=quiver_scale,
color=colors[idx],
ellipse_edgecolors=colors[idx],
zorder=3)]
else:
args = quiver_args(position,
displacement_list[idx][time_idx,:,:])
Q_lst += [main_ax.quiver(args[0],args[1],args[2],args[3],
scale_units='xy',
angles='xy',
width=0.004,
scale=quiver_scale,
color=colors[idx],
ellipse_edgecolors=colors[idx],
zorder=3)]
u_scale = np.array([scale_length])
v_scale = np.array([0.0])
z_scale = np.array([0.0])
xlim = main_ax.get_xlim()
ylim = main_ax.get_ylim()
xo = xlim[0]
yo = ylim[0]
xwidth = xlim[1] - xlim[0]
ywidth = ylim[1] - ylim[0]
x_scale = np.array([xo + xwidth/10.0])
y_scale = np.array([yo + ywidth/10.0])
x_text = xo + xwidth/10.0
y_text = yo + ywidth/15.0
dy_text = ywidth/20.0
for i in range(N):
main_ax.text(x_text,y_text+i*dy_text,disp_type[i],fontsize=16)
main_ax.quiver(x_scale,y_scale+i*dy_text,u_scale,v_scale,
scale_units='xy',
angles='xy',
width=0.004,
scale=quiver_scale,
color=colors[i])
main_ax.text(x_text,y_text+N*dy_text,
'%s meter displacement' % np.round(scale_length,3),
fontsize=16)
def _slider_update(t):
time_idx = np.argmin(abs(t - times))
for idx in range(N):
if covariance_list[idx] is not None:
args = quiver_args(position,
displacement_list[idx][time_idx,:,:],
covariance_list[idx][time_idx,:,:],
mask[idx][time_idx,:])
Q_lst[idx].set_UVC(args[2],args[3],sigma=args[4])
else:
args = quiver_args(position,
displacement_list[idx][time_idx])
Q_lst[idx].set_UVC(args[2],args[3])
main_fig.canvas.draw()
return
def _onpick(event):
idx, = np.nonzero(str(event.artist.get_label()) == station_point_label_lst)
station_label = station_names[idx[0]]
sub_ax1.cla()
sub_ax2.cla()
sub_ax3.cla()
for i in range(N):
midx = mask[i][:,idx]
disp = displacement_list[i][:,idx,:]
cov = covariance_list[i][:,idx,:]
if i == 0:
sub_ax1.errorbar(times[~midx],
disp[~midx,0],
np.sqrt(cov[~midx,0]),
color=colors[i],capsize=0,fmt='.')
sub_ax2.errorbar(times[~midx],
disp[~midx,1],
np.sqrt(cov[~midx,1]),
color=colors[i],capsize=0,fmt='.')
sub_ax3.errorbar(times[~midx],
disp[~midx,2],
np.sqrt(cov[~midx,2]),
color=colors[i],capsize=0,fmt='.')
else:
sub_ax1.plot(times[~midx],
disp[~midx,0],
color=colors[i])
sub_ax2.plot(times[~midx],
disp[~midx,1],
color=colors[i])
sub_ax3.plot(times[~midx],
disp[~midx,2],
color=colors[i])
sub_ax1.set_title(station_label,fontsize=16)
sub_ax1.set_ylabel('easting',fontsize=16)
sub_ax2.set_ylabel('northing',fontsize=16)
sub_ax3.set_ylabel('vertical',fontsize=16)
sub_fig.canvas.draw()
event.artist.set_markersize(10)
main_fig.canvas.draw()
event.artist.set_markersize(3.0)
return
time_slider.on_changed(_slider_update)
main_fig.canvas.mpl_connect('pick_event',_onpick)
for a in artists:
main_ax.add_artist(a)
plt.show()
|
mit
|
grlee77/bart
|
bartview.py
|
9
|
13638
|
#!/usr/bin/python
#
# Copyright 2015. The Regents of the University of California.
# All rights reserved. Use of this source code is governed by
# a BSD-style license which can be found in the LICENSE file.
#
# Authors:
# 2015 Frank Ong <[email protected]>
from __future__ import division
import operator
import numpy as np
import sys
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button, RadioButtons
from functools import partial
import time
import threading
import os.path
class DiscreteSlider(Slider):
"""A matplotlib slider widget with discrete steps."""
def __init__(self, *args, **kwargs):
self.previous_val = kwargs['valinit']
Slider.__init__(self, *args, **kwargs)
def set_val(self, val):
discrete_val = round(val)
xy = self.poly.xy
xy[2] = discrete_val, 1
xy[3] = discrete_val, 0
self.poly.xy = xy
self.valtext.set_text(self.valfmt % discrete_val)
if self.drawon:
self.ax.figure.canvas.draw()
self.val = val
if self.previous_val!=discrete_val:
self.previous_val = discrete_val
if not self.eventson:
return
for cid, func in self.observers.iteritems():
func(discrete_val)
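# Editorial usage sketch (hedged): a drop-in replacement for a plain Slider when only
# integer positions make sense, e.g.
#     ax_sl = plt.axes([0.2, 0.02, 0.6, 0.03])
#     sl = DiscreteSlider(ax_sl, 'frame', 0, 9, valinit=0, valfmt='%i')
#     sl.on_changed(on_frame_change)   # the callback receives the already-rounded value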
class BartView(object):
def __init__(self, cflname):
matplotlib.rcParams['toolbar'] = 'None'
#matplotlib.rcParams['font.size'] = 6
# Read data
self.cflname = sys.argv[1]
self.im = self.readcfl(self.cflname)
self.im_unsqueeze_shape = np.where( np.array(self.im.shape) > 1 )[0]
self.im = self.im.squeeze()
t1 = time.clock()
# Reorder image
self.Ndims = len( self.im.shape )
self.order = np.r_[:self.Ndims]
self.im_ordered = self.im
self.order3 = np.array([0,1,1])
# Slice image
self.slice_num = np.zeros( self.Ndims, dtype = 'int' );
self.im_shape = self.im_ordered.shape
self.im_slice = self.im_ordered[ (slice(None), slice(None)) + tuple(self.slice_num[2:]) ]
# Create figure
self.fig = plt.figure(facecolor='black', figsize=(9,6))
#self.fig = plt.figure(facecolor='black', figsize=(6,4))
self.fig.subplots_adjust( left=0.0 , bottom=0.0 , right=1.0 , top=1 - 0.25)
self.fig.canvas.set_window_title(self.cflname)
# Show image
self.immax = np.max(abs(self.im))
self.l = plt.imshow( abs(self.im_slice) , cmap = "gray", vmin=0, vmax=self.immax)
self.ax = plt.gca()
self.asp = self.im_ordered.shape[1] / self.im_ordered.shape[0]
self.aspect = 1
self.ax.set_aspect( 1 )
plt.axis('off')
radios = []
buttons = []
sliders = []
# Create Radio Buttons for X Y dimensions
dims = self.im_unsqueeze_shape[ self.order ].astype(str)
for i in xrange(0,len(dims)):
dims[i] = "Dim " + dims[i]
oboxx_ax = plt.axes( [0, 1 - 0.03, 0.1, 0.03], axisbg = "gainsboro" )
oboxx_ax.set_xticks([]);
oboxx_ax.set_yticks([]);
orderx_ax = plt.axes( [0, 1 - 0.18, 0.1, 0.15], axisbg = 'gainsboro' )
orderx_radio = RadioButtons( orderx_ax, dims, activecolor = 'SteelBlue', active = 0 )
orderx_ax.text(0.5,1.05, 'Up/Down', horizontalalignment = 'center')
radios.append( orderx_radio )
orderx_radio.on_clicked( self.update_orderx )
oboxy_ax = plt.axes( [0.1, 1 - 0.03, 0.1, 0.03], axisbg = "gainsboro" )
oboxy_ax.set_xticks([]);
oboxy_ax.set_yticks([]);
ordery_ax = plt.axes( [0.1, 1 - 0.18, 0.1, 0.15], axisbg = 'gainsboro' )
ordery_radio = RadioButtons( ordery_ax, dims, activecolor = 'SteelBlue', active = 1 )
ordery_ax.text(0.5,1.05, 'Left/Right', horizontalalignment = 'center')
radios.append( ordery_radio )
ordery_radio.on_clicked( self.update_ordery )
# Create Radio buttons for mosaic
self.mosaic_valid = False
mbox_ax = plt.axes( [0.2, 1 - 0.03, 0.1, 0.03], axisbg = "gainsboro" )
mbox_ax.set_xticks([]);
mbox_ax.set_yticks([]);
mosaic_ax = plt.axes( [0.2, 1 - 0.18, 0.1, 0.15], axisbg = 'gainsboro' )
mosaic_radio = RadioButtons( mosaic_ax, dims, activecolor = 'SteelBlue', active = 1 )
mosaic_ax.text(0.5,1.05, 'Mosaic', horizontalalignment = 'center')
radios.append( mosaic_radio )
mosaic_radio.on_clicked( self.update_mosaic )
# Create flip buttons
self.flipx = 1;
flipx_ax = plt.axes( [0.3, 1 - 0.09, 0.1, 0.09] )
flipx_button = Button( flipx_ax, 'Flip\nUp/Down', color='gainsboro' )
flipx_button.on_clicked(self.update_flipx);
self.flipy = 1;
flipy_ax = plt.axes( [0.3, 1 - 0.18, 0.1, 0.09] )
flipy_button = Button( flipy_ax, 'Flip\nLeft/Right', color='gainsboro' )
flipy_button.on_clicked(self.update_flipy);
# Create Refresh buttons
refresh_ax = plt.axes( [0.4, 1 - 0.09, 0.1, 0.09] )
refresh_button = Button( refresh_ax, 'Refresh', color='gainsboro' )
refresh_button.on_clicked(self.update_refresh);
# Create Save button
save_ax = plt.axes( [0.4, 1 - 0.18, 0.1, 0.09] )
save_button = Button( save_ax, 'Export to\nPNG', color='gainsboro' )
save_button.on_clicked(self.save);
# Create dynamic refresh radio button
#self.drefresh = threading.Event()
#drefresh_ax = plt.axes( [0.4, 1 - 0.18, 0.1, 0.09] )
#drefresh_button = Button( drefresh_ax, 'Dynamic\nRefresh', color='gainsboro' )
#drefresh_button.on_clicked(self.update_drefresh);
# Create Magnitude/phase radio button
self.magnitude = True
mag_ax = plt.axes( [0.5, 1 - 0.18, 0.1, 0.18], axisbg = 'gainsboro' )
mag_radio = RadioButtons( mag_ax, ('Mag','Phase') , activecolor = 'SteelBlue', active = 0 )
radios.append( mag_radio )
mag_radio.on_clicked( self.update_magnitude )
sbox_ax = plt.axes( [0.6, 1 - 0.18, 0.5, 0.18], axisbg='gainsboro')
sbox_ax.set_xticks([])
sbox_ax.set_yticks([])
# Create aspect sliders
aspect_ax = plt.axes( [0.65, 1 - 0.09 + 0.02, 0.1, 0.02], axisbg = 'white' )
aspect_slider = Slider( aspect_ax, "", 0.25, 4, valinit=1, color='SteelBlue')
aspect_ax.text( 4 / 2,1.5, 'Aspect Ratio', horizontalalignment = 'center')
sliders.append( aspect_slider )
aspect_slider.on_changed( self.update_aspect )
# Create contrast sliders
self.vmin = 0
vmin_ax = plt.axes( [0.83, 1 - 0.09 + 0.02, 0.1, 0.02], axisbg = 'white' )
vmin_slider = Slider( vmin_ax, "", 0, 1, valinit=0, color='SteelBlue')
vmin_ax.text(0.5,1.5, 'Contrast Min', horizontalalignment = 'center')
sliders.append( vmin_slider )
vmin_slider.on_changed( self.update_vmin )
self.vmax = 1
vmax_ax = plt.axes( [0.83, 1 - 0.18 + 0.02, 0.1, 0.02], axisbg = 'white' )
vmax_slider = Slider( vmax_ax, "", 0, 1, valinit=1, color='SteelBlue')
vmax_ax.text(0.5,1.5, 'Contrast Max', horizontalalignment = 'center')
sliders.append( vmax_slider )
vmax_slider.on_changed( self.update_vmax )
# Create sliders for choosing slices
box_ax = plt.axes( [0, 1 - 0.25, 1, 0.07], axisbg='gainsboro')
box_ax.set_xticks([])
box_ax.set_yticks([])
slider_thick = 0.02
slider_start = 0.1
ax = []
for d in np.r_[:self.Ndims]:
slice_ax = plt.axes( [0.01 + 1 / self.Ndims * d, 1 - 0.24, 0.8 / self.Ndims, slider_thick] , axisbg='white')
slice_slider = DiscreteSlider( slice_ax, "", 0, self.im_shape[d]-1, valinit=self.slice_num[d],valfmt='%i', color='SteelBlue')
slice_ax.text( (self.im_shape[d]-1)/2,1.5, 'Dim %d Slice' % self.im_unsqueeze_shape[d], horizontalalignment = 'center' )
sliders.append(slice_slider);
slice_slider.on_changed( partial( self.update_slice, d ) )
plt.show()
def readcfl(self, name):
h = open(name + ".hdr", "r")
h.readline() # skip
l = h.readline()
dims = [int(i) for i in l.split( )]
n = reduce(operator.mul, dims, 1)
h.close()
return np.memmap( name + ".cfl", dtype = np.complex64, mode='r', shape=tuple(dims), order='F' )
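# Editorial note (hedged): the .hdr file read above is plain text -- a skipped comment
# line followed by a whitespace-separated list of dimension sizes -- and the matching
# .cfl file stores the complex64 samples column-major, hence order='F' in np.memmap.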
def save( self, event ):
extent = self.ax.get_window_extent().transformed(self.fig.dpi_scale_trans.inverted())
num = 0
fname = self.cflname + '_' + str(num) + '.png'
while( os.path.isfile(fname) ):
num += 1
fname = self.cflname + '_' + str(num) + '.png'
self.fig.savefig(fname, bbox_inches=extent)
def update_flipx( self, event ):
self.flipx *= -1
self.update_image()
def update_flipy( self, event ):
self.flipy *= -1
self.update_image()
def update_refresh( self, event ):
self.update_image()
def dynamic_refresh( self ):
while( self.drefresh.is_set() ):
self.update_image()
def update_drefresh( self, event ):
if ( self.drefresh.is_set() ):
self.drefresh.clear()
else:
self.drefresh.set()
th = threading.Thread( target = self.dynamic_refresh )
th.start()
def update_aspect( self, aspect ):
self.aspect = aspect
self.ax.set_aspect( self.asp * self.im_ordered.shape[0] / self.im_ordered.shape[1] * aspect )
def update_vmax( self, vmax ):
self.vmax = max(self.vmin, vmax)
self.l.set_clim( vmax = self.vmax * self.immax );
def update_vmin( self, vmin ):
self.vmin = min(self.vmax,vmin)
self.l.set_clim( vmin = self.vmin * self.immax );
def update_magnitude( self, l ):
self.magnitude = ( l == 'Mag' )
if (self.magnitude):
self.l.set_cmap('gray')
else:
self.l.set_cmap('hsv')
self.update_image()
def update_orderx( self, l ):
l = int(l[4:])
self.order3[0] = np.where( self.im_unsqueeze_shape == l )[0]
self.update_ordered_image()
def update_ordery( self, l ):
l = int(l[4:])
self.order3[1] = np.where( self.im_unsqueeze_shape == l )[0]
self.update_ordered_image()
def update_ordered_image(self):
self.mosaic_valid = len( self.order3[:3] ) == len( set( self.order3[:3] ) )
self.order_valid = len( self.order3[:2] ) == len( set( self.order3[:2] ) );
if ( self.mosaic_valid ):
self.order[:3] = self.order3[:3]
order_remain = np.r_[:self.Ndims]
for t in np.r_[:3]:
order_remain = order_remain[ (order_remain != self.order[t] ) ]
self.order[3:] = order_remain
self.im_ordered = np.transpose( self.im, self.order )
self.ax.set_aspect( self.asp * self.im_ordered.shape[0] / self.im_ordered.shape[1] * self.aspect )
self.update_image()
elif ( self.order_valid ):
self.order[:2] = self.order3[:2]
order_remain = np.r_[:self.Ndims]
for t in np.r_[:2]:
order_remain = order_remain[ (order_remain != self.order[t] ) ]
self.order[2:] = order_remain
self.im_ordered = np.transpose( self.im, self.order )
self.ax.set_aspect( self.asp * self.im_ordered.shape[0] / self.im_ordered.shape[1] * self.aspect )
self.update_image()
def update_image( self ):
self.immax = np.max(abs(self.im))
self.l.set_clim( vmin = self.vmin * self.immax , vmax = self.vmax * self.immax );
if ( self.mosaic_valid ):
im_slice = self.im_ordered[ (slice(None,None,self.flipx), slice(None,None,self.flipy), slice(None)) + tuple(self.slice_num[self.order[3:]])]
im_slice = self.mosaic( im_slice )
else:
im_slice = self.im_ordered[ (slice(None,None,self.flipx), slice(None,None,self.flipy)) + tuple(self.slice_num[self.order[2:]]) ]
if self.magnitude:
self.l.set_data( abs(im_slice) )
else:
self.l.set_data( (np.angle(im_slice) + np.pi) / (2 * np.pi) )
self.fig.canvas.draw()
def update_slice( self, d, s ):
self.slice_num[d] = int(round(s))
self.update_image()
def mosaic( self, im ):
im = im.squeeze()
(x, y, z) = im.shape
z2 = int( np.ceil( z ** 0.5 ) )
z = int( z2 ** 2 )
im = np.pad( im, [(0,0), (0,0), (0, z - im.shape[2] )], mode='constant')
im = im.reshape( (x, y * z, 1), order = 'F' )
im = im.transpose( (1, 2, 0) )
im = im.reshape( (y * z2 , z2, x), order = 'F' )
im = im.transpose( (2, 1, 0) )
im = im.reshape( (x * z2, y * z2), order = 'F' )
return im
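# Editorial note (hedged): mosaic() tiles the z slices into a near-square grid; e.g. an
# input of shape (x, y, 5) is zero-padded to 9 slices (z2 = 3) and returned as (3*x, 3*y).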
def update_mosaic( self, l ):
l = int(l[4:])
self.order3[2] = np.where( self.im_unsqueeze_shape == l )[0]
self.update_ordered_image()
if __name__ == "__main__":
# Error if more than 1 argument
if (len(sys.argv) != 2):
print "BartView: multidimensional image viewer for cfl"
print "Usage: bview cflname"
exit()
BartView( sys.argv[1] )
|
bsd-3-clause
|
MaartenGr/BERTopic
|
bertopic/plotting/_hierarchy.py
|
1
|
3827
|
import numpy as np
from scipy.cluster.hierarchy import linkage
from typing import List
from sklearn.metrics.pairwise import cosine_similarity
import plotly.graph_objects as go
import plotly.figure_factory as ff
def visualize_hierarchy(topic_model,
orientation: str = "left",
topics: List[int] = None,
top_n_topics: int = None,
width: int = 1000,
height: int = 600) -> go.Figure:
""" Visualize a hierarchical structure of the topics
A ward linkage function is used to perform the
hierarchical clustering based on the cosine distance
matrix between topic embeddings.
Arguments:
topic_model: A fitted BERTopic instance.
orientation: The orientation of the figure.
Either 'left' or 'bottom'
topics: A selection of topics to visualize
top_n_topics: Only select the top n most frequent topics
width: The width of the figure.
height: The height of the figure.
Returns:
fig: A plotly figure
Usage:
To visualize the hierarchical structure of
topics simply run:
```python
topic_model.visualize_hierarchy()
```
Or if you want to save the resulting figure:
```python
fig = topic_model.visualize_hierarchy()
fig.write_html("path/to/file.html")
```
<iframe src="../../tutorial/visualization/hierarchy.html"
style="width:1000px; height: 680px; border: 0px;"></iframe>
"""
# Select topic embeddings
if topic_model.topic_embeddings is not None:
embeddings = np.array(topic_model.topic_embeddings)
else:
embeddings = topic_model.c_tf_idf
# Select topics based on top_n and topics args
if topics is not None:
topics = sorted(list(topics))
elif top_n_topics is not None:
topics = sorted(topic_model.get_topic_freq().Topic.to_list()[1:top_n_topics + 1])
else:
topics = sorted(list(topic_model.get_topics().keys()))
# Select embeddings
all_topics = sorted(list(topic_model.get_topics().keys()))
indices = np.array([all_topics.index(topic) for topic in topics])
embeddings = embeddings[indices]
# Create dendrogram
distance_matrix = 1 - cosine_similarity(embeddings)
fig = ff.create_dendrogram(distance_matrix,
orientation=orientation,
linkagefun=lambda x: linkage(x, "ward"),
color_threshold=1)
# Create nicer labels
axis = "yaxis" if orientation == "left" else "xaxis"
new_labels = [[[str(topics[int(x)]), None]] + topic_model.get_topic(topics[int(x)])
for x in fig.layout[axis]["ticktext"]]
new_labels = ["_".join([label[0] for label in labels[:4]]) for labels in new_labels]
new_labels = [label if len(label) < 30 else label[:27] + "..." for label in new_labels]
# Stylize layout
fig.update_layout(
plot_bgcolor='#ECEFF1',
template="plotly_white",
title={
'text': "<b>Hierarchical Clustering",
'y': .95,
'x': 0.5,
'xanchor': 'center',
'yanchor': 'top',
'font': dict(
size=22,
color="Black")
},
width=width,
height=height,
hoverlabel=dict(
bgcolor="white",
font_size=16,
font_family="Rockwell"
),
)
# Stylize orientation
if orientation == "left":
fig.update_layout(yaxis=dict(tickmode="array",
ticktext=new_labels))
else:
fig.update_layout(xaxis=dict(tickmode="array",
ticktext=new_labels))
return fig
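# --- Editorial sketch (hedged; not part of the original module) ---
# A minimal illustration of the clustering step described in the docstring: cosine
# distances between topic embeddings handed to a ward linkage, with the same call
# pattern as visualize_hierarchy above:
#
#     distance_matrix = 1 - cosine_similarity(embeddings)
#     fig = ff.create_dendrogram(distance_matrix,
#                                linkagefun=lambda x: linkage(x, "ward"))
#     fig.write_html("hierarchy_sketch.html")   # hypothetical output path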
|
mit
|
rbharath/switch
|
Switch/muller_potential.py
|
1
|
3761
|
"""Propagating 2D dynamics on the muller potential using OpenMM.
Currently, we just put a harmonic restraint on the z coordinate,
since OpenMM needs to work in 3D. This isn't really a big deal, except
that it affects the meaning of the temperature and kinetic energy. So
take the meaning of those numbers with a grain of salt.
"""
from SwitchingKalman import *
from MullerForce import *
from numpy import array, reshape, savetxt, loadtxt, zeros, dot, eye
from numpy.random import rand, randn
from numpy.linalg import svd
from simtk.unit import kelvin, picosecond, femtosecond, nanometer, dalton
from Kmeans import *
import simtk.openmm as mm
import matplotlib.pyplot as pp
import numpy as np
import sys
PLOT = True
LEARN = True
NUM_TRAJS = 1
# each particle is totally independent
nParticles = 1
mass = 1.0 * dalton
#temps = 200 300 500 750 1000 1250 1500 1750 2000
temperature = 3000 * kelvin
friction = 100 / picosecond
timestep = 10.0 * femtosecond
T = 500
sim_T = 1000
x_dim = 2
y_dim = 2
K = 3
NUM_ITERS = 5
em_vars = ['As', 'bs', 'Qs', 'Z', 'mus']
As = zeros((K, x_dim, x_dim))
bs = zeros((K, x_dim))
mus = zeros((K, x_dim))
Sigmas = zeros((K, x_dim, x_dim))
Qs = zeros((K, x_dim, x_dim))
# Allocate Memory
start = T//4
xs = zeros((NUM_TRAJS * (T-start), y_dim))
# Clear Display
pp.cla()
# Choose starting conformations uniform on the grid
# between (-1.5, -0.2) and (1.2, 2)
########################################################################
for traj in range(NUM_TRAJS):
system = mm.System()
mullerforce = MullerForce()
for i in range(nParticles):
system.addParticle(mass)
mullerforce.addParticle(i, [])
system.addForce(mullerforce)
integrator = mm.LangevinIntegrator(temperature, friction, timestep)
context = mm.Context(system, integrator)
startingPositions = (np.random.rand(nParticles, 3) * np.array([2.7, 1.8, 1])) + np.array([-1.5, -0.2, 0])
context.setPositions(startingPositions)
context.setVelocitiesToTemperature(temperature)
trajectory = zeros((T,2))
print "Traj %d" % traj
for i in range(T):
x = context.getState(getPositions=True).\
getPositions(asNumpy=True).value_in_unit(nanometer)
# Save the state
#print "\tshape(x[%d]) = %s" % (i, str(shape(x)))
if i > start:
xs[traj * (T-start) + (i-start),:] = x[0,0:2]
trajectory[i,:] = x[0,0:2]
integrator.step(10)
if LEARN:
# Compute K-means
means, assignments = kmeans(xs, K)
W_i_Ts = assignment_to_weights(assignments,K)
emp_means, emp_covars = empirical_wells(xs, W_i_Ts)
for i in range(K):
A = randn(x_dim, x_dim)
u, s, v = svd(A)
As[i] = rand() * dot(u, v.T)
bs[i] = dot(eye(x_dim) - As[i], means[i])
mus[i] = emp_means[i]
Sigmas[i] = emp_covars[i]
Qs[i] = 0.5 * Sigmas[i]
# Learn the Switching Filter
bs = means
l = SwitchingKalmanFilter(x_dim, y_dim, K=K,
As=As,bs=bs,mus=mus,Sigmas=Sigmas,Qs=Qs)
l.em(xs[:], em_iters=NUM_ITERS, em_vars=em_vars)
sim_xs,sim_Ss = l.sample(sim_T,s_init=0, x_init=means[0], y_init=means[0])
if PLOT:
pp.plot(trajectory[start:,0], trajectory[start:,1], color='k')
# Compute K-means
means, assignments = kmeans(xs, K)
pp.scatter(means[:,0], means[:,1], color='r',zorder=10)
pp.scatter(xs[:,0], xs[:,1], edgecolor='none', facecolor='k',zorder=1)
Delta = 0.5
minx = min(xs[:,0])
maxx = max(xs[:,0])
miny = min(xs[:,1])
maxy = max(xs[:,1])
if LEARN:
minx = min(min(sim_xs[:,0]), minx) - Delta
maxx = max(max(sim_xs[:,0]), maxx) + Delta
miny = min(min(sim_xs[:,1]), miny) - Delta
maxy = max(max(sim_xs[:,1]), maxy) + Delta
pp.scatter(sim_xs[:,0], sim_xs[:,1], edgecolor='none',
zorder=5,facecolor='g')
pp.plot(sim_xs[:,0], sim_xs[:,1], zorder=5,color='g')
MullerForce.plot(ax=pp.gca(),minx=minx,maxx=maxx,miny=miny,maxy=maxy)
pp.show()
|
bsd-2-clause
|
jaantollander/Pointwise-Convergence
|
src_legacy/io/load/load.py
|
4
|
2070
|
# coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import pandas as pd
import src_legacy.functions.piecewise_analytic as functions
from src_legacy.fourier_series.input.base import Inputs
from src_legacy.fourier_series.vector import to_col
from src_legacy.io.path import datapath_load
from src_legacy.other.settings import timeit
class Load:
def __init__(self, folder, name):
self.folder = folder
self.name = name
class LoadCsv(Load):
"""
Load individual csv file. Compute errors.
http://pandas.pydata.org/pandas-docs/stable/indexing.html
http://pandas.pydata.org/pandas-docs/stable/basics.html?highlight=iteration#iteration
http://pandas.pydata.org/pandas-docs/stable/basics.html?highlight=iteration#basics-apply
https://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html
http://numba.pydata.org/numba-doc/dev/user/examples.html
"""
file_format = '.csv'
def __init__(self, folder, function_name):
super(LoadCsv, self).__init__(folder, function_name)
self.function = getattr(functions, function_name)
self.filepath = datapath_load(self.folder, self.name + self.file_format)
self.inputs = Inputs()
self.inputs.load(self.folder)
self.values = self._load()
self.errors = self._errors()
self.error_interval = self._error_interval()
@timeit
def _load(self):
return pd.read_csv(self.filepath, header=[0, 1], index_col=[0],
sep=',', dtype=np.float64, engine='c')
@timeit
def _errors(self):
# TODO: More general solution
d = {i.symbol: i.to_array(np.float64) for i in self.inputs}
a = to_col(d['a'])
x = d['x']
function_values = self.function(a, x).flatten()
return np.abs(self.values - function_values)
def _error_interval(self):
d = self.errors.values
return np.min(d), np.max(d)
|
mit
|
madjelan/scikit-learn
|
benchmarks/bench_multilabel_metrics.py
|
86
|
7286
|
#!/usr/bin/env python
"""
A comparison of multilabel target formats and metrics over them
"""
from __future__ import division
from __future__ import print_function
from timeit import timeit
from functools import partial
import itertools
import argparse
import sys
import matplotlib.pyplot as plt
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import (f1_score, accuracy_score, hamming_loss,
jaccard_similarity_score)
from sklearn.utils.testing import ignore_warnings
METRICS = {
'f1': partial(f1_score, average='micro'),
'f1-by-sample': partial(f1_score, average='samples'),
'accuracy': accuracy_score,
'hamming': hamming_loss,
'jaccard': jaccard_similarity_score,
}
FORMATS = {
'sequences': lambda y: [list(np.flatnonzero(s)) for s in y],
'dense': lambda y: y,
'csr': lambda y: sp.csr_matrix(y),
'csc': lambda y: sp.csc_matrix(y),
}
@ignore_warnings
def benchmark(metrics=tuple(v for k, v in sorted(METRICS.items())),
formats=tuple(v for k, v in sorted(FORMATS.items())),
samples=1000, classes=4, density=.2,
n_times=5):
"""Times metric calculations for a number of inputs
Parameters
----------
metrics : array-like of callables (1d or 0d)
The metric functions to time.
formats : array-like of callables (1d or 0d)
These may transform a dense indicator matrix into multilabel
representation.
samples : array-like of ints (1d or 0d)
The number of samples to generate as input.
classes : array-like of ints (1d or 0d)
The number of classes in the input.
    density : array-like of floats (1d or 0d)
The density of positive labels in the input.
n_times : int
Time calling the metric n_times times.
Returns
-------
array of floats shaped like (metrics, formats, samples, classes, density)
Time in seconds.
"""
metrics = np.atleast_1d(metrics)
samples = np.atleast_1d(samples)
classes = np.atleast_1d(classes)
density = np.atleast_1d(density)
formats = np.atleast_1d(formats)
out = np.zeros((len(metrics), len(formats), len(samples), len(classes),
len(density)), dtype=float)
it = itertools.product(samples, classes, density)
for i, (s, c, d) in enumerate(it):
_, y_true = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
return_indicator=True,
random_state=42)
_, y_pred = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
return_indicator=True,
random_state=84)
for j, f in enumerate(formats):
f_true = f(y_true)
f_pred = f(y_pred)
for k, metric in enumerate(metrics):
t = timeit(partial(metric, f_true, f_pred), number=n_times)
out[k, j].flat[i] = t
return out
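def _example_benchmark_usage():
    # Illustrative sketch, not part of the original benchmark script; the
    # helper is never called and its name is made up here. It shows how the
    # 5-D timing array returned by ``benchmark`` is indexed:
    # (metric, format, samples, classes, density).
    times = benchmark(metrics=(METRICS['accuracy'],),
                      formats=(FORMATS['dense'], FORMATS['csr']),
                      samples=1000, classes=4, density=.2, n_times=1)
    # times.shape == (1, 2, 1, 1, 1); times[0, 1, 0, 0, 0] is the time spent
    # computing accuracy scores on the CSR representation.
    return times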
def _tabulate(results, metrics, formats):
"""Prints results by metric and format
Uses the last ([-1]) value of other fields
"""
column_width = max(max(len(k) for k in formats) + 1, 8)
first_width = max(len(k) for k in metrics)
head_fmt = ('{:<{fw}s}' + '{:>{cw}s}' * len(formats))
row_fmt = ('{:<{fw}s}' + '{:>{cw}.3f}' * len(formats))
print(head_fmt.format('Metric', *formats,
cw=column_width, fw=first_width))
for metric, row in zip(metrics, results[:, :, -1, -1, -1]):
print(row_fmt.format(metric, *row,
cw=column_width, fw=first_width))
def _plot(results, metrics, formats, title, x_ticks, x_label,
format_markers=('x', '|', 'o', '+'),
metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')):
"""
Plot the results by metric, format and some other variable given by
x_label
"""
fig = plt.figure('scikit-learn multilabel metrics benchmarks')
plt.title(title)
ax = fig.add_subplot(111)
for i, metric in enumerate(metrics):
for j, format in enumerate(formats):
ax.plot(x_ticks, results[i, j].flat,
label='{}, {}'.format(metric, format),
marker=format_markers[j],
color=metric_colors[i % len(metric_colors)])
ax.set_xlabel(x_label)
ax.set_ylabel('Time (s)')
ax.legend()
plt.show()
if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument('metrics', nargs='*', default=sorted(METRICS),
help='Specifies metrics to benchmark, defaults to all. '
'Choices are: {}'.format(sorted(METRICS)))
ap.add_argument('--formats', nargs='+', choices=sorted(FORMATS),
help='Specifies multilabel formats to benchmark '
'(defaults to all).')
ap.add_argument('--samples', type=int, default=1000,
help='The number of samples to generate')
ap.add_argument('--classes', type=int, default=10,
help='The number of classes')
ap.add_argument('--density', type=float, default=.2,
help='The average density of labels per sample')
ap.add_argument('--plot', choices=['classes', 'density', 'samples'],
default=None,
help='Plot time with respect to this parameter varying '
'up to the specified value')
ap.add_argument('--n-steps', default=10, type=int,
help='Plot this many points for each metric')
ap.add_argument('--n-times',
default=5, type=int,
help="Time performance over n_times trials")
args = ap.parse_args()
if args.plot is not None:
max_val = getattr(args, args.plot)
if args.plot in ('classes', 'samples'):
min_val = 2
else:
min_val = 0
steps = np.linspace(min_val, max_val, num=args.n_steps + 1)[1:]
if args.plot in ('classes', 'samples'):
steps = np.unique(np.round(steps).astype(int))
setattr(args, args.plot, steps)
if args.metrics is None:
args.metrics = sorted(METRICS)
if args.formats is None:
args.formats = sorted(FORMATS)
results = benchmark([METRICS[k] for k in args.metrics],
[FORMATS[k] for k in args.formats],
args.samples, args.classes, args.density,
args.n_times)
_tabulate(results, args.metrics, args.formats)
if args.plot is not None:
print('Displaying plot', file=sys.stderr)
title = ('Multilabel metrics with %s' %
', '.join('{0}={1}'.format(field, getattr(args, field))
for field in ['samples', 'classes', 'density']
if args.plot != field))
_plot(results, args.metrics, args.formats, title, steps, args.plot)
|
bsd-3-clause
|
mfjb/scikit-learn
|
sklearn/decomposition/base.py
|
313
|
5647
|
"""Principal Component Analysis Base Classes"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
# Kyle Kastner <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted
from ..externals import six
from abc import ABCMeta, abstractmethod
class _BasePCA(six.with_metaclass(ABCMeta, BaseEstimator, TransformerMixin)):
"""Base class for PCA methods.
Warning: This class should not be used directly.
Use derived classes instead.
"""
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances, and sigma2 contains the
noise variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
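    # Sketch (editorial, not in the original source): writing C for the
    # unwhitened components_ matrix, lam for explained_variance_ and s2 for
    # noise_variance_, the expression above is the probabilistic-PCA
    # covariance
    #     cov = C.T @ diag(maximum(lam - s2, 0)) @ C + s2 * I
    # i.e. the docstring's S**2 term is the retained variance in excess of
    # the noise floor.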
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
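    # Sketch (editorial, not in the original source): the code above is the
    # matrix inversion (Woodbury) lemma specialised to this model. With
    # C = components_, D = diag(explained_variance_ - noise_variance_) and
    # s2 = noise_variance_,
    #     (s2*I + C.T @ D @ C)^-1
    #         = I/s2 - C.T @ (D^-1 + C @ C.T / s2)^-1 @ C / s2**2
    # so only an (n_components x n_components) matrix is ever inverted
    # instead of an (n_features x n_features) one.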
@abstractmethod
    def fit(self, X, y=None):
"""Placeholder for fit. Subclasses should implement this method!
Fit the model with X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
def transform(self, X, y=None):
"""Apply dimensionality reduction to X.
X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import IncrementalPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> ipca = IncrementalPCA(n_components=2, batch_size=3)
>>> ipca.fit(X)
IncrementalPCA(batch_size=3, copy=True, n_components=2, whiten=False)
>>> ipca.transform(X) # doctest: +SKIP
"""
check_is_fitted(self, ['mean_', 'components_'], all_or_any=all)
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
In other words, return an input X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
        X_original : array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform will compute the
exact inverse operation, which includes reversing whitening.
"""
if self.whiten:
return fast_dot(X, np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
|
bsd-3-clause
|
yavalvas/yav_com
|
build/matplotlib/doc/mpl_toolkits/axes_grid/examples/make_room_for_ylabel_using_axesgrid.py
|
15
|
1723
|
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.axes_grid1.axes_divider import make_axes_area_auto_adjustable
if __name__ == "__main__":
import matplotlib.pyplot as plt
def ex1():
plt.figure(1)
ax = plt.axes([0,0,1,1])
# ax = plt.subplot(111)
ax.set_yticks([0.5])
ax.set_yticklabels(["very long label"])
make_axes_area_auto_adjustable(ax)
def ex2():
plt.figure(2)
ax1 = plt.axes([0,0,1,0.5])
ax2 = plt.axes([0,0.5,1,0.5])
ax1.set_yticks([0.5])
ax1.set_yticklabels(["very long label"])
ax1.set_ylabel("Y label")
ax2.set_title("Title")
make_axes_area_auto_adjustable(ax1, pad=0.1, use_axes=[ax1, ax2])
make_axes_area_auto_adjustable(ax2, pad=0.1, use_axes=[ax1, ax2])
def ex3():
fig = plt.figure(3)
ax1 = plt.axes([0,0,1,1])
divider = make_axes_locatable(ax1)
ax2 = divider.new_horizontal("100%", pad=0.3, sharey=ax1)
ax2.tick_params(labelleft="off")
fig.add_axes(ax2)
divider.add_auto_adjustable_area(use_axes=[ax1], pad=0.1,
adjust_dirs=["left"])
divider.add_auto_adjustable_area(use_axes=[ax2], pad=0.1,
adjust_dirs=["right"])
divider.add_auto_adjustable_area(use_axes=[ax1, ax2], pad=0.1,
adjust_dirs=["top", "bottom"])
ax1.set_yticks([0.5])
ax1.set_yticklabels(["very long label"])
ax2.set_title("Title")
ax2.set_xlabel("X - Label")
ex1()
ex2()
ex3()
plt.show()
|
mit
|
mbayon/TFG-MachineLearning
|
venv/lib/python3.6/site-packages/pandas/tests/io/generate_legacy_storage_files.py
|
8
|
10070
|
#!/usr/bin/env python
""" self-contained to write legacy storage (pickle/msgpack) files """
from __future__ import print_function
from warnings import catch_warnings
from distutils.version import LooseVersion
from pandas import (Series, DataFrame, Panel,
SparseSeries, SparseDataFrame,
Index, MultiIndex, bdate_range, to_msgpack,
date_range, period_range,
Timestamp, NaT, Categorical, Period)
from pandas.compat import u
import os
import sys
import numpy as np
import pandas
import platform as pl
_loose_version = LooseVersion(pandas.__version__)
def _create_sp_series():
nan = np.nan
# nan-based
arr = np.arange(15, dtype=np.float64)
arr[7:12] = nan
arr[-1:] = nan
bseries = SparseSeries(arr, kind='block')
bseries.name = u'bseries'
return bseries
def _create_sp_tsseries():
nan = np.nan
# nan-based
arr = np.arange(15, dtype=np.float64)
arr[7:12] = nan
arr[-1:] = nan
date_index = bdate_range('1/1/2011', periods=len(arr))
bseries = SparseSeries(arr, index=date_index, kind='block')
bseries.name = u'btsseries'
return bseries
def _create_sp_frame():
nan = np.nan
data = {u'A': [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],
u'B': [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],
u'C': np.arange(10).astype(np.int64),
u'D': [0, 1, 2, 3, 4, 5, nan, nan, nan, nan]}
dates = bdate_range('1/1/2011', periods=10)
return SparseDataFrame(data, index=dates)
def create_data():
""" create the pickle/msgpack data """
data = {
u'A': [0., 1., 2., 3., np.nan],
u'B': [0, 1, 0, 1, 0],
u'C': [u'foo1', u'foo2', u'foo3', u'foo4', u'foo5'],
u'D': date_range('1/1/2009', periods=5),
u'E': [0., 1, Timestamp('20100101'), u'foo', 2.]
}
scalars = dict(timestamp=Timestamp('20130101'),
period=Period('2012', 'M'))
index = dict(int=Index(np.arange(10)),
date=date_range('20130101', periods=10),
period=period_range('2013-01-01', freq='M', periods=10))
mi = dict(reg2=MultiIndex.from_tuples(
tuple(zip(*[[u'bar', u'bar', u'baz', u'baz', u'foo',
u'foo', u'qux', u'qux'],
[u'one', u'two', u'one', u'two', u'one',
u'two', u'one', u'two']])),
names=[u'first', u'second']))
series = dict(float=Series(data[u'A']),
int=Series(data[u'B']),
mixed=Series(data[u'E']),
ts=Series(np.arange(10).astype(np.int64),
index=date_range('20130101', periods=10)),
mi=Series(np.arange(5).astype(np.float64),
index=MultiIndex.from_tuples(
tuple(zip(*[[1, 1, 2, 2, 2],
[3, 4, 3, 4, 5]])),
names=[u'one', u'two'])),
dup=Series(np.arange(5).astype(np.float64),
index=[u'A', u'B', u'C', u'D', u'A']),
cat=Series(Categorical([u'foo', u'bar', u'baz'])),
dt=Series(date_range('20130101', periods=5)),
dt_tz=Series(date_range('20130101', periods=5,
tz='US/Eastern')),
period=Series([Period('2000Q1')] * 5))
mixed_dup_df = DataFrame(data)
mixed_dup_df.columns = list(u"ABCDA")
frame = dict(float=DataFrame({u'A': series[u'float'],
u'B': series[u'float'] + 1}),
int=DataFrame({u'A': series[u'int'],
u'B': series[u'int'] + 1}),
mixed=DataFrame({k: data[k]
for k in [u'A', u'B', u'C', u'D']}),
mi=DataFrame({u'A': np.arange(5).astype(np.float64),
u'B': np.arange(5).astype(np.int64)},
index=MultiIndex.from_tuples(
tuple(zip(*[[u'bar', u'bar', u'baz',
u'baz', u'baz'],
[u'one', u'two', u'one',
u'two', u'three']])),
names=[u'first', u'second'])),
dup=DataFrame(np.arange(15).reshape(5, 3).astype(np.float64),
columns=[u'A', u'B', u'A']),
cat_onecol=DataFrame({u'A': Categorical([u'foo', u'bar'])}),
cat_and_float=DataFrame({
u'A': Categorical([u'foo', u'bar', u'baz']),
u'B': np.arange(3).astype(np.int64)}),
mixed_dup=mixed_dup_df,
dt_mixed_tzs=DataFrame({
u'A': Timestamp('20130102', tz='US/Eastern'),
u'B': Timestamp('20130603', tz='CET')}, index=range(5)),
dt_mixed2_tzs=DataFrame({
u'A': Timestamp('20130102', tz='US/Eastern'),
u'B': Timestamp('20130603', tz='CET'),
u'C': Timestamp('20130603', tz='UTC')}, index=range(5))
)
with catch_warnings(record=True):
mixed_dup_panel = Panel({u'ItemA': frame[u'float'],
u'ItemB': frame[u'int']})
mixed_dup_panel.items = [u'ItemA', u'ItemA']
panel = dict(float=Panel({u'ItemA': frame[u'float'],
u'ItemB': frame[u'float'] + 1}),
dup=Panel(
np.arange(30).reshape(3, 5, 2).astype(np.float64),
items=[u'A', u'B', u'A']),
mixed_dup=mixed_dup_panel)
cat = dict(int8=Categorical(list('abcdefg')),
int16=Categorical(np.arange(1000)),
int32=Categorical(np.arange(10000)))
timestamp = dict(normal=Timestamp('2011-01-01'),
nat=NaT,
tz=Timestamp('2011-01-01', tz='US/Eastern'),
freq=Timestamp('2011-01-01', freq='D'),
both=Timestamp('2011-01-01', tz='Asia/Tokyo',
freq='M'))
return dict(series=series,
frame=frame,
panel=panel,
index=index,
scalars=scalars,
mi=mi,
sp_series=dict(float=_create_sp_series(),
ts=_create_sp_tsseries()),
sp_frame=dict(float=_create_sp_frame()),
cat=cat,
timestamp=timestamp)
def create_pickle_data():
data = create_data()
# Pre-0.14.1 versions generated non-unpicklable mixed-type frames and
# panels if their columns/items were non-unique.
if _loose_version < '0.14.1':
del data['frame']['mixed_dup']
del data['panel']['mixed_dup']
if _loose_version < '0.17.0':
del data['series']['period']
del data['scalars']['period']
return data
def _u(x):
return {u(k): _u(x[k]) for k in x} if isinstance(x, dict) else x
def create_msgpack_data():
data = create_data()
if _loose_version < '0.17.0':
del data['frame']['mixed_dup']
del data['panel']['mixed_dup']
del data['frame']['dup']
del data['panel']['dup']
if _loose_version < '0.18.0':
del data['series']['dt_tz']
del data['frame']['dt_mixed_tzs']
# Not supported
del data['sp_series']
del data['sp_frame']
del data['series']['cat']
del data['series']['period']
del data['frame']['cat_onecol']
del data['frame']['cat_and_float']
del data['scalars']['period']
return _u(data)
def platform_name():
return '_'.join([str(pandas.__version__), str(pl.machine()),
str(pl.system().lower()), str(pl.python_version())])
def write_legacy_pickles(output_dir):
# make sure we are < 0.13 compat (in py3)
try:
from pandas.compat import zip, cPickle as pickle # noqa
except:
import pickle
version = pandas.__version__
print("This script generates a storage file for the current arch, system, "
"and python version")
print(" pandas version: {0}".format(version))
print(" output dir : {0}".format(output_dir))
print(" storage format: pickle")
pth = '{0}.pickle'.format(platform_name())
fh = open(os.path.join(output_dir, pth), 'wb')
pickle.dump(create_pickle_data(), fh, pickle.HIGHEST_PROTOCOL)
fh.close()
print("created pickle file: %s" % pth)
def write_legacy_msgpack(output_dir, compress):
version = pandas.__version__
print("This script generates a storage file for the current arch, "
"system, and python version")
print(" pandas version: {0}".format(version))
print(" output dir : {0}".format(output_dir))
print(" storage format: msgpack")
pth = '{0}.msgpack'.format(platform_name())
to_msgpack(os.path.join(output_dir, pth), create_msgpack_data(),
compress=compress)
print("created msgpack file: %s" % pth)
def write_legacy_file():
# force our cwd to be the first searched
sys.path.insert(0, '.')
if not (3 <= len(sys.argv) <= 4):
exit("Specify output directory and storage type: generate_legacy_"
"storage_files.py <output_dir> <storage_type> "
"<msgpack_compress_type>")
output_dir = str(sys.argv[1])
storage_type = str(sys.argv[2])
try:
compress_type = str(sys.argv[3])
except IndexError:
compress_type = None
if storage_type == 'pickle':
write_legacy_pickles(output_dir=output_dir)
elif storage_type == 'msgpack':
write_legacy_msgpack(output_dir=output_dir, compress=compress_type)
else:
exit("storage_type must be one of {'pickle', 'msgpack'}")
if __name__ == '__main__':
write_legacy_file()
|
mit
|
sthyme/ZFSchizophrenia
|
BehaviorAnalysis/HSMovieAnalysis/centroidTracking_updated.py
|
1
|
11289
|
#!/usr/bin/python -tt
"""
SCRIPT RUNNING NOTES, WILL ADD SOMEDAY
"""
# IMPORT NECESSARY MODULES
import matplotlib.image as mpimg
import numpy as np
import cv2
from datetime import datetime, timedelta
import sys
import imageTools
import motionTools
#from collections import dequeue
from scipy.stats import mode
numberofwells = 96
xdim = 1088
ydim = 660
def max_min():
#with open(centroidfile, 'rb') as fid:
with open("testlog.centroid1.Tue, Jun 21, 2016", 'rb') as fid:
cen_data_array = np.fromfile(fid, '>u2')
cen_data_array = cen_data_array.reshape(cen_data_array.size / (numberofwells*2), (numberofwells*2))
cen_data_array[cen_data_array == 65535] = 0 # just setting to zero to make it easier to ignore
maxxys = []
minxys = []
for n in range (0, numberofwells*2,2):
#print "fish: ", n
maxtest = np.amax(cen_data_array[:,n])
mintest = np.amin(cen_data_array[:,n])
# Adds the x and y coordinates to the arrays in an interleaved manner for the next steps, ie, x1 then y1, x2 then y2
if maxtest == mintest and maxtest == 0:
maxxys.append(0)
maxxys.append(0)
minxys.append(0)
minxys.append(0)
#maxxs.append(0)
#maxys.append(0)
#minxs.append(0)
#minys.append(0)
# IF WELL IS EMPTY OR NOTHING EVER MOVES NEED A CHECK - ie, if MIN AND MAX ARE EQUAL?
else:
maxrealx = maxtest
minrealx = np.amin(cen_data_array[:,n][np.nonzero(cen_data_array[:,n])])
maxrealy = np.amax(cen_data_array[:,n+1])
minrealy = np.amin(cen_data_array[:,n+1][np.nonzero(cen_data_array[:,n+1])])
maxxys.append(maxrealx)
maxxys.append(maxrealy)
minxys.append(minrealx)
minxys.append(minrealy)
#maxxs.append(maxrealx)
#maxys.append(maxrealy)
#minxs.append(minrealx)
#minys.append(minrealy)
#print len(maxxs), len(maxys), len(minxs), len(minys)
#for x in maxxs:
#maxxys = np.array([maxxs, maxys])
#minxys = np.array([minxs, minys])
maxxysnp = np.array(maxxys)
minxysnp = np.array(minxys)
#print np.shape(maxxys)
return( maxxysnp, minxysnp)
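# (Editorial note: both arrays returned above are laid out [x1, y1, x2, y2, ...],
# one x/y pair per well, with zeros recorded for wells whose centroid never
# registers.)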
def main(pixThreshold,frameRate,videoStream):
#elate_loc={(1,1):1, (1,2):2, (1,3):3, (1,4):4, (1:5):5, (1,6):6, (1,7):7, (1,8):8, (1,9):9,(1,10):10,(1,11):11,(1,12):12,(2,1):13,(2,2):14,(2,3):15,(2,4):16,(2:5):17,(2,6):18,(2,7):19,(2,8):20,(2,9):21,(2,10):22,(2,11):23,(2,12):24,(3,1):25,(3,2):26,(3,3):27,(3,4):28,(3,5):29,(3,6):30,(3,7)}
rowold={1:0,2:12,3:24,4:36,5:48,6:60,7:72,8:84}
row={0:0,1:12,2:24,3:36,4:48,5:60,6:72,7:84}
expDuration = 600000 # duration of experiment, in seconds; only relevant for live feed
saveFreq = 4500 # how often to save data, in frames
i,m = imageTools.loadImageAndMask()
e = imageTools.loadModeImage()
roimask = np.zeros((660,1088))
(maxxysnp, minxysnp) = max_min()
print "maxxysnp: ", maxxysnp
print "minxysnp: ", minxysnp
maxxs = []
minxs = []
maxys = []
minys = []
for j in range (0, numberofwells*2,2):
maxx = maxxysnp[j]
maxxs.append(maxxysnp[j])
maxy = maxxysnp[j+1]
maxys.append(maxxysnp[j+1])
minx = minxysnp[j]
minxs.append(minxysnp[j])
miny = minxysnp[j+1]
minys.append(minxysnp[j+1])
roimask[miny:maxy,minx:maxx] = j+1
maxxs.sort()
maxys.sort()
minxs.sort()
minys.sort()
#smaxxs = []
#sminxs = []
#smaxys = []
#sminys = []
#rx = 8
#cy = 12
#realmaxx = 0
#realmaxxs = []
#for z in range(0, len(maxxs)):
# if z == cy - 1:
# realmaxxs.append(realmaxx)
# realmaxx = 0
# if maxxs[z] > realmaxx:
# realmaxx = maxxs[z]
np.set_printoptions(threshold=np.nan) # printing entire array
print roimask
# print maxx,maxy,minx,miny
#print e
#cv2.imwrite('testinge.jpg', e)
#cv2.imwrite('testingm.jpg', m)
#cv2.imwrite('testingi.jpg', i)
# convert mask to integer values for bincount weights
#print "mask1: ", np.shape(m), m
m,w = imageTools.convertMaskToWeights(m)
rm,roimaskweights = imageTools.convertMaskToWeights(roimask)
#print "mask2: ", np.shape(m), m
#print "weights: ", np.shape(w), w
unique = np.unique(m)
print "unique: ", unique
unique2 = np.unique(rm)
print "unique2: ", unique2
#print np.shape(m)
rminr = set()
rminc = set()
rmaxr = set()
rmaxc = set()
for x in unique2:
#print "x: ", x
#if x == 0:
# continue
#print np.shape(np.where(m==x))
#print np.where(m==x)
rmaxdimc = np.amax(np.where(rm==x)[0])
rmaxc.add(rmaxdimc)
#print "max dimc: ", np.amax(np.where(m==x)[0])
rmaxdimr = np.amax(np.where(rm==x)[1])
rmaxr.add(rmaxdimr)
#print "max dimr: ", np.amax(np.where(m==x)[1])
rmindimc = np.amin(np.where(rm==x)[0])
rminc.add(rmindimc)
#print "min dimc: ", np.amin(np.where(m==x)[0])
rmindimr = np.amin(np.where(rm==x)[1])
rminr.add(rmindimr)
#print "min dimr: ", np.amin(np.where(m==x)[1])
rlminx = list(rminr)
rlminx.sort()
rlmaxx = list(rmaxr)
rlmaxx.sort()
rlminy = list(rminc)
rlminy.sort()
rlmaxy = list(rmaxc)
rlmaxy.sort()
print rlminx, rlmaxx, rlminy, rlmaxy
minr = set()
minc = set()
maxr = set()
maxc = set()
for x in unique:
#print "x: ", x
if x == 0:
continue
#print np.shape(np.where(m==x))
#print np.where(m==x)
maxdimc = np.amax(np.where(m==x)[0])
maxc.add(maxdimc)
#print "max dimc: ", np.amax(np.where(m==x)[0])
maxdimr = np.amax(np.where(m==x)[1])
maxr.add(maxdimr)
#print "max dimr: ", np.amax(np.where(m==x)[1])
mindimc = np.amin(np.where(m==x)[0])
minc.add(mindimc)
#print "min dimc: ", np.amin(np.where(m==x)[0])
mindimr = np.amin(np.where(m==x)[1])
minr.add(mindimr)
#print "min dimr: ", np.amin(np.where(m==x)[1])
#print minr, maxr, minc, maxc
lminx = list(minr)
lminx.sort()
lmaxx = list(maxr)
lmaxx.sort()
lminy = list(minc)
lminy.sort()
lmaxy = list(maxc)
lmaxy.sort()
print "real mask: ", lminx, lmaxx, lminy, lmaxy
#print lminr, lmaxr, lminc, lmaxc
#print np.where(m==75)
#for x in len(m):
#print m[x]
#print "mask2: ", np.shape(m), m
#print "mask: ", np.nonzero(m)
#print m,w
# start camera or open video
videoType, displayDiffs = imageTools.getVideoType(videoStream)
cap = cv2.VideoCapture(videoStream)
# adjust video resolution if necessary (sized to mask)
print 'Camera resolution is %s x %s' % (str(m.shape[1]),str(m.shape[0]))
cap.set(3,m.shape[1])
cap.set(4,m.shape[0])
# Set Pixel Threshold
ret,frame = cap.read()
storedImage = np.array(e * 255, dtype = np.uint8)
# have to convert the float32 to uint8
storedMode = imageTools.Blur(storedImage)
storedFrame = imageTools.grayBlur(frame)
pixThreshold = int(np.floor( pixThreshold * storedFrame.shape[0] ))
print('PixelThreshold is %i') % pixThreshold
cenData = np.zeros([ saveFreq, len(np.unique(w))*2 -2])
pixData = np.zeros([ saveFreq, len(np.unique(w)) + 1])
i = 0 # a counter for saving chunks of data
totalFrames = 0
startTime = datetime.now()
oldTime = startTime
elapsed = 0
print('Analyzing motion data...')
frame_roi = []
while(cap.isOpened()):
#print "frames", totalFrames
ret,frame = cap.read()
if ret == False:
print 'End of Video'
break
currentFrame = imageTools.grayBlur(frame)
currentFrame2 = imageTools.grayBlur(frame)
diffpix = imageTools.diffImage(storedFrame,currentFrame2,pixThreshold,displayDiffs)
#print np.shape(diffpix)
#print diffpix # This is 660x1088
diff = imageTools.trackdiffImage(storedMode,currentFrame,pixThreshold,displayDiffs)
diff.dtype = np.uint8
_,contours,hierarchy = cv2.findContours(diff, cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
MIN_THRESH = 20.0
MIN_THRESH_P = 20.0
#if cv2.contourArea(contours[0]) > MIN_THRESH:
# M = cv2.moments(contours[0])
# cX = int(M["m10"] / M["m00"])
# cY = int(M["m01"] / M["m00"])
# print cX,cY
roi_dict = {}
for r in range(0,numberofwells):
roi_dict[r+1] = []
for cs in range(0,len(contours)):
if cv2.contourArea(contours[cs]) > MIN_THRESH or cv2.arcLength(contours[cs],True) > MIN_THRESH_P:
#if cv2.contourArea(contours[cs]) > MIN_THRESH and cv2.arcLength(contours[cs],True) > MIN_THRESH_P:
M = cv2.moments(contours[cs])
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
print i, " cX and cY:", cX, cY
#print lmaxx
#print lmaxy
r=1
c=1
for x in range(0,len(lmaxx)):
if cX > lmaxx[x]:
r=x+2
print "Lx,cX,lmaxx[x],lmin[x] ",x, cX, lmaxx[x], lminx[x]
for y in range(0, len(lmaxy)):
if cY > lmaxy[y]:
c=y+2
print "Ly,cY,maxy[y],lmin[x],r,c ",y, cY, lmaxy[y], lminy[y],r,c
area = cv2.contourArea(contours[cs])
perim = cv2.arcLength(contours[cs],True)
perim = cv2.arcLength(contours[cs],True)
print "L r + c + row[c]: ", r, c, rowold[c]," final well: ", r + rowold[c]
if not roi_dict[r+rowold[c]]:
# roi_dict[r+row[c]].append((area*perim))
roi_dict[r+rowold[c]].append((area*perim,cX,cY))
#roi_dict[r+rowold[c]].append((area*perim,contours[cs]))
else:
if roi_dict[r+rowold[c]] < area*perim:
roi_dict[r+rowold[c]][0] = (area*perim,cX,cY)
print len(maxxs), maxxs, maxys, minxs, minys
for x in range(0,len(maxxs)):
if cX > maxxs[x]:
r=x+1 # maybe DONT ADD TWO?
#r=x+2 # maybe DONT ADD TWO?
print "x,cX,maxx[x],minx[x],r,c: ",x, cX, maxxs[x], minxs[x],r,c
for y in range(0, len(maxys)):
if cY > maxys[y]:
c=y+1
#c=y+2
print "y,cY,maxy[y].miny[y],r,c", y, cY, maxys[y], minys[y],r,c
area = cv2.contourArea(contours[cs])
perim = cv2.arcLength(contours[cs],True)
print "r + c + r/8+1 + c/12: ", r, c, r/8+1, c/12, " row[c/12]: ", row[c/12], " final well: ", r/8 + 1 + row[c/12]
if not roi_dict[r/8+1+row[c/12]]:
# roi_dict[r+row[c]].append((area*perim))
roi_dict[r/8+1+row[c/12]].append((area*perim,cX,cY))
#roi_dict[r+row[c]].append((area*perim,contours[cs]))
else:
if roi_dict[r/8+1+row[c/12]] < area*perim:
roi_dict[r/8+1+row[c/12]][0] = (area*perim,cX,cY)
frame_roi.append(roi_dict)
timeDiff = 1. / frameRate
elapsed = elapsed + timeDiff
pixcounts = []
pixcounts = np.bincount(w, weights=diffpix.ravel())
pixData[i,:] = np.hstack((elapsed,pixcounts))
counts = []
keys = roi_dict.keys()
keys.sort()
for k in keys:
x = -10000
y = -10000
if roi_dict[k]:
x = roi_dict[k][0][1]
y = roi_dict[k][0][2]
counts.append(x)
counts.append(y)
cv2.line(storedImage,(x,y),(x,y),(255,255,255),2)
if i == 284:
cv2.imwrite('trackedimagewithlines_' + str(i) + ".png", storedImage)
cenData[i,:] = np.asarray(counts)
totalFrames += 1
storedFrame = currentFrame
i += 1
file = open(videoStream + ".centroid2",'w')
for x in range(0,285):
for y in range(0,192):
file.write(str(int(cenData[x,:][y])) + '\n')
pixData = pixData[:i,:]
pixData = pixData[:,2:] # get rid of timing column and background column
file = open(videoStream + ".motion2",'w')
file.write("12/8/2015" + '\015')
for x in range(0,285):
#print "x", x
for y in range(0,numberofwells):
file.write(str(int(pixData[x,:][y])) + '\n')
vidInfo = {}
# release camera
cap.release()
cv2.destroyAllWindows()
return vidInfo
def cmdLine(pixThreshold,frameRate,videoStream):
vidInfo = main(pixThreshold,frameRate,videoStream)
return vidInfo
if __name__ == '__main__':
pixThreshold = imageTools.getPixThreshold(0.032)
frameRate = imageTools.getFrameRate() # default is 30
videoStream = imageTools.getVideoStream(sys.argv)
vidInfo = cmdLine(pixThreshold,frameRate,videoStream)
|
mit
|
bolkedebruin/airflow
|
tests/providers/presto/hooks/test_presto.py
|
1
|
4355
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from unittest import mock
from unittest.mock import patch
from prestodb.transaction import IsolationLevel
from airflow.models import Connection
from airflow.providers.presto.hooks.presto import PrestoHook
class TestPrestoHookConn(unittest.TestCase):
def setUp(self):
super().setUp()
self.connection = Connection(
login='login',
password='password',
host='host',
schema='hive',
)
class UnitTestPrestoHook(PrestoHook):
conn_name_attr = 'presto_conn_id'
self.db_hook = UnitTestPrestoHook()
self.db_hook.get_connection = mock.Mock()
self.db_hook.get_connection.return_value = self.connection
@patch('airflow.providers.presto.hooks.presto.prestodb.auth.BasicAuthentication')
@patch('airflow.providers.presto.hooks.presto.prestodb.dbapi.connect')
def test_get_conn(self, mock_connect, mock_basic_auth):
self.db_hook.get_conn()
mock_connect.assert_called_once_with(catalog='hive', host='host', port=None, http_scheme='http',
schema='hive', source='airflow', user='login', isolation_level=0,
auth=mock_basic_auth('login', 'password'))
class TestPrestoHook(unittest.TestCase):
def setUp(self):
super().setUp()
self.cur = mock.MagicMock()
self.conn = mock.MagicMock()
self.conn.cursor.return_value = self.cur
conn = self.conn
class UnitTestPrestoHook(PrestoHook):
conn_name_attr = 'test_conn_id'
def get_conn(self):
return conn
def get_isolation_level(self):
return IsolationLevel.READ_COMMITTED
self.db_hook = UnitTestPrestoHook()
@patch('airflow.hooks.dbapi_hook.DbApiHook.insert_rows')
def test_insert_rows(self, mock_insert_rows):
table = "table"
rows = [("hello",),
("world",)]
target_fields = None
commit_every = 10
self.db_hook.insert_rows(table, rows, target_fields, commit_every)
mock_insert_rows.assert_called_once_with(table, rows, None, 10)
def test_get_first_record(self):
statement = 'SQL'
result_sets = [('row1',), ('row2',)]
self.cur.fetchone.return_value = result_sets[0]
self.assertEqual(result_sets[0], self.db_hook.get_first(statement))
self.conn.close.assert_called_once_with()
self.cur.close.assert_called_once_with()
self.cur.execute.assert_called_once_with(statement)
def test_get_records(self):
statement = 'SQL'
result_sets = [('row1',), ('row2',)]
self.cur.fetchall.return_value = result_sets
self.assertEqual(result_sets, self.db_hook.get_records(statement))
self.conn.close.assert_called_once_with()
self.cur.close.assert_called_once_with()
self.cur.execute.assert_called_once_with(statement)
def test_get_pandas_df(self):
statement = 'SQL'
column = 'col'
result_sets = [('row1',), ('row2',)]
self.cur.description = [(column,)]
self.cur.fetchall.return_value = result_sets
df = self.db_hook.get_pandas_df(statement)
self.assertEqual(column, df.columns[0])
self.assertEqual(result_sets[0][0], df.values.tolist()[0][0])
self.assertEqual(result_sets[1][0], df.values.tolist()[1][0])
self.cur.execute.assert_called_once_with(statement, None)
|
apache-2.0
|
snowch/movie-recommender-demo
|
web_app/app/main/views.py
|
1
|
8563
|
try:
import impyla
except ImportError:
print("Installing missing impyla")
import pip
pip.main(['install', '--no-deps', 'impyla'])
try:
import thrift_sasl
except ImportError:
print("Installing missing thrift_sasl")
import pip
# need a patched version of thrift_sasl. see https://github.com/cloudera/impyla/issues/238
pip.main(['install', '--no-deps', 'git+https://github.com/snowch/thrift_sasl'])
from flask import Flask, render_template, session, redirect, url_for, request, flash, abort
from flask.ext.login import login_required, current_user
from . import forms
from . import main
from .. import app
#from . import app
from ..models import Movie, Recommendation, Rating, User
from ..dao import RecommendationsNotGeneratedException, RecommendationsNotGeneratedForUserException
import flask
from bokeh.embed import components
from bokeh.plotting import figure
from bokeh.resources import INLINE
from bokeh.util.string import encode_utf8
from impala.dbapi import connect
from impala.util import as_pandas
from .. import messagehub_client
import time
import json
@main.route('/', methods=['GET'])
def index():
return render_template('/main/home.html',
movies = session.get('movies'))
@main.route('/recommendations', methods=['GET', 'POST'])
def recommendations():
user_id = current_user.get_id()
if not user_id:
flash('Recommendations are only available if you have an account.')
return render_template('/main/recommendations.html', recommendations=[])
else:
rated_movies = Rating.get_ratings(current_user.get_id())
# the user needs to have rated some movies to be able to receive recommendations
if len(rated_movies.keys()) == 0 and current_user.get_id():
flash('No Recommendations found, please rate some movies.')
return render_template('/main/recommendations.html', recommendations=[], timestamp=None)
try:
timestamp = Recommendation.get_latest_recommendation_timestamp()
(recommendation_type, recommendations) = \
Recommendation.get_recommendations(current_user.get_id())
except RecommendationsNotGeneratedException:
flash('No recommendations available - the Recommendation process has not run yet.')
return render_template('/main/recommendations.html', recommendations=[])
except RecommendationsNotGeneratedForUserException:
flash('No Recommendations found, please rate some movies.')
return render_template('/main/recommendations.html', recommendations=[], timestamp=timestamp)
if recommendation_type:
flash("Recommendation type: " + recommendation_type)
return render_template('/main/recommendations.html', recommendations=recommendations, timestamp=timestamp)
@main.route('/search', methods=['POST'])
def search():
form = forms.SearchForm()
session['search_string'] = form.search_string.data
search_string = session.get('search_string')
if search_string:
search_string = search_string.strip()
session['movies'] = Movie.find_movies(
current_user.get_id(),
session.get('search_string')
)
else:
session['movies'] = []
return render_template('/main/search_results.html',
search_string = search_string,
movies = session.get('movies'))
@main.route('/set_rating', methods=['POST'])
@login_required
def set_rating():
if not request.json or \
not 'movie_id' in request.json or \
not 'user_id' in request.json or \
not 'rating' in request.json:
abort(400)
movie_id = request.json['movie_id']
user_id = request.json['user_id']
rating = request.json['rating']
if rating == '-':
Rating.save_rating(movie_id, user_id, None)
else:
Rating.save_rating(movie_id, user_id, int(rating))
return('{ "success": "true" }')
def get_hive_cursor():
# TODO move Hive code to a new file hive_dao.py
if not app.config['BI_HIVE_ENABLED']:
return render_template('/main/bi_not_enabled.html')
BI_HIVE_HOSTNAME = app.config['BI_HIVE_HOSTNAME']
BI_HIVE_USERNAME = app.config['BI_HIVE_USERNAME']
BI_HIVE_PASSWORD = app.config['BI_HIVE_PASSWORD']
# TODO probably want to cache the connection rather than
# instantiate it on every request
# Note that BigInsights Enterprise clusters will need to specify the
# ssl certificate because it is self-signed.
try:
conn = connect(
host=BI_HIVE_HOSTNAME,
port=10000,
use_ssl=True,
auth_mechanism='PLAIN',
user=BI_HIVE_USERNAME,
password=BI_HIVE_PASSWORD
)
except:
return None
return conn.cursor()
@main.route("/report")
def report():
cursor = get_hive_cursor()
if cursor is None:
return render_template('/main/bi_connection_issue.html')
# FIXME we probably want to create aggregates on hadoop
# and cache them rather than returning the whole data
# set here
# we need to ignore monitoring pings which have rating user_id = -1
# and movie_id = -1
try:
cursor.execute(
"select * from movie_ratings where customer_id <> '-1' and movie_id <> '-1'",
configuration={
'hive.mapred.supports.subdirectories': 'true',
'mapred.input.dir.recursive': 'true'
})
except:
return render_template('/main/bi_connection_issue.html')
df = as_pandas(cursor)
count = df.shape[0]
if count == 0:
return render_template('/main/bi_no_records.html')
from bokeh.charts import Bar, output_file, show
fig = Bar(
df,
label='movie_ratings.rating',
values='movie_ratings.rating',
agg='count',
title='Distribution of movie ratings',
legend=False
)
fig.plot_height = 400
fig.xaxis.axis_label = 'Rating'
fig.yaxis.axis_label = 'Count ( Rating )'
js_resources = INLINE.render_js()
css_resources = INLINE.render_css()
script, div = components(fig)
html = flask.render_template(
'/main/embed.html',
plot_script=script,
plot_div=div,
js_resources=js_resources,
css_resources=css_resources,
)
return encode_utf8(html)
def check_auth(username, password):
user = User.find_by_email(username)
if user is not None and user.verify_password(password):
return True
else:
return False
# This method keeps a thread open for a long time which is
# not ideal, but is the simplest way of checking.
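# (Editorial summary of the check below: publish a dummy rating with
# user_id = movie_id = -1 and the current timestamp as the rating value, wait
# ~70 seconds for the streaming pipeline to land it in Hive, then query
# movie_ratings for that timestamp to confirm end-to-end delivery.)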
@main.route("/monitor")
def monitor():
auth = request.authorization
if not auth or not check_auth(auth.username, auth.password):
data = { "error": "Permission denied." }
response = app.response_class(
response=json.dumps(data),
status=550,
mimetype='application/json'
)
return response
cursor = get_hive_cursor()
if cursor is None:
data = { "error": "Could not connect to Hive" }
response = app.response_class(
response=json.dumps(data),
status=500,
mimetype='application/json'
)
return response
timestamp = time.time()
message = '{0},{1},{2}'.format(-1, -1, timestamp)
messagehub_client.send_message( message )
time.sleep(70)
cursor.execute(
'select * from movie_ratings where rating = {0}'.format(timestamp),
configuration={
'hive.mapred.supports.subdirectories': 'true',
'mapred.input.dir.recursive': 'true'
})
df = as_pandas(cursor)
count = df.shape[0]
if count == 1:
data = { "ok": "App rating found in hadoop." }
response = app.response_class(
response=json.dumps(data),
status=200,
mimetype='application/json'
)
return response
else:
data = { "error": "App rating not found in hadoop." }
response = app.response_class(
response=json.dumps(data),
status=500,
mimetype='application/json'
)
return response
|
apache-2.0
|
wazeerzulfikar/scikit-learn
|
examples/model_selection/plot_roc.py
|
102
|
5056
|
"""
=======================================
Receiver Operating Characteristic (ROC)
=======================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
Multiclass settings
-------------------
ROC curves are typically used in binary classification to study the output of
a classifier. In order to extend ROC curve and ROC area to multi-class
or multi-label classification, it is necessary to binarize the output. One ROC
curve can be drawn per label, but one can also draw a ROC curve by considering
each element of the label indicator matrix as a binary prediction
(micro-averaging).
Another evaluation measure for multi-class classification is
macro-averaging, which gives equal weight to the classification of each
label.
.. note::
See also :func:`sklearn.metrics.roc_auc_score`,
:ref:`sphx_glr_auto_examples_model_selection_plot_roc_crossval.py`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
##############################################################################
# Plot of a ROC curve for a specific class
plt.figure()
lw = 2
plt.plot(fpr[2], tpr[2], color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[2])
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
##############################################################################
# Plot ROC curves for the multiclass problem
# Compute macro-average ROC curve and ROC area
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
# Then interpolate all ROC curves at these points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
mean_tpr += interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
# Plot all ROC curves
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]),
color='deeppink', linestyle=':', linewidth=4)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
color='navy', linestyle=':', linewidth=4)
colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
for i, color in zip(range(n_classes), colors):
plt.plot(fpr[i], tpr[i], color=color, lw=lw,
label='ROC curve of class {0} (area = {1:0.2f})'
''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=lw)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
|
bsd-3-clause
|
nicococo/tilitools
|
scripts/exm_ssad.py
|
1
|
2143
|
import numpy as np
import cvxopt as co
import matplotlib.pyplot as plt
from tilitools.ssad_convex import ConvexSSAD
from tilitools.utils_kernel import get_kernel
if __name__ == '__main__':
# example constants (training set size and splitting)
k_type = 'rbf'
# attention: this is the shape parameter of a Gaussian
# which is 1/sigma^2
k_param = 2.4
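    # (Editorial note: under the common convention k(x, y) =
    #  exp(-||x - y||^2 / (2 * sigma^2)) this corresponds to
    #  exp(-k_param * ||x - y||^2 / 2), so larger k_param means a narrower
    #  kernel; the exact form is determined by tilitools' get_kernel.)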
N_pos = 10
N_neg = 10
N_unl = 10
# generate training labels
Dy = np.zeros(N_pos+N_neg+N_unl, dtype=np.int)
Dy[:N_pos] = 1
Dy[N_pos+N_unl:] = -1
# generate training data
co.setseed(11)
Dtrainp = co.normal(2,N_pos)*0.6
Dtrainu = co.normal(2,N_unl)*0.6
Dtrainn = co.normal(2,N_neg)*0.6
Dtrain21 = Dtrainn-1
Dtrain21[0,:] = Dtrainn[0,:]+1
Dtrain22 = -Dtrain21
# training data
Dtrain = co.matrix([[Dtrainp], [Dtrainu], [Dtrainn+0.8]])
Dtrain = np.array(Dtrain)
# build the training kernel
kernel = get_kernel(Dtrain, Dtrain, type=k_type, param=k_param)
# use SSAD
ssad = ConvexSSAD(kernel, Dy, 1./(10.*0.1), 1./(10.*0.1), 1., 1/(10.*0.1))
ssad.fit()
# generate test data from a grid for nicer plots
delta = 0.25
x = np.arange(-3.0, 3.0, delta)
y = np.arange(-3.0, 3.0, delta)
X, Y = np.meshgrid(x, y)
(sx,sy) = X.shape
Xf = np.reshape(X, (1, sx*sy))
Yf = np.reshape(Y, (1, sx*sy))
Dtest = np.append(Xf, Yf, axis=0)
print(Dtest.shape)
# build the test kernel
kernel = get_kernel(Dtest, Dtrain[:, ssad.svs], type=k_type, param=k_param)
res = ssad.apply(kernel)
# make a nice plot of it
fig = plt.figure(1)
Z = np.reshape(res,(sx,sy))
plt.contourf(X, Y, Z, 20, cmap='Blues')
plt.colorbar()
plt.contour(X, Y, Z, np.linspace(0.0, np.max(res), 10))
# plt.contour(X, Y, Z, [-0.6, 0., 0.6])
plt.scatter(Dtrain[0, ssad.svs], Dtrain[1, ssad.svs], 60, c='w')
plt.scatter(Dtrain[0,N_pos:N_pos+N_unl-1],Dtrain[1,N_pos:N_pos+N_unl-1], 10, c='g')
plt.scatter(Dtrain[0,0:N_pos],Dtrain[1,0:N_pos], 20, c='r')
plt.scatter(Dtrain[0,N_pos+N_unl:],Dtrain[1,N_pos+N_unl:], 20, c='b')
plt.show()
|
mit
|
cactusbin/nyt
|
matplotlib/lib/mpl_toolkits/mplot3d/art3d.py
|
6
|
18309
|
#!/usr/bin/python
# art3d.py, original mplot3d version by John Porter
# Parts rewritten by Reinier Heeres <[email protected]>
# Minor additions by Ben Axelrod <[email protected]>
'''
Module containing 3D artist code and functions to convert 2D
artists into 3D versions which can be added to an Axes3D.
'''
from matplotlib import lines, text as mtext, path as mpath, colors as mcolors
from matplotlib.collections import Collection, LineCollection, \
PolyCollection, PatchCollection
from matplotlib.cm import ScalarMappable
from matplotlib.patches import Patch
from matplotlib.colors import Normalize
from matplotlib.cbook import iterable
import warnings
import numpy as np
import math
import proj3d
def norm_angle(a):
"""Return angle between -180 and +180"""
a = (a + 360) % 360
if a > 180:
a = a - 360
return a
def norm_text_angle(a):
"""Return angle between -90 and +90"""
a = (a + 180) % 180
if a > 90:
a = a - 180
return a
def get_dir_vector(zdir):
if zdir == 'x':
return np.array((1, 0, 0))
elif zdir == 'y':
return np.array((0, 1, 0))
elif zdir == 'z':
return np.array((0, 0, 1))
elif zdir is None:
return np.array((0, 0, 0))
elif iterable(zdir) and len(zdir) == 3:
return zdir
else:
raise ValueError("'x', 'y', 'z', None or vector of length 3 expected")
class Text3D(mtext.Text):
'''
Text object with 3D position and (in the future) direction.
'''
def __init__(self, x=0, y=0, z=0, text='', zdir='z', **kwargs):
'''
*x*, *y*, *z* Position of text
*text* Text string to display
*zdir* Direction of text
Keyword arguments are passed onto :func:`~matplotlib.text.Text`.
'''
mtext.Text.__init__(self, x, y, text, **kwargs)
self.set_3d_properties(z, zdir)
def set_3d_properties(self, z=0, zdir='z'):
x, y = self.get_position()
self._position3d = np.array((x, y, z))
self._dir_vec = get_dir_vector(zdir)
def draw(self, renderer):
proj = proj3d.proj_trans_points([self._position3d, \
self._position3d + self._dir_vec], renderer.M)
dx = proj[0][1] - proj[0][0]
dy = proj[1][1] - proj[1][0]
if dx==0. and dy==0.:
# atan2 raises ValueError: math domain error on 0,0
angle = 0.
else:
angle = math.degrees(math.atan2(dy, dx))
self.set_position((proj[0][0], proj[1][0]))
self.set_rotation(norm_text_angle(angle))
mtext.Text.draw(self, renderer)
def text_2d_to_3d(obj, z=0, zdir='z'):
"""Convert a Text to a Text3D object."""
obj.__class__ = Text3D
obj.set_3d_properties(z, zdir)
class Line3D(lines.Line2D):
'''
3D line object.
'''
def __init__(self, xs, ys, zs, *args, **kwargs):
'''
Keyword arguments are passed onto :func:`~matplotlib.lines.Line2D`.
'''
lines.Line2D.__init__(self, [], [], *args, **kwargs)
self._verts3d = xs, ys, zs
def set_3d_properties(self, zs=0, zdir='z'):
xs = self.get_xdata()
ys = self.get_ydata()
try:
zs = float(zs)
zs = [zs for x in xs]
except:
pass
self._verts3d = juggle_axes(xs, ys, zs, zdir)
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_data(xs, ys)
lines.Line2D.draw(self, renderer)
def line_2d_to_3d(line, zs=0, zdir='z'):
'''
Convert a 2D line to 3D.
'''
line.__class__ = Line3D
line.set_3d_properties(zs, zdir)
def path_to_3d_segment(path, zs=0, zdir='z'):
'''Convert a path to a 3D segment.'''
if not iterable(zs):
zs = np.ones(len(path)) * zs
seg = []
pathsegs = path.iter_segments(simplify=False, curves=False)
for (((x, y), code), z) in zip(pathsegs, zs):
seg.append((x, y, z))
seg3d = [juggle_axes(x, y, z, zdir) for (x, y, z) in seg]
return seg3d
def paths_to_3d_segments(paths, zs=0, zdir='z'):
'''
Convert paths from a collection object to 3D segments.
'''
if not iterable(zs):
zs = np.ones(len(paths)) * zs
segments = []
for path, pathz in zip(paths, zs):
segments.append(path_to_3d_segment(path, pathz, zdir))
return segments
class Line3DCollection(LineCollection):
'''
A collection of 3D lines.
'''
def __init__(self, segments, *args, **kwargs):
'''
Keyword arguments are passed onto :func:`~matplotlib.collections.LineCollection`.
'''
LineCollection.__init__(self, segments, *args, **kwargs)
def set_sort_zpos(self,val):
'''Set the position to use for z-sorting.'''
self._sort_zpos = val
def set_segments(self, segments):
'''
Set 3D segments
'''
self._segments3d = segments
LineCollection.set_segments(self, [])
def do_3d_projection(self, renderer):
'''
Project the points according to renderer matrix.
'''
xyslist = [
proj3d.proj_trans_points(points, renderer.M) for points in
self._segments3d]
segments_2d = [zip(xs, ys) for (xs, ys, zs) in xyslist]
LineCollection.set_segments(self, segments_2d)
# FIXME
minz = 1e9
for (xs, ys, zs) in xyslist:
minz = min(minz, min(zs))
return minz
def draw(self, renderer, project=False):
if project:
self.do_3d_projection(renderer)
LineCollection.draw(self, renderer)
def line_collection_2d_to_3d(col, zs=0, zdir='z'):
"""Convert a LineCollection to a Line3DCollection object."""
segments3d = paths_to_3d_segments(col.get_paths(), zs, zdir)
col.__class__ = Line3DCollection
col.set_segments(segments3d)
class Patch3D(Patch):
'''
3D patch object.
'''
def __init__(self, *args, **kwargs):
zs = kwargs.pop('zs', [])
zdir = kwargs.pop('zdir', 'z')
Patch.__init__(self, *args, **kwargs)
self.set_3d_properties(zs, zdir)
def set_3d_properties(self, verts, zs=0, zdir='z'):
if not iterable(zs):
zs = np.ones(len(verts)) * zs
self._segment3d = [juggle_axes(x, y, z, zdir) \
for ((x, y), z) in zip(verts, zs)]
self._facecolor3d = Patch.get_facecolor(self)
def get_path(self):
return self._path2d
def get_facecolor(self):
return self._facecolor2d
def do_3d_projection(self, renderer):
s = self._segment3d
xs, ys, zs = zip(*s)
vxs, vys,vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
self._path2d = mpath.Path(zip(vxs, vys))
# FIXME: coloring
self._facecolor2d = self._facecolor3d
return min(vzs)
def draw(self, renderer):
Patch.draw(self, renderer)
class PathPatch3D(Patch3D):
'''
3D PathPatch object.
'''
def __init__(self, path, **kwargs):
zs = kwargs.pop('zs', [])
zdir = kwargs.pop('zdir', 'z')
Patch.__init__(self, **kwargs)
self.set_3d_properties(path, zs, zdir)
def set_3d_properties(self, path, zs=0, zdir='z'):
Patch3D.set_3d_properties(self, path.vertices, zs=zs, zdir=zdir)
self._code3d = path.codes
def do_3d_projection(self, renderer):
s = self._segment3d
xs, ys, zs = zip(*s)
vxs, vys,vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
self._path2d = mpath.Path(zip(vxs, vys), self._code3d)
# FIXME: coloring
self._facecolor2d = self._facecolor3d
return min(vzs)
def get_patch_verts(patch):
"""Return a list of vertices for the path of a patch."""
trans = patch.get_patch_transform()
path = patch.get_path()
polygons = path.to_polygons(trans)
if len(polygons):
return polygons[0]
else:
return []
def patch_2d_to_3d(patch, z=0, zdir='z'):
"""Convert a Patch to a Patch3D object."""
verts = get_patch_verts(patch)
patch.__class__ = Patch3D
patch.set_3d_properties(verts, z, zdir)
def pathpatch_2d_to_3d(pathpatch, z=0, zdir='z'):
"""Convert a PathPatch to a PathPatch3D object."""
path = pathpatch.get_path()
trans = pathpatch.get_patch_transform()
mpath = trans.transform_path(path)
pathpatch.__class__ = PathPatch3D
pathpatch.set_3d_properties(mpath, z, zdir)
class Patch3DCollection(PatchCollection):
'''
A collection of 3D patches.
'''
def __init__(self, *args, **kwargs):
PatchCollection.__init__(self, *args, **kwargs)
self._old_draw = lambda x: PatchCollection.draw(self, x)
def set_sort_zpos(self,val):
'''Set the position to use for z-sorting.'''
self._sort_zpos = val
def set_3d_properties(self, zs, zdir):
# Force the collection to initialize the face and edgecolors
# just in case it is a scalarmappable with a colormap.
self.update_scalarmappable()
offsets = self.get_offsets()
if len(offsets) > 0:
xs, ys = zip(*self.get_offsets())
else:
xs = [0] * len(zs)
ys = [0] * len(zs)
self._offsets3d = juggle_axes(xs, ys, zs, zdir)
self._facecolor3d = self.get_facecolor()
self._edgecolor3d = self.get_edgecolor()
def do_3d_projection(self, renderer):
xs, ys, zs = self._offsets3d
vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
#FIXME: mpl allows us no way to unset the collection alpha value
self._alpha = None
self.set_facecolors(zalpha(self._facecolor3d, vzs))
self.set_edgecolors(zalpha(self._edgecolor3d, vzs))
PatchCollection.set_offsets(self, zip(vxs, vys))
if vzs.size > 0 :
return min(vzs)
else :
return np.nan
def draw(self, renderer):
self._old_draw(renderer)
def patch_collection_2d_to_3d(col, zs=0, zdir='z'):
"""Convert a PatchCollection to a Patch3DCollection object."""
# The tricky part here is that there are several classes that are
# derived from PatchCollection. We need to use the right draw method.
col._old_draw = col.draw
col.__class__ = Patch3DCollection
col.set_3d_properties(zs, zdir)
class Poly3DCollection(PolyCollection):
'''
A collection of 3D polygons.
'''
def __init__(self, verts, *args, **kwargs):
'''
Create a Poly3DCollection.
*verts* should contain 3D coordinates.
Keyword arguments:
zsort, see set_zsort for options.
Note that this class does a bit of magic with the _facecolors
and _edgecolors properties.
'''
self.set_zsort(kwargs.pop('zsort', True))
PolyCollection.__init__(self, verts, *args, **kwargs)
_zsort_functions = {
'average': np.average,
'min': np.min,
'max': np.max,
}
def set_zsort(self, zsort):
'''
Set z-sorting behaviour:
boolean: if True use default 'average'
string: 'average', 'min' or 'max'
'''
if zsort is True:
zsort = 'average'
if zsort is not False:
if zsort in self._zsort_functions:
zsortfunc = self._zsort_functions[zsort]
else:
return False
else:
zsortfunc = None
self._zsort = zsort
self._sort_zpos = None
self._zsortfunc = zsortfunc
def get_vector(self, segments3d):
"""Optimize points for projection"""
si = 0
ei = 0
segis = []
points = []
for p in segments3d:
points.extend(p)
ei = si+len(p)
segis.append((si, ei))
si = ei
if len(segments3d) > 0 :
xs, ys, zs = zip(*points)
else :
# We need this so that we can skip the bad unpacking from zip()
xs, ys, zs = [], [], []
ones = np.ones(len(xs))
self._vec = np.array([xs, ys, zs, ones])
self._segis = segis
def set_verts(self, verts, closed=True):
'''Set 3D vertices.'''
self.get_vector(verts)
# 2D verts will be updated at draw time
PolyCollection.set_verts(self, [], closed)
def set_3d_properties(self):
# Force the collection to initialize the face and edgecolors
# just in case it is a scalarmappable with a colormap.
self.update_scalarmappable()
self._sort_zpos = None
self.set_zsort(True)
self._facecolors3d = PolyCollection.get_facecolors(self)
self._edgecolors3d = PolyCollection.get_edgecolors(self)
def set_sort_zpos(self,val):
'''Set the position to use for z-sorting.'''
self._sort_zpos = val
def do_3d_projection(self, renderer):
'''
Perform the 3D projection for this object.
'''
# FIXME: This may no longer be needed?
if self._A is not None:
self.update_scalarmappable()
self._facecolors3d = self._facecolors
txs, tys, tzs = proj3d.proj_transform_vec(self._vec, renderer.M)
xyzlist = [(txs[si:ei], tys[si:ei], tzs[si:ei]) \
for si, ei in self._segis]
# This extra fuss is to re-order face / edge colors
cface = self._facecolors3d
cedge = self._edgecolors3d
if len(cface) != len(xyzlist):
cface = cface.repeat(len(xyzlist), axis=0)
if len(cedge) != len(xyzlist):
if len(cedge) == 0:
cedge = cface
cedge = cedge.repeat(len(xyzlist), axis=0)
# if required sort by depth (furthest drawn first)
if self._zsort:
z_segments_2d = [(self._zsortfunc(zs), zip(xs, ys), fc, ec) for
(xs, ys, zs), fc, ec in zip(xyzlist, cface, cedge)]
z_segments_2d.sort(key=lambda x: x[0], reverse=True)
else:
raise ValueError("z-sort must be enabled to project a Poly3DCollection (see set_zsort)")
segments_2d = [s for z, s, fc, ec in z_segments_2d]
PolyCollection.set_verts(self, segments_2d)
self._facecolors2d = [fc for z, s, fc, ec in z_segments_2d]
if len(self._edgecolors3d) == len(cface):
self._edgecolors2d = [ec for z, s, fc, ec in z_segments_2d]
else:
self._edgecolors2d = self._edgecolors3d
# Return zorder value
if self._sort_zpos is not None:
zvec = np.array([[0], [0], [self._sort_zpos], [1]])
ztrans = proj3d.proj_transform_vec(zvec, renderer.M)
return ztrans[2][0]
elif tzs.size > 0 :
# FIXME: Some results still don't look quite right.
# In particular, examine contourf3d_demo2.py
# with az = -54 and elev = -45.
return np.min(tzs)
else :
return np.nan
def set_facecolor(self, colors):
PolyCollection.set_facecolor(self, colors)
self._facecolors3d = PolyCollection.get_facecolor(self)
set_facecolors = set_facecolor
def set_edgecolor(self, colors):
PolyCollection.set_edgecolor(self, colors)
self._edgecolors3d = PolyCollection.get_edgecolor(self)
set_edgecolors = set_edgecolor
def get_facecolors(self):
return self._facecolors2d
get_facecolor = get_facecolors
def get_edgecolors(self):
return self._edgecolors2d
get_edgecolor = get_edgecolors
def draw(self, renderer):
return Collection.draw(self, renderer)
def poly_collection_2d_to_3d(col, zs=0, zdir='z'):
"""Convert a PolyCollection to a Poly3DCollection object."""
segments_3d = paths_to_3d_segments(col.get_paths(), zs, zdir)
col.__class__ = Poly3DCollection
col.set_verts(segments_3d)
col.set_3d_properties()
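# --- Illustrative sketch (added for clarity; not part of the original module) ---
# The depth sort that Poly3DCollection.do_3d_projection performs, reduced to its
# essence: order faces by a z statistic (np.average by default, per set_zsort),
# furthest first, so nearer faces are painted on top.  Assumes numpy is imported
# as np at the top of this module, as it is used elsewhere above.
def _painter_sort_sketch(faces_z, zsortfunc=np.average):
    """Return face indices sorted back-to-front by zsortfunc of their z values."""
    return sorted(range(len(faces_z)),
                  key=lambda i: zsortfunc(faces_z[i]), reverse=True)
# e.g. _painter_sort_sketch([[0.2, 0.4], [0.9, 0.8], [0.5, 0.5]]) -> [1, 2, 0]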
def juggle_axes(xs, ys, zs, zdir):
"""
Reorder coordinates so that 2D xs, ys can be plotted in the plane
orthogonal to zdir. zdir is normally x, y or z. However, if zdir
starts with a '-' it is interpreted as a compensation for rotate_axes.
"""
if zdir == 'x':
return zs, xs, ys
elif zdir == 'y':
return xs, zs, ys
elif zdir[0] == '-':
return rotate_axes(xs, ys, zs, zdir)
else:
return xs, ys, zs
def rotate_axes(xs, ys, zs, zdir):
"""
Reorder coordinates so that the axes are rotated with zdir along
the original z axis. Prepending the axis with a '-' does the
inverse transform, so zdir can be x, -x, y, -y, z or -z
"""
if zdir == 'x':
return ys, zs, xs
elif zdir == '-x':
return zs, xs, ys
elif zdir == 'y':
return zs, xs, ys
elif zdir == '-y':
return ys, zs, xs
else:
return xs, ys, zs
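# --- Illustrative sketch (added for clarity; not part of the original module) ---
# Concrete check of the coordinate permutations implemented by juggle_axes and
# rotate_axes above: for zdir='x' the z values become the first coordinate, and
# the '-x' rotation undoes the 'x' rotation.  Pure permutation logic, so a few
# asserts on small tuples are enough; the function is illustrative only.
def _axis_reorder_sketch():
    xs, ys, zs = (1, 2), (3, 4), (5, 6)
    assert juggle_axes(xs, ys, zs, 'x') == (zs, xs, ys)
    assert rotate_axes(xs, ys, zs, 'x') == (ys, zs, xs)
    rx, ry, rz = rotate_axes(xs, ys, zs, 'x')
    assert rotate_axes(rx, ry, rz, '-x') == (xs, ys, zs)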
def iscolor(c):
try:
if len(c) == 4 or len(c) == 3:
if iterable(c[0]):
return False
if hasattr(c[0], '__float__'):
return True
except:
return False
return False
def get_colors(c, num):
"""Stretch the color argument to provide the required number num"""
if type(c) == type("string"):
c = mcolors.colorConverter.to_rgba(c)
if iscolor(c):
return [c] * num
if len(c) == num:
return c
elif iscolor(c):
return [c] * num
elif len(c) == 0: #if edgecolor or facecolor is specified as 'none'
return [[0,0,0,0]] * num
elif iscolor(c[0]):
return [c[0]] * num
else:
raise ValueError('unknown color format %s' % c)
def zalpha(colors, zs):
"""Modify the alphas of the color list according to depth"""
# FIXME: This only works well if the points for *zs* are well-spaced
# in all three dimensions. Otherwise, at certain orientations,
# the min and max zs are very close together.
# Should really normalize against the viewing depth.
colors = get_colors(colors, len(zs))
if zs.size > 0 :
norm = Normalize(min(zs), max(zs))
sats = 1 - norm(zs) * 0.7
colors = [(c[0], c[1], c[2], c[3] * s) for c, s in zip(colors, sats)]
return colors
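# --- Illustrative sketch (added for clarity; not part of the original module) ---
# Numerically, zalpha scales each alpha by 1 - 0.7 * norm(z): the smallest z in
# the list keeps its full alpha and the largest ends up at 0.3 of the original.
# Uses get_colors/zalpha defined above and assumes numpy (np) and Normalize are
# available at module level, as they are used above.
def _zalpha_sketch():
    faded = zalpha([(1.0, 0.0, 0.0, 1.0)], np.array([0.0, 0.5, 1.0]))
    alphas = [c[3] for c in faded]
    assert abs(alphas[0] - 1.0) < 1e-12
    assert abs(alphas[-1] - 0.3) < 1e-12
    return alphas  # approximately [1.0, 0.65, 0.3]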
|
unlicense
|
potash/scikit-learn
|
sklearn/tests/test_kernel_ridge.py
|
342
|
3027
|
import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.kernel_ridge import KernelRidge
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_almost_equal
X, y = make_regression(n_features=10)
Xcsr = sp.csr_matrix(X)
Xcsc = sp.csc_matrix(X)
Y = np.array([y, y]).T
def test_kernel_ridge():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csr():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsr, y).predict(Xcsr)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsr, y).predict(Xcsr)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csc():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsc, y).predict(Xcsc)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsc, y).predict(Xcsc)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_singular_kernel():
# alpha=0 causes a LinAlgError in computing the dual coefficients,
# which causes a fallback to an lstsq solver. This is tested here.
pred = Ridge(alpha=0, fit_intercept=False).fit(X, y).predict(X)
kr = KernelRidge(kernel="linear", alpha=0)
ignore_warnings(kr.fit)(X, y)
pred2 = kr.predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed():
for kernel in ["linear", "rbf", "poly", "cosine"]:
K = pairwise_kernels(X, X, metric=kernel)
pred = KernelRidge(kernel=kernel).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="precomputed").fit(K, y).predict(K)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed_kernel_unchanged():
K = np.dot(X, X.T)
K2 = K.copy()
KernelRidge(kernel="precomputed").fit(K, y)
assert_array_almost_equal(K, K2)
def test_kernel_ridge_sample_weights():
K = np.dot(X, X.T) # precomputed kernel
sw = np.random.RandomState(0).rand(X.shape[0])
pred = Ridge(alpha=1,
fit_intercept=False).fit(X, y, sample_weight=sw).predict(X)
pred2 = KernelRidge(kernel="linear",
alpha=1).fit(X, y, sample_weight=sw).predict(X)
pred3 = KernelRidge(kernel="precomputed",
alpha=1).fit(K, y, sample_weight=sw).predict(K)
assert_array_almost_equal(pred, pred2)
assert_array_almost_equal(pred, pred3)
def test_kernel_ridge_multi_output():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, Y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, Y).predict(X)
assert_array_almost_equal(pred, pred2)
pred3 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
pred3 = np.array([pred3, pred3]).T
assert_array_almost_equal(pred2, pred3)
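# --- Illustrative sketch (added; not part of the original test file) --------
# The equivalence exercised above can be written out directly: kernel ridge
# solves (K + alpha*I) dual_coef = y and predicts K.dot(dual_coef); with the
# linear kernel K = X X^T this matches Ridge(fit_intercept=False).  The helper
# name and the example call below are illustrative only.
def _closed_form_kernel_ridge_predictions(X, y, alpha=1.0):
    K = np.dot(X, X.T)
    dual_coef = np.linalg.solve(K + alpha * np.eye(K.shape[0]), y)
    return np.dot(K, dual_coef)
# e.g.: assert_array_almost_equal(
#           _closed_form_kernel_ridge_predictions(X, y),
#           KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X))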
|
bsd-3-clause
|
AlexRobson/scikit-learn
|
sklearn/linear_model/stochastic_gradient.py
|
130
|
50966
|
# Authors: Peter Prettenhofer <[email protected]> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
import scipy.sparse as sp
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from ..base import BaseEstimator, RegressorMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (check_array, check_random_state, check_X_y,
deprecated)
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.fixes import astype
from ..utils.seq_dataset import ArrayDataset, CSRDataset
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
SPARSE_INTERCEPT_DECAY = 0.01
"""For sparse data intercept updates are scaled by this decay factor to avoid
intercept oscillation."""
DEFAULT_EPSILON = 0.1
"""Default value of ``epsilon`` parameter. """
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
warm_start=False, average=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.n_iter = n_iter
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.warm_start = warm_start
self.average = average
self._validate_params()
self.coef_ = None
if self.average > 0:
self.standard_coef_ = None
self.average_coef_ = None
# iteration count for learning rate schedule
# must not be int (e.g. if ``learning_rate=='optimal'``)
self.t_ = None
def set_params(self, *args, **kwargs):
super(BaseSGD, self).set_params(*args, **kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.n_iter <= 0:
raise ValueError("n_iter must be > zero")
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
dtype=np.float64,
order="C")
def _make_dataset(X, y_i, sample_weight):
"""Create ``Dataset`` abstraction for sparse and dense inputs.
This also returns the ``intercept_decay`` which is different
for sparse datasets.
"""
if sp.issparse(X):
dataset = CSRDataset(X.data, X.indptr, X.indices, y_i, sample_weight)
intercept_decay = SPARSE_INTERCEPT_DECAY
else:
dataset = ArrayDataset(X, y_i, sample_weight)
intercept_decay = 1.0
return dataset, intercept_decay
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
pos_weight, neg_weight, sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
# if average is not true, average_coef and average_intercept will be
# unused
y_i, coef, intercept, average_coef, average_intercept = \
_prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = _make_dataset(X, y_i, sample_weight)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
# XXX should have random_state_!
random_state = check_random_state(est.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if not est.average:
return plain_sgd(coef, intercept, est.loss_function,
penalty_type, alpha, C, est.l1_ratio,
dataset, n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle), seed,
pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay)
else:
standard_coef, standard_intercept, average_coef, \
average_intercept = average_sgd(coef, intercept, average_coef,
average_intercept,
est.loss_function, penalty_type,
alpha, C, est.l1_ratio, dataset,
n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle),
seed, pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_,
intercept_decay,
est.average)
if len(est.classes_) == 2:
est.average_intercept_[0] = average_intercept
else:
est.average_intercept_[i] = average_intercept
return standard_coef, standard_intercept
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
self.class_weight = class_weight
self.classes_ = None
self.n_jobs = int(n_jobs)
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, n_iter,
classes, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
self._validate_params()
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_, y)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous data %d."
% (n_features, self.coef_.shape[-1]))
self.loss_function = self._get_loss_function(loss)
if self.t_ is None:
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if hasattr(self, "classes_"):
self.classes_ = None
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
classes, sample_weight, coef_init, intercept_init)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, n_iter):
"""Fit a binary classifier on X and y. """
coef, intercept = fit_binary(self, 1, X, y, alpha, C,
learning_rate, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
self.t_ += n_iter * X.shape[0]
# need to be 2d
if self.average > 0:
if self.average <= self.t_ - 1:
self.coef_ = self.average_coef_.reshape(1, -1)
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_.reshape(1, -1)
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
result = Parallel(n_jobs=self.n_jobs, backend="threading",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
n_iter, self._expanded_class_weight[i], 1.,
sample_weight)
for i in range(len(self.classes_)))
for i, (_, intercept) in enumerate(result):
self.intercept_[i] = intercept
self.t_ += n_iter * X.shape[0]
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data
y : numpy array, shape (n_samples,)
Subset of the target values
classes : array, shape (n_classes,)
Classes across all calls to partial_fit.
Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight in ['balanced', 'auto']:
raise ValueError("class_weight '{0}' is not supported for "
"partial_fit. In order to use 'balanced' weights, "
"use compute_class_weight('{0}', classes, y). "
"In place of y you can use a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.".format(self.class_weight))
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
constructor) if class_weight is specified.
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
one sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning, see the partial_fit method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate schedule:
constant: eta = eta0
optimal: eta = 1.0 / (t + t0) [default]
invscaling: eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
n_features)
Weights assigned to the features.
intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
eta0=0.0, fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
penalty='l2', power_t=0.5, random_state=None, shuffle=True,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average)
def _check_proba(self):
check_is_fitted(self, "t_")
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
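# --- Illustrative sketch (added; not part of the original module) -----------
# The learning-rate schedules documented in SGDClassifier above, written out as
# plain functions of the iteration counter t.  In the actual implementation
# (sgd_fast) t0 for the 'optimal' schedule is chosen by a heuristic; here it is
# simply a free parameter of the sketch.
def _eta_schedule_sketch(schedule, t, eta0=0.01, power_t=0.5, t0=1.0):
    if schedule == "constant":
        return eta0
    elif schedule == "optimal":
        return 1.0 / (t + t0)
    elif schedule == "invscaling":
        return eta0 / pow(t, power_t)
    raise ValueError("unknown schedule %r" % schedule)
# e.g. [_eta_schedule_sketch("invscaling", t) for t in (1, 100)] -> [0.01, 0.001]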
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
n_iter, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
y = astype(y, np.float64, copy=False)
n_samples, n_features = X.shape
self._validate_params()
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous data %d."
% (n_features, self.coef_.shape[-1]))
if self.average > 0 and self.average_coef_ is None:
self.average_coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(1,
dtype=np.float64,
order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_intercept_ = self.intercept_
self.standard_coef_ = self.coef_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
return self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.n_iter, sample_weight,
coef_init, intercept_init)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_features,)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (1,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
@deprecated(" and will be removed in 0.19.")
def decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter):
dataset, intercept_decay = _make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if self.t_ is None:
self.t_ = 1.0
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if self.average > 0:
self.standard_coef_, self.standard_intercept_, \
self.average_coef_, self.average_intercept_ =\
average_sgd(self.standard_coef_,
self.standard_intercept_[0],
self.average_coef_,
self.average_intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay, self.average)
self.average_intercept_ = np.atleast_1d(self.average_intercept_)
self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
self.t_ += n_iter * X.shape[0]
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.intercept_ = self.standard_intercept_
else:
self.coef_, self.intercept_ = \
plain_sgd(self.coef_,
self.intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.t_ += n_iter * X.shape[0]
self.intercept_ = np.atleast_1d(self.intercept_)
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
estimated one sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
focus less on getting outliers correct by switching from squared to
linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
errors less than epsilon and is linear past that; this is the loss
function used in SVR. 'squared_epsilon_insensitive' is the same but
becomes squared loss past a tolerance of epsilon.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level.
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
learning_rate : string, optional
The learning rate:
constant: eta = eta0
optimal: eta = 1.0/(alpha * t)
invscaling: eta = eta0 / pow(t, power_t) [default]
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So ``average=10`` will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (n_features,)
Weights assigned to the features.
intercept_ : array, shape (1,)
The intercept term.
average_coef_ : array, shape (n_features,)
Averaged weights assigned to the features.
average_intercept_ : array, shape (1,)
The averaged intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
random_state=None, shuffle=True, verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
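# --- Illustrative sketch (added; not part of the original module) -----------
# Minimal out-of-core use of the partial_fit API documented above: the full set
# of classes must be passed on the first call; later calls may contain any
# subset of them.  The random batches here are purely illustrative.
def _partial_fit_sketch(n_batches=5, batch_size=20, random_state=0):
    rng = np.random.RandomState(random_state)
    clf = SGDClassifier(loss="log", random_state=random_state)
    classes = np.array([0, 1])
    for _ in range(n_batches):
        X_batch = rng.randn(batch_size, 3)
        y_batch = rng.randint(0, 2, batch_size)
        clf.partial_fit(X_batch, y_batch, classes=classes)
    return clf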
|
bsd-3-clause
|
huzq/scikit-learn
|
sklearn/neural_network/tests/test_rbm.py
|
14
|
7770
|
import sys
import re
import pytest
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from sklearn.utils._testing import (assert_almost_equal, assert_array_equal,
assert_allclose)
from sklearn.datasets import load_digits
from io import StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
Xdigits, _ = load_digits(return_X_y=True)
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, n_iter=7, random_state=9)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_partial_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=20, random_state=9)
n_samples = X.shape[0]
n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
batch_slices = np.array_split(X, n_batches)
for i in range(7):
for batch in batch_slices:
rbm.partial_fit(batch)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
assert_array_equal(X, Xdigits)
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
def test_small_sparse():
# BernoulliRBM should work on small sparse matrices.
X = csr_matrix(Xdigits[:4])
BernoulliRBM().fit(X) # no exception
def test_small_sparse_partial_fit():
for sparse in [csc_matrix, csr_matrix]:
X_sparse = sparse(Xdigits[:100])
X = Xdigits[:100].copy()
rbm1 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm2 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm1.partial_fit(X_sparse)
rbm2.partial_fit(X)
assert_almost_equal(rbm1.score_samples(X).mean(),
rbm2.score_samples(X).mean(),
decimal=0)
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
def test_fit_gibbs():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
# from the same input
rng = np.random.RandomState(42)
X = np.array([[0.], [1.]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
# this many iterations are needed for convergence
rbm1.fit(X)
assert_almost_equal(rbm1.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm1.gibbs(X), X)
return rbm1
def test_fit_gibbs_sparse():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
# the same input even when the input is sparse, and test against non-sparse
rbm1 = test_fit_gibbs()
rng = np.random.RandomState(42)
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
rbm2.fit(X)
assert_almost_equal(rbm2.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
# Check if we don't get NaNs sampling the full digits dataset.
# Also check that sampling again will yield different results.
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=40,
n_iter=20, random_state=42)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
X_sampled2 = rbm1.gibbs(X)
assert np.all((X_sampled != X_sampled2).max(axis=1))
def test_score_samples():
# Test score_samples (pseudo-likelihood) method.
# Assert that pseudo-likelihood is computed without clipping.
# See Fabian's blog, http://bit.ly/1iYefRk
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2,
n_iter=10, random_state=rng)
rbm1.fit(X)
assert (rbm1.score_samples(X) < -300).all()
# Sparse vs. dense should not affect the output. Also test sparse input
# validation.
rbm1.random_state = 42
d_score = rbm1.score_samples(X)
rbm1.random_state = 42
s_score = rbm1.score_samples(lil_matrix(X))
assert_almost_equal(d_score, s_score)
# Test numerical stability (#2785): would previously generate infinities
# and crash with an exception.
with np.errstate(under='ignore'):
rbm1.score_samples([np.arange(1000) * 100])
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
def test_sparse_and_verbose():
# Make sure RBM works with sparse input when verbose=True
old_stdout = sys.stdout
sys.stdout = StringIO()
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
random_state=42, verbose=True)
try:
rbm.fit(X)
s = sys.stdout.getvalue()
# make sure output is sound
assert re.match(r"\[BernoulliRBM\] Iteration 1,"
r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
r" time = (\d|\.)+s", s)
finally:
sys.stdout = old_stdout
@pytest.mark.parametrize("dtype_in, dtype_out", [
(np.float32, np.float32),
(np.float64, np.float64),
(int, np.float64)])
def test_transformer_dtypes_casting(dtype_in, dtype_out):
X = Xdigits[:100].astype(dtype_in)
rbm = BernoulliRBM(n_components=16, batch_size=5, n_iter=5,
random_state=42)
Xt = rbm.fit_transform(X)
# dtype_in and dtype_out should be consistent
assert Xt.dtype == dtype_out, ('transform dtype: {} - original dtype: {}'
.format(Xt.dtype, X.dtype))
def test_convergence_dtype_consistency():
# float 64 transformer
X_64 = Xdigits[:100].astype(np.float64)
rbm_64 = BernoulliRBM(n_components=16, batch_size=5, n_iter=5,
random_state=42)
Xt_64 = rbm_64.fit_transform(X_64)
# float 32 transformer
X_32 = Xdigits[:100].astype(np.float32)
rbm_32 = BernoulliRBM(n_components=16, batch_size=5, n_iter=5,
random_state=42)
Xt_32 = rbm_32.fit_transform(X_32)
# results and attributes should be close enough in 32 bit and 64 bit
assert_allclose(Xt_64, Xt_32,
rtol=1e-06, atol=0)
assert_allclose(rbm_64.intercept_hidden_, rbm_32.intercept_hidden_,
rtol=1e-06, atol=0)
assert_allclose(rbm_64.intercept_visible_, rbm_32.intercept_visible_,
rtol=1e-05, atol=0)
assert_allclose(rbm_64.components_, rbm_32.components_,
rtol=1e-03, atol=0)
assert_allclose(rbm_64.h_samples_, rbm_32.h_samples_)
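# --- Illustrative sketch (added; not part of the original test file) --------
# Typical use of the estimator exercised by these tests: treat the RBM's hidden
# unit activations as learned features.  The hyperparameters are arbitrary.
def _rbm_feature_sketch():
    rbm = BernoulliRBM(n_components=32, learning_rate=0.05,
                       batch_size=10, n_iter=10, random_state=0)
    hidden = rbm.fit_transform(Xdigits[:200])
    return hidden  # shape (200, 32), hidden activation probabilities in [0, 1]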
|
bsd-3-clause
|
shenzebang/scikit-learn
|
examples/linear_model/plot_ols.py
|
220
|
1940
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the residual sum of squares and the variance score are also
calculated.
"""
print(__doc__)
# Code source: Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
% np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
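# --- Illustrative sketch (added; not part of the original example) ----------
# The same fit written as the ordinary least-squares normal equations, with an
# explicit column of ones for the intercept; this is the problem that
# LinearRegression solves above (the variable names here are illustrative).
X_design = np.c_[np.ones(len(diabetes_X_train)), diabetes_X_train]
beta = np.linalg.solve(X_design.T.dot(X_design),
                       X_design.T.dot(diabetes_y_train))
# beta[0] is (approximately) regr.intercept_ and beta[1] is regr.coef_[0]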
|
bsd-3-clause
|
dssg/wikienergy
|
disaggregator/build/pandas/pandas/core/panel.py
|
1
|
50840
|
"""
Contains data structures designed for manipulating panel (3-dimensional) data
"""
# pylint: disable=E1103,W0231,W0212,W0621
from __future__ import division
from pandas.compat import (map, zip, range, lrange, lmap, u, OrderedDict,
OrderedDefaultdict)
from pandas import compat
import sys
import warnings
import numpy as np
from pandas.core.common import (PandasError, _try_sort, _default_index,
_infer_dtype_from_scalar, notnull)
from pandas.core.categorical import Categorical
from pandas.core.index import (Index, MultiIndex, _ensure_index,
_get_combined_index)
from pandas.core.indexing import _maybe_droplevels, _is_list_like
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays,
create_block_manager_from_blocks)
from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame, _shared_docs
from pandas.tools.util import cartesian_product
from pandas import compat
from pandas.util.decorators import (deprecate, Appender, Substitution,
deprecate_kwarg)
import pandas.core.common as com
import pandas.core.ops as ops
import pandas.core.nanops as nanops
import pandas.computation.expressions as expressions
from pandas import lib
_shared_doc_kwargs = dict(
axes='items, major_axis, minor_axis',
klass="Panel",
axes_single_arg="{0,1,2,'items','major_axis','minor_axis'}")
_shared_doc_kwargs['args_transpose'] = ("three positional arguments: each one"
"of\n %s" %
_shared_doc_kwargs['axes_single_arg'])
def _ensure_like_indices(time, panels):
"""
Makes sure that time and panels are conformable
"""
n_time = len(time)
n_panel = len(panels)
u_panels = np.unique(panels) # this sorts!
u_time = np.unique(time)
if len(u_time) == n_time:
time = np.tile(u_time, len(u_panels))
if len(u_panels) == n_panel:
panels = np.repeat(u_panels, len(u_time))
return time, panels
def panel_index(time, panels, names=['time', 'panel']):
"""
Returns a multi-index suitable for a panel-like DataFrame
Parameters
----------
time : array-like
Time index, does not have to repeat
panels : array-like
Panel index, does not have to repeat
names : list, optional
List containing the names of the indices
Returns
-------
multi_index : MultiIndex
Time index is the first level, the panels are the second level.
Examples
--------
>>> years = range(1960,1963)
>>> panels = ['A', 'B', 'C']
>>> panel_idx = panel_index(years, panels)
>>> panel_idx
MultiIndex([(1960, 'A'), (1961, 'A'), (1962, 'A'), (1960, 'B'),
(1961, 'B'), (1962, 'B'), (1960, 'C'), (1961, 'C'),
(1962, 'C')], dtype=object)
or
>>> import numpy as np
>>> years = np.repeat(range(1960,1963), 3)
>>> panels = np.tile(['A', 'B', 'C'], 3)
>>> panel_idx = panel_index(years, panels)
>>> panel_idx
MultiIndex([(1960, 'A'), (1960, 'B'), (1960, 'C'), (1961, 'A'),
(1961, 'B'), (1961, 'C'), (1962, 'A'), (1962, 'B'),
(1962, 'C')], dtype=object)
"""
time, panels = _ensure_like_indices(time, panels)
time_factor = Categorical.from_array(time)
panel_factor = Categorical.from_array(panels)
labels = [time_factor.codes, panel_factor.codes]
levels = [time_factor.categories, panel_factor.categories]
return MultiIndex(levels, labels, sortorder=None, names=names,
verify_integrity=False)
class Panel(NDFrame):
"""
Represents wide format panel data, stored as 3-dimensional array
Parameters
----------
data : ndarray (items x major x minor), or dict of DataFrames
items : Index or array-like
axis=0
major_axis : Index or array-like
axis=1
minor_axis : Index or array-like
axis=2
dtype : dtype, default None
Data type to force, otherwise infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
"""
@property
def _constructor(self):
return type(self)
_constructor_sliced = DataFrame
def __init__(self, data=None, items=None, major_axis=None, minor_axis=None,
copy=False, dtype=None):
self._init_data(data=data, items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=copy, dtype=dtype)
def _init_data(self, data, copy, dtype, **kwargs):
"""
Generate ND initialization; axes are passed
as required objects to __init__
"""
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
passed_axes = [kwargs.get(a) for a in self._AXIS_ORDERS]
axes = None
if isinstance(data, BlockManager):
if any(x is not None for x in passed_axes):
axes = [x if x is not None else y
for x, y in zip(passed_axes, data.axes)]
mgr = data
elif isinstance(data, dict):
mgr = self._init_dict(data, passed_axes, dtype=dtype)
copy = False
dtype = None
elif isinstance(data, (np.ndarray, list)):
mgr = self._init_matrix(data, passed_axes, dtype=dtype, copy=copy)
copy = False
dtype = None
else: # pragma: no cover
raise PandasError('Panel constructor not properly called!')
NDFrame.__init__(self, mgr, axes=axes, copy=copy, dtype=dtype)
def _init_dict(self, data, axes, dtype=None):
haxis = axes.pop(self._info_axis_number)
# prefilter if haxis passed
if haxis is not None:
haxis = _ensure_index(haxis)
data = OrderedDict((k, v) for k, v
in compat.iteritems(data) if k in haxis)
else:
ks = list(data.keys())
if not isinstance(data, OrderedDict):
ks = _try_sort(ks)
haxis = Index(ks)
for k, v in compat.iteritems(data):
if isinstance(v, dict):
data[k] = self._constructor_sliced(v)
# extract axis for remaining axes & create the slicemap
raxes = [self._extract_axis(self, data, axis=i)
if a is None else a for i, a in enumerate(axes)]
raxes_sm = self._extract_axes_for_slice(self, raxes)
# shallow copy
arrays = []
haxis_shape = [len(a) for a in raxes]
for h in haxis:
v = values = data.get(h)
if v is None:
values = np.empty(haxis_shape, dtype=dtype)
values.fill(np.nan)
elif isinstance(v, self._constructor_sliced):
d = raxes_sm.copy()
d['copy'] = False
v = v.reindex(**d)
if dtype is not None:
v = v.astype(dtype)
values = v.values
arrays.append(values)
return self._init_arrays(arrays, haxis, [haxis] + raxes)
def _init_arrays(self, arrays, arr_names, axes):
return create_block_manager_from_arrays(arrays, arr_names, axes)
@classmethod
def from_dict(cls, data, intersect=False, orient='items', dtype=None):
"""
Construct Panel from dict of DataFrame objects
Parameters
----------
data : dict
{field : DataFrame}
intersect : boolean
Intersect indexes of input DataFrames
orient : {'items', 'minor'}, default 'items'
The "orientation" of the data. If the keys of the passed dict
should be the items of the result panel, pass 'items'
(default). Otherwise if the columns of the values of the passed
DataFrame objects should be the items (which in the case of
mixed-dtype data you should do), instead pass 'minor'
Returns
-------
Panel
"""
orient = orient.lower()
if orient == 'minor':
new_data = OrderedDefaultdict(dict)
for col, df in compat.iteritems(data):
for item, s in compat.iteritems(df):
new_data[item][col] = s
data = new_data
elif orient != 'items': # pragma: no cover
raise ValueError('Orientation must be one of {items, minor}.')
d = cls._homogenize_dict(cls, data, intersect=intersect, dtype=dtype)
ks = list(d['data'].keys())
if not isinstance(d['data'], OrderedDict):
ks = list(sorted(ks))
d[cls._info_axis_name] = Index(ks)
return cls(**d)
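    # Illustrative usage sketch (not part of the original pandas source; the
    # item names and shapes below are made up):
    # >>> import numpy as np
    # >>> from pandas import DataFrame, Panel
    # >>> frames = {'a': DataFrame(np.random.randn(3, 2)),
    # ...           'b': DataFrame(np.random.randn(3, 2))}
    # >>> Panel.from_dict(frames, orient='items').shape  # (items, major, minor)
    # (2, 3, 2)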
def __getitem__(self, key):
if isinstance(self._info_axis, MultiIndex):
return self._getitem_multilevel(key)
if lib.isscalar(key):
return super(Panel, self).__getitem__(key)
return self.ix[key]
def _getitem_multilevel(self, key):
info = self._info_axis
loc = info.get_loc(key)
if isinstance(loc, (slice, np.ndarray)):
new_index = info[loc]
result_index = _maybe_droplevels(new_index, key)
slices = [loc] + [slice(None) for x in range(
self._AXIS_LEN - 1)]
new_values = self.values[slices]
d = self._construct_axes_dict(self._AXIS_ORDERS[1:])
d[self._info_axis_name] = result_index
result = self._constructor(new_values, **d)
return result
else:
return self._get_item_cache(key)
def _init_matrix(self, data, axes, dtype=None, copy=False):
values = self._prep_ndarray(self, data, copy=copy)
if dtype is not None:
try:
values = values.astype(dtype)
except Exception:
raise ValueError('failed to cast to %s' % dtype)
shape = values.shape
fixed_axes = []
for i, ax in enumerate(axes):
if ax is None:
ax = _default_index(shape[i])
else:
ax = _ensure_index(ax)
fixed_axes.append(ax)
return create_block_manager_from_blocks([values], fixed_axes)
#----------------------------------------------------------------------
# Comparison methods
def _compare_constructor(self, other, func):
if not self._indexed_same(other):
raise Exception('Can only compare identically-labeled '
'same type objects')
new_data = {}
for col in self._info_axis:
new_data[col] = func(self[col], other[col])
d = self._construct_axes_dict(copy=False)
return self._constructor(data=new_data, **d)
#----------------------------------------------------------------------
# Magic methods
def __unicode__(self):
"""
Return a string representation for a particular Panel
Invoked by unicode(df) in py2 only.
Yields a Unicode String in both py2/py3.
"""
class_name = str(self.__class__)
shape = self.shape
dims = u('Dimensions: %s') % ' x '.join(
["%d (%s)" % (s, a) for a, s in zip(self._AXIS_ORDERS, shape)])
def axis_pretty(a):
v = getattr(self, a)
if len(v) > 0:
return u('%s axis: %s to %s') % (a.capitalize(),
com.pprint_thing(v[0]),
com.pprint_thing(v[-1]))
else:
return u('%s axis: None') % a.capitalize()
output = '\n'.join(
[class_name, dims] + [axis_pretty(a) for a in self._AXIS_ORDERS])
return output
def _get_plane_axes_index(self, axis):
"""
        Get the names of my plane axes, i.e. the two lower-dimensional axes
        (as compared with higher level planes) that supply the index and
        columns of a DataFrame slice along the given axis
"""
axis_name = self._get_axis_name(axis)
if axis_name == 'major_axis':
index = 'minor_axis'
columns = 'items'
        elif axis_name == 'minor_axis':
index = 'major_axis'
columns = 'items'
elif axis_name == 'items':
index = 'major_axis'
columns = 'minor_axis'
return index, columns
def _get_plane_axes(self, axis):
"""
        Get my plane axes, i.e. the two lower-dimensional axes
        (as compared with higher level planes) that form the
        DataFrame axes of a slice along the given axis
"""
return [ self._get_axis(axi) for axi in self._get_plane_axes_index(axis) ]
fromDict = from_dict
def to_sparse(self, fill_value=None, kind='block'):
"""
Convert to SparsePanel
Parameters
----------
fill_value : float, default NaN
kind : {'block', 'integer'}
Returns
-------
y : SparseDataFrame
"""
from pandas.core.sparse import SparsePanel
frames = dict(compat.iteritems(self))
return SparsePanel(frames, items=self.items,
major_axis=self.major_axis,
minor_axis=self.minor_axis,
default_kind=kind,
default_fill_value=fill_value)
def to_excel(self, path, na_rep='', engine=None, **kwargs):
"""
Write each DataFrame in Panel to a separate excel sheet
Parameters
----------
path : string or ExcelWriter object
File path or existing ExcelWriter
na_rep : string, default ''
Missing data representation
engine : string, default None
write engine to use - you can also set this via the options
``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
Other Parameters
----------------
float_format : string, default None
Format string for floating point numbers
cols : sequence, optional
Columns to write
header : boolean or list of string, default True
Write out column names. If a list of string is given it is
assumed to be aliases for the column names
index : boolean, default True
Write row names (index)
index_label : string or sequence, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : upper left cell row to dump data frame
startcol : upper left cell column to dump data frame
Notes
-----
Keyword arguments (and na_rep) are passed to the ``to_excel`` method
for each DataFrame written.
"""
from pandas.io.excel import ExcelWriter
if isinstance(path, compat.string_types):
writer = ExcelWriter(path, engine=engine)
else:
writer = path
kwargs['na_rep'] = na_rep
for item, df in compat.iteritems(self):
name = str(item)
df.to_excel(writer, name, **kwargs)
writer.save()
def as_matrix(self):
self._consolidate_inplace()
return self._data.as_matrix()
#----------------------------------------------------------------------
# Getting and setting elements
def get_value(self, *args, **kwargs):
"""
Quickly retrieve single value at (item, major, minor) location
Parameters
----------
item : item label (panel item)
major : major axis label (panel item row)
minor : minor axis label (panel item column)
takeable : interpret the passed labels as indexers, default False
Returns
-------
value : scalar value
"""
nargs = len(args)
nreq = self._AXIS_LEN
# require an arg for each axis
if nargs != nreq:
raise TypeError('There must be an argument for each axis, you gave'
' {0} args, but {1} are required'.format(nargs,
nreq))
takeable = kwargs.get('takeable')
if takeable is True:
lower = self._iget_item_cache(args[0])
else:
lower = self._get_item_cache(args[0])
return lower.get_value(*args[1:], takeable=takeable)
def set_value(self, *args, **kwargs):
"""
Quickly set single value at (item, major, minor) location
Parameters
----------
item : item label (panel item)
major : major axis label (panel item row)
minor : minor axis label (panel item column)
value : scalar
takeable : interpret the passed labels as indexers, default False
Returns
-------
panel : Panel
If label combo is contained, will be reference to calling Panel,
otherwise a new object
"""
# require an arg for each axis and the value
nargs = len(args)
nreq = self._AXIS_LEN + 1
if nargs != nreq:
raise TypeError('There must be an argument for each axis plus the '
'value provided, you gave {0} args, but {1} are '
'required'.format(nargs, nreq))
takeable = kwargs.get('takeable')
try:
if takeable is True:
lower = self._iget_item_cache(args[0])
else:
lower = self._get_item_cache(args[0])
lower.set_value(*args[1:], takeable=takeable)
return self
except KeyError:
axes = self._expand_axes(args)
d = self._construct_axes_dict_from(self, axes, copy=False)
result = self.reindex(**d)
args = list(args)
likely_dtype, args[-1] = _infer_dtype_from_scalar(args[-1])
made_bigger = not np.array_equal(
axes[0], self._info_axis)
# how to make this logic simpler?
if made_bigger:
com._possibly_cast_item(result, args[0], likely_dtype)
return result.set_value(*args)
def _box_item_values(self, key, values):
if self.ndim == values.ndim:
result = self._constructor(values)
# a dup selection will yield a full ndim
if result._get_axis(0).is_unique:
result = result[key]
return result
d = self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:])
return self._constructor_sliced(values, **d)
def __setitem__(self, key, value):
shape = tuple(self.shape)
if isinstance(value, self._constructor_sliced):
value = value.reindex(
**self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:]))
mat = value.values
elif isinstance(value, np.ndarray):
if value.shape != shape[1:]:
raise ValueError(
'shape of value must be {0}, shape of given object was '
'{1}'.format(shape[1:], tuple(map(int, value.shape))))
mat = np.asarray(value)
elif np.isscalar(value):
dtype, value = _infer_dtype_from_scalar(value)
mat = np.empty(shape[1:], dtype=dtype)
mat.fill(value)
else:
raise TypeError('Cannot set item of type: %s' % str(type(value)))
mat = mat.reshape(tuple([1]) + shape[1:])
NDFrame._set_item(self, key, mat)
def _unpickle_panel_compat(self, state): # pragma: no cover
"Unpickle the panel"
_unpickle = com._unpickle_array
vals, items, major, minor = state
items = _unpickle(items)
major = _unpickle(major)
minor = _unpickle(minor)
values = _unpickle(vals)
wp = Panel(values, items, major, minor)
self._data = wp._data
def conform(self, frame, axis='items'):
"""
Conform input DataFrame to align with chosen axis pair.
Parameters
----------
frame : DataFrame
axis : {'items', 'major', 'minor'}
Axis the input corresponds to. E.g., if axis='major', then
the frame's columns would be items, and the index would be
values of the minor axis
Returns
-------
DataFrame
"""
axes = self._get_plane_axes(axis)
return frame.reindex(**self._extract_axes_for_slice(self, axes))
def head(self, n=5):
raise NotImplementedError
def tail(self, n=5):
raise NotImplementedError
def _needs_reindex_multi(self, axes, method, level):
""" don't allow a multi reindex on Panel or above ndim """
return False
def dropna(self, axis=0, how='any', inplace=False, **kwargs):
"""
        Drop DataFrames (2D slices) from the Panel, holding the passed axis constant
Parameters
----------
axis : int, default 0
Axis to hold constant. E.g. axis=1 will drop major_axis entries
having a certain amount of NA data
how : {'all', 'any'}, default 'any'
'any': one or more values are NA in the DataFrame along the
axis. For 'all' they all must be.
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
dropped : Panel
"""
axis = self._get_axis_number(axis)
values = self.values
mask = com.notnull(values)
for ax in reversed(sorted(set(range(self._AXIS_LEN)) - set([axis]))):
mask = mask.sum(ax)
per_slice = np.prod(values.shape[:axis] + values.shape[axis + 1:])
if how == 'all':
cond = mask > 0
else:
cond = mask == per_slice
new_ax = self._get_axis(axis)[cond]
result = self.reindex_axis(new_ax, axis=axis)
if inplace:
self._update_inplace(result)
else:
return result
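    # Illustrative sketch of dropna (not part of the original source; toy data):
    # >>> import numpy as np
    # >>> arr = np.random.randn(2, 3, 4)
    # >>> arr[0, 0, 0] = np.nan
    # >>> Panel(arr).dropna(axis=1, how='any').shape  # major_axis label 0 dropped
    # (2, 2, 4)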
def _combine(self, other, func, axis=0):
if isinstance(other, Panel):
return self._combine_panel(other, func)
elif isinstance(other, DataFrame):
return self._combine_frame(other, func, axis=axis)
elif np.isscalar(other):
return self._combine_const(other, func)
def _combine_const(self, other, func):
new_values = func(self.values, other)
d = self._construct_axes_dict()
return self._constructor(new_values, **d)
def _combine_frame(self, other, func, axis=0):
index, columns = self._get_plane_axes(axis)
axis = self._get_axis_number(axis)
other = other.reindex(index=index, columns=columns)
if axis == 0:
new_values = func(self.values, other.values)
elif axis == 1:
new_values = func(self.values.swapaxes(0, 1), other.values.T)
new_values = new_values.swapaxes(0, 1)
elif axis == 2:
new_values = func(self.values.swapaxes(0, 2), other.values)
new_values = new_values.swapaxes(0, 2)
return self._constructor(new_values, self.items, self.major_axis,
self.minor_axis)
def _combine_panel(self, other, func):
items = self.items.union(other.items)
major = self.major_axis.union(other.major_axis)
minor = self.minor_axis.union(other.minor_axis)
# could check that everything's the same size, but forget it
this = self.reindex(items=items, major=major, minor=minor)
other = other.reindex(items=items, major=major, minor=minor)
result_values = func(this.values, other.values)
return self._constructor(result_values, items, major, minor)
def major_xs(self, key, copy=None):
"""
Return slice of panel along major axis
Parameters
----------
key : object
Major axis label
copy : boolean [deprecated]
Whether to make a copy of the data
Returns
-------
y : DataFrame
index -> minor axis, columns -> items
Notes
-----
major_xs is only for getting, not setting values.
MultiIndex Slicers is a generic way to get/set values on any level or levels
it is a superset of major_xs functionality, see :ref:`MultiIndex Slicers <advanced.mi_slicers>`
"""
if copy is not None:
warnings.warn("copy keyword is deprecated, "
"default is to return a copy or a view if possible")
return self.xs(key, axis=self._AXIS_LEN - 2)
def minor_xs(self, key, copy=None):
"""
Return slice of panel along minor axis
Parameters
----------
key : object
Minor axis label
copy : boolean [deprecated]
Whether to make a copy of the data
Returns
-------
y : DataFrame
index -> major axis, columns -> items
Notes
-----
minor_xs is only for getting, not setting values.
MultiIndex Slicers is a generic way to get/set values on any level or levels
it is a superset of minor_xs functionality, see :ref:`MultiIndex Slicers <advanced.mi_slicers>`
"""
if copy is not None:
warnings.warn("copy keyword is deprecated, "
"default is to return a copy or a view if possible")
return self.xs(key, axis=self._AXIS_LEN - 1)
def xs(self, key, axis=1, copy=None):
"""
Return slice of panel along selected axis
Parameters
----------
key : object
Label
        axis : {'items', 'major', 'minor'}, default 1/'major'
copy : boolean [deprecated]
Whether to make a copy of the data
Returns
-------
y : ndim(self)-1
Notes
-----
xs is only for getting, not setting values.
MultiIndex Slicers is a generic way to get/set values on any level or levels
it is a superset of xs functionality, see :ref:`MultiIndex Slicers <advanced.mi_slicers>`
"""
if copy is not None:
warnings.warn("copy keyword is deprecated, "
"default is to return a copy or a view if possible")
axis = self._get_axis_number(axis)
if axis == 0:
return self[key]
self._consolidate_inplace()
axis_number = self._get_axis_number(axis)
new_data = self._data.xs(key, axis=axis_number, copy=False)
result = self._construct_return_type(new_data)
copy = new_data.is_mixed_type
result._set_is_copy(self, copy=copy)
return result
_xs = xs
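    # Illustrative sketch of the cross sections above (not part of the original
    # source; wp is a made-up Panel):
    # >>> import numpy as np
    # >>> wp = Panel(np.random.randn(2, 3, 4))
    # >>> wp.major_xs(wp.major_axis[0]).shape  # index -> minor_axis, columns -> items
    # (4, 2)
    # >>> wp.minor_xs(wp.minor_axis[0]).shape  # index -> major_axis, columns -> items
    # (3, 2)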
def _ixs(self, i, axis=0):
"""
i : int, slice, or sequence of integers
axis : int
"""
ax = self._get_axis(axis)
key = ax[i]
# xs cannot handle a non-scalar key, so just reindex here
# if we have a multi-index and a single tuple, then its a reduction (GH 7516)
if not (isinstance(ax, MultiIndex) and isinstance(key, tuple)):
if _is_list_like(key):
indexer = {self._get_axis_name(axis): key}
return self.reindex(**indexer)
# a reduction
if axis == 0:
values = self._data.iget(i)
return self._box_item_values(key, values)
# xs by position
self._consolidate_inplace()
new_data = self._data.xs(i, axis=axis, copy=True, takeable=True)
return self._construct_return_type(new_data)
def groupby(self, function, axis='major'):
"""
Group data on given axis, returning GroupBy object
Parameters
----------
function : callable
            Mapping function for the chosen axis
axis : {'major', 'minor', 'items'}, default 'major'
Returns
-------
grouped : PanelGroupBy
"""
from pandas.core.groupby import PanelGroupBy
axis = self._get_axis_number(axis)
return PanelGroupBy(self, function, axis=axis)
def to_frame(self, filter_observations=True):
"""
Transform wide format into long (stacked) format as DataFrame whose
columns are the Panel's items and whose index is a MultiIndex formed
of the Panel's major and minor axes.
Parameters
----------
filter_observations : boolean, default True
Drop (major, minor) pairs without a complete set of observations
across all the items
Returns
-------
y : DataFrame
"""
_, N, K = self.shape
if filter_observations:
# shaped like the return DataFrame
mask = com.notnull(self.values).all(axis=0)
# size = mask.sum()
selector = mask.ravel()
else:
# size = N * K
selector = slice(None, None)
data = {}
for item in self.items:
data[item] = self[item].values.ravel()[selector]
def construct_multi_parts(idx, n_repeat, n_shuffle=1):
axis_idx = idx.to_hierarchical(n_repeat, n_shuffle)
labels = [x[selector] for x in axis_idx.labels]
levels = axis_idx.levels
names = axis_idx.names
return labels, levels, names
def construct_index_parts(idx, major=True):
levels = [idx]
if major:
labels = [np.arange(N).repeat(K)[selector]]
names = idx.name or 'major'
else:
labels = np.arange(K).reshape(1, K)[np.zeros(N, dtype=int)]
labels = [labels.ravel()[selector]]
names = idx.name or 'minor'
names = [names]
return labels, levels, names
if isinstance(self.major_axis, MultiIndex):
major_labels, major_levels, major_names = construct_multi_parts(
self.major_axis, n_repeat=K)
else:
major_labels, major_levels, major_names = construct_index_parts(
self.major_axis)
if isinstance(self.minor_axis, MultiIndex):
minor_labels, minor_levels, minor_names = construct_multi_parts(
self.minor_axis, n_repeat=N, n_shuffle=K)
else:
minor_labels, minor_levels, minor_names = construct_index_parts(
self.minor_axis, major=False)
levels = major_levels + minor_levels
labels = major_labels + minor_labels
names = major_names + minor_names
index = MultiIndex(levels=levels, labels=labels,
names=names, verify_integrity=False)
return DataFrame(data, index=index, columns=self.items)
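    # Illustrative sketch of to_frame (not part of the original source):
    # >>> import numpy as np
    # >>> wp = Panel(np.random.randn(2, 3, 4))
    # >>> wp.to_frame().shape  # (major * minor) rows, one column per item
    # (12, 2)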
to_long = deprecate('to_long', to_frame)
toLong = deprecate('toLong', to_frame)
def apply(self, func, axis='major', **kwargs):
"""
Applies function along input axis of the Panel
Parameters
----------
func : function
Function to apply to each combination of 'other' axes
e.g. if axis = 'items', then the combination of major_axis/minor_axis
will be passed a Series
axis : {'major', 'minor', 'items'}
Additional keyword arguments will be passed as keywords to the function
Examples
--------
>>> p.apply(numpy.sqrt) # returns a Panel
>>> p.apply(lambda x: x.sum(), axis=0) # equiv to p.sum(0)
>>> p.apply(lambda x: x.sum(), axis=1) # equiv to p.sum(1)
>>> p.apply(lambda x: x.sum(), axis=2) # equiv to p.sum(2)
Returns
-------
result : Pandas Object
"""
if kwargs and not isinstance(func, np.ufunc):
f = lambda x: func(x, **kwargs)
else:
f = func
# 2d-slabs
if isinstance(axis, (tuple,list)) and len(axis) == 2:
return self._apply_2d(f, axis=axis)
axis = self._get_axis_number(axis)
# try ufunc like
if isinstance(f, np.ufunc):
try:
result = np.apply_along_axis(func, axis, self.values)
return self._wrap_result(result, axis=axis)
except (AttributeError):
pass
# 1d
return self._apply_1d(f, axis=axis)
def _apply_1d(self, func, axis):
axis_name = self._get_axis_name(axis)
ax = self._get_axis(axis)
ndim = self.ndim
values = self.values
# iter thru the axes
slice_axis = self._get_axis(axis)
slice_indexer = [0]*(ndim-1)
indexer = np.zeros(ndim, 'O')
indlist = list(range(ndim))
indlist.remove(axis)
indexer[axis] = slice(None, None)
indexer.put(indlist, slice_indexer)
planes = [ self._get_axis(axi) for axi in indlist ]
shape = np.array(self.shape).take(indlist)
# all the iteration points
points = cartesian_product(planes)
results = []
for i in range(np.prod(shape)):
# construct the object
pts = tuple([ p[i] for p in points ])
indexer.put(indlist, slice_indexer)
obj = Series(values[tuple(indexer)],index=slice_axis,name=pts)
result = func(obj)
results.append(result)
# increment the indexer
slice_indexer[-1] += 1
n = -1
while (slice_indexer[n] >= shape[n]) and (n > (1-ndim)):
slice_indexer[n-1] += 1
slice_indexer[n] = 0
n -= 1
# empty object
if not len(results):
return self._constructor(**self._construct_axes_dict())
# same ndim as current
if isinstance(results[0],Series):
arr = np.vstack([ r.values for r in results ])
arr = arr.T.reshape(tuple([len(slice_axis)] + list(shape)))
tranp = np.array([axis]+indlist).argsort()
arr = arr.transpose(tuple(list(tranp)))
return self._constructor(arr,**self._construct_axes_dict())
# ndim-1 shape
results = np.array(results).reshape(shape)
if results.ndim == 2 and axis_name != self._info_axis_name:
results = results.T
planes = planes[::-1]
return self._construct_return_type(results,planes)
def _apply_2d(self, func, axis):
""" handle 2-d slices, equiv to iterating over the other axis """
ndim = self.ndim
axis = [ self._get_axis_number(a) for a in axis ]
# construct slabs, in 2-d this is a DataFrame result
indexer_axis = list(range(ndim))
for a in axis:
indexer_axis.remove(a)
indexer_axis = indexer_axis[0]
slicer = [ slice(None,None) ] * ndim
ax = self._get_axis(indexer_axis)
results = []
for i, e in enumerate(ax):
slicer[indexer_axis] = i
sliced = self.iloc[tuple(slicer)]
obj = func(sliced)
results.append((e,obj))
return self._construct_return_type(dict(results))
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
if numeric_only:
raise NotImplementedError(
'Panel.{0} does not implement numeric_only.'.format(name))
axis_name = self._get_axis_name(axis)
axis_number = self._get_axis_number(axis_name)
f = lambda x: op(x, axis=axis_number, skipna=skipna, **kwds)
result = f(self.values)
axes = self._get_plane_axes(axis_name)
if result.ndim == 2 and axis_name != self._info_axis_name:
result = result.T
return self._construct_return_type(result, axes)
def _construct_return_type(self, result, axes=None, **kwargs):
""" return the type for the ndim of the result """
ndim = getattr(result,'ndim',None)
# need to assume they are the same
if ndim is None:
if isinstance(result,dict):
ndim = getattr(list(compat.itervalues(result))[0],'ndim',None)
            # a scalar result
if ndim is None:
ndim = 0
# have a dict, so top-level is +1 dim
else:
ndim += 1
# scalar
if ndim == 0:
return Series(result)
# same as self
elif self.ndim == ndim:
""" return the construction dictionary for these axes """
if axes is None:
return self._constructor(result)
return self._constructor(result, **self._construct_axes_dict())
# sliced
elif self.ndim == ndim + 1:
if axes is None:
return self._constructor_sliced(result)
return self._constructor_sliced(
result, **self._extract_axes_for_slice(self, axes))
raise PandasError('invalid _construct_return_type [self->%s] '
'[result->%s]' % (self, result))
def _wrap_result(self, result, axis):
axis = self._get_axis_name(axis)
axes = self._get_plane_axes(axis)
if result.ndim == 2 and axis != self._info_axis_name:
result = result.T
return self._construct_return_type(result, axes)
@Appender(_shared_docs['reindex'] % _shared_doc_kwargs)
def reindex(self, items=None, major_axis=None, minor_axis=None, **kwargs):
major_axis = (major_axis if major_axis is not None
else kwargs.pop('major', None))
minor_axis = (minor_axis if minor_axis is not None
else kwargs.pop('minor', None))
return super(Panel, self).reindex(items=items, major_axis=major_axis,
minor_axis=minor_axis, **kwargs)
@Appender(_shared_docs['rename'] % _shared_doc_kwargs)
def rename(self, items=None, major_axis=None, minor_axis=None, **kwargs):
major_axis = (major_axis if major_axis is not None
else kwargs.pop('major', None))
minor_axis = (minor_axis if minor_axis is not None
else kwargs.pop('minor', None))
return super(Panel, self).rename(items=items, major_axis=major_axis,
minor_axis=minor_axis, **kwargs)
@Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)
def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
limit=None, fill_value=np.nan):
return super(Panel, self).reindex_axis(labels=labels, axis=axis,
method=method, level=level,
copy=copy, limit=limit,
fill_value=fill_value)
@Appender(_shared_docs['transpose'] % _shared_doc_kwargs)
def transpose(self, *args, **kwargs):
return super(Panel, self).transpose(*args, **kwargs)
def count(self, axis='major'):
"""
Return number of observations over requested axis.
Parameters
----------
axis : {'items', 'major', 'minor'} or {0, 1, 2}
Returns
-------
count : DataFrame
"""
i = self._get_axis_number(axis)
values = self.values
mask = np.isfinite(values)
result = mask.sum(axis=i,dtype='int64')
return self._wrap_result(result, axis)
@deprecate_kwarg(old_arg_name='lags', new_arg_name='periods')
def shift(self, periods=1, freq=None, axis='major'):
"""
        Shift major or minor axis by the specified number of periods. Unlike
        DataFrame.shift, periods shifted off the end are dropped rather than
        filled with NaN.
Parameters
----------
        periods : int
axis : {'major', 'minor'}
Returns
-------
shifted : Panel
"""
if freq:
return self.tshift(periods, freq, axis=axis)
if axis == 'items':
raise ValueError('Invalid axis')
return super(Panel, self).slice_shift(periods, axis=axis)
def tshift(self, periods=1, freq=None, axis='major', **kwds):
return super(Panel, self).tshift(periods, freq, axis, **kwds)
def join(self, other, how='left', lsuffix='', rsuffix=''):
"""
        Join items with other Panel, aligning on the major and minor axes
Parameters
----------
other : Panel or list of Panels
Index should be similar to one of the columns in this one
how : {'left', 'right', 'outer', 'inner'}
How to handle indexes of the two objects. Default: 'left'
for joining on index, None otherwise
* left: use calling frame's index
* right: use input frame's index
* outer: form union of indexes
* inner: use intersection of indexes
lsuffix : string
Suffix to use from left frame's overlapping columns
rsuffix : string
Suffix to use from right frame's overlapping columns
Returns
-------
joined : Panel
"""
from pandas.tools.merge import concat
if isinstance(other, Panel):
join_major, join_minor = self._get_join_index(other, how)
this = self.reindex(major=join_major, minor=join_minor)
other = other.reindex(major=join_major, minor=join_minor)
merged_data = this._data.merge(other._data, lsuffix, rsuffix)
return self._constructor(merged_data)
else:
if lsuffix or rsuffix:
raise ValueError('Suffixes not supported when passing '
'multiple panels')
if how == 'left':
how = 'outer'
join_axes = [self.major_axis, self.minor_axis]
elif how == 'right':
raise ValueError('Right join not supported with multiple '
'panels')
else:
join_axes = None
return concat([self] + list(other), axis=0, join=how,
join_axes=join_axes, verify_integrity=True)
def update(self, other, join='left', overwrite=True, filter_func=None,
raise_conflict=False):
"""
Modify Panel in place using non-NA values from passed
Panel, or object coercible to Panel. Aligns on items
Parameters
----------
other : Panel, or object coercible to Panel
join : How to join individual DataFrames
{'left', 'right', 'outer', 'inner'}, default 'left'
overwrite : boolean, default True
If True then overwrite values for common keys in the calling panel
filter_func : callable(1d-array) -> 1d-array<boolean>, default None
Can choose to replace values other than NA. Return True for values
that should be updated
raise_conflict : bool
If True, will raise an error if a DataFrame and other both
contain data in the same place.
"""
if not isinstance(other, self._constructor):
other = self._constructor(other)
axis_name = self._info_axis_name
axis_values = self._info_axis
other = other.reindex(**{axis_name: axis_values})
for frame in axis_values:
self[frame].update(other[frame], join, overwrite, filter_func,
raise_conflict)
def _get_join_index(self, other, how):
if how == 'left':
join_major, join_minor = self.major_axis, self.minor_axis
elif how == 'right':
join_major, join_minor = other.major_axis, other.minor_axis
elif how == 'inner':
join_major = self.major_axis.intersection(other.major_axis)
join_minor = self.minor_axis.intersection(other.minor_axis)
elif how == 'outer':
join_major = self.major_axis.union(other.major_axis)
join_minor = self.minor_axis.union(other.minor_axis)
return join_major, join_minor
# miscellaneous data creation
@staticmethod
def _extract_axes(self, data, axes, **kwargs):
""" return a list of the axis indicies """
return [self._extract_axis(self, data, axis=i, **kwargs) for i, a
in enumerate(axes)]
@staticmethod
def _extract_axes_for_slice(self, axes):
""" return the slice dictionary for these axes """
return dict([(self._AXIS_SLICEMAP[i], a)
for i, a in zip(self._AXIS_ORDERS[self._AXIS_LEN -
len(axes):], axes)])
@staticmethod
def _prep_ndarray(self, values, copy=True):
if not isinstance(values, np.ndarray):
values = np.asarray(values)
# NumPy strings are a pain, convert to object
if issubclass(values.dtype.type, compat.string_types):
values = np.array(values, dtype=object, copy=True)
else:
if copy:
values = values.copy()
if values.ndim != self._AXIS_LEN:
raise ValueError("The number of dimensions required is {0}, "
"but the number of dimensions of the "
"ndarray given was {1}".format(self._AXIS_LEN,
values.ndim))
return values
@staticmethod
def _homogenize_dict(self, frames, intersect=True, dtype=None):
"""
Conform set of _constructor_sliced-like objects to either
an intersection of indices / columns or a union.
Parameters
----------
frames : dict
intersect : boolean, default True
Returns
-------
        dict of aligned results & indices
"""
result = dict()
        # caller may pass a dict or an OrderedDict; preserve that type
if isinstance(frames, OrderedDict):
result = OrderedDict()
adj_frames = OrderedDict()
for k, v in compat.iteritems(frames):
if isinstance(v, dict):
adj_frames[k] = self._constructor_sliced(v)
else:
adj_frames[k] = v
axes = self._AXIS_ORDERS[1:]
axes_dict = dict([(a, ax) for a, ax in zip(axes, self._extract_axes(
self, adj_frames, axes, intersect=intersect))])
reindex_dict = dict(
[(self._AXIS_SLICEMAP[a], axes_dict[a]) for a in axes])
reindex_dict['copy'] = False
for key, frame in compat.iteritems(adj_frames):
if frame is not None:
result[key] = frame.reindex(**reindex_dict)
else:
result[key] = None
axes_dict['data'] = result
return axes_dict
@staticmethod
def _extract_axis(self, data, axis=0, intersect=False):
index = None
if len(data) == 0:
index = Index([])
elif len(data) > 0:
raw_lengths = []
indexes = []
have_raw_arrays = False
have_frames = False
for v in data.values():
if isinstance(v, self._constructor_sliced):
have_frames = True
indexes.append(v._get_axis(axis))
elif v is not None:
have_raw_arrays = True
raw_lengths.append(v.shape[axis])
if have_frames:
index = _get_combined_index(indexes, intersect=intersect)
if have_raw_arrays:
lengths = list(set(raw_lengths))
if len(lengths) > 1:
raise ValueError('ndarrays must match shape on axis %d' % axis)
if have_frames:
if lengths[0] != len(index):
raise AssertionError('Length of data and index must match')
else:
index = Index(np.arange(lengths[0]))
if index is None:
index = Index([])
return _ensure_index(index)
@classmethod
def _add_aggregate_operations(cls, use_numexpr=True):
""" add the operations to the cls; evaluate the doc strings again """
        # doc string substitutions
_agg_doc = """
Wrapper method for %%s
Parameters
----------
other : %s or %s""" % (cls._constructor_sliced.__name__, cls.__name__) + """
axis : {""" + ', '.join(cls._AXIS_ORDERS) + "}" + """
Axis to broadcast over
Returns
-------
""" + cls.__name__ + "\n"
def _panel_arith_method(op, name, str_rep=None, default_axis=None,
fill_zeros=None, **eval_kwargs):
def na_op(x, y):
try:
result = expressions.evaluate(op, str_rep, x, y,
raise_on_error=True,
**eval_kwargs)
except TypeError:
result = op(x, y)
# handles discrepancy between numpy and numexpr on division/mod
# by 0 though, given that these are generally (always?)
# non-scalars, I'm not sure whether it's worth it at the moment
result = com._fill_zeros(result, x, y, name, fill_zeros)
return result
@Substitution(name)
@Appender(_agg_doc)
def f(self, other, axis=0):
return self._combine(other, na_op, axis=axis)
f.__name__ = name
return f
# add `div`, `mul`, `pow`, etc..
ops.add_flex_arithmetic_methods(
cls, _panel_arith_method, use_numexpr=use_numexpr,
flex_comp_method=ops._comp_method_PANEL)
Panel._setup_axes(axes=['items', 'major_axis', 'minor_axis'],
info_axis=0,
stat_axis=1,
aliases={'major': 'major_axis',
'minor': 'minor_axis'},
slicers={'major_axis': 'index',
'minor_axis': 'columns'})
ops.add_special_arithmetic_methods(Panel, **ops.panel_special_funcs)
Panel._add_aggregate_operations()
Panel._add_numeric_operations()
WidePanel = Panel
LongPanel = DataFrame
|
mit
|
ifuding/Kaggle
|
SVPC/Code/philly/FreshStart.py
|
1
|
9617
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
# print(os.listdir("../input"))
import lightgbm as lgb
from sklearn.model_selection import *
from sklearn.metrics import mean_squared_error, make_scorer
from scipy.stats import mode, skew, kurtosis, entropy
from sklearn.ensemble import ExtraTreesRegressor
# import matplotlib.pyplot as plt
# import seaborn as sns
# import dask.dataframe as dd
# from dask.multiprocessing import get
from tqdm import tqdm, tqdm_notebook
tqdm.pandas(tqdm_notebook)
import concurrent.futures
import time
import pickle
from leak_cols import *
leak_list = LEAK_LIST
# Any results you write to the current directory are saved as output.
path = "../../Data/"
train = pd.read_csv(path + "train.csv", index_col = 'ID')
test = pd.read_csv(path + "test.csv", index_col = 'ID')
debug = False
if debug:
train = train[:1000]
test = test[:1000]
IsTrain = False
transact_cols = [f for f in train.columns if f not in ["ID", "target"]]
y = np.log1p(train["target"]).values
cols = ['f190486d6', '58e2e02e6', 'eeb9cd3aa', '9fd594eec', '6eef030c1',
'15ace8c9f', 'fb0f5dbfe', '58e056e12', '20aa07010', '024c577b9',
'd6bb78916', 'b43a7cfd5', '58232a6fb', '1702b5bf0', '324921c7b',
'62e59a501', '2ec5b290f', '241f0f867', 'fb49e4212', '66ace2992',
'f74e8f13d', '5c6487af1', '963a49cdc', '26fc93eb7', '1931ccfdd',
'703885424', '70feb1494', '491b9ee45', '23310aa6f', 'e176a204a',
'6619d81fc', '1db387535', 'fc99f9426', '91f701ba2', '0572565c2',
'190db8488', 'adb64ff71', 'c47340d97', 'c5a231d81', '0ff32eb98']
from multiprocessing import Pool
CPU_CORES = 1
NZ_NUM = 3
def _get_leak(df, cols, search_ind, lag=0):
""" To get leak value, we do following:
1. Get string of all values after removing first two time steps
2. For all rows we shift the row by two steps and again make a string
3. Just find rows where string from 2 matches string from 1
    4. Get the 1st time step of the row found in 3 (currently, there is an additional condition to only fetch the value if we got exactly one match in step 3)"""
f1 = [] #cols[:((lag+2) * -1)]
f2 = [] #cols[(lag+2):]
for ef in leak_list:
f1 += ef[:((lag+2) * -1)]
f2 += ef[(lag+2):]
series_str = df[f2]
nz = series_str.apply(lambda x: len(x[x!=0]), axis=1)
series_str = series_str[nz >= NZ_NUM]
series_str = series_str.apply(lambda x: "_".join(x.round(2).astype(str)), axis=1)
series_str = series_str.drop_duplicates(keep = False) #[(~series_str.duplicated(keep = False)) | (df[cols[lag]] != 0)]
series_shifted_str = df.loc[search_ind, f1].apply(lambda x: "_".join(x.round(2).astype(str)), axis=1)
target_rows = series_shifted_str.progress_apply(lambda x: np.where(x == series_str.values)[0])
# print(target_rows)
# del series_str, series_shifted_str
# target_vals = target_rows.apply(lambda x: df.loc[series_str.index[x[0]], cols[lag]] if len(x)==1 else 0)
target_vals = target_rows.apply(lambda x: df.loc[series_str.index[x[0]], cols[lag]] if len(x) == 1 else 0)
# if (len(x) > 0 and df.loc[series_str.index[x], cols[lag]].nunique() == 1) else 0)
return target_vals, lag
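# Illustrative toy sketch of the shift-and-match trick used in _get_leak above
# (made-up numbers; lag=0, so rows are compared at a two-step offset):
# >>> import pandas as pd
# >>> toy = pd.DataFrame({'c1': [7.0, 3.0], 'c2': [2.0, 4.0],
# ...                     'c3': [3.0, 5.0], 'c4': [4.0, 6.0]})
# >>> tail_key = toy[['c3', 'c4']].apply(lambda x: '_'.join(x.astype(str)), axis=1)
# >>> head_key = toy[['c1', 'c2']].apply(lambda x: '_'.join(x.astype(str)), axis=1)
# >>> head_key[1] == tail_key[0]  # row 1 trails row 0 by two time steps
# True
# >>> toy.loc[0, 'c1']  # so row 0's first value is the leak candidate for row 1
# 7.0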
def get_all_leak(df, cols=None, nlags=15):
"""
We just recursively fetch target value for different lags
"""
df = df.copy()
# print(df.head())
# with Pool(processes=CPU_CORES) as p:
if True:
begin_ind = 0
end_ind = 0
leak_target = pd.Series(0, index = df.index)
while begin_ind < nlags:
end_ind = min(begin_ind + CPU_CORES, nlags)
search_ind = (leak_target == 0)
# print(search_ind)
print('begin_ind: ', begin_ind, 'end_ind: ', end_ind, "search_ind_len: ", search_ind.sum())
# res = [p.apply_async(_get_leak, args=(df, cols, search_ind, i)) for i in range(begin_ind, end_ind)]
# for r in res:
# target_vals, lag = r.get()
# # print ('target_vale', target_vals.head())
# # leak_target[target_vals.index] = target_vals
# df['leak_target_' + str(lag)] = target_vals
target_vals, lag = _get_leak(df, cols, search_ind, begin_ind)
df['leak_target_' + str(lag)] = target_vals
for i in range(begin_ind, end_ind):
leak_target[leak_target == 0] = df.loc[leak_target == 0, 'leak_target_' + str(i)]
leak_train = 0 #leak_target[train.index]
leak_train_len = 0 #leak_train[leak_train != 0].shape[0]
leak_test_len = 0 #leak_target[test.index][leak_target != 0].shape[0]
leak_train_right_len = 0 #leak_train[leak_train.round(0) == train['target'].round(0)].shape[0]
leak_train_right_ratio = 0 #leak_train_right_len / leak_train_len
if IsTrain:
leak_train = leak_target[train.index]
# print (leak_train.head())
leak_train_len = leak_train[leak_train != 0].shape[0]
leak_train_right_len = leak_train[leak_train.round(0) == train['target'].round(0)].shape[0]
leak_train_right_ratio = leak_train_right_len / leak_train_len
else:
leak_test_len = leak_target[test.index][leak_target != 0].shape[0]
print('Find leak in train and test: ', leak_train_len, leak_test_len, \
"leak train right: ", leak_train_right_len, leak_train_right_ratio)
begin_ind = end_ind
# for i in range(nlags):
# df.loc[df['leak_target'] == 0, 'leak_target'] = df.loc[df['leak_target'] == 0, 'leak_target_' + str(i)]
df['leak_target'] = leak_target
return df
def get_pred(data, lag=2):
d1 = data[FEATURES[:-lag]].apply(tuple, axis=1).to_frame().rename(columns={0: 'key'})
d2 = data[FEATURES[lag:]].apply(tuple, axis=1).to_frame().rename(columns={0: 'key'})
d2['pred'] = data[FEATURES[lag - 2]]
d3 = d2[~d2.duplicated(['key'], keep=False)]
return d1.merge(d3, how='left', on='key').pred.fillna(0)
def get_all_pred(data, max_lag):
target = pd.Series(index=data.index, data=np.zeros(data.shape[0]))
for lag in range(2, max_lag + 1):
pred = get_pred(data, lag)
mask = (target == 0) & (pred != 0)
target[mask] = pred[mask]
return target
test["target"] = 0 #train["target"].mean()
# all_df = pd.concat([train[["ID", "target"] + cols], test[["ID", "target"]+ cols]]).reset_index(drop=True)
# all_df = pd.concat([train[["target"] + cols], test[["target"]+ cols]]) #.reset_index(drop=True)
# all_df.head()
NLAGS = 38 #Increasing this might help push score a bit
used_col = ["target"] + [col for cols in leak_list for col in cols]
print ('used_col length: ', len(used_col))
if IsTrain:
all_df = get_all_leak(train[used_col], cols=cols, nlags=NLAGS)
else:
all_df = get_all_leak(test[used_col], cols=cols, nlags=NLAGS)
if IsTrain:
all_df[['target', 'leak_target']].to_csv(path + 'train_add_featrure_set_target_leaktarget_' + str(NLAGS) + "_" + str(NZ_NUM) + '.csv')
else:
all_df[['target', 'leak_target']].to_csv(path + 'test_add_featrure_set_target_leaktarget_' + str(NLAGS) + "_" + str(NZ_NUM) + '.csv')
# with open(path + 'leak_target_' + str(NLAGS) + '.pickle', 'wb+') as handle:
# pickle.dump(all_df[['target', 'leak_target']], handle, protocol=pickle.HIGHEST_PROTOCOL)
sub = pd.read_csv(path + 'sub_2018_08_13_03_19_33.csv', index_col = 'ID')
leak_target = all_df['leak_target'][test.index]
# print(leak_target)
sub.loc[leak_target[leak_target != 0].index, 'target'] = leak_target[leak_target != 0]
if not IsTrain:
time_label = time.strftime('_%Y_%m_%d_%H_%M_%S', time.gmtime())
sub.to_csv(path + 'leak_sub_' + str(NLAGS) + "_" + time_label + '.csv')
exit(0)
leaky_cols = ["leaked_target_"+str(i) for i in range(NLAGS)]
train = train.join(all_df.set_index("ID")[leaky_cols], on="ID", how="left")
test = test.join(all_df.set_index("ID")[leaky_cols], on="ID", how="left")
train[["target"]+leaky_cols].head(10)
train["nonzero_mean"] = train[transact_cols].apply(lambda x: np.expm1(np.log1p(x[x!=0]).mean()), axis=1)
test["nonzero_mean"] = test[transact_cols].apply(lambda x: np.expm1(np.log1p(x[x!=0]).mean()), axis=1)
# We start with the 1st lag target and recursively fill zeros
train["compiled_leak"] = 0
test["compiled_leak"] = 0
for i in range(NLAGS):
train.loc[train["compiled_leak"] == 0, "compiled_leak"] = train.loc[train["compiled_leak"] == 0, "leaked_target_"+str(i)]
test.loc[test["compiled_leak"] == 0, "compiled_leak"] = test.loc[test["compiled_leak"] == 0, "leaked_target_"+str(i)]
print("Leak values found in train and test ", sum(train["compiled_leak"] > 0), sum(test["compiled_leak"] > 0))
print("% of correct leaks values in train ", sum(train["compiled_leak"] == train["target"])/sum(train["compiled_leak"] > 0))
# train.loc[train["compiled_leak"] == 0, "compiled_leak"] = train.loc[train["compiled_leak"] == 0, "nonzero_mean"]
# test.loc[test["compiled_leak"] == 0, "compiled_leak"] = test.loc[test["compiled_leak"] == 0, "nonzero_mean"]
from sklearn.metrics import mean_squared_error
np.sqrt(mean_squared_error(y, np.log1p(train["compiled_leak"]).fillna(14.49)))
#submission
sub = test[["ID"]]
sub["target"] = test["compiled_leak"]
sub.to_csv(path + "baseline_submission_with_leaks.csv", index=False)
|
apache-2.0
|
semio/ddf_utils
|
ddf_utils/io.py
|
1
|
7157
|
# -*- coding: utf-8 -*-
"""io functions for ddf files"""
import os
import shutil
import json
import threading
import time
import typing
from urllib.parse import urlsplit
from io import BytesIO
import pandas as pd
import requests as req
from ddf_utils.str import format_float_digits
from ddf_utils.package import get_datapackage
# helper for dumping datapackage json
def dump_json(path, obj):
"""convenient function to dump a dictionary object to json"""
with open(path, 'w+') as f:
json.dump(obj, f, ensure_ascii=False, indent=4)
f.close()
# TODO: integrate with Ingredient.serve
def serve_datapoint(df_: pd.DataFrame, out_dir, concept, copy=True,
by: typing.Iterable = None,
formatter: typing.Callable = format_float_digits, **kwargs):
"""save a pandas dataframe to datapoint file.
the file path of csv will be out_dir/ddf--datapoints--$concept--$by.csv
addition keyword arguments can be passed to `pd.DataFrame.to_csv()` function.
"""
if copy:
df = df_.copy()
else:
df = df_
# formatting the concept column
if formatter is not None:
df[concept] = df[concept].map(formatter)
if by is None:
by = df.index.names
by = '--'.join(by)
path = os.path.join(out_dir, 'ddf--datapoints--{}--by--{}.csv'.format(concept, by))
df.to_csv(path, **kwargs)
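# Example usage (a hedged sketch; the concept name and index names are made up):
# >>> dp = pd.DataFrame({'geo': ['usa'], 'time': [2000], 'population': [282.2]})
# >>> serve_datapoint(dp.set_index(['geo', 'time']), '.', 'population')
# (writes ./ddf--datapoints--population--by--geo--time.csv)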
def serve_concept():
pass
def serve_entity():
pass
def open_google_spreadsheet(docid):
"""read google spreadsheet into excel io object"""
tmpl_xls = "https://docs.google.com/spreadsheets/d/{docid}/export?format=xlsx&id={docid}"
url = tmpl_xls.format(docid=docid)
res = req.get(url)
if res.ok:
return BytesIO(res.content)
return None
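# Example usage (sketch; the document id is a placeholder):
# >>> xls = open_google_spreadsheet('SOME_DOC_ID')
# >>> df = pd.read_excel(xls) if xls is not None else None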
def cleanup(path, how='ddf', exclude=None, use_default_exclude=True):
"""remove all ddf files in the given path"""
default_exclude = ['etl', 'lang', 'langsplit', 'datapackage.json', 'README.md', 'assets']
if exclude and not isinstance(exclude, list):
if isinstance(exclude, tuple):
            exclude = list(exclude)  # list() would split a str into chars, and [exclude] would nest a tuple
else:
exclude = [exclude]
if use_default_exclude:
if exclude:
for e in default_exclude:
exclude.append(e)
else:
exclude = default_exclude
if how == 'ddf':
for f in os.listdir(path):
# only keep dot files and etl/ lang/ langsplit/ and datapackage.json
if f not in exclude and not f.startswith('.'):
p = os.path.join(path, f)
if os.path.isdir(p):
shutil.rmtree(p)
else:
os.remove(p)
# TODO: think a best way to handle metadata in datapackage.json
# if os.path.exists(os.path.join(path, 'datapackage.json')):
# os.remove(os.path.join(path, 'datapackage.json'))
if how == 'lang':
if os.path.exists(os.path.join(path, 'lang')):
shutil.rmtree(os.path.join(path, 'lang'))
if how == 'langsplit':
if os.path.exists(os.path.join(path, 'langsplit')):
shutil.rmtree(os.path.join(path, 'langsplit'))
def download_csv(urls, out_path):
"""download csv files"""
def download(url_, out_path_):
r = req.get(url_, stream=True)
total_length = int(r.headers.get('content-length'))
if total_length == 0:
return
fn = urlsplit(url_).path.split('/')[-1]
print('writing to: {}\n'.format(fn), end='')
with open(os.path.join(out_path, fn), 'wb') as fd:
for chunk in r.iter_content(chunk_size=128):
fd.write(chunk)
    def create_thread(url_, out_path_):
        download_thread = threading.Thread(target=download, args=(url_, out_path_))
        download_thread.start()
        return download_thread
    threads = []
    for url in urls:
        threads.append(create_thread(url, out_path))
# wait until all downloads are done
is_alive = [t.is_alive() for t in threads]
while any(is_alive):
time.sleep(1)
is_alive = [t.is_alive() for t in threads]
def csvs_to_ddf(files, out_path):
"""convert raw files to ddfcsv
Args
----
files: list
a list of file paths to build ddf csv
out_path: `str`
the directory to put the ddf dataset
"""
import re
from os.path import join
from ddf_utils.str import to_concept_id
concepts_df = pd.DataFrame([['name', 'Name', 'string']],
columns=['concept', 'name', 'concept_type'])
concepts_df = concepts_df.set_index('concept')
all_entities = dict()
pattern = r'indicators--by--([ 0-9a-zA-Z_-]*).csv'
for f in files:
data = pd.read_csv(f)
basename = os.path.basename(f)
keys = re.match(pattern, basename).groups()[0].split('--')
keys_alphanum = list(map(to_concept_id, keys))
# check if there is a time column. Assume last column is time.
try:
pd.to_datetime(data[keys[-1]], format='%Y')
except (ValueError, pd.tslib.OutOfBoundsDatetime):
has_time = False
else:
has_time = True
if has_time:
ent_keys = keys[:-1]
else:
ent_keys = keys
# set concept type
for col in data.columns:
concept = to_concept_id(col)
if col in keys:
if col in ent_keys:
t = 'entity_domain'
else:
t = 'time'
else:
t = 'measure'
concepts_df.loc[concept] = [col, t]
for ent in ent_keys:
ent_df = data[[ent]].drop_duplicates().copy()
ent_concept = to_concept_id(ent)
ent_df.columns = ['name']
ent_df[ent_concept] = ent_df.name.map(to_concept_id)
if ent_concept not in all_entities.keys():
all_entities[ent_concept] = ent_df
else:
all_entities[ent_concept] = pd.concat([all_entities[ent_concept], ent_df],
ignore_index=True)
data = data.set_index(keys)
for c in data:
# output datapoints
df = data[c].copy()
df = df.reset_index()
for k in keys[:-1]:
df[k] = df[k].map(to_concept_id)
df.columns = df.columns.map(to_concept_id)
(df.dropna()
.to_csv(join(out_path,
'ddf--datapoints--{}--by--{}.csv'.format(
to_concept_id(c), '--'.join(keys_alphanum))),
index=False))
# output concepts
concepts_df.to_csv(join(out_path, 'ddf--concepts.csv'))
# output entities
for c, df in all_entities.items():
df.to_csv(join(out_path, 'ddf--entities--{}.csv'.format(c)), index=False)
dp = get_datapackage(out_path, use_existing=False)
dump_json(os.path.join(out_path, 'datapackage.json'), dp)
return
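# Example usage (a hedged sketch; the paths are hypothetical, the input file
# name must match the 'indicators--by--...' pattern parsed above, and the
# output directory must already exist):
# >>> csvs_to_ddf(['raw/indicators--by--country--year.csv'], 'ddf_out')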
|
mit
|
jzt5132/scikit-learn
|
sklearn/metrics/scorer.py
|
211
|
13141
|
"""
The :mod:`sklearn.metrics.scorer` submodule implements a flexible
interface for model selection and evaluation using
arbitrary score functions.
A scorer object is a callable that can be passed to
:class:`sklearn.grid_search.GridSearchCV` or
:func:`sklearn.cross_validation.cross_val_score` as the ``scoring`` parameter,
to specify how a model should be evaluated.
The signature of the call is ``(estimator, X, y)`` where ``estimator``
is the model to be evaluated, ``X`` is the test data and ``y`` is the
ground truth labeling (or ``None`` in the case of unsupervised models).
"""
# Authors: Andreas Mueller <[email protected]>
# Lars Buitinck <[email protected]>
# Arnaud Joly <[email protected]>
# License: Simplified BSD
from abc import ABCMeta, abstractmethod
from functools import partial
import numpy as np
from . import (r2_score, median_absolute_error, mean_absolute_error,
mean_squared_error, accuracy_score, f1_score,
roc_auc_score, average_precision_score,
precision_score, recall_score, log_loss)
from .cluster import adjusted_rand_score
from ..utils.multiclass import type_of_target
from ..externals import six
from ..base import is_regressor
class _BaseScorer(six.with_metaclass(ABCMeta, object)):
def __init__(self, score_func, sign, kwargs):
self._kwargs = kwargs
self._score_func = score_func
self._sign = sign
@abstractmethod
def __call__(self, estimator, X, y, sample_weight=None):
pass
def __repr__(self):
kwargs_string = "".join([", %s=%s" % (str(k), str(v))
for k, v in self._kwargs.items()])
return ("make_scorer(%s%s%s%s)"
% (self._score_func.__name__,
"" if self._sign > 0 else ", greater_is_better=False",
self._factory_args(), kwargs_string))
def _factory_args(self):
"""Return non-default make_scorer arguments for repr."""
return ""
class _PredictScorer(_BaseScorer):
def __call__(self, estimator, X, y_true, sample_weight=None):
"""Evaluate predicted target values for X relative to y_true.
Parameters
----------
estimator : object
            Trained estimator to use for scoring. Must have a predict
            method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to estimator.predict.
y_true : array-like
Gold standard target values for X.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = estimator.predict(X)
if sample_weight is not None:
return self._sign * self._score_func(y_true, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y_true, y_pred,
**self._kwargs)
class _ProbaScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate predicted probabilities for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not probabilities.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = clf.predict_proba(X)
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_proba=True"
class _ThresholdScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate decision function output for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have either a
decision_function method or a predict_proba method; the output of
that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.decision_function or
clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not decision function values.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_type = type_of_target(y)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
if is_regressor(clf):
y_pred = clf.predict(X)
else:
try:
y_pred = clf.decision_function(X)
# For multi-output multi-class estimator
if isinstance(y_pred, list):
                    y_pred = np.vstack([p for p in y_pred]).T
except (NotImplementedError, AttributeError):
y_pred = clf.predict_proba(X)
if y_type == "binary":
y_pred = y_pred[:, 1]
elif isinstance(y_pred, list):
y_pred = np.vstack([p[:, -1] for p in y_pred]).T
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_threshold=True"
def get_scorer(scoring):
if isinstance(scoring, six.string_types):
try:
scorer = SCORERS[scoring]
except KeyError:
raise ValueError('%r is not a valid scoring value. '
'Valid options are %s'
% (scoring, sorted(SCORERS.keys())))
else:
scorer = scoring
return scorer
def _passthrough_scorer(estimator, *args, **kwargs):
"""Function that wraps estimator.score"""
return estimator.score(*args, **kwargs)
def check_scoring(estimator, scoring=None, allow_none=False):
"""Determine scorer from user options.
A TypeError will be thrown if the estimator cannot be scored.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
allow_none : boolean, optional, default: False
If no scoring is specified and the estimator has no score function, we
can either return None or raise an exception.
Returns
-------
scoring : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
"""
has_scoring = scoring is not None
if not hasattr(estimator, 'fit'):
raise TypeError("estimator should a be an estimator implementing "
"'fit' method, %r was passed" % estimator)
elif has_scoring:
return get_scorer(scoring)
elif hasattr(estimator, 'score'):
return _passthrough_scorer
elif allow_none:
return None
else:
raise TypeError(
"If no scoring is specified, the estimator passed should "
"have a 'score' method. The estimator %r does not." % estimator)
def make_scorer(score_func, greater_is_better=True, needs_proba=False,
needs_threshold=False, **kwargs):
"""Make a scorer from a performance metric or loss function.
This factory function wraps scoring functions for use in GridSearchCV
and cross_val_score. It takes a score function, such as ``accuracy_score``,
    ``mean_squared_error``, ``adjusted_rand_score`` or ``average_precision``
and returns a callable that scores an estimator's output.
Read more in the :ref:`User Guide <scoring>`.
Parameters
----------
score_func : callable,
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
greater_is_better : boolean, default=True
Whether score_func is a score function (default), meaning high is good,
or a loss function, meaning low is good. In the latter case, the
scorer object will sign-flip the outcome of the score_func.
needs_proba : boolean, default=False
Whether score_func requires predict_proba to get probability estimates
out of a classifier.
needs_threshold : boolean, default=False
Whether score_func takes a continuous decision certainty.
This only works for binary classification using estimators that
have either a decision_function or predict_proba method.
For example ``average_precision`` or the area under the roc curve
can not be computed using discrete predictions alone.
**kwargs : additional arguments
Additional parameters to be passed to score_func.
Returns
-------
scorer : callable
Callable object that returns a scalar score; greater is better.
Examples
--------
>>> from sklearn.metrics import fbeta_score, make_scorer
>>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
>>> ftwo_scorer
make_scorer(fbeta_score, beta=2)
>>> from sklearn.grid_search import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
... scoring=ftwo_scorer)
"""
sign = 1 if greater_is_better else -1
if needs_proba and needs_threshold:
raise ValueError("Set either needs_proba or needs_threshold to True,"
" but not both.")
if needs_proba:
cls = _ProbaScorer
elif needs_threshold:
cls = _ThresholdScorer
else:
cls = _PredictScorer
return cls(score_func, sign, kwargs)
# Standard regression scores
r2_scorer = make_scorer(r2_score)
mean_squared_error_scorer = make_scorer(mean_squared_error,
greater_is_better=False)
mean_absolute_error_scorer = make_scorer(mean_absolute_error,
greater_is_better=False)
median_absolute_error_scorer = make_scorer(median_absolute_error,
greater_is_better=False)
# Standard Classification Scores
accuracy_scorer = make_scorer(accuracy_score)
f1_scorer = make_scorer(f1_score)
# Score functions that need decision values
roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True,
needs_threshold=True)
average_precision_scorer = make_scorer(average_precision_score,
needs_threshold=True)
precision_scorer = make_scorer(precision_score)
recall_scorer = make_scorer(recall_score)
# Score function for probabilistic classification
log_loss_scorer = make_scorer(log_loss, greater_is_better=False,
needs_proba=True)
# Clustering scores
adjusted_rand_scorer = make_scorer(adjusted_rand_score)
SCORERS = dict(r2=r2_scorer,
median_absolute_error=median_absolute_error_scorer,
mean_absolute_error=mean_absolute_error_scorer,
mean_squared_error=mean_squared_error_scorer,
accuracy=accuracy_scorer, roc_auc=roc_auc_scorer,
average_precision=average_precision_scorer,
log_loss=log_loss_scorer,
adjusted_rand_score=adjusted_rand_scorer)
for name, metric in [('precision', precision_score),
('recall', recall_score), ('f1', f1_score)]:
SCORERS[name] = make_scorer(metric)
for average in ['macro', 'micro', 'samples', 'weighted']:
qualified_name = '{0}_{1}'.format(name, average)
SCORERS[qualified_name] = make_scorer(partial(metric, pos_label=None,
average=average))
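# --- Usage sketch (illustrative only, not part of the original module) ---
# A minimal, hedged example of how a scorer built by make_scorer is consumed
# by cross-validation; the estimator, dataset and metric below are arbitrary
# demonstration choices.
if __name__ == "__main__":
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression
    from sklearn.cross_validation import cross_val_score
    from sklearn.metrics import fbeta_score

    X_demo, y_demo = make_classification(n_samples=200, random_state=0)
    ftwo_scorer = make_scorer(fbeta_score, beta=2)  # sign=+1, predict-based
    # cross_val_score calls ftwo_scorer(estimator, X_test, y_test) on each fold
    print(cross_val_score(LogisticRegression(), X_demo, y_demo,
                          scoring=ftwo_scorer, cv=3))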
|
bsd-3-clause
|
archman/genopt
|
genopt/dakopt.py
|
1
|
30122
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" General optimization module by utilizing DAKOTA
* orbit correction: ``DakotaOC``
Tong Zhang <[email protected]>
2016-10-23 14:26:13 PM EDT
"""
import numpy as np
import matplotlib.pyplot as plt
import sys
import os
import multiprocessing
import subprocess
import time
from shutil import rmtree
import tempfile
from flame import Machine
import dakutils
from dakutils import test_one_element
try:
from phantasy.library.model import generate_latfile
except:
from dakutils import generate_latfile
class DakotaBase(object):
"""Base class for general optimization, initialized parameters.
Keyword Arguments
-----------------
workdir : str
        Root dir for dakota input/output files; the default one is
        created in /tmp, or define some dir path.
dakexec : str
Full path of dakota executable, the default one should be *dakota*,
or define the full path.
dakhead : str
Prefixed name for input/output files of *dakota*, the default one is
*dakota*.
keep : bool
        Whether to keep the working directory (i.e. defined by *workdir*),
        default is False.
"""
def __init__(self, **kws):
# workdir
wdir = kws.get('workdir')
if wdir is None:
#self._workdir = os.path.join('/tmp', 'dakota_' + dakutils.random_string(6))
self._workdir = tempfile.mkdtemp(prefix='dakota_')
else:
self._workdir = wdir
if not os.path.isdir(wdir):
os.makedirs(wdir)
# keep working data?
keepflag = kws.get('keep')
if keepflag is not None and keepflag:
self._keep = True
else:
self._keep = False
# dakexec
if kws.get('dakexec') is not None:
self._dakexec = kws.get('dakexec')
else:
self._dakexec = 'dakota'
# dakhead
if kws.get('dakhead') is not None:
self._dakhead = kws.get('dakhead')
else:
self._dakhead = 'dakota'
self._dakin = None
self._dakout = None
@property
def dakexec(self):
return self._dakexec
@dakexec.setter
def dakexec(self, dakexec):
self._dakexec = dakexec
@property
def workdir(self):
return self._workdir
@workdir.setter
def workdir(self, wdir):
self._workdir = wdir
@property
def dakhead(self):
return self._dakhead
@dakhead.setter
def dakhead(self, nprefix):
self._dakhead = nprefix
@property
def keep(self):
return self._keep
@keep.setter
def keep(self, f):
self._keep = f
def __del__(self):
if not self._keep:
try:
rmtree(self._workdir)
except:
pass
else:
print("work files are kept in: %s" % (self._workdir))
class DakotaOC(DakotaBase):
"""Dakota optimization class with orbit correction driver.
Parameters
----------
lat_file : str
Lattice file name.
    elem_bpm : list(int)
        List of element indices of BPMs.
    elem_cor : list(int)
        List of element indices of correctors, always in pairs
        (horizontal corrector followed by vertical corrector).
    elem_hcor : list(int)
        List of element indices of horizontal correctors.
    elem_vcor : list(int)
        List of element indices of vertical correctors.
ref_x0 : list(float)
Reference orbit in x, list of BPM readings.
ref_y0 : list(float)
Reference orbit in y, list of BPM readings.
ref_flag : str
String flag for objective functions:
- ``x``: :math:`\sum \Delta x^2`, :math:`\Delta x = x-x_0`;
- ``y``: :math:`\sum \Delta y^2`, :math:`\Delta y = y-y_0`;
- ``xy``: :math:`\sum \Delta x^2 + \sum \Delta y^2`.
model : str
Simulation model, 'flame' or 'impact'.
optdriver : str
Analysis driver for optimization, 'flamedriver_oc' by default.
Keyword Arguments
-----------------
workdir : str
        Root dir for dakota input/output files; the default one is
        created in /tmp, or define some dir path.
dakexec : str
Full path of dakota executable, the default one should be *dakota*,
or define the full path.
dakhead : str
Prefixed name for input/output files of *dakota*, the default one is
*dakota*.
keep : bool
        Whether to keep the working directory (i.e. defined by *workdir*),
        default is False.
Note
----
    ``elem_bpm`` should be treated as indices of elements with type name 'BPM';
    however, for simulation convenience, any element is acceptable,
    see :func:`set_bpms()`.
"""
def __init__(self,
lat_file=None,
elem_bpm=None,
elem_cor=None,
elem_hcor=None,
elem_vcor=None,
ref_x0=None,
ref_y0=None,
ref_flag=None,
model=None,
optdriver=None,
**kws):
super(self.__class__, self).__init__(**kws)
if lat_file is not None:
self._lat_file = os.path.realpath(os.path.expanduser(lat_file))
else: # use example lattice file
pass
self._elem_bpm = elem_bpm
self._elem_hcor, self._elem_vcor = elem_hcor, elem_vcor
if elem_cor is not None:
self._elem_hcor, self._elem_vcor = elem_cor[0::2], elem_cor[1::2]
elif elem_hcor is not None:
self._elem_hcor = elem_hcor
elif elem_vcor is not None:
self._elem_vcor = elem_vcor
self._ref_x0 = ref_x0
self._ref_y0 = ref_y0
self._ref_flag = "xy" if ref_flag is None else ref_flag
if model is None:
self._model = 'FLAME'
else:
self._model = model.upper()
if optdriver is None:
self._opt_driver = 'flamedriver_oc'
self.set_model()
self.create_machine(self._lat_file)
self.set_bpms(self._elem_bpm)
self.set_cors(self._elem_hcor, self._elem_vcor)
self.set_ref_x0(self._ref_x0)
self.set_ref_y0(self._ref_y0)
@property
def hcors(self):
return self._elem_hcor
@property
def vcors(self):
return self._elem_vcor
@property
def latfile(self):
return self._lat_file
@property
def ref_x0(self):
return self._ref_x0
@property
def ref_y0(self):
return self._ref_y0
@property
def ref_flag(self):
return self._ref_flag
@ref_flag.setter
def ref_flag(self, s):
self._ref_flag = s
@property
def bpms(self):
return self._elem_bpm
@latfile.setter
def latfile(self, latfile):
self._lat_file = latfile
@property
def optdriver(self):
return self._opt_driver
@optdriver.setter
def optdriver(self, driver):
self._opt_driver = driver
def get_machine(self):
""" get flame machine object for potential usage
:return: flame machine object or None
"""
try:
return self._machine
except:
return None
def create_machine(self, lat_file):
""" create machine instance with model configuration
* setup _machine
* setup _elem_bpm, _elem_cor or (_elem_hcor and _elem_vcor)
"""
if self._model == "FLAME":
self._create_flame_machine(lat_file)
elif self._model == "IMPACT":
self._create_impact_machine(lat_file)
def _create_flame_machine(self, lat_file):
try:
self._machine = Machine(open(lat_file, 'r'))
except IOError as e:
print("Failed to open {fn}: {info}".format(fn=e.filename,
info=e.args[-1]))
sys.exit(1)
except (RuntimeError, KeyError) as e:
print("Cannot parse lattice, " + e.args[-1])
sys.exit(1)
except:
print("Failed to create machine")
sys.exit(1)
def _create_impact_machine(self, lat_file):
pass
def set_ref_x0(self, ref_arr=None):
""" set reference orbit in x, if not set, use 0s
        :param ref_arr: array of reference orbit values,
                        size should be the same as the number of selected BPMs
"""
if ref_arr is None:
self._ref_x0 = [0]*len(self._elem_bpm)
else:
self._ref_x0 = ref_arr
def set_ref_y0(self, ref_arr=None):
""" set reference orbit in y, if not set, use 0s
        :param ref_arr: array of reference orbit values,
                        size should be the same as the number of selected BPMs
"""
if ref_arr is None:
self._ref_y0 = [0]*len(self._elem_bpm)
else:
self._ref_y0 = ref_arr
def set_bpms(self, bpm=None, pseudo_all=False):
""" set BPMs, and trying to set reference orbit ``(x,y)`` if ``x`` and ``y``
is of one unique value.
:param bpm: list of bpm indices, if None, use all BPMs
:param pseudo_all: if set True, will use all elements, ignore ``bpm`` parameter
"""
if bpm is None:
self._elem_bpm = self.get_all_bpms()
else:
self._elem_bpm = bpm
bpm_count = len(bpm)
if pseudo_all:
self._elem_bpm = "all"
bpm_count = len(self._machine)
try:
if test_one_element(self._ref_x0):
self.set_ref_x0([self._ref_x0[0]]*bpm_count)
else:
pass
if test_one_element(self._ref_y0):
self.set_ref_y0([self._ref_y0[0]]*bpm_count)
else:
pass
except:
pass
#print("Warning, not set reference orbit, requires 'set_ref_x0()' and 'set_ref_y0()'")
def set_cors(self, cor=None, hcor=None, vcor=None):
""" set correctors, if cor, hcor and vcor are None, use all correctors
if cor is not None, use cor, ignore hcor and vcor
:param cor: list of corrector indices, hcor, vcor,...
:param hcor: list of horizontal corrector indices
:param vcor: list of vertical corrector indices
"""
if cor is not None:
self._elem_hcor = cor[0::2]
self._elem_vcor = cor[1::2]
else:
if hcor is None and vcor is None:
self._elem_hcor = self.get_all_cors(type='h')
self._elem_vcor = self.get_all_cors(type='v')
else:
if hcor is not None:
self._elem_hcor = hcor
if vcor is not None:
self._elem_vcor = vcor
def set_model(self, **kws):
""" configure model
:param kws: only for impact, available keys:
"execpath": path of impact executable
"""
        if self._model == 'FLAME':
            pass  # nothing more needs to be done if model is 'FLAME'
        else:  # self._model is 'IMPACT'
execpath = kws.get('execpath')
if execpath is not None:
                self._impexec = os.path.realpath(os.path.expanduser(execpath))
else: # just use impact as the default name
self._impexec = "impact"
def get_all_bpms(self):
""" get list of all valid bpms indices
:return: a list of bpm indices
:Example:
>>> dakoc = DakotaOC('test/test.lat')
>>> print(dakoc.get_all_bpms())
"""
return self.get_elem_by_type(type='bpm')
def get_all_cors(self, type=None):
""" get list of all valid correctors indices
:param type: define corrector type, 'h': horizontal, 'v': vertical,
if not defined, return all correctors
:return: a list of corrector indices
:Example:
>>> dakoc = DakotaOC('test/test.lat')
>>> print(dakoc.get_all_cors())
"""
all_cors = self.get_elem_by_type(type='orbtrim')
if type is None:
return all_cors
elif type == 'h':
return all_cors[0::2]
elif type == 'v':
return all_cors[1::2]
else:
print("warning: unrecongnized corrector type.")
return all_cors
def get_elem_by_name(self, name):
""" get list of element(s) by name(s)
:param name: tuple or list of name(s)
:return: list of element indices
:Example:
>>> dakoc = DakotaOC('test/test.lat')
>>> names = ('LS1_CA01:BPM_D1144', 'LS1_WA01:BPM_D1155')
>>> idx = dakoc.get_elem_by_name(names)
>>> print(idx)
[18, 31]
"""
if isinstance(name, str):
name = (name, )
retval = [self._machine.find(name=n)[0] for n in name]
return retval
def get_elem_by_type(self, type):
""" get list of element(s) by type
:param type: string name of element type
:return: list of element indices
:Example:
>>> dakoc = DakotaOC('test/test.lat')
>>> type = 'bpm'
>>> idx = dakoc.get_elem_by_type(type)
>>> print(idx)
"""
retval = self._machine.find(type=type)
return retval
def get_all_elem(self):
""" get all elements from ``Machine`` object
:return: list of element indices
"""
return range(len(self._machine))
def gen_dakota_input(self, infile=None, debug=False):
""" generate dakota input file
:param infile: dakota input filename
:param debug: if True, generate a simple test input file
"""
if not debug:
dakinp = dakutils.DakotaInput()
#dakinp.set_template(name='oc')
dakinp.interface = self._oc_interface
dakinp.variables = self._oc_variables
dakinp.model = self._oc_model
dakinp.responses = self._oc_responses
dakinp.method = self._oc_method
dakinp.environment = self._oc_environ
else: # debug is True
dakinp = dakutils.DakotaInput()
if infile is None:
infile = self._dakhead + '.in'
inputfile = os.path.join(self._workdir, infile)
outputfile = inputfile.replace('.in', '.out')
self._dakin = inputfile
self._dakout = outputfile
dakinp.write(inputfile)
def set_variables(self, plist=None, initial=1e-4, lower=-0.01, upper=0.01):
""" setup variables block, that is setup ``oc_variables``
should be ready to invoke after ``set_cors()``
:param plist: list of defined parameters (``DakotaParam`` object),
automatically setup if not defined
:param initial: initial values for all variables, only valid when plist is None
:param lower: lower bound for all variables, only valid when plist is None
:param upper: upper bound for all variables, only valid when plist is None
"""
if plist is None:
if self._elem_hcor is None and self._elem_vcor is None:
print("No corrector is selected, set_cors() first.")
sys.exit(1)
else:
x_len = len(
self._elem_hcor) if self._elem_hcor is not None else 0
y_len = len(
self._elem_vcor) if self._elem_vcor is not None else 0
n = x_len + y_len
oc_variables = []
oc_variables.append('continuous_design = {0}'.format(n))
oc_variables.append(' initial_point' + "{0:>14e}".format(
initial) * n)
oc_variables.append(' lower_bounds ' + "{0:>14e}".format(
lower) * n)
oc_variables.append(' upper_bounds ' + "{0:>14e}".format(
upper) * n)
xlbls = ["'x{0:03d}'".format(i) for i in range(1, x_len + 1)]
ylbls = ["'y{0:03d}'".format(i) for i in range(1, y_len + 1)]
oc_variables.append(' descriptors ' + ''.join(
["{0:>14s}".format(lbl) for lbl in xlbls + ylbls]))
self._oc_variables = oc_variables
else: # plist = [p1, p2, ...]
n = len(plist)
initial_point_string = ' '.join(["{0:>14e}".format(p.initial) for p in plist])
lower_bounds_string = ' '.join(["{0:>14e}".format(p.lower) for p in plist])
upper_bounds_string = ' '.join(["{0:>14e}".format(p.upper) for p in plist])
descriptors_string = ' '.join(["{0:>14s}".format(p.label) for p in plist])
oc_variables = []
oc_variables.append('continuous_design = {0}'.format(n))
oc_variables.append(' initial_point' + initial_point_string)
oc_variables.append(' lower_bounds ' + lower_bounds_string)
oc_variables.append(' upper_bounds ' + upper_bounds_string)
oc_variables.append(' descriptors ' + descriptors_string)
self._oc_variables = oc_variables
def set_interface(self, interface=None, **kws):
""" setup interface block, that is setup ``oc_interface``
should be ready to invoke after ``set_cors`` and ``set_bpms``
:param interface: ``DakotaInterface`` object, automatically setup if not defined
"""
if interface is None:
oc_interface = dakutils.DakotaInterface(mode='fork', latfile=self._lat_file,
driver='flamedriver_oc',
bpms=self._elem_bpm,
hcors=self._elem_hcor,
vcors=self._elem_vcor,
ref_x0=self._ref_x0,
ref_y0=self._ref_y0,
ref_flag=self._ref_flag,
deactivate='active_set_vector')
else:
oc_interface = interface
self._oc_interface = oc_interface.get_config()
def set_model(self, model=None, **kws):
""" setup model block, that is setup ``oc_model``
:param model: ``DakotaModel`` object, automatically setup if not defined
"""
if model is None:
oc_model = dakutils.DakotaModel()
else:
oc_model = model
self._oc_model = oc_model.get_config()
def set_responses(self, responses=None, **kws):
""" setup responses block, that is setup ``oc_responses``
:param responses: ``DakotaResponses`` object, automatically setup if not defined
"""
if responses is None:
oc_responses = dakutils.DakotaResponses(gradient='numerical')
else:
oc_responses = responses
self._oc_responses = oc_responses.get_config()
def set_environ(self, environ=None):
""" setup environment block, that is setup ``oc_environ``
:param environ: ``DakotaEnviron`` object, automatically setup if not defined
"""
if environ is None:
oc_environ = dakutils.DakotaEnviron(tabfile='dakota.dat')
else:
oc_environ = environ
self._oc_environ= oc_environ.get_config()
def set_method(self, method=None):
""" setup method block, that is setup ``oc_method``
:param method: ``DakotaMethod`` object, automatically setup if not defined
"""
if method is None:
oc_method = dakutils.DakotaMethod(method='cg')
else:
oc_method = method
self._oc_method = oc_method.get_config()
def run(self, mpi=False, np=None, echo=True):
""" run optimization
:param mpi: if True, run DAKOTA in parallel mode, False by default
:param np: number of processes to use, only valid when ``mpi`` is True
:param echo: suppress output if False, True by default
"""
if mpi:
max_core_num = multiprocessing.cpu_count()
if np is None or int(np) > max_core_num:
np = max_core_num
run_command = "mpirun -np {np} {dakexec} -i {dakin} -o {dakout}".format(
np=np,
dakexec=self._dakexec,
dakin=self._dakin,
dakout=self._dakout)
else: # mpi is False
run_command = "{dakexec} -i {dakin} -o {dakout}".format(
dakexec=self._dakexec,
dakin=self._dakin,
dakout=self._dakout)
if echo:
subprocess.call(run_command.split())
else:
devnull = open(os.devnull, 'w')
subprocess.call(run_command.split(), stdout=devnull)
def get_opt_results(self, outfile=None, rtype='dict', label='plain'):
""" extract optimized results from dakota output
:param outfile: file name of dakota output file,
'dakota.out' by default
:param rtype: type of returned results, 'dict' or 'list',
'dict' by default
:param label: label types for returned variables, only valid when rtype 'dict',
'plain' by default:
* *'plain'*: variable labels with format of ``x1``, ``x2``, ``y1``, ``y2``, etc.
e.g. ``{'x1': v1, 'y1': v2}``
* *'fancy'*: variable labels with the name defined in lattice file,
e.g. ``'LS1_CA01:DCH_D1131'``, dict returned sample:
``{'LS1_CA01:DCH_D1131': {'id':9, 'config':{'theta_x':v1}}}``
.. note:: The ``fancy`` option will make re-configuring flame machine in a more
convenient way, such as:
>>> opt_cors = get_opt_results(label='fancy')
>>> for k,v in opt_cors.items():
>>> m.reconfigure(v['id'], v['config'])
>>> # here m is an instance of flame.Machine class
>>>
        :return: by default return a dict of optimized results with items
            like ``"x1": 0.1``, or a fancier format when *label* is set to
            'fancy'; if rtype='list', return a list of values with the keys
            sorted in ascending order.
        :Example:
        >>> opt_vars = get_opt_results(outfile='flame_oc.out', rtype='dict')
        >>> print(opt_vars)
        {'x2': 0.0020782814353, 'x1': -0.0017913264033}
        >>> opt_vars = get_opt_results(outfile='flame_oc.out', rtype='list')
        >>> print(opt_vars)
        [-0.0017913264033, 0.0020782814353]
"""
if outfile is None:
outfile=self._dakout
if rtype == 'list':
return dakutils.get_opt_results(outfile=outfile, rtype=rtype)
else:
rdict = dakutils.get_opt_results(outfile=outfile, rtype=rtype)
if label == 'plain':
return rdict
else: # label = 'fancy'
val_x = [v for (k,v) in sorted(rdict.items()) if k.startswith('x')]
val_y = [v for (k,v) in sorted(rdict.items()) if k.startswith('y')]
vx = [{'id': i, 'config':{'theta_x': v}} for i,v in zip(self._elem_hcor, val_x)]
vy = [{'id': i, 'config':{'theta_y': v}} for i,v in zip(self._elem_vcor, val_y)]
kx = [self._machine.conf(i)['name'] for i in self._elem_hcor]
ky = [self._machine.conf(i)['name'] for i in self._elem_vcor]
return dict(zip(kx+ky, vx+vy))
def plot(self, outfile=None, figsize=(10, 8), dpi=90, fontsize=16, **kws):
"""Show orbit.
Parameters
----------
outfile : str
Output file of dakota.
figsize : tuple
Figure size, ``(h, w)``.
dpi : int
Figure dpi.
fontsize : int
Label font size.
"""
if outfile is None:
opt_vars = self.get_opt_results()
else:
opt_vars = self.get_opt_results(outfile=outfile)
idx_h, idx_v = self._elem_hcor, self._elem_vcor
zpos, x, y, mtmp = self.get_orbit((idx_h, idx_v), opt_vars)
fig = plt.figure(figsize=figsize, dpi=dpi, **kws)
ax = fig.add_subplot(111)
linex, = ax.plot(zpos, x, 'r-', label='$x$', lw=2)
liney, = ax.plot(zpos, y, 'b-', label='$y$', lw=2)
ax.set_xlabel('$z\,\mathrm{[m]}$', fontsize=fontsize)
ax.set_ylabel('$\mathrm{Orbit\;[mm]}$', fontsize=fontsize)
ax.legend(loc=3)
plt.show()
def get_orbit(self, idx=None, val=None, outfile=None):
""" calculate the orbit with given configurations
:param idx: (idx_hcor, idx_vcor), tuple of list of indices of h/v cors
        :param val: values for each corrector, h/v
:param outfile: filename to save the data
:return: tuple of zpos, env_x, env_y, machine
"""
if idx is None:
idx_x, idx_y = self._elem_hcor, self._elem_vcor
else:
idx_x, idx_y = idx
if val is None:
val = self.get_opt_results()
else:
val = val
m = self._machine
val_x = [v for (k, v) in sorted(val.items()) if k.startswith('x')]
val_y = [v for (k, v) in sorted(val.items()) if k.startswith('y')]
        for (eid, e_val) in zip(idx_x, val_x):
            m.reconfigure(eid, {'theta_x': e_val})
        for (eid, e_val) in zip(idx_y, val_y):
            m.reconfigure(eid, {'theta_y': e_val})
s = m.allocState({})
r = m.propagate(s, 0, len(m), observe=range(len(m)))
ob_arr = range(len(m)) if self._elem_bpm == 'all' else self._elem_bpm
zpos = np.array([r[i][1].pos for i in ob_arr])
x, y = np.array(
[[r[i][1].moment0_env[j] for i in ob_arr] for j in [0, 2]])
if outfile is not None:
np.savetxt(outfile,
np.vstack((zpos, x, y)).T,
fmt="%22.14e",
comments='# orbit data saved at ' + time.ctime() + '\n',
header="#{0:^22s} {1:^22s} {2:^22s}".format(
"zpos [m]", "x [mm]", "y [mm]"),
delimiter=' ')
return zpos, x, y, m
def simple_run(self, method='cg', mpi=None, np=None, echo=True, **kws):
"""Run optimization after ``set_bpms()`` and ``set_cors()``, by using
default configuration and make full use of computing resources.
Parameters
----------
method : str
            Optimization method, 'cg' or 'ps'; 'cg' by default.
mpi : bool
If True, run DAKOTA in parallel mode, False by default.
np : int
Number of processes to use, only valid when ``mpi`` is True.
echo : bool
Suppress output if False, True by default.
Keyword Arguments
-----------------
step : float
Gradient step, 1e-6 by default.
iternum : int
Max iteration number, 20 by default.
evalnum : int
            Max function evaluation number, 1000 by default.
"""
if method == 'cg':
max_iter_num = kws.get('iternum', 20)
step = kws.get('step', 1e-6)
md = dakutils.DakotaMethod(method='cg',
max_iterations=max_iter_num)
self.set_method(method=md)
re = dakutils.DakotaResponses(gradient='numerical', step=step)
self.set_responses(responses=re)
else: # 'ps'
max_eval_num = kws.get('evalnum', 1000)
md = dakutils.DakotaMethod(method='ps',
max_function_evaluations=max_eval_num)
self.set_method(method=md)
re = dakutils.DakotaResponses()
self.set_responses(responses=re)
self.set_environ()
self.set_model()
self.set_variables()
self.set_interface()
self.gen_dakota_input()
if mpi:
max_core_num = multiprocessing.cpu_count()
if np is None or int(np) > max_core_num:
np = max_core_num
self.run(mpi=mpi, np=np, echo=echo)
else:
self.run(echo=echo)
def get_opt_latfile(self, outfile='out.lat'):
""" get optimized lattice file for potential next usage,
``run()`` or ``simple_run()`` should be evoked first to get the
optimized results.
:param outfile: file name for generated lattice file
:return: lattice file name or None if failed
"""
try:
z, x, y, m = self.get_orbit()
rfile = generate_latfile(m, latfile=outfile)
except:
print("Failed to generate latfile.")
rfile = None
finally:
return rfile
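# --- Illustrative sketch (not part of the original module) ---
# A hedged, minimal orbit-correction workflow using the high-level
# simple_run() entry point; 'test/test.lat' is the example lattice referred
# to in the docstrings above and stands in for a real lattice file.
def example_simple_run():
    oc = DakotaOC(lat_file='test/test.lat')
    oc.set_bpms(oc.get_all_bpms())          # monitor every BPM element
    oc.set_cors(cor=oc.get_all_cors())      # use all correctors (H/V pairs)
    oc.simple_run(method='cg', echo=False)  # CG with default step/iterations
    print(oc.get_opt_results(label='fancy'))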
def test_dakotaoc1():
latfile = 'test/test.lat'
oc_ins = DakotaOC(lat_file=latfile)
oc_ins.gen_dakota_input(debug=True)
#oc_ins.run(mpi=True, np=2)
oc_ins.run()
    print(oc_ins.get_opt_results())
def test_dakotaoc2():
latfile = 'test_392.lat'
oc_ins = DakotaOC(lat_file=latfile)
#names = ('LS1_CA01:BPM_D1144', 'LS1_WA01:BPM_D1155')
#names = 'LS1_CA01:BPM_D1144'
names = 'LS1_CB06:DCH_D1574'
idx = oc_ins.get_elem_by_name(names)
    print(idx)
# set BPMs and correctors
bpms = oc_ins.get_elem_by_type('bpm')
cors = oc_ins.get_all_cors()[45:61]
#cors = oc_ins.get_all_cors()[34:50]
#print oc_ins.get_all_cors(type='v')
oc_ins.set_bpms(bpm=bpms)
oc_ins.set_cors(cor=cors)
# set parameters
oc_ins.set_variables()
oc_ins.gen_dakota_input()
oc_ins.run(mpi=True, np=4)
#oc_ins.run()
oc_ins.plot()
#print oc_ins.get_opt_results()
if __name__ == '__main__':
#test_dakotaoc1()
test_dakotaoc2()
|
mit
|
anntzer/scikit-learn
|
benchmarks/bench_rcv1_logreg_convergence.py
|
18
|
7212
|
# Authors: Tom Dupre la Tour <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import matplotlib.pyplot as plt
from joblib import Memory
import numpy as np
import gc
import time
from sklearn.linear_model import (LogisticRegression, SGDClassifier)
from sklearn.datasets import fetch_rcv1
from sklearn.linear_model._sag import get_auto_step_size
try:
import lightning.classification as lightning_clf
except ImportError:
lightning_clf = None
m = Memory(cachedir='.', verbose=0)
# compute logistic loss
def get_loss(w, intercept, myX, myy, C):
n_samples = myX.shape[0]
w = w.ravel()
p = np.mean(np.log(1. + np.exp(-myy * (myX.dot(w) + intercept))))
print("%f + %f" % (p, w.dot(w) / 2. / C / n_samples))
p += w.dot(w) / 2. / C / n_samples
return p
# We use joblib to cache individual fits. Note that we do not pass the dataset
# as argument as the hashing would be too slow, so we assume that the dataset
# never changes.
@m.cache()
def bench_one(name, clf_type, clf_params, n_iter):
clf = clf_type(**clf_params)
try:
clf.set_params(max_iter=n_iter, random_state=42)
except:
clf.set_params(n_iter=n_iter, random_state=42)
st = time.time()
clf.fit(X, y)
end = time.time()
try:
C = 1.0 / clf.alpha / n_samples
except:
C = clf.C
try:
intercept = clf.intercept_
except:
intercept = 0.
train_loss = get_loss(clf.coef_, intercept, X, y, C)
train_score = clf.score(X, y)
test_score = clf.score(X_test, y_test)
duration = end - st
return train_loss, train_score, test_score, duration
def bench(clfs):
for (name, clf, iter_range, train_losses, train_scores,
test_scores, durations) in clfs:
print("training %s" % name)
clf_type = type(clf)
clf_params = clf.get_params()
for n_iter in iter_range:
gc.collect()
train_loss, train_score, test_score, duration = bench_one(
name, clf_type, clf_params, n_iter)
train_losses.append(train_loss)
train_scores.append(train_score)
test_scores.append(test_score)
durations.append(duration)
print("classifier: %s" % name)
print("train_loss: %.8f" % train_loss)
print("train_score: %.8f" % train_score)
print("test_score: %.8f" % test_score)
print("time for fit: %.8f seconds" % duration)
print("")
print("")
return clfs
def plot_train_losses(clfs):
plt.figure()
for (name, _, _, train_losses, _, _, durations) in clfs:
plt.plot(durations, train_losses, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("train loss")
def plot_train_scores(clfs):
plt.figure()
for (name, _, _, _, train_scores, _, durations) in clfs:
plt.plot(durations, train_scores, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("train score")
plt.ylim((0.92, 0.96))
def plot_test_scores(clfs):
plt.figure()
for (name, _, _, _, _, test_scores, durations) in clfs:
plt.plot(durations, test_scores, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("test score")
plt.ylim((0.92, 0.96))
def plot_dloss(clfs):
plt.figure()
pobj_final = []
for (name, _, _, train_losses, _, _, durations) in clfs:
pobj_final.append(train_losses[-1])
indices = np.argsort(pobj_final)
pobj_best = pobj_final[indices[0]]
for (name, _, _, train_losses, _, _, durations) in clfs:
log_pobj = np.log(abs(np.array(train_losses) - pobj_best)) / np.log(10)
plt.plot(durations, log_pobj, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("log(best - train_loss)")
def get_max_squared_sum(X):
"""Get the maximum row-wise sum of squares"""
return np.sum(X ** 2, axis=1).max()
rcv1 = fetch_rcv1()
X = rcv1.data
n_samples, n_features = X.shape
# consider the binary classification problem 'CCAT' vs the rest
ccat_idx = rcv1.target_names.tolist().index('CCAT')
y = rcv1.target.tocsc()[:, ccat_idx].toarray().ravel().astype(np.float64)
y[y == 0] = -1
# parameters
C = 1.
fit_intercept = True
tol = 1.0e-14
# max_iter range
sgd_iter_range = list(range(1, 121, 10))
newton_iter_range = list(range(1, 25, 3))
lbfgs_iter_range = list(range(1, 242, 12))
liblinear_iter_range = list(range(1, 37, 3))
liblinear_dual_iter_range = list(range(1, 85, 6))
sag_iter_range = list(range(1, 37, 3))
clfs = [
("LR-liblinear",
LogisticRegression(C=C, tol=tol,
solver="liblinear", fit_intercept=fit_intercept,
intercept_scaling=1),
liblinear_iter_range, [], [], [], []),
("LR-liblinear-dual",
LogisticRegression(C=C, tol=tol, dual=True,
solver="liblinear", fit_intercept=fit_intercept,
intercept_scaling=1),
liblinear_dual_iter_range, [], [], [], []),
("LR-SAG",
LogisticRegression(C=C, tol=tol,
solver="sag", fit_intercept=fit_intercept),
sag_iter_range, [], [], [], []),
("LR-newton-cg",
LogisticRegression(C=C, tol=tol, solver="newton-cg",
fit_intercept=fit_intercept),
newton_iter_range, [], [], [], []),
("LR-lbfgs",
LogisticRegression(C=C, tol=tol,
solver="lbfgs", fit_intercept=fit_intercept),
lbfgs_iter_range, [], [], [], []),
("SGD",
SGDClassifier(alpha=1.0 / C / n_samples, penalty='l2', loss='log',
fit_intercept=fit_intercept, verbose=0),
sgd_iter_range, [], [], [], [])]
if lightning_clf is not None and not fit_intercept:
alpha = 1. / C / n_samples
    # compute the same step_size as in LR-sag
max_squared_sum = get_max_squared_sum(X)
step_size = get_auto_step_size(max_squared_sum, alpha, "log",
fit_intercept)
clfs.append(
("Lightning-SVRG",
lightning_clf.SVRGClassifier(alpha=alpha, eta=step_size,
tol=tol, loss="log"),
sag_iter_range, [], [], [], []))
clfs.append(
("Lightning-SAG",
lightning_clf.SAGClassifier(alpha=alpha, eta=step_size,
tol=tol, loss="log"),
sag_iter_range, [], [], [], []))
# We keep only 200 features, to have a dense dataset,
# and compare to lightning SAG, which seems incorrect in the sparse case.
X_csc = X.tocsc()
nnz_in_each_features = X_csc.indptr[1:] - X_csc.indptr[:-1]
X = X_csc[:, np.argsort(nnz_in_each_features)[-200:]]
X = X.toarray()
print("dataset: %.3f MB" % (X.nbytes / 1e6))
# Split training and testing. Switch train and test subset compared to
# LYRL2004 split, to have a larger training dataset.
n = 23149
X_test = X[:n, :]
y_test = y[:n]
X = X[n:, :]
y = y[n:]
clfs = bench(clfs)
plot_train_scores(clfs)
plot_test_scores(clfs)
plot_train_losses(clfs)
plot_dloss(clfs)
plt.show()
|
bsd-3-clause
|
N3da/incubator-airflow
|
setup.py
|
4
|
9843
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages, Command
from setuptools.command.test import test as TestCommand
import imp
import logging
import os
import pip
import sys
logger = logging.getLogger(__name__)
# Kept manually in sync with airflow.__version__
version = imp.load_source(
'airflow.version', os.path.join('airflow', 'version.py')).version
class Tox(TestCommand):
user_options = [('tox-args=', None, "Arguments to pass to tox")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.tox_args = ''
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
        # import here, because outside the eggs aren't loaded
import tox
errno = tox.cmdline(args=self.tox_args.split())
sys.exit(errno)
class CleanCommand(Command):
"""Custom clean command to tidy up the project root."""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
os.system('rm -vrf ./build ./dist ./*.pyc ./*.tgz ./*.egg-info')
def git_version(version):
"""
    Return a version to identify the state of the underlying git repo. The version will
    indicate whether the head of the current git-backed working directory is tied to a
    release tag or not: it will indicate the former with a 'release:{version}' prefix
    and the latter with a 'dev0' prefix. Following the prefix will be a sha of the current
    branch head. Finally, a ".dirty" suffix is appended when uncommitted changes
    are present.
"""
repo = None
try:
import git
repo = git.Repo('.git')
except ImportError:
logger.warning('gitpython not found: Cannot compute the git version.')
return ''
except Exception as e:
logger.warning('Git repo not found: Cannot compute the git version.')
return ''
if repo:
sha = repo.head.commit.hexsha
if repo.is_dirty():
return '.dev0+{sha}.dirty'.format(sha=sha)
# commit is clean
# is it release of `version` ?
try:
tag = repo.git.describe(
match='[0-9]*', exact_match=True,
tags=True, dirty=True)
assert tag == version, (tag, version)
return '.release:{version}+{sha}'.format(version=version,
sha=sha)
except git.GitCommandError:
return '.dev0+{sha}'.format(sha=sha)
else:
return 'no_git_version'
def write_version(filename=os.path.join(*['airflow',
'git_version'])):
text = "{}".format(git_version(version))
with open(filename, 'w') as a:
a.write(text)
def check_previous():
installed_packages = ([package.project_name for package
in pip.get_installed_distributions()])
if 'airflow' in installed_packages:
print("An earlier non-apache version of Airflow was installed, "
"please uninstall it first. Then reinstall.")
sys.exit(1)
async = [
'greenlet>=0.4.9',
'eventlet>= 0.9.7',
'gevent>=0.13'
]
azure = ['azure-storage>=0.34.0']
celery = [
'celery>=3.1.17',
'flower>=0.7.3'
]
cgroups = [
'cgroupspy>=0.1.4',
]
crypto = ['cryptography>=0.9.3']
dask = [
'distributed>=1.15.2, <2'
]
databricks = ['requests>=2.5.1, <3']
datadog = ['datadog>=0.14.0']
doc = [
'sphinx>=1.2.3',
'sphinx-argparse>=0.1.13',
'sphinx-rtd-theme>=0.1.6',
'Sphinx-PyPI-upload>=0.2.1'
]
docker = ['docker-py>=1.6.0']
druid = ['pydruid>=0.2.1']
emr = ['boto3>=1.0.0']
gcp_api = [
'httplib2',
'google-api-python-client>=1.5.0, <1.6.0',
'oauth2client>=2.0.2, <2.1.0',
'PyOpenSSL',
'google-cloud-dataflow',
'pandas-gbq'
]
hdfs = ['snakebite>=2.7.8']
webhdfs = ['hdfs[dataframe,avro,kerberos]>=2.0.4']
jira = ['JIRA>1.0.7']
hive = [
'hive-thrift-py>=0.0.1',
'pyhive>=0.1.3',
'impyla>=0.13.3',
'unicodecsv>=0.14.1'
]
jdbc = ['jaydebeapi>=0.2.0']
mssql = ['pymssql>=2.1.1', 'unicodecsv>=0.14.1']
mysql = ['mysqlclient>=1.3.6']
rabbitmq = ['librabbitmq>=1.6.1']
oracle = ['cx_Oracle>=5.1.2']
postgres = ['psycopg2>=2.7.1']
salesforce = ['simple-salesforce>=0.72']
s3 = [
'boto>=2.36.0',
'filechunkio>=1.6',
]
samba = ['pysmbclient>=0.1.3']
slack = ['slackclient>=1.0.0']
statsd = ['statsd>=3.0.1, <4.0']
vertica = ['vertica-python>=0.5.1']
ldap = ['ldap3>=0.9.9.1']
kerberos = ['pykerberos>=1.1.13',
'requests_kerberos>=0.10.0',
'thrift_sasl>=0.2.0',
'snakebite[kerberos]>=2.7.8',
'kerberos>=1.2.5']
password = [
'bcrypt>=2.0.0',
'flask-bcrypt>=0.7.1',
]
github_enterprise = ['Flask-OAuthlib>=0.9.1']
qds = ['qds-sdk>=1.9.6']
cloudant = ['cloudant>=0.5.9,<2.0'] # major update coming soon, clamp to 0.x
redis = ['redis>=2.10.5']
all_dbs = postgres + mysql + hive + mssql + hdfs + vertica + cloudant
devel = [
'click',
'freezegun',
'jira',
'lxml>=3.3.4',
'mock',
'moto',
'nose',
'nose-ignore-docstring==0.2',
'nose-timer',
'parameterized',
'rednose'
]
devel_minreq = devel + mysql + doc + password + s3 + cgroups
devel_hadoop = devel_minreq + hive + hdfs + webhdfs + kerberos
devel_all = devel + all_dbs + doc + samba + s3 + slack + crypto + oracle + docker
def do_setup():
check_previous()
write_version()
setup(
name='apache-airflow',
description='Programmatically author, schedule and monitor data pipelines',
license='Apache License 2.0',
version=version,
packages=find_packages(),
package_data={'': ['airflow/alembic.ini', "airflow/git_version"]},
include_package_data=True,
zip_safe=False,
scripts=['airflow/bin/airflow'],
install_requires=[
'alembic>=0.8.3, <0.9',
'bleach==2.0.0',
'configparser>=3.5.0, <3.6.0',
'croniter>=0.3.17, <0.4',
'dill>=0.2.2, <0.3',
'flask>=0.11, <0.12',
'flask-admin==1.4.1',
'flask-cache>=0.13.1, <0.14',
'flask-login==0.2.11',
'flask-swagger==0.2.13',
'flask-wtf==0.14',
'funcsigs==1.0.0',
'future>=0.16.0, <0.17',
'gitpython>=2.0.2',
'gunicorn>=19.3.0, <19.4.0', # 19.4.? seemed to have issues
'jinja2>=2.7.3, <2.9.0',
'lxml>=3.6.0, <4.0',
'markdown>=2.5.2, <3.0',
'pandas>=0.17.1, <1.0.0',
'psutil>=4.2.0, <5.0.0',
'pygments>=2.0.1, <3.0',
'python-daemon>=2.1.1, <2.2',
'python-dateutil>=2.3, <3',
'python-nvd3==0.14.2',
'requests>=2.5.1, <3',
'setproctitle>=1.1.8, <2',
'sqlalchemy>=0.9.8',
'tabulate>=0.7.5, <0.8.0',
'thrift>=0.9.2, <0.10',
'zope.deprecation>=4.0, <5.0',
],
extras_require={
'all': devel_all,
'all_dbs': all_dbs,
'async': async,
'azure': azure,
'celery': celery,
'cgroups': cgroups,
'cloudant': cloudant,
'crypto': crypto,
'dask': dask,
'databricks': databricks,
'datadog': datadog,
'devel': devel_minreq,
'devel_hadoop': devel_hadoop,
'doc': doc,
'docker': docker,
'druid': druid,
'emr': emr,
'gcp_api': gcp_api,
'github_enterprise': github_enterprise,
'hdfs': hdfs,
'hive': hive,
'jdbc': jdbc,
'kerberos': kerberos,
'ldap': ldap,
'mssql': mssql,
'mysql': mysql,
'oracle': oracle,
'password': password,
'postgres': postgres,
'qds': qds,
'rabbitmq': rabbitmq,
's3': s3,
'salesforce': salesforce,
'samba': samba,
'slack': slack,
'statsd': statsd,
'vertica': vertica,
'webhdfs': webhdfs,
'jira': jira,
'redis': redis,
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Topic :: System :: Monitoring',
],
author='Apache Software Foundation',
author_email='[email protected]',
url='http://airflow.incubator.apache.org/',
download_url=(
'https://dist.apache.org/repos/dist/release/incubator/airflow/' + version),
cmdclass={
'test': Tox,
'extra_clean': CleanCommand,
},
)
if __name__ == "__main__":
do_setup()
|
apache-2.0
|
schets/scikit-learn
|
sklearn/tests/test_learning_curve.py
|
225
|
10791
|
# Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
        # training score becomes worse (2 -> 1), test score better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n=30, n_folds=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
|
bsd-3-clause
|
fasrc/slyme
|
dio/plotmpl.py
|
2
|
2959
|
# -*- coding: utf-8 -*-
# Copyright (c) 2014, John A. Brunelle
# All rights reserved.
"""plotting with matplotlib"""
from dio import processor
DEFAULT_FIGURE_SIZE = (5,5)
@processor
def pie(label_k=None, value_k=None, out=None, err=None):
"""Make a pie chart."""
import matplotlib.pyplot as plt
#settings
figsize = DEFAULT_FIGURE_SIZE #(w,h) in inches
threshold_percentage = None #None or a number in the range (0,100)
other_label = 'OTHER' #applicable iff threshold_percentage is not None
#the data accumulator
pie = {}
try:
while True:
d = yield
if label_k is not None and value_k is not None:
pie[d[label_k]] = d[value_k]
else:
for k, v in d.iteritems():
pie[k] = v
except GeneratorExit:
if len(pie) > 0:
#collapse small slices into one, if applicable
if threshold_percentage is not None:
total_value = sum(pie.values())
threshold_value = threshold_percentage * total_value / 100.
other_value = 0
for key in pie.keys():
if pie[key] < threshold_value:
other_value += pie.pop(key)
if other_value > 0:
pie[other_label] = other_value
#sort order for the slices
def piecmp(x, y):
"""cmp for sorting pie slices.
Both x and y are tuples (key, value).
This sorts by value, except puts the 'OTHER' entry last
regardless of its value.
"""
if x[0] == other_label:
return -1
if y[0] == other_label:
return 1
return cmp(x[1], y[1])
#numeric labels on the slices
def percent(x):
"""Convert float x to a string label."""
return '%d%%' % (x + 0.5)
#convert to matplotlib's format
labels, values = zip(*sorted(pie.iteritems(), piecmp))
#--- actual plotting
fig = plt.figure(figsize=figsize)
ax = fig.gca()
#http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.pie
ax.pie(
values,
labels=labels,
autopct=percent,
)
##no legend option, yet
#ax.legend()
plt.show()
@processor
def histogram(out=None, err=None):
"""Make a histogram."""
import numpy as np
import matplotlib.pyplot as plt
#settings
figsize = DEFAULT_FIGURE_SIZE #(w,h) in inches
nbins = 10
#the data accumulator(s)
values = {}
try:
while True:
d = yield
for k in d:
try:
values[k].append(d[k])
except KeyError:
values[k] = [d[k],]
except GeneratorExit:
for k in values.keys():
#using numpy.histogram instead of pyplot.hist directly so we get
#numbers for textual output, too
hist, hist_bin_edges = np.histogram(values[k], nbins)
bin_width = (hist_bin_edges[-1]-hist_bin_edges[0])/len(hist)
#--- actual plotting
fig = plt.figure(figsize=figsize)
ax = fig.gca()
#http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.bar
ax.bar(
hist_bin_edges[:-1],
hist,
width=bin_width,
)
#I don't like how the highest bar is flush with the plot top by default
ax.set_ylim(top=1.1*max(hist))
plt.show()
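# --- Illustrative sketch (not part of the original module) ---
# How a caller might drive the pie() processor above. This assumes the dio
# @processor decorator returns a primed coroutine that can be fed with
# .send() and finished with .close() (which raises GeneratorExit inside the
# generator and triggers the actual plotting); adapt if dio wraps differently.
def example_drive_pie():
    p = pie(label_k='user', value_k='cpu_hours')
    for record in ({'user': 'alice', 'cpu_hours': 40},
                   {'user': 'bob', 'cpu_hours': 25}):
        p.send(record)  # accumulate one slice per record
    p.close()           # GeneratorExit -> chart is drawn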
|
bsd-3-clause
|
MyRookie/SentimentAnalyse
|
venv/lib/python2.7/site-packages/numpy/lib/npyio.py
|
35
|
71412
|
from __future__ import division, absolute_import, print_function
import sys
import os
import re
import itertools
import warnings
import weakref
from operator import itemgetter
import numpy as np
from . import format
from ._datasource import DataSource
from numpy.core.multiarray import packbits, unpackbits
from ._iotools import (
LineSplitter, NameValidator, StringConverter, ConverterError,
ConverterLockError, ConversionWarning, _is_string_like, has_nested_fields,
flatten_dtype, easy_dtype, _bytes_to_name
)
from numpy.compat import (
asbytes, asstr, asbytes_nested, bytes, basestring, unicode
)
if sys.version_info[0] >= 3:
import pickle
else:
import cPickle as pickle
from future_builtins import map
loads = pickle.loads
__all__ = [
'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
]
class BagObj(object):
"""
BagObj(obj)
Convert attribute look-ups to getitems on the object passed in.
Parameters
----------
obj : class instance
Object on which attribute look-up is performed.
Examples
--------
>>> from numpy.lib.npyio import BagObj as BO
>>> class BagDemo(object):
... def __getitem__(self, key): # An instance of BagObj(BagDemo)
... # will call this method when any
... # attribute look-up is required
... result = "Doesn't matter what you want, "
... return result + "you're gonna get this"
...
>>> demo_obj = BagDemo()
>>> bagobj = BO(demo_obj)
>>> bagobj.hello_there
"Doesn't matter what you want, you're gonna get this"
>>> bagobj.I_can_be_anything
"Doesn't matter what you want, you're gonna get this"
"""
def __init__(self, obj):
# Use weakref to make NpzFile objects collectable by refcount
self._obj = weakref.proxy(obj)
def __getattribute__(self, key):
try:
return object.__getattribute__(self, '_obj')[key]
except KeyError:
raise AttributeError(key)
def __dir__(self):
"""
Enables dir(bagobj) to list the files in an NpzFile.
This also enables tab-completion in an interpreter or IPython.
"""
return object.__getattribute__(self, '_obj').keys()
def zipfile_factory(*args, **kwargs):
import zipfile
kwargs['allowZip64'] = True
return zipfile.ZipFile(*args, **kwargs)
class NpzFile(object):
"""
NpzFile(fid)
A dictionary-like object with lazy-loading of files in the zipped
archive provided on construction.
`NpzFile` is used to load files in the NumPy ``.npz`` data archive
    format. It assumes that files in the archive have a ``.npy`` extension;
    other files are ignored.
The arrays and file strings are lazily loaded on either
getitem access using ``obj['key']`` or attribute lookup using
``obj.f.key``. A list of all files (without ``.npy`` extensions) can
be obtained with ``obj.files`` and the ZipFile object itself using
``obj.zip``.
Attributes
----------
files : list of str
List of all files in the archive with a ``.npy`` extension.
zip : ZipFile instance
The ZipFile object initialized with the zipped archive.
f : BagObj instance
        An object on which attribute look-up can be performed as an
        alternative to getitem access on the `NpzFile` instance itself.
allow_pickle : bool, optional
Allow loading pickled data. Default: True
pickle_kwargs : dict, optional
Additional keyword arguments to pass on to pickle.load.
These are only useful when loading object arrays saved on
Python 2 when using Python 3.
Parameters
----------
fid : file or str
The zipped archive to open. This is either a file-like object
or a string containing the path to the archive.
own_fid : bool, optional
Whether NpzFile should close the file handle.
Requires that `fid` is a file-like object.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npz = np.load(outfile)
    >>> isinstance(npz, np.lib.npyio.NpzFile)
True
>>> npz.files
['y', 'x']
>>> npz['x'] # getitem access
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> npz.f.x # attribute lookup
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
def __init__(self, fid, own_fid=False, allow_pickle=True,
pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an
# optional component of the so-called standard library.
_zip = zipfile_factory(fid)
self._files = _zip.namelist()
self.files = []
self.allow_pickle = allow_pickle
self.pickle_kwargs = pickle_kwargs
for x in self._files:
if x.endswith('.npy'):
self.files.append(x[:-4])
else:
self.files.append(x)
self.zip = _zip
self.f = BagObj(self)
if own_fid:
self.fid = fid
else:
self.fid = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
"""
Close the file.
"""
if self.zip is not None:
self.zip.close()
self.zip = None
if self.fid is not None:
self.fid.close()
self.fid = None
self.f = None # break reference cycle
def __del__(self):
self.close()
def __getitem__(self, key):
# FIXME: This seems like it will copy strings around
# more than is strictly necessary. The zipfile
# will read the string and then
# the format.read_array will copy the string
# to another place in memory.
# It would be better if the zipfile could read
# (or at least uncompress) the data
# directly into the array memory.
member = 0
if key in self._files:
member = 1
elif key in self.files:
member = 1
key += '.npy'
if member:
bytes = self.zip.open(key)
magic = bytes.read(len(format.MAGIC_PREFIX))
bytes.close()
if magic == format.MAGIC_PREFIX:
bytes = self.zip.open(key)
return format.read_array(bytes,
allow_pickle=self.allow_pickle,
pickle_kwargs=self.pickle_kwargs)
else:
return self.zip.read(key)
else:
raise KeyError("%s is not a file in the archive" % key)
def __iter__(self):
return iter(self.files)
def items(self):
"""
Return a list of tuples, with each tuple (filename, array in file).
"""
return [(f, self[f]) for f in self.files]
def iteritems(self):
"""Generator that returns tuples (filename, array in file)."""
for f in self.files:
yield (f, self[f])
def keys(self):
"""Return files in the archive with a ``.npy`` extension."""
return self.files
def iterkeys(self):
"""Return an iterator over the files in the archive."""
return self.__iter__()
def __contains__(self, key):
return self.files.__contains__(key)
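# Usage sketch for NpzFile (hypothetical archive name): keys are the stored
# array names and each value is read lazily on access.
#
#     >>> npz = np.load('data.npz')
#     >>> for name, arr in npz.items():      # or npz.iteritems() on Python 2
#     ...     print(name, arr.shape)
#     >>> 'x' in npz                         # uses __contains__
#     True
#     >>> npz.close()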
def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
encoding='ASCII'):
"""
Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.
Parameters
----------
file : file-like object or string
The file to read. File-like objects must support the
``seek()`` and ``read()`` methods. Pickled files require that the
file-like object support the ``readline()`` method as well.
mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode (see
`numpy.memmap` for a detailed description of the modes). A
memory-mapped array is kept on disk. However, it can be accessed
and sliced like any ndarray. Memory mapping is especially useful
for accessing small fragments of large files without reading the
entire file into memory.
allow_pickle : bool, optional
Allow loading pickled object arrays stored in npy files. Reasons for
disallowing pickles include security, as loading pickled data can
execute arbitrary code. If pickles are disallowed, loading object
arrays will fail.
Default: True
fix_imports : bool, optional
Only useful when loading Python 2 generated pickled files on Python 3,
which includes npy/npz files containing object arrays. If `fix_imports`
is True, pickle will try to map the old Python 2 names to the new names
used in Python 3.
encoding : str, optional
What encoding to use when reading Python 2 strings. Only useful when
loading Python 2 generated pickled files on Python 3, which includes
npy/npz files containing object arrays. Values other than 'latin1',
'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
data. Default: 'ASCII'
Returns
-------
result : array, tuple, dict, etc.
Data stored in the file. For ``.npz`` files, the returned instance
of NpzFile class must be closed to avoid leaking file descriptors.
Raises
------
IOError
If the input file does not exist or cannot be read.
ValueError
The file contains an object array, but allow_pickle=False given.
See Also
--------
save, savez, savez_compressed, loadtxt
memmap : Create a memory-map to an array stored in a file on disk.
Notes
-----
- If the file contains pickle data, then whatever object is stored
in the pickle is returned.
- If the file is a ``.npy`` file, then a single array is returned.
- If the file is a ``.npz`` file, then a dictionary-like object is
returned, containing ``{filename: array}`` key-value pairs, one for
each file in the archive.
- If the file is a ``.npz`` file, the returned value supports the
context manager protocol in a similar fashion to the open function::
with load('foo.npz') as data:
a = data['a']
The underlying file descriptor is closed when exiting the 'with'
block.
Examples
--------
Store data to disk, and load it again:
>>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
>>> np.load('/tmp/123.npy')
array([[1, 2, 3],
[4, 5, 6]])
Store compressed data to disk, and load it again:
>>> a=np.array([[1, 2, 3], [4, 5, 6]])
>>> b=np.array([1, 2])
>>> np.savez('/tmp/123.npz', a=a, b=b)
>>> data = np.load('/tmp/123.npz')
>>> data['a']
array([[1, 2, 3],
[4, 5, 6]])
>>> data['b']
array([1, 2])
>>> data.close()
Mem-map the stored array, and then access the second row
directly from disk:
>>> X = np.load('/tmp/123.npy', mmap_mode='r')
>>> X[1, :]
memmap([4, 5, 6])
"""
import gzip
own_fid = False
if isinstance(file, basestring):
fid = open(file, "rb")
own_fid = True
else:
fid = file
if encoding not in ('ASCII', 'latin1', 'bytes'):
# The 'encoding' value for pickle also affects what encoding
# the serialized binary data of Numpy arrays is loaded
# in. Pickle does not pass on the encoding information to
# Numpy. The unpickling code in numpy.core.multiarray is
# written to assume that unicode data appearing where binary
# should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'.
#
# Other encoding values can corrupt binary data, and we
# purposefully disallow them. For the same reason, the errors=
# argument is not exposed, as values other than 'strict'
# result can similarly silently corrupt numerical data.
raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")
if sys.version_info[0] >= 3:
pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = {}
try:
# Code to distinguish from NumPy binary files and pickles.
_ZIP_PREFIX = asbytes('PK\x03\x04')
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
fid.seek(-N, 1) # back-up
if magic.startswith(_ZIP_PREFIX):
# zip-file (assume .npz)
# Transfer file ownership to NpzFile
tmp = own_fid
own_fid = False
return NpzFile(fid, own_fid=tmp, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
elif magic == format.MAGIC_PREFIX:
# .npy file
if mmap_mode:
return format.open_memmap(file, mode=mmap_mode)
else:
return format.read_array(fid, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
else:
# Try a pickle
if not allow_pickle:
raise ValueError("allow_pickle=False, but file does not contain "
"non-pickled data")
try:
return pickle.load(fid, **pickle_kwargs)
except:
raise IOError(
"Failed to interpret file %s as a pickle" % repr(file))
finally:
if own_fid:
fid.close()
def save(file, arr, allow_pickle=True, fix_imports=True):
"""
Save an array to a binary file in NumPy ``.npy`` format.
Parameters
----------
file : file or str
File or filename to which the data is saved. If file is a file-object,
then the filename is unchanged. If file is a string, a ``.npy``
extension will be appended to the file name if it does not already
have one.
allow_pickle : bool, optional
Allow saving object arrays using Python pickles. Reasons for disallowing
pickles include security (loading pickled data can execute arbitrary
code) and portability (pickled objects may not be loadable on different
Python installations, for example if the stored objects require libraries
that are not available, and not all pickled data is compatible between
Python 2 and Python 3).
Default: True
fix_imports : bool, optional
Only useful in forcing objects in object arrays on Python 3 to be
pickled in a Python 2 compatible way. If `fix_imports` is True, pickle
will try to map the new Python 3 names to the old module names used in
Python 2, so that the pickle data stream is readable with Python 2.
arr : array_like
Array data to be saved.
See Also
--------
savez : Save several arrays into a ``.npz`` archive
savetxt, load
Notes
-----
For a description of the ``.npy`` format, see the module docstring
of `numpy.lib.format` or the Numpy Enhancement Proposal
http://docs.scipy.org/doc/numpy/neps/npy-format.html
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> np.save(outfile, x)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
own_fid = False
if isinstance(file, basestring):
if not file.endswith('.npy'):
file = file + '.npy'
fid = open(file, "wb")
own_fid = True
else:
fid = file
if sys.version_info[0] >= 3:
pickle_kwargs = dict(fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = None
try:
arr = np.asanyarray(arr)
format.write_array(fid, arr, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
finally:
if own_fid:
fid.close()
def savez(file, *args, **kwds):
"""
Save several arrays into a single file in uncompressed ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword
arguments are given, the corresponding variable names, in the ``.npz``
file will match the keyword names.
Parameters
----------
file : str or file
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string, the ``.npz``
extension will be appended to the file name if it is not already there.
args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
save : Save a single array to a binary file in NumPy format.
savetxt : Save an array to a file as plain text.
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
description of the ``.npy`` format, see `numpy.lib.format` or the
Numpy Enhancement Proposal
http://docs.scipy.org/doc/numpy/neps/npy-format.html
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with \\*args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
['arr_1', 'arr_0']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with \\**kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npzfile = np.load(outfile)
>>> npzfile.files
['y', 'x']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
_savez(file, args, kwds, False)
def savez_compressed(file, *args, **kwds):
"""
Save several arrays into a single file in compressed ``.npz`` format.
If keyword arguments are given, then filenames are taken from the keywords.
If arguments are passed in with no keywords, then stored file names are
arr_0, arr_1, etc.
Parameters
----------
file : str
File name of ``.npz`` file.
args : Arguments
Function arguments.
kwds : Keyword arguments
Keywords.
See Also
--------
numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
numpy.load : Load the files created by savez_compressed.
"""
_savez(file, args, kwds, True)
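# Usage sketch for savez_compressed (the docstring above has no Examples
# section; the path is hypothetical):
#
#     >>> a = np.arange(10)
#     >>> np.savez_compressed('/tmp/compressed.npz', a=a)
#     >>> np.load('/tmp/compressed.npz')['a']
#     array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])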
def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
# Import deferred for startup time improvement
import tempfile
if isinstance(file, basestring):
if not file.endswith('.npz'):
file = file + '.npz'
namedict = kwds
for i, val in enumerate(args):
key = 'arr_%d' % i
if key in namedict.keys():
raise ValueError(
"Cannot use un-named variables and keyword %s" % key)
namedict[key] = val
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zipf = zipfile_factory(file, mode="w", compression=compression)
# Stage arrays in a temporary file on disk, before writing to zip.
fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
os.close(fd)
try:
for key, val in namedict.items():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val),
allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
fid.close()
fid = None
zipf.write(tmpfile, arcname=fname)
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
zipf.close()
def _getconv(dtype):
""" Find the correct dtype converter. Adapted from matplotlib """
def floatconv(x):
        x = x.lower()
if b'0x' in x:
return float.fromhex(asstr(x))
return float(x)
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.uint64):
return np.uint64
if issubclass(typ, np.int64):
return np.int64
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.longdouble):
return np.longdouble
elif issubclass(typ, np.floating):
return floatconv
elif issubclass(typ, np.complex):
return lambda x: complex(asstr(x))
elif issubclass(typ, np.bytes_):
return bytes
else:
return str
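# Illustration of the converters produced above (private helper; inputs are
# byte strings, as handed over by loadtxt):
#
#     >>> _getconv(np.dtype(float))(b'0x1.8p1')    # hex float via floatconv
#     3.0
#     >>> _getconv(np.dtype(bool))(b'0')
#     False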
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False,
ndmin=0):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file or str
File, filename, or generator to read. If the filename extension is
``.gz`` or ``.bz2``, the file is first decompressed. Note that
generators should return byte strings for Python 3k.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a
structured data-type, the resulting array will be 1-dimensional, and
each row will be interpreted as an element of the array. In this
case, the number of columns used must match the number of fields in
the data-type.
comments : str or sequence, optional
The characters or list of characters used to indicate the start of a
comment;
default: '#'.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will convert
that column to a float. E.g., if column 0 is a date string:
``converters = {0: datestr2num}``. Converters can also be used to
provide a default value for missing data (but see also `genfromtxt`):
``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None.
skiprows : int, optional
Skip the first `skiprows` lines; default: 0.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. When used with a structured
data-type, arrays are returned for each field. Default is False.
ndmin : int, optional
The returned array will have at least `ndmin` dimensions.
Otherwise mono-dimensional axes will be squeezed.
Legal values: 0 (default), 1 or 2.
.. versionadded:: 1.6.0
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads MATLAB data files
Notes
-----
This function aims to be a fast reader for simply formatted files. The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
.. versionadded:: 1.10.0
The strings produced by the Python float.hex method can be used as
input for floats.
Examples
--------
>>> from io import StringIO # StringIO behaves like a file object
>>> c = StringIO("0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
>>> d = StringIO("M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO("1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
>>> y
array([ 2., 4.])
"""
# Type conversions for Py3 convenience
if comments is not None:
if isinstance(comments, (basestring, bytes)):
comments = [asbytes(comments)]
else:
comments = [asbytes(comment) for comment in comments]
# Compile regex for comments beforehand
comments = (re.escape(comment) for comment in comments)
regex_comments = re.compile(asbytes('|').join(comments))
user_converters = converters
if delimiter is not None:
delimiter = asbytes(delimiter)
if usecols is not None:
usecols = list(usecols)
fown = False
try:
if _is_string_like(fname):
fown = True
if fname.endswith('.gz'):
import gzip
fh = iter(gzip.GzipFile(fname))
elif fname.endswith('.bz2'):
import bz2
fh = iter(bz2.BZ2File(fname))
elif sys.version_info[0] == 2:
fh = iter(open(fname, 'U'))
else:
fh = iter(open(fname))
else:
fh = iter(fname)
except TypeError:
raise ValueError('fname must be a string, file handle, or generator')
X = []
def flatten_dtype(dt):
"""Unpack a structured data-type, and produce re-packing info."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
shape = dt.shape
if len(shape) == 0:
return ([dt.base], None)
else:
packing = [(shape[-1], list)]
if len(shape) > 1:
for dim in dt.shape[-2::-1]:
packing = [(dim*packing[0][0], packing*dim)]
return ([dt.base] * int(np.prod(dt.shape)), packing)
else:
types = []
packing = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt, flat_packing = flatten_dtype(tp)
types.extend(flat_dt)
# Avoid extra nesting for subarrays
if len(tp.shape) > 0:
packing.extend(flat_packing)
else:
packing.append((len(flat_dt), flat_packing))
return (types, packing)
def pack_items(items, packing):
"""Pack items into nested lists based on re-packing info."""
if packing is None:
return items[0]
elif packing is tuple:
return tuple(items)
elif packing is list:
return list(items)
else:
start = 0
ret = []
for length, subpacking in packing:
ret.append(pack_items(items[start:start+length], subpacking))
start += length
return tuple(ret)
def split_line(line):
"""Chop off comments, strip, and split at delimiter.
Note that although the file is opened as text, this function
returns bytes.
"""
line = asbytes(line)
if comments is not None:
line = regex_comments.split(asbytes(line), maxsplit=1)[0]
line = line.strip(asbytes('\r\n'))
if line:
return line.split(delimiter)
else:
return []
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in range(skiprows):
next(fh)
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
try:
while not first_vals:
first_line = next(fh)
first_vals = split_line(first_line)
except StopIteration:
# End of lines reached
first_line = ''
first_vals = []
warnings.warn('loadtxt: Empty input file: "%s"' % fname)
N = len(usecols or first_vals)
dtype_types, packing = flatten_dtype(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in range(N)]
if N > 1:
packing = [(N, tuple)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).items():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
converters[i] = conv
# Parse each line, including the first
for i, line in enumerate(itertools.chain([first_line], fh)):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[i] for i in usecols]
if len(vals) != N:
line_num = i + skiprows + 1
raise ValueError("Wrong number of columns at line %d"
% line_num)
# Convert each value according to its column and store
items = [conv(val) for (conv, val) in zip(converters, vals)]
# Then pack it according to the dtype's nesting
items = pack_items(items, packing)
X.append(items)
finally:
if fown:
fh.close()
X = np.array(X, dtype)
# Multicolumn data are returned with shape (1, N, M), i.e.
# (1, 1, M) for a single row - remove the singleton dimension there
if X.ndim == 3 and X.shape[:2] == (1, 1):
X.shape = (1, -1)
# Verify that the array has at least dimensions `ndmin`.
# Check correctness of the values of `ndmin`
if ndmin not in [0, 1, 2]:
raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)
# Tweak the size and shape of the arrays - remove extraneous dimensions
if X.ndim > ndmin:
X = np.squeeze(X)
# and ensure we have the minimum number of dimensions asked for
# - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
if X.ndim < ndmin:
if ndmin == 1:
X = np.atleast_1d(X)
elif ndmin == 2:
X = np.atleast_2d(X).T
if unpack:
if len(dtype_types) > 1:
# For structured arrays, return an array for each field.
return [X[field] for field in dtype.names]
else:
return X.T
else:
return X
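# Sketch of the hex-float support mentioned in the Notes above, plus a
# per-column converter (file-free via io.BytesIO; values are illustrative):
#
#     >>> from io import BytesIO
#     >>> np.loadtxt(BytesIO(b"0x1.8p1 2.5"))
#     array([ 3. ,  2.5])
#     >>> np.loadtxt(BytesIO(b"1;2\n3;4"), delimiter=';',
#     ...            converters={1: lambda s: float(s) * 10})
#     array([[  1.,  20.],
#            [  3.,  40.]])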
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
footer='', comments='# '):
"""
Save an array to a text file.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : array_like
Data to be saved to a text file.
fmt : str or sequence of strs, optional
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored. For complex `X`, the legal options
for `fmt` are:
a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted
like `' (%s+%sj)' % (fmt, fmt)`
b) a full string specifying every real and imaginary part, e.g.
           `' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns
c) a list of specifiers, one per column - in this case, the real
and imaginary part must have separate specifiers,
e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
delimiter : str, optional
String or character separating columns.
newline : str, optional
String or character separating lines.
.. versionadded:: 1.5.0
header : str, optional
String that will be written at the beginning of the file.
.. versionadded:: 1.7.0
footer : str, optional
String that will be written at the end of the file.
.. versionadded:: 1.7.0
comments : str, optional
String that will be prepended to the ``header`` and ``footer`` strings,
to mark them as comments. Default: '# ', as expected by e.g.
``numpy.loadtxt``.
.. versionadded:: 1.7.0
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into an uncompressed ``.npz`` archive
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
``+`` : Forces to precede result with + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
      - For integer specifiers (e.g. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
This explanation of ``fmt`` is not complete, for an exhaustive
specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<http://docs.python.org/library/string.html#
format-specification-mini-language>`_, Python Documentation.
Examples
--------
>>> x = y = z = np.arange(0.0,5.0,1.0)
>>> np.savetxt('test.out', x, delimiter=',') # X is an array
>>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
# Py3 conversions first
if isinstance(fmt, bytes):
fmt = asstr(fmt)
delimiter = asstr(delimiter)
own_fh = False
if _is_string_like(fname):
own_fh = True
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, 'wb')
else:
if sys.version_info[0] >= 3:
fh = open(fname, 'wb')
else:
fh = open(fname, 'w')
elif hasattr(fname, 'write'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
try:
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.descr)
else:
ncol = X.shape[1]
iscomplex_X = np.iscomplexobj(X)
# `fmt` can be a string with multiple insertion points or a
    # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
elif isinstance(fmt, str):
n_fmt_chars = fmt.count('%')
error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
if n_fmt_chars == 1:
if iscomplex_X:
fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
else:
fmt = [fmt, ] * ncol
format = delimiter.join(fmt)
elif iscomplex_X and n_fmt_chars != (2 * ncol):
raise error
elif ((not iscomplex_X) and n_fmt_chars != ncol):
raise error
else:
format = fmt
else:
raise ValueError('invalid fmt: %r' % (fmt,))
if len(header) > 0:
header = header.replace('\n', '\n' + comments)
fh.write(asbytes(comments + header + newline))
if iscomplex_X:
for row in X:
row2 = []
for number in row:
row2.append(number.real)
row2.append(number.imag)
fh.write(asbytes(format % tuple(row2) + newline))
else:
for row in X:
try:
fh.write(asbytes(format % tuple(row) + newline))
except TypeError:
raise TypeError("Mismatch between array dtype ('%s') and "
"format specifier ('%s')"
% (str(X.dtype), format))
if len(footer) > 0:
footer = footer.replace('\n', '\n' + comments)
fh.write(asbytes(comments + footer + newline))
finally:
if own_fh:
fh.close()
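# Sketch of the complex-number `fmt` expansion described in the docstring
# (option a: a single specifier becomes ' (%s+%sj)' per column; the output
# path is hypothetical):
#
#     >>> z = np.array([[1+2j, 3+4j]])
#     >>> np.savetxt('complex.out', z, fmt='%.1e')
#     # written line: " (1.0e+00+2.0e+00j) (3.0e+00+4.0e+00j)"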
def fromregex(file, regexp, dtype):
"""
Construct an array from a text file, using regular expression parsing.
The returned array is always a structured array, and is constructed from
all matches of the regular expression in the file. Groups in the regular
expression are converted to fields of the structured array.
Parameters
----------
file : str or file
File name or file object to read.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
dtype : dtype or list of dtypes
Dtype for the structured array.
Returns
-------
output : ndarray
The output array, containing the part of the content of `file` that
was matched by `regexp`. `output` is always a structured array.
Raises
------
TypeError
When `dtype` is not a valid dtype for a structured array.
See Also
--------
fromstring, loadtxt
Notes
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
`doc.structured_arrays`.
Examples
--------
>>> f = open('test.dat', 'w')
>>> f.write("1312 foo\\n1534 bar\\n444 qux")
>>> f.close()
>>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
>>> output = np.fromregex('test.dat', regexp,
... [('num', np.int64), ('key', 'S3')])
>>> output
array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
dtype=[('num', '<i8'), ('key', '|S3')])
>>> output['num']
array([1312, 1534, 444], dtype=int64)
"""
own_fh = False
if not hasattr(file, "read"):
file = open(file, 'rb')
own_fh = True
try:
if not hasattr(regexp, 'match'):
regexp = re.compile(asbytes(regexp))
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
seq = regexp.findall(file.read())
if seq and not isinstance(seq[0], tuple):
# Only one group is in the regexp.
# Create the new array as a single data-type and then
# re-interpret as a single-field structured array.
newdtype = np.dtype(dtype[dtype.names[0]])
output = np.array(seq, dtype=newdtype)
output.dtype = dtype
else:
output = np.array(seq, dtype=dtype)
return output
finally:
if own_fh:
file.close()
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skip_header=0, skip_footer=0, converters=None,
missing_values=None, filling_values=None, usecols=None,
names=None, excludelist=None, deletechars=None,
replace_space='_', autostrip=False, case_sensitive=True,
defaultfmt="f%i", unpack=None, usemask=False, loose=True,
invalid_raise=True, max_rows=None):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skip_header` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file, str, list of str, generator
File, filename, list, or generator to read. If the filename
        extension is `.gz` or `.bz2`, the file is first decompressed. Note
that generators must return byte strings in Python 3k. The strings
in a list or produced by a generator are treated as lines.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
All the characters occurring on a line after a comment are discarded
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
skiprows : int, optional
`skiprows` was removed in numpy 1.10. Please use `skip_header` instead.
skip_header : int, optional
The number of lines to skip at the beginning of the file.
skip_footer : int, optional
The number of lines to skip at the end of the file.
converters : variable, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing : variable, optional
`missing` was removed in numpy 1.10. Please use `missing_values`
instead.
missing_values : variable, optional
The set of strings corresponding to missing data.
filling_values : variable, optional
The set of values to be used as default when the data are missing.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first valid line
after the first `skip_header` lines.
If `names` is a sequence or a single-string of comma-separated names,
the names will be used to define the field names in a structured dtype.
If `names` is None, the names of the dtype fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
        ['return','file','print']. Excluded names have an underscore appended:
for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
replace_space : char, optional
Character(s) used in replacement of white spaces in the variables
names. By default, use a '_'.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
loose : bool, optional
If True, do not raise errors for invalid values.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
max_rows : int, optional
The maximum number of rows to read. Must not be used with skip_footer
at the same time. If given, the value must be at least 1. Default is
to read the entire file.
.. versionadded:: 1.10.0
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with `names`),
there must not be any header in the file (else a ValueError
exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
References
----------
.. [1] Numpy User Guide, section `I/O with Numpy
<http://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
Examples
    --------
>>> from io import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO("1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Using dtype = None
>>> s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Specifying dtype and names
>>> s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
An example with fixed-width columns
>>> s = StringIO("11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
array((1, 1.3, 'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
"""
if max_rows is not None:
if skip_footer:
raise ValueError(
"The keywords 'skip_footer' and 'max_rows' can not be "
"specified at the same time.")
if max_rows < 1:
raise ValueError("'max_rows' must be at least 1.")
# Py3 data conversions to bytes, for convenience
if comments is not None:
comments = asbytes(comments)
if isinstance(delimiter, unicode):
delimiter = asbytes(delimiter)
if isinstance(missing_values, (unicode, list, tuple)):
missing_values = asbytes_nested(missing_values)
#
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
raise TypeError(
"The input argument 'converter' should be a valid dictionary "
"(got '%s' instead)" % type(user_converters))
# Initialize the filehandle, the LineSplitter and the NameValidator
own_fhd = False
try:
if isinstance(fname, basestring):
if sys.version_info[0] == 2:
fhd = iter(np.lib._datasource.open(fname, 'rbU'))
else:
fhd = iter(np.lib._datasource.open(fname, 'rb'))
own_fhd = True
else:
fhd = iter(fname)
except TypeError:
raise TypeError(
"fname must be a string, filehandle, list of strings, "
"or generator. Got %s instead." % type(fname))
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip)._handyman
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Skip the first `skip_header` rows
for i in range(skip_header):
next(fhd)
# Keep on until we find the first valid values
first_values = None
try:
while not first_values:
first_line = next(fhd)
if names is True:
if comments in first_line:
first_line = (
asbytes('').join(first_line.split(comments)[1:]))
first_values = split_line(first_line)
except StopIteration:
# return an empty array if the datafile is empty
first_line = asbytes('')
first_values = []
warnings.warn('genfromtxt: Empty input file: "%s"' % fname)
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if fval in comments:
del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([_bytes_to_name(_.strip())
for _ in first_values])
first_line = asbytes('')
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names,
excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Make sure the names is a list (for 2.5)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
elif (names is not None) and (dtype is not None):
names = list(dtype.names)
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
# Define the list of missing_values (one column: one list)
missing_values = [list([asbytes('')]) for _ in range(nbcols)]
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, bytes):
user_value = user_missing_values.split(asbytes(","))
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values
if user_filling_values is None:
user_filling_values = []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped,
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
# Initialize the converters ................................
if dtype is None:
# Note: we can't use a [...]*nbcols, as we would have 3 times the same
# ... converter, instead of 3 different converters.
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (j, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(j):
try:
j = names.index(j)
i = j
except ValueError:
continue
elif usecols:
try:
i = usecols.index(j)
except ValueError:
# Unused converter specified
continue
else:
i = j
# Find the value to test - first_line is not filtered by usecols:
if len(first_line):
testing_value = first_values[j]
else:
testing_value = None
converters[i].update(conv, locked=True,
testing_value=testing_value,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
# Fixme: possible error as following variable never used.
#miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
if usecols:
# Select only the columns we need
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values,
missing_values)]))
if len(rows) == max_rows:
break
if own_fhd:
fhd.close()
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = [itemgetter(i)(_m) for _m in rows]
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = map(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
nbinvalid = len(invalid)
if nbinvalid > 0:
nbrows = len(rows) + nbinvalid - skip_footer
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbinvalid_skipped = len([_ for _ in invalid
if _[0] > nbrows + skip_header])
invalid = invalid[:nbinvalid - nbinvalid_skipped]
skip_footer -= nbinvalid_skipped
#
# nbrows -= skip_footer
# errmsg = [template % (i, nb)
# for (i, nb) in invalid if i < nbrows]
# else:
errmsg = [template % (i, nb)
for (i, nb) in invalid]
if len(errmsg):
errmsg.insert(0, "Some errors were detected !")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning)
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
if loose:
rows = list(
zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
else:
rows = list(
zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v in (type('S'), np.string_)]
# ... and take the largest number of chars.
for i in strcolidx:
column_types[i] = "|S%i" % max(len(row[i]) for row in data)
#
if names is None:
# If the dtype is uniform, don't define names, else use ''
base = set([c.type for c in converters if c._checked])
if len(base) == 1:
(ddtype, mdtype) = (list(base)[0], np.bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(column_types)]
if usemask:
mdtype = [(defaultfmt % i, np.bool)
for (i, dt) in enumerate(column_types)]
else:
ddtype = list(zip(names, column_types))
mdtype = list(zip(names, [np.bool] * len(column_types)))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
# Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
raise NotImplementedError(
"Nested fields involving objects are not supported...")
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(
masks, dtype=np.dtype([('', np.bool) for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for i, ttype in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if ttype == np.string_:
ttype = "|S%i" % max(len(row[i]) for row in data)
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names:
mdtype = [(_, np.bool) for _ in dtype.names]
else:
mdtype = np.bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
if usemask and names:
for (name, conv) in zip(names or (), converters):
missing_values = [conv(_) for _ in conv.missing_values
if _ != asbytes('')]
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
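# Sketch of the missing_values / filling_values handling documented above
# (file-free via io.BytesIO; the sentinel and fill value are illustrative):
#
#     >>> from io import BytesIO
#     >>> s = BytesIO(b"1,N/A,3\n4,5,N/A")
#     >>> np.genfromtxt(s, delimiter=",", missing_values="N/A",
#     ...               filling_values=-999)
#     array([[   1., -999.,    3.],
#            [   4.,    5., -999.]])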
def ndfromtxt(fname, **kwargs):
"""
Load ASCII data stored in a file and return it as a single array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function.
"""
kwargs['usemask'] = False
return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
"""
Load ASCII data stored in a text file and return a masked array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
kwargs['usemask'] = True
return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
If ``usemask=False`` a standard `recarray` is returned,
if ``usemask=True`` a MaskedRecords array is returned.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
kwargs.setdefault("dtype", None)
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
def recfromcsv(fname, **kwargs):
"""
Load ASCII data stored in a comma-separated file.
The returned array is a record array (if ``usemask=False``, see
`recarray`) or a masked record array (if ``usemask=True``,
see `ma.mrecords.MaskedRecords`).
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
# Set default kwargs for genfromtxt as relevant to csv import.
kwargs.setdefault("case_sensitive", "lower")
kwargs.setdefault("names", True)
kwargs.setdefault("delimiter", ",")
kwargs.setdefault("dtype", None)
output = genfromtxt(fname, **kwargs)
usemask = kwargs.get("usemask", False)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
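# Usage sketch for recfromcsv (hypothetical CSV content; the header row is
# turned into field names because names=True by default):
#
#     >>> from io import BytesIO
#     >>> r = np.recfromcsv(BytesIO(b"name,value\nfoo,1.5\nbar,2.5"))
#     >>> r.dtype.names
#     ('name', 'value')
#     >>> r.value
#     array([ 1.5,  2.5])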
|
mit
|
thomasgibson/tabula-rasa
|
gravity_waves/plot_hybridization_vs_cfl.py
|
1
|
5050
|
import os
import sys
import pandas as pd
import seaborn
from matplotlib import pyplot as plt
FONTSIZE = 14
MARKERSIZE = 10
LINEWIDTH = 3
cfl_range = [2, 4, 6, 8, 10, 12, 16, 24, 32, 64]
lo_rt_data = ["results/hybrid_RT1_GW_ref6_nlayers85_CFL%d.csv" %
cfl for cfl in cfl_range]
nlo_rt_data = ["results/hybrid_RT2_GW_ref4_nlayers85_CFL%d.csv" %
cfl for cfl in cfl_range]
lo_rtcf_data = ["results/hybrid_RTCF1_GW_ref7_nlayers85_CFL%d.csv" %
cfl for cfl in cfl_range]
nlo_rtcf_data = ["results/hybrid_RTCF2_GW_ref5_nlayers85_CFL%d.csv" %
cfl for cfl in cfl_range]
nlo_bdfm_data = ["results/hybrid_BDFM2_GW_ref4_nlayers85_CFL%d.csv" %
cfl for cfl in cfl_range]
for data in (lo_rtcf_data + nlo_rtcf_data +
lo_rt_data + nlo_rt_data + nlo_bdfm_data):
if not os.path.exists(data):
print("Cannot find data file '%s'" % data)
sys.exit(1)
lo_rt_dfs = pd.concat(pd.read_csv(data) for data in lo_rt_data)
nlo_rt_dfs = pd.concat(pd.read_csv(data) for data in nlo_rt_data)
lo_rtcf_dfs = pd.concat(pd.read_csv(data) for data in lo_rtcf_data)
nlo_rtcf_dfs = pd.concat(pd.read_csv(data) for data in nlo_rtcf_data)
nlo_bdfm_dfs = pd.concat(pd.read_csv(data) for data in nlo_bdfm_data)
lo_rt_groups = lo_rt_dfs.groupby(["CFL"], as_index=False)
nlo_rt_groups = nlo_rt_dfs.groupby(["CFL"], as_index=False)
lo_rtcf_groups = lo_rtcf_dfs.groupby(["CFL"], as_index=False)
nlo_rtcf_groups = nlo_rtcf_dfs.groupby(["CFL"], as_index=False)
nlo_bdfm_groups = nlo_bdfm_dfs.groupby(["CFL"], as_index=False)
colors = seaborn.color_palette(n_colors=3)
linestyles = ["solid", "dotted"]
seaborn.set(style="ticks")
fig, (axes,) = plt.subplots(1, 1, figsize=(7, 6), squeeze=False)
ax, = axes
ax.xaxis.set_ticks_position("bottom")
ax.yaxis.set_ticks_position("left")
ax.set_ylabel("Krylov iterations (Trace system)", fontsize=FONTSIZE+2)
ax.set_xlim(0, 64)
ax.set_ylim(0, 20)
ax.set_xticks(cfl_range)
ax.set_xticklabels(cfl_range)
yticks = [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20]
ax.set_yticks(yticks)
ax.set_yticklabels(yticks)
ax.axvline(10, color='k')
ax.axvline(2, color='k')
ax.axvspan(2, 10, ymin=0, ymax=1, alpha=0.5, color='gray')
lo_rt_cfls = []
lo_rt_iters = []
for group in lo_rt_groups:
cfl, df = group
lo_rt_cfls.append(cfl)
lo_rt_iters.append(df.InnerIters.values[0])
nlo_rt_cfls = []
nlo_rt_iters = []
for group in nlo_rt_groups:
cfl, df = group
nlo_rt_cfls.append(cfl)
nlo_rt_iters.append(df.InnerIters.values[0])
lo_rtcf_cfls = []
lo_rtcf_iters = []
for group in lo_rtcf_groups:
cfl, df = group
lo_rtcf_cfls.append(cfl)
lo_rtcf_iters.append(df.InnerIters.values[0])
nlo_rtcf_cfls = []
nlo_rtcf_iters = []
for group in nlo_rtcf_groups:
cfl, df = group
nlo_rtcf_cfls.append(cfl)
nlo_rtcf_iters.append(df.InnerIters.values[0])
nlo_bdfm_cfls = []
nlo_bdfm_iters = []
for group in nlo_bdfm_groups:
cfl, df = group
nlo_bdfm_cfls.append(cfl)
nlo_bdfm_iters.append(df.InnerIters.values[0])
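# The five extraction loops above all follow the same pattern; the helper
# below is an optional sketch of how they could be collapsed. It assumes each
# grouped dataframe has an InnerIters column, as in the files read above, and
# is not used by the rest of the script.
def extract_cfl_iters(groups):
    cfls, iters = [], []
    for cfl, df in groups:
        cfls.append(cfl)
        iters.append(df.InnerIters.values[0])
    return cfls, iters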
ax.plot(lo_rt_cfls, lo_rt_iters,
label="$RT_1$",
color=colors[2],
marker="s",
linewidth=LINEWIDTH,
markersize=MARKERSIZE,
linestyle=linestyles[0])
ax.plot(nlo_rt_cfls, nlo_rt_iters,
label="$RT_2$",
color=colors[2],
marker="s",
linewidth=LINEWIDTH,
markersize=MARKERSIZE,
linestyle=linestyles[1])
ax.plot(lo_rtcf_cfls, lo_rtcf_iters,
label="$RTCF_1$",
color=colors[0],
marker="o",
linewidth=LINEWIDTH,
markersize=MARKERSIZE,
linestyle=linestyles[0],
clip_on=False)
ax.plot(nlo_rtcf_cfls, nlo_rtcf_iters,
label="$RTCF_2$",
color=colors[0],
marker="o",
linewidth=LINEWIDTH,
markersize=MARKERSIZE,
linestyle=linestyles[1],
clip_on=False)
ax.plot(nlo_bdfm_cfls, nlo_bdfm_iters,
label="$BDFM_2$",
color=colors[1],
marker="^",
linewidth=LINEWIDTH,
markersize=MARKERSIZE,
linestyle=linestyles[1],
clip_on=False)
for tick in ax.get_xticklabels():
tick.set_fontsize(FONTSIZE)
for tick in ax.get_yticklabels():
tick.set_fontsize(FONTSIZE)
ax.grid(b=True, which='major', linestyle='-.')
xlabel = fig.text(0.5, 0,
"Horizontal CFL number",
ha='center',
fontsize=FONTSIZE+2)
handles, labels = ax.get_legend_handles_labels()
legend = fig.legend(handles, labels,
loc=9,
bbox_to_anchor=(0.4, 0.875),
bbox_transform=fig.transFigure,
ncol=3,
handlelength=2,
fontsize=FONTSIZE-2,
numpoints=1,
frameon=True)
fig.savefig("hybridization_vs_cfl.pdf",
orientation="landscape",
format="pdf",
transparent=True,
bbox_inches="tight",
bbox_extra_artists=[xlabel, legend])
|
mit
|
nipe0324/kaggle-keypoints-detection-keras
|
model7.py
|
1
|
3502
|
# Model 7
#
# Early Stopping
#
import time
from datetime import datetime
from load_data import load2d
from saver import save_arch, save_history
from utils import reshape2d_by_image_dim_ordering
from plotter import plot_hist, plot_model_arch
import pickle
import numpy as np
# Note: sklearn.cross_validation is deprecated and was removed in scikit-learn 0.20;
# newer code should import train_test_split from sklearn.model_selection instead.
from sklearn.cross_validation import train_test_split
from data_generator import FlippedImageDataGenerator
from keras.models import Sequential
from keras.layers import Convolution2D, MaxPooling2D, Flatten, Dense, Activation, Dropout
from keras.optimizers import SGD
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping
from keras import backend as K
# Variables
model_name = 'model7'
nb_epoch = 5000
validation_split = 0.2
lr = 0.01
start = 0.03
stop = 0.001
learning_rates = np.linspace(start, stop, nb_epoch)
patience = 100  # EarlyStopping: stop if the minimum error has not improved for this many consecutive epochs
momentum = 0.9
nesterov = True
loss_method = 'mean_squared_error'
arch_path = 'model/' + model_name + '-arch-' + str(nb_epoch) + '.json'
weights_path = 'model/' + model_name + '-weights-' + str(nb_epoch) + '.hdf5'
# Load the data
X, y = load2d()
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=validation_split, random_state=42)
# Reshape the 2D images to match the image_dim_ordering
X_train, input_shape = reshape2d_by_image_dim_ordering(X_train)
X_val, _ = reshape2d_by_image_dim_ordering(X_val)
# Define the model
model = Sequential()
model.add(Convolution2D(32, 3, 3, input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.1))
model.add(Convolution2D(64, 2, 2))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Convolution2D(128, 2, 2))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.3))
model.add(Flatten())
model.add(Dense(1000))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1000))
model.add(Activation('relu'))
model.add(Dense(30))
save_arch(model, arch_path)  # save the model architecture for later use
# Prepare for training
checkpoint_callback = ModelCheckpoint(filepath=weights_path,
monitor='val_loss',
save_best_only=True,
mode='auto')
change_lr = LearningRateScheduler(lambda epoch: float(learning_rates[epoch]))
early_stop = EarlyStopping(patience=patience)  # stop if the minimum validation error has not improved for `patience` consecutive epochs
flip_gen = FlippedImageDataGenerator()
sgd = SGD(lr=lr, momentum=momentum, nesterov=nesterov)
model.compile(loss=loss_method, optimizer=sgd)
# Train the model
start_time = time.time()
print('start_time: %s' % (datetime.now()))
hist = model.fit_generator(flip_gen.flow(X_train, y_train),
samples_per_epoch=X_train.shape[0],
nb_epoch=nb_epoch,
validation_data=(X_val, y_val),
                           callbacks=[checkpoint_callback, change_lr, early_stop])
print('end_time: %s, duration (min): %d' % (datetime.now(), int((time.time() - start_time) / 60)))
# Plot and save the results to files
# plot_hist(hist, model_name)
# plot_model_arch(model, model_name)
save_history(hist, model_name)
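# Hedged follow-up sketch (not part of the original script): reload the best
# checkpointed weights into the model defined above and predict on the
# validation set. Only variables already defined in this script are used.
model.load_weights(weights_path)
y_val_pred = model.predict(X_val)
print('validation prediction shape:', y_val_pred.shape)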
|
apache-2.0
|
aocalderon/PhD
|
Y2Q3/Datasets/drawMBRs.py
|
1
|
1454
|
#!/opt/miniconda3/bin/python
import pandas as pd
import folium
from pyproj import Proj, transform
inProj = Proj(init='epsg:4799')
outProj = Proj(init='epsg:4326')
points = pd.read_csv('B1K_RTree.csv', header=None)
def convertCoords1(point):
x, y = transform(inProj, outProj, point[1], point[2])
return pd.Series([x, y])
points[[1, 2]] = points.apply(convertCoords1, axis=1)
mbrs = pd.read_csv('B1K_RTree_MBRs.csv', header=None)
def convertCoords2(points):
x1, y1 = transform(inProj, outProj, points[1], points[2])
x2, y2 = transform(inProj, outProj, points[3], points[4])
return pd.Series([x1, y1, x2, y2])
mbrs[[1, 2, 3, 4]] = mbrs.apply(convertCoords2, axis=1)
the_map = folium.Map(location=[39.93644, 116.38108], zoom_start=13)
the_points = folium.FeatureGroup(name="Points")
points.apply(lambda point:folium.features.CircleMarker(
location=[point[2], point[1]],
popup="MBR ID: {0} => [{1},{2}]".format(point[0], point[2], point[1]),
radius=2
).add_to(the_points), axis=1)
the_mbrs = folium.FeatureGroup(name="MBRs")
mbrs.apply(lambda mbr:folium.features.RectangleMarker(
bounds=[[mbr[2], mbr[1]], [mbr[4], mbr[3]]],
popup="MBR ID:{0} => [[{1},{2}];[{3},{4}]]".format(mbr[0],mbr[2], mbr[1], mbr[4], mbr[3]),
color='blue',
fill_color='blue'
).add_to(the_mbrs), axis=1)
the_map.add_child(the_points)
the_map.add_child(the_mbrs)
folium.LayerControl().add_to(the_map)
the_map.save('MBRs.html')
|
lgpl-3.0
|
cjayb/mne-python
|
mne/channels/channels.py
|
1
|
65505
|
# Authors: Alexandre Gramfort <[email protected]>
# Matti Hämäläinen <[email protected]>
# Denis Engemann <[email protected]>
# Andrew Dykstra <[email protected]>
# Teon Brooks <[email protected]>
# Daniel McCloy <[email protected]>
#
# License: BSD (3-clause)
import os
import os.path as op
import sys
from collections import OrderedDict
from copy import deepcopy
from functools import partial
import numpy as np
from scipy import sparse
from ..defaults import HEAD_SIZE_DEFAULT, _handle_default
from ..utils import (verbose, logger, warn, _check_preload, _validate_type,
fill_doc, _check_option)
from ..io.compensator import get_current_comp
from ..io.constants import FIFF
from ..io.meas_info import anonymize_info, Info, MontageMixin, create_info
from ..io.pick import (channel_type, pick_info, pick_types, _picks_by_type,
_check_excludes_includes, _contains_ch_type,
channel_indices_by_type, pick_channels, _picks_to_idx,
_get_channel_types)
from ..io.write import DATE_NONE
def _get_meg_system(info):
"""Educated guess for the helmet type based on channels."""
have_helmet = True
for ch in info['chs']:
if ch['kind'] == FIFF.FIFFV_MEG_CH:
# Only take first 16 bits, as higher bits store CTF grad comp order
coil_type = ch['coil_type'] & 0xFFFF
nmag = np.sum(
[c['kind'] == FIFF.FIFFV_MEG_CH for c in info['chs']])
if coil_type == FIFF.FIFFV_COIL_NM_122:
system = '122m'
break
elif coil_type // 1000 == 3: # All Vectorview coils are 30xx
system = '306m'
break
elif (coil_type == FIFF.FIFFV_COIL_MAGNES_MAG or
coil_type == FIFF.FIFFV_COIL_MAGNES_GRAD):
system = 'Magnes_3600wh' if nmag > 150 else 'Magnes_2500wh'
break
elif coil_type == FIFF.FIFFV_COIL_CTF_GRAD:
system = 'CTF_275'
break
elif coil_type == FIFF.FIFFV_COIL_KIT_GRAD:
system = 'KIT'
# Our helmet does not match very well, so let's just create it
have_helmet = False
break
elif coil_type == FIFF.FIFFV_COIL_BABY_GRAD:
system = 'BabySQUID'
break
elif coil_type == FIFF.FIFFV_COIL_ARTEMIS123_GRAD:
system = 'ARTEMIS123'
have_helmet = False
break
else:
system = 'unknown'
have_helmet = False
return system, have_helmet
def _get_ch_type(inst, ch_type, allow_ref_meg=False):
"""Choose a single channel type (usually for plotting).
Usually used in plotting to plot a single datatype, e.g. look for mags,
then grads, then ... to plot.
"""
if ch_type is None:
allowed_types = ['mag', 'grad', 'planar1', 'planar2', 'eeg', 'csd',
'fnirs_cw_amplitude', 'fnirs_od', 'hbo', 'hbr',
'ecog', 'seeg']
allowed_types += ['ref_meg'] if allow_ref_meg else []
for type_ in allowed_types:
if isinstance(inst, Info):
if _contains_ch_type(inst, type_):
ch_type = type_
break
elif type_ in inst:
ch_type = type_
break
else:
raise RuntimeError('No plottable channel types found')
return ch_type
@verbose
def equalize_channels(instances, copy=True, verbose=None):
"""Equalize channel picks and ordering across multiple MNE-Python objects.
First, all channels that are not common to each object are dropped. Then,
using the first object in the list as a template, the channels of each
object are re-ordered to match the template. The end result is that all
given objects define the same channels, in the same order.
Parameters
----------
instances : list
A list of MNE-Python objects to equalize the channels for. Objects can
be of type Raw, Epochs, Evoked, AverageTFR, Forward, Covariance,
CrossSpectralDensity or Info.
copy : bool
        When dropping and/or re-ordering channels, an object will be copied
        when this parameter is set to ``True`` (the default). When set to
        ``False``, the dropping and re-ordering of channels happens in-place.
.. versionadded:: 0.20.0
%(verbose)s
Returns
-------
equalized_instances : list
A list of MNE-Python objects that have the same channels defined in the
same order.
Notes
-----
    This function operates in place when ``copy=False``.
"""
from ..cov import Covariance
from ..io.base import BaseRaw
from ..io.meas_info import Info
from ..epochs import BaseEpochs
from ..evoked import Evoked
from ..forward import Forward
from ..time_frequency import _BaseTFR, CrossSpectralDensity
# Instances need to have a `ch_names` attribute and a `pick_channels`
# method that supports `ordered=True`.
allowed_types = (BaseRaw, BaseEpochs, Evoked, _BaseTFR, Forward,
Covariance, CrossSpectralDensity, Info)
allowed_types_str = ("Raw, Epochs, Evoked, TFR, Forward, Covariance, "
"CrossSpectralDensity or Info")
for inst in instances:
_validate_type(inst, allowed_types, "Instances to be modified",
allowed_types_str)
chan_template = instances[0].ch_names
logger.info('Identifying common channels ...')
channels = [set(inst.ch_names) for inst in instances]
common_channels = set(chan_template).intersection(*channels)
all_channels = set(chan_template).union(*channels)
dropped = list(set(all_channels - common_channels))
# Preserve the order of chan_template
order = np.argsort([chan_template.index(ch) for ch in common_channels])
common_channels = np.array(list(common_channels))[order].tolist()
# Update all instances to match the common_channels list
reordered = False
equalized_instances = []
for inst in instances:
# Only perform picking when needed
if inst.ch_names != common_channels:
if copy:
inst = inst.copy()
inst.pick_channels(common_channels, ordered=True)
if len(inst.ch_names) == len(common_channels):
reordered = True
equalized_instances.append(inst)
if dropped:
logger.info('Dropped the following channels:\n%s' % dropped)
elif reordered:
logger.info('Channels have been re-ordered.')
return equalized_instances
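# Hedged usage sketch (comment only, not part of the module): `raw` and
# `epochs` below are hypothetical MNE objects sharing a subset of channels.
#
# >>> raw_eq, epochs_eq = equalize_channels([raw, epochs]) # doctest: +SKIP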
class ContainsMixin(object):
"""Mixin class for Raw, Evoked, Epochs."""
def __contains__(self, ch_type):
"""Check channel type membership.
Parameters
----------
ch_type : str
Channel type to check for. Can be e.g. 'meg', 'eeg', 'stim', etc.
Returns
-------
in : bool
Whether or not the instance contains the given channel type.
Examples
--------
Channel type membership can be tested as::
>>> 'meg' in inst # doctest: +SKIP
True
>>> 'seeg' in inst # doctest: +SKIP
False
"""
if ch_type == 'meg':
has_ch_type = (_contains_ch_type(self.info, 'mag') or
_contains_ch_type(self.info, 'grad'))
else:
has_ch_type = _contains_ch_type(self.info, ch_type)
return has_ch_type
@property
def compensation_grade(self):
"""The current gradient compensation grade."""
return get_current_comp(self.info)
@fill_doc
def get_channel_types(self, picks=None, unique=False, only_data_chs=False):
"""Get a list of channel type for each channel.
Parameters
----------
%(picks_all)s
unique : bool
Whether to return only unique channel types. Default is ``False``.
only_data_chs : bool
Whether to ignore non-data channels. Default is ``False``.
Returns
-------
channel_types : list
The channel types.
"""
return _get_channel_types(self.info, picks=picks, unique=unique,
only_data_chs=only_data_chs)
# XXX Eventually de-duplicate with _kind_dict of mne/io/meas_info.py
_human2fiff = {'ecg': FIFF.FIFFV_ECG_CH,
'eeg': FIFF.FIFFV_EEG_CH,
'emg': FIFF.FIFFV_EMG_CH,
'eog': FIFF.FIFFV_EOG_CH,
'exci': FIFF.FIFFV_EXCI_CH,
'ias': FIFF.FIFFV_IAS_CH,
'misc': FIFF.FIFFV_MISC_CH,
'resp': FIFF.FIFFV_RESP_CH,
'seeg': FIFF.FIFFV_SEEG_CH,
'stim': FIFF.FIFFV_STIM_CH,
'syst': FIFF.FIFFV_SYST_CH,
'bio': FIFF.FIFFV_BIO_CH,
'ecog': FIFF.FIFFV_ECOG_CH,
'fnirs_cw_amplitude': FIFF.FIFFV_FNIRS_CH,
'fnirs_od': FIFF.FIFFV_FNIRS_CH,
'hbo': FIFF.FIFFV_FNIRS_CH,
'hbr': FIFF.FIFFV_FNIRS_CH}
_human2unit = {'ecg': FIFF.FIFF_UNIT_V,
'eeg': FIFF.FIFF_UNIT_V,
'emg': FIFF.FIFF_UNIT_V,
'eog': FIFF.FIFF_UNIT_V,
'exci': FIFF.FIFF_UNIT_NONE,
'ias': FIFF.FIFF_UNIT_NONE,
'misc': FIFF.FIFF_UNIT_V,
'resp': FIFF.FIFF_UNIT_NONE,
'seeg': FIFF.FIFF_UNIT_V,
'stim': FIFF.FIFF_UNIT_NONE,
'syst': FIFF.FIFF_UNIT_NONE,
'bio': FIFF.FIFF_UNIT_V,
'ecog': FIFF.FIFF_UNIT_V,
'fnirs_cw_amplitude': FIFF.FIFF_UNIT_V,
'fnirs_od': FIFF.FIFF_UNIT_NONE,
'hbo': FIFF.FIFF_UNIT_MOL,
'hbr': FIFF.FIFF_UNIT_MOL}
_unit2human = {FIFF.FIFF_UNIT_V: 'V',
FIFF.FIFF_UNIT_T: 'T',
FIFF.FIFF_UNIT_T_M: 'T/m',
FIFF.FIFF_UNIT_MOL: 'M',
FIFF.FIFF_UNIT_NONE: 'NA',
FIFF.FIFF_UNIT_CEL: 'C'}
def _check_set(ch, projs, ch_type):
"""Ensure type change is compatible with projectors."""
new_kind = _human2fiff[ch_type]
if ch['kind'] != new_kind:
for proj in projs:
if ch['ch_name'] in proj['data']['col_names']:
raise RuntimeError('Cannot change channel type for channel %s '
'in projector "%s"'
% (ch['ch_name'], proj['desc']))
ch['kind'] = new_kind
class SetChannelsMixin(MontageMixin):
"""Mixin class for Raw, Evoked, Epochs."""
@verbose
def set_eeg_reference(self, ref_channels='average', projection=False,
ch_type='auto', forward=None, verbose=None):
"""Specify which reference to use for EEG data.
Use this function to explicitly specify the desired reference for EEG.
This can be either an existing electrode or a new virtual channel.
This function will re-reference the data according to the desired
reference.
Parameters
----------
%(set_eeg_reference_ref_channels)s
%(set_eeg_reference_projection)s
%(set_eeg_reference_ch_type)s
%(set_eeg_reference_forward)s
%(verbose_meth)s
Returns
-------
inst : instance of Raw | Epochs | Evoked
Data with EEG channels re-referenced. If ``ref_channels='average'``
and ``projection=True`` a projection will be added instead of
directly re-referencing the data.
%(set_eeg_reference_see_also_notes)s
"""
from ..io.reference import set_eeg_reference
return set_eeg_reference(self, ref_channels=ref_channels, copy=False,
projection=projection, ch_type=ch_type,
forward=forward)[0]
def _get_channel_positions(self, picks=None):
"""Get channel locations from info.
Parameters
----------
picks : str | list | slice | None
None gets good data indices.
Notes
-----
.. versionadded:: 0.9.0
"""
picks = _picks_to_idx(self.info, picks)
chs = self.info['chs']
pos = np.array([chs[k]['loc'][:3] for k in picks])
n_zero = np.sum(np.sum(np.abs(pos), axis=1) == 0)
if n_zero > 1: # XXX some systems have origin (0, 0, 0)
raise ValueError('Could not extract channel positions for '
'{} channels'.format(n_zero))
return pos
def _set_channel_positions(self, pos, names):
"""Update channel locations in info.
Parameters
----------
pos : array-like | np.ndarray, shape (n_points, 3)
The channel positions to be set.
names : list of str
The names of the channels to be set.
Notes
-----
.. versionadded:: 0.9.0
"""
if len(pos) != len(names):
raise ValueError('Number of channel positions not equal to '
'the number of names given.')
pos = np.asarray(pos, dtype=np.float64)
if pos.shape[-1] != 3 or pos.ndim != 2:
msg = ('Channel positions must have the shape (n_points, 3) '
'not %s.' % (pos.shape,))
raise ValueError(msg)
for name, p in zip(names, pos):
if name in self.ch_names:
idx = self.ch_names.index(name)
self.info['chs'][idx]['loc'][:3] = p
else:
msg = ('%s was not found in the info. Cannot be updated.'
% name)
raise ValueError(msg)
@verbose
def set_channel_types(self, mapping, verbose=None):
"""Define the sensor type of channels.
Parameters
----------
mapping : dict
A dictionary mapping a channel to a sensor type (str), e.g.,
``{'EEG061': 'eog'}``.
%(verbose_meth)s
Returns
-------
inst : instance of Raw | Epochs | Evoked
The instance (modified in place).
.. versionchanged:: 0.20
Return the instance.
Notes
-----
The following sensor types are accepted:
ecg, eeg, emg, eog, exci, ias, misc, resp, seeg, stim, syst, ecog,
hbo, hbr, fnirs_cw_amplitude, fnirs_od
.. versionadded:: 0.9.0
"""
ch_names = self.info['ch_names']
# first check and assemble clean mappings of index and name
unit_changes = dict()
for ch_name, ch_type in mapping.items():
if ch_name not in ch_names:
raise ValueError("This channel name (%s) doesn't exist in "
"info." % ch_name)
c_ind = ch_names.index(ch_name)
if ch_type not in _human2fiff:
raise ValueError('This function cannot change to this '
'channel type: %s. Accepted channel types '
'are %s.'
% (ch_type,
", ".join(sorted(_human2unit.keys()))))
# Set sensor type
_check_set(self.info['chs'][c_ind], self.info['projs'], ch_type)
unit_old = self.info['chs'][c_ind]['unit']
unit_new = _human2unit[ch_type]
if unit_old not in _unit2human:
raise ValueError("Channel '%s' has unknown unit (%s). Please "
"fix the measurement info of your data."
% (ch_name, unit_old))
if unit_old != _human2unit[ch_type]:
this_change = (_unit2human[unit_old], _unit2human[unit_new])
if this_change not in unit_changes:
unit_changes[this_change] = list()
unit_changes[this_change].append(ch_name)
self.info['chs'][c_ind]['unit'] = _human2unit[ch_type]
if ch_type in ['eeg', 'seeg', 'ecog']:
coil_type = FIFF.FIFFV_COIL_EEG
elif ch_type == 'hbo':
coil_type = FIFF.FIFFV_COIL_FNIRS_HBO
elif ch_type == 'hbr':
coil_type = FIFF.FIFFV_COIL_FNIRS_HBR
elif ch_type == 'fnirs_cw_amplitude':
coil_type = FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE
elif ch_type == 'fnirs_od':
coil_type = FIFF.FIFFV_COIL_FNIRS_OD
else:
coil_type = FIFF.FIFFV_COIL_NONE
self.info['chs'][c_ind]['coil_type'] = coil_type
msg = "The unit for channel(s) {0} has changed from {1} to {2}."
for this_change, names in unit_changes.items():
warn(msg.format(", ".join(sorted(names)), *this_change))
return self
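    # Hedged usage sketch (comment only): relabel a mislabeled EEG channel as
    # EOG using the mapping form from the docstring above; `raw` is
    # hypothetical.
    #
    # >>> raw.set_channel_types({'EEG061': 'eog'}) # doctest: +SKIP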
@fill_doc
def rename_channels(self, mapping):
"""Rename channels.
Parameters
----------
%(rename_channels_mapping)s
Returns
-------
inst : instance of Raw | Epochs | Evoked
The instance (modified in place).
.. versionchanged:: 0.20
Return the instance.
Notes
-----
.. versionadded:: 0.9.0
"""
rename_channels(self.info, mapping)
return self
@verbose
def plot_sensors(self, kind='topomap', ch_type=None, title=None,
show_names=False, ch_groups=None, to_sphere=True,
axes=None, block=False, show=True, sphere=None,
verbose=None):
"""Plot sensor positions.
Parameters
----------
kind : str
Whether to plot the sensors as 3d, topomap or as an interactive
sensor selection dialog. Available options 'topomap', '3d',
'select'. If 'select', a set of channels can be selected
interactively by using lasso selector or clicking while holding
control key. The selected channels are returned along with the
figure instance. Defaults to 'topomap'.
ch_type : None | str
The channel type to plot. Available options 'mag', 'grad', 'eeg',
'seeg', 'ecog', 'all'. If ``'all'``, all the available mag, grad,
eeg, seeg and ecog channels are plotted. If None (default), then
channels are chosen in the order given above.
title : str | None
Title for the figure. If None (default), equals to ``'Sensor
positions (%%s)' %% ch_type``.
show_names : bool | array of str
Whether to display all channel names. If an array, only the channel
names in the array are shown. Defaults to False.
ch_groups : 'position' | array of shape (n_ch_groups, n_picks) | None
Channel groups for coloring the sensors. If None (default), default
coloring scheme is used. If 'position', the sensors are divided
into 8 regions. See ``order`` kwarg of :func:`mne.viz.plot_raw`. If
array, the channels are divided by picks given in the array.
.. versionadded:: 0.13.0
to_sphere : bool
            Whether to project the 3d locations to a sphere. When False, the
            sensor array appears as if viewed from directly above the
            subject's head. Has no effect when kind='3d'. Defaults to True.
.. versionadded:: 0.14.0
axes : instance of Axes | instance of Axes3D | None
Axes to draw the sensors to. If ``kind='3d'``, axes must be an
instance of Axes3D. If None (default), a new axes will be created.
.. versionadded:: 0.13.0
block : bool
Whether to halt program execution until the figure is closed.
Defaults to False.
.. versionadded:: 0.13.0
show : bool
Show figure if True. Defaults to True.
%(topomap_sphere_auto)s
%(verbose_meth)s
Returns
-------
fig : instance of Figure
Figure containing the sensor topography.
selection : list
A list of selected channels. Only returned if ``kind=='select'``.
See Also
--------
mne.viz.plot_layout
Notes
-----
This function plots the sensor locations from the info structure using
matplotlib. For drawing the sensors using mayavi see
:func:`mne.viz.plot_alignment`.
.. versionadded:: 0.12.0
"""
from ..viz.utils import plot_sensors
return plot_sensors(self.info, kind=kind, ch_type=ch_type, title=title,
show_names=show_names, ch_groups=ch_groups,
to_sphere=to_sphere, axes=axes, block=block,
show=show, sphere=sphere, verbose=verbose)
@verbose
def anonymize(self, daysback=None, keep_his=False, verbose=None):
"""Anonymize measurement information in place.
Parameters
----------
%(anonymize_info_parameters)s
%(verbose)s
Returns
-------
inst : instance of Raw | Epochs | Evoked
The modified instance.
Notes
-----
%(anonymize_info_notes)s
.. versionadded:: 0.13.0
"""
anonymize_info(self.info, daysback=daysback, keep_his=keep_his,
verbose=verbose)
self.set_meas_date(self.info['meas_date']) # unify annot update
return self
def set_meas_date(self, meas_date):
"""Set the measurement start date.
Parameters
----------
meas_date : datetime | float | tuple | None
The new measurement date.
If datetime object, it must be timezone-aware and in UTC.
A tuple of (seconds, microseconds) or float (alias for
``(meas_date, 0)``) can also be passed and a datetime
object will be automatically created. If None, will remove
the time reference.
Returns
-------
inst : instance of Raw | Epochs | Evoked
The modified raw instance. Operates in place.
See Also
--------
mne.io.Raw.anonymize
Notes
-----
If you want to remove all time references in the file, call
:func:`mne.io.anonymize_info(inst.info) <mne.io.anonymize_info>`
after calling ``inst.set_meas_date(None)``.
.. versionadded:: 0.20
"""
from ..annotations import _handle_meas_date
meas_date = _handle_meas_date(meas_date)
self.info['meas_date'] = meas_date
# clear file_id and meas_id if needed
if meas_date is None:
for key in ('file_id', 'meas_id'):
value = self.info.get(key)
if value is not None:
assert 'msecs' not in value
value['secs'] = DATE_NONE[0]
value['usecs'] = DATE_NONE[1]
# The following copy is needed for a test CTF dataset
# otherwise value['machid'][:] = 0 would suffice
_tmp = value['machid'].copy()
_tmp[:] = 0
value['machid'] = _tmp
if hasattr(self, 'annotations'):
self.annotations._orig_time = meas_date
return self
class UpdateChannelsMixin(object):
"""Mixin class for Raw, Evoked, Epochs, AverageTFR."""
@verbose
def pick_types(self, meg=None, eeg=False, stim=False, eog=False,
ecg=False, emg=False, ref_meg='auto', misc=False,
resp=False, chpi=False, exci=False, ias=False, syst=False,
seeg=False, dipole=False, gof=False, bio=False, ecog=False,
fnirs=False, csd=False, include=(), exclude='bads',
selection=None, verbose=None):
"""Pick some channels by type and names.
Parameters
----------
meg : bool | str
If True include MEG channels. If string it can be 'mag', 'grad',
'planar1' or 'planar2' to select only magnetometers, all
gradiometers, or a specific type of gradiometer.
eeg : bool
If True include EEG channels.
stim : bool
If True include stimulus channels.
eog : bool
If True include EOG channels.
ecg : bool
If True include ECG channels.
emg : bool
If True include EMG channels.
ref_meg : bool | str
If True include CTF / 4D reference channels. If 'auto', reference
channels are included if compensations are present and ``meg`` is
not False. Can also be the string options for the ``meg``
parameter.
misc : bool
If True include miscellaneous analog channels.
resp : bool
If True include response-trigger channel. For some MEG systems this
is separate from the stim channel.
chpi : bool
If True include continuous HPI coil channels.
exci : bool
Flux excitation channel used to be a stimulus channel.
ias : bool
Internal Active Shielding data (maybe on Triux only).
syst : bool
System status channel information (on Triux systems only).
seeg : bool
Stereotactic EEG channels.
dipole : bool
Dipole time course channels.
gof : bool
Dipole goodness of fit channels.
bio : bool
Bio channels.
ecog : bool
Electrocorticography channels.
fnirs : bool | str
Functional near-infrared spectroscopy channels. If True include all
fNIRS channels. If False (default) include none. If string it can
be 'hbo' (to include channels measuring oxyhemoglobin) or 'hbr' (to
include channels measuring deoxyhemoglobin).
csd : bool
EEG-CSD channels.
include : list of str
List of additional channels to include. If empty do not include
any.
exclude : list of str | str
List of channels to exclude. If 'bads' (default), exclude channels
in ``info['bads']``.
selection : list of str
Restrict sensor channels (MEG, EEG) to this list of channel names.
%(verbose_meth)s
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
See Also
--------
pick_channels
Notes
-----
.. versionadded:: 0.9.0
"""
idx = pick_types(
self.info, meg=meg, eeg=eeg, stim=stim, eog=eog, ecg=ecg, emg=emg,
ref_meg=ref_meg, misc=misc, resp=resp, chpi=chpi, exci=exci,
ias=ias, syst=syst, seeg=seeg, dipole=dipole, gof=gof, bio=bio,
ecog=ecog, fnirs=fnirs, include=include, exclude=exclude,
selection=selection)
self._pick_drop_channels(idx)
# remove dropped channel types from reject and flat
if getattr(self, 'reject', None) is not None:
# use list(self.reject) to avoid RuntimeError for changing
# dictionary size during iteration
for ch_type in list(self.reject):
if ch_type not in self:
del self.reject[ch_type]
if getattr(self, 'flat', None) is not None:
for ch_type in list(self.flat):
if ch_type not in self:
del self.flat[ch_type]
return self
def pick_channels(self, ch_names, ordered=False):
"""Pick some channels.
Parameters
----------
ch_names : list
The list of channels to select.
ordered : bool
If True (default False), ensure that the order of the channels in
the modified instance matches the order of ``ch_names``.
.. versionadded:: 0.20.0
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
See Also
--------
drop_channels
pick_types
reorder_channels
Notes
-----
The channel names given are assumed to be a set, i.e. the order
does not matter. The original order of the channels is preserved.
You can use ``reorder_channels`` to set channel order if necessary.
.. versionadded:: 0.9.0
"""
picks = pick_channels(self.info['ch_names'], ch_names, ordered=ordered)
return self._pick_drop_channels(picks)
@fill_doc
def pick(self, picks, exclude=()):
"""Pick a subset of channels.
Parameters
----------
%(picks_all)s
exclude : list | str
Set of channels to exclude, only used when picking based on
types (e.g., exclude="bads" when picks="meg").
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
"""
picks = _picks_to_idx(self.info, picks, 'all', exclude,
allow_empty=False)
return self._pick_drop_channels(picks)
def reorder_channels(self, ch_names):
"""Reorder channels.
Parameters
----------
ch_names : list
The desired channel order.
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
See Also
--------
drop_channels
pick_types
pick_channels
Notes
-----
Channel names must be unique. Channels that are not in ``ch_names``
are dropped.
.. versionadded:: 0.16.0
"""
_check_excludes_includes(ch_names)
idx = list()
for ch_name in ch_names:
ii = self.ch_names.index(ch_name)
if ii in idx:
raise ValueError('Channel name repeated: %s' % (ch_name,))
idx.append(ii)
return self._pick_drop_channels(idx)
def drop_channels(self, ch_names):
"""Drop channel(s).
Parameters
----------
ch_names : iterable or str
Iterable (e.g. list) of channel name(s) or channel name to remove.
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
See Also
--------
reorder_channels
pick_channels
pick_types
Notes
-----
.. versionadded:: 0.9.0
"""
if isinstance(ch_names, str):
ch_names = [ch_names]
try:
all_str = all([isinstance(ch, str) for ch in ch_names])
except TypeError:
raise ValueError("'ch_names' must be iterable, got "
"type {} ({}).".format(type(ch_names), ch_names))
if not all_str:
raise ValueError("Each element in 'ch_names' must be str, got "
"{}.".format([type(ch) for ch in ch_names]))
missing = [ch for ch in ch_names if ch not in self.ch_names]
if len(missing) > 0:
msg = "Channel(s) {0} not found, nothing dropped."
raise ValueError(msg.format(", ".join(missing)))
bad_idx = [self.ch_names.index(ch) for ch in ch_names
if ch in self.ch_names]
idx = np.setdiff1d(np.arange(len(self.ch_names)), bad_idx)
return self._pick_drop_channels(idx)
def _pick_drop_channels(self, idx):
# avoid circular imports
from ..io import BaseRaw
from ..time_frequency import AverageTFR, EpochsTFR
if not isinstance(self, BaseRaw):
_check_preload(self, 'adding, dropping, or reordering channels')
if getattr(self, 'picks', None) is not None:
self.picks = self.picks[idx]
if getattr(self, '_read_picks', None) is not None:
self._read_picks = [r[idx] for r in self._read_picks]
if hasattr(self, '_cals'):
self._cals = self._cals[idx]
pick_info(self.info, idx, copy=False)
if getattr(self, '_projector', None) is not None:
self._projector = self._projector[idx][:, idx]
# All others (Evoked, Epochs, Raw) have chs axis=-2
axis = -3 if isinstance(self, (AverageTFR, EpochsTFR)) else -2
if hasattr(self, '_data'): # skip non-preloaded Raw
self._data = self._data.take(idx, axis=axis)
else:
assert isinstance(self, BaseRaw) and not self.preload
self._pick_projs()
return self
def _pick_projs(self):
"""Keep only projectors which apply to at least 1 data channel."""
drop_idx = []
for idx, proj in enumerate(self.info['projs']):
if not set(self.info['ch_names']) & set(proj['data']['col_names']):
drop_idx.append(idx)
for idx in drop_idx:
logger.info(f"Removing projector {self.info['projs'][idx]}")
if drop_idx and hasattr(self, 'del_proj'):
self.del_proj(drop_idx)
return self
def add_channels(self, add_list, force_update_info=False):
"""Append new channels to the instance.
Parameters
----------
add_list : list
A list of objects to append to self. Must contain all the same
type as the current object.
force_update_info : bool
If True, force the info for objects to be appended to match the
values in ``self``. This should generally only be used when adding
stim channels for which important metadata won't be overwritten.
.. versionadded:: 0.12
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
See Also
--------
drop_channels
Notes
-----
If ``self`` is a Raw instance that has been preloaded into a
:obj:`numpy.memmap` instance, the memmap will be resized.
"""
# avoid circular imports
from ..io import BaseRaw, _merge_info
from ..epochs import BaseEpochs
_validate_type(add_list, (list, tuple), 'Input')
# Object-specific checks
for inst in add_list + [self]:
_check_preload(inst, "adding channels")
if isinstance(self, BaseRaw):
con_axis = 0
comp_class = BaseRaw
elif isinstance(self, BaseEpochs):
con_axis = 1
comp_class = BaseEpochs
else:
con_axis = 0
comp_class = type(self)
for inst in add_list:
_validate_type(inst, comp_class, 'All input')
data = [inst._data for inst in [self] + add_list]
# Make sure that all dimensions other than channel axis are the same
compare_axes = [i for i in range(data[0].ndim) if i != con_axis]
shapes = np.array([dat.shape for dat in data])[:, compare_axes]
for shape in shapes:
if not ((shapes[0] - shape) == 0).all():
raise AssertionError('All data dimensions except channels '
'must match, got %s != %s'
% (shapes[0], shape))
del shapes
# Create final data / info objects
infos = [self.info] + [inst.info for inst in add_list]
new_info = _merge_info(infos, force_update_to_first=force_update_info)
# Now update the attributes
if isinstance(self._data, np.memmap) and con_axis == 0 and \
sys.platform != 'darwin': # resizing not available--no mremap
# Use a resize and fill in other ones
out_shape = (sum(d.shape[0] for d in data),) + data[0].shape[1:]
n_bytes = np.prod(out_shape) * self._data.dtype.itemsize
self._data.flush()
self._data.base.resize(n_bytes)
self._data = np.memmap(self._data.filename, mode='r+',
dtype=self._data.dtype, shape=out_shape)
assert self._data.shape == out_shape
assert self._data.nbytes == n_bytes
offset = len(data[0])
for d in data[1:]:
this_len = len(d)
self._data[offset:offset + this_len] = d
offset += this_len
else:
self._data = np.concatenate(data, axis=con_axis)
self.info = new_info
if isinstance(self, BaseRaw):
self._cals = np.concatenate([getattr(inst, '_cals')
for inst in [self] + add_list])
# We should never use these since data are preloaded, let's just
# set it to something large and likely to break (2 ** 31 - 1)
extra_idx = [2147483647] * sum(info['nchan'] for info in infos[1:])
assert all(len(r) == infos[0]['nchan'] for r in self._read_picks)
self._read_picks = [
np.concatenate([r, extra_idx]) for r in self._read_picks]
assert all(len(r) == self.info['nchan'] for r in self._read_picks)
return self
class InterpolationMixin(object):
"""Mixin class for Raw, Evoked, Epochs."""
@verbose
def interpolate_bads(self, reset_bads=True, mode='accurate',
origin='auto', method=None, verbose=None):
"""Interpolate bad MEG and EEG channels.
Operates in place.
Parameters
----------
reset_bads : bool
If True, remove the bads from info.
mode : str
Either ``'accurate'`` or ``'fast'``, determines the quality of the
Legendre polynomial expansion used for interpolation of channels
using the minimum-norm method.
origin : array-like, shape (3,) | str
Origin of the sphere in the head coordinate frame and in meters.
Can be ``'auto'`` (default), which means a head-digitization-based
origin fit.
.. versionadded:: 0.17
method : dict
Method to use for each channel type.
Currently only the key "eeg" has multiple options:
- ``"spline"`` (default)
Use spherical spline interpolation.
- ``"MNE"``
Use minimum-norm projection to a sphere and back.
This is the method used for MEG channels.
The value for "meg" is "MNE", and the value for
"fnirs" is "nearest". The default (None) is thus an alias for::
method=dict(meg="MNE", eeg="spline", fnirs="nearest")
.. versionadded:: 0.21
%(verbose_meth)s
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
Notes
-----
.. versionadded:: 0.9.0
"""
from ..bem import _check_origin
from .interpolation import _interpolate_bads_eeg,\
_interpolate_bads_meeg, _interpolate_bads_nirs
_check_preload(self, "interpolation")
method = _handle_default('interpolation_method', method)
for key in method:
_check_option('method[key]', key, ('meg', 'eeg', 'fnirs'))
_check_option("method['eeg']", method['eeg'], ('spline', 'MNE'))
_check_option("method['meg']", method['meg'], ('MNE',))
_check_option("method['fnirs']", method['fnirs'], ('nearest',))
if len(self.info['bads']) == 0:
warn('No bad channels to interpolate. Doing nothing...')
return self
logger.info('Interpolating bad channels')
origin = _check_origin(origin, self.info)
if method['eeg'] == 'spline':
_interpolate_bads_eeg(self, origin=origin)
eeg_mne = False
else:
eeg_mne = True
_interpolate_bads_meeg(self, mode=mode, origin=origin, eeg=eeg_mne)
_interpolate_bads_nirs(self)
if reset_bads is True:
self.info['bads'] = []
return self
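# Hedged usage sketch (comment only): interpolate the channels listed in
# info['bads'] of a hypothetical, preloaded `raw` and clear the bads list.
#
# >>> raw.interpolate_bads(reset_bads=True) # doctest: +SKIP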
@fill_doc
def rename_channels(info, mapping):
"""Rename channels.
.. warning:: The channel names must have at most 15 characters
Parameters
----------
info : dict
Measurement info to modify.
%(rename_channels_mapping)s
"""
_validate_type(info, Info, 'info')
info._check_consistency()
bads = list(info['bads']) # make our own local copies
ch_names = list(info['ch_names'])
# first check and assemble clean mappings of index and name
if isinstance(mapping, dict):
orig_names = sorted(list(mapping.keys()))
missing = [orig_name not in ch_names for orig_name in orig_names]
if any(missing):
raise ValueError("Channel name(s) in mapping missing from info: "
"%s" % np.array(orig_names)[np.array(missing)])
new_names = [(ch_names.index(ch_name), new_name)
for ch_name, new_name in mapping.items()]
elif callable(mapping):
new_names = [(ci, mapping(ch_name))
for ci, ch_name in enumerate(ch_names)]
else:
raise ValueError('mapping must be callable or dict, not %s'
% (type(mapping),))
# check we got all strings out of the mapping
for new_name in new_names:
_validate_type(new_name[1], 'str', 'New channel mappings')
bad_new_names = [name for _, name in new_names if len(name) > 15]
    if len(bad_new_names):
        raise ValueError('Channel names cannot be longer than 15 '
                         'characters. These channel names are not '
                         'valid : %s' % bad_new_names)
# do the remapping locally
for c_ind, new_name in new_names:
for bi, bad in enumerate(bads):
if bad == ch_names[c_ind]:
bads[bi] = new_name
ch_names[c_ind] = new_name
# check that all the channel names are unique
if len(ch_names) != len(np.unique(ch_names)):
raise ValueError('New channel names are not unique, renaming failed')
# do the remapping in info
info['bads'] = bads
for ch, ch_name in zip(info['chs'], ch_names):
ch['ch_name'] = ch_name
info._update_redundant()
info._check_consistency()
def _recursive_flatten(cell, dtype):
"""Unpack mat files in Python."""
if len(cell) > 0:
while not isinstance(cell[0], dtype):
cell = [c for d in cell for c in d]
return cell
@fill_doc
def read_ch_adjacency(fname, picks=None):
"""Parse FieldTrip neighbors .mat file.
More information on these neighbor definitions can be found on the related
`FieldTrip documentation pages
<http://www.fieldtriptoolbox.org/template/neighbours/>`__.
Parameters
----------
fname : str
The file name. Example: 'neuromag306mag', 'neuromag306planar',
'ctf275', 'biosemi64', etc.
%(picks_all)s
        Picks must match the template.
Returns
-------
ch_adjacency : scipy.sparse.csr_matrix, shape (n_channels, n_channels)
The adjacency matrix.
ch_names : list
The list of channel names present in adjacency matrix.
See Also
--------
find_ch_adjacency
Notes
-----
This function is closely related to :func:`find_ch_adjacency`. If you
don't know the correct file for the neighbor definitions,
:func:`find_ch_adjacency` can compute the adjacency matrix from 2d
sensor locations.
"""
from scipy.io import loadmat
if not op.isabs(fname):
templates_dir = op.realpath(op.join(op.dirname(__file__),
'data', 'neighbors'))
templates = os.listdir(templates_dir)
for f in templates:
if f == fname:
break
if f == fname + '_neighb.mat':
fname += '_neighb.mat'
break
else:
raise ValueError('I do not know about this neighbor '
'template: "{}"'.format(fname))
fname = op.join(templates_dir, fname)
nb = loadmat(fname)['neighbours']
ch_names = _recursive_flatten(nb['label'], str)
picks = _picks_to_idx(len(ch_names), picks)
neighbors = [_recursive_flatten(c, str) for c in
nb['neighblabel'].flatten()]
assert len(ch_names) == len(neighbors)
adjacency = _ch_neighbor_adjacency(ch_names, neighbors)
# picking before constructing matrix is buggy
adjacency = adjacency[picks][:, picks]
ch_names = [ch_names[p] for p in picks]
return adjacency, ch_names
def _ch_neighbor_adjacency(ch_names, neighbors):
"""Compute sensor adjacency matrix.
Parameters
----------
ch_names : list of str
The channel names.
neighbors : list of list
A list of list of channel names. The neighbors to
which the channels in ch_names are connected with.
Must be of the same length as ch_names.
Returns
-------
ch_adjacency : scipy.sparse matrix
The adjacency matrix.
"""
if len(ch_names) != len(neighbors):
raise ValueError('`ch_names` and `neighbors` must '
'have the same length')
set_neighbors = {c for d in neighbors for c in d}
rest = set_neighbors - set(ch_names)
if len(rest) > 0:
raise ValueError('Some of your neighbors are not present in the '
'list of channel names')
for neigh in neighbors:
if (not isinstance(neigh, list) and
not all(isinstance(c, str) for c in neigh)):
raise ValueError('`neighbors` must be a list of lists of str')
ch_adjacency = np.eye(len(ch_names), dtype=bool)
for ii, neigbs in enumerate(neighbors):
ch_adjacency[ii, [ch_names.index(i) for i in neigbs]] = True
ch_adjacency = sparse.csr_matrix(ch_adjacency)
return ch_adjacency
def find_ch_adjacency(info, ch_type):
"""Find the adjacency matrix for the given channels.
This function tries to infer the appropriate adjacency matrix template
for the given channels. If a template is not found, the adjacency matrix
is computed using Delaunay triangulation based on 2d sensor locations.
Parameters
----------
info : instance of Info
The measurement info.
ch_type : str | None
The channel type for computing the adjacency matrix. Currently
supports 'mag', 'grad', 'eeg' and None. If None, the info must contain
only one channel type.
Returns
-------
ch_adjacency : scipy.sparse.csr_matrix, shape (n_channels, n_channels)
The adjacency matrix.
ch_names : list
The list of channel names present in adjacency matrix.
See Also
--------
read_ch_adjacency
Notes
-----
.. versionadded:: 0.15
Automatic detection of an appropriate adjacency matrix template only
works for MEG data at the moment. This means that the adjacency matrix
is always computed for EEG data and never loaded from a template file. If
you want to load a template for a given montage use
:func:`read_ch_adjacency` directly.
"""
if ch_type is None:
picks = channel_indices_by_type(info)
if sum([len(p) != 0 for p in picks.values()]) != 1:
raise ValueError('info must contain only one channel type if '
'ch_type is None.')
ch_type = channel_type(info, 0)
else:
_check_option('ch_type', ch_type, ['mag', 'grad', 'eeg'])
(has_vv_mag, has_vv_grad, is_old_vv, has_4D_mag, ctf_other_types,
has_CTF_grad, n_kit_grads, has_any_meg, has_eeg_coils,
has_eeg_coils_and_meg, has_eeg_coils_only,
has_neuromag_122_grad, has_csd_coils) = _get_ch_info(info)
conn_name = None
if has_vv_mag and ch_type == 'mag':
conn_name = 'neuromag306mag'
elif has_vv_grad and ch_type == 'grad':
conn_name = 'neuromag306planar'
elif has_neuromag_122_grad:
conn_name = 'neuromag122'
elif has_4D_mag:
if 'MEG 248' in info['ch_names']:
idx = info['ch_names'].index('MEG 248')
grad = info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_MAGNES_GRAD
mag = info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_MAGNES_MAG
if ch_type == 'grad' and grad:
conn_name = 'bti248grad'
elif ch_type == 'mag' and mag:
conn_name = 'bti248'
elif 'MEG 148' in info['ch_names'] and ch_type == 'mag':
idx = info['ch_names'].index('MEG 148')
if info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_MAGNES_MAG:
conn_name = 'bti148'
elif has_CTF_grad and ch_type == 'mag':
if info['nchan'] < 100:
conn_name = 'ctf64'
elif info['nchan'] > 200:
conn_name = 'ctf275'
else:
conn_name = 'ctf151'
elif n_kit_grads > 0:
from ..io.kit.constants import KIT_NEIGHBORS
conn_name = KIT_NEIGHBORS.get(info['kit_system_id'])
if conn_name is not None:
logger.info('Reading adjacency matrix for %s.' % conn_name)
return read_ch_adjacency(conn_name)
    logger.info('Could not find an adjacency matrix for the data. '
                'Computing adjacency based on Delaunay triangulations.')
return _compute_ch_adjacency(info, ch_type)
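# Hedged usage sketch (comment only): infer an adjacency matrix for the
# magnetometers of a hypothetical `info`.
#
# >>> adjacency, ch_names = find_ch_adjacency(info, ch_type='mag') # doctest: +SKIP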
def _compute_ch_adjacency(info, ch_type):
"""Compute channel adjacency matrix using Delaunay triangulations.
Parameters
----------
    info : instance of Info
The measurement info.
ch_type : str
The channel type for computing the adjacency matrix. Currently
supports 'mag', 'grad' and 'eeg'.
Returns
-------
ch_adjacency : scipy.sparse matrix, shape (n_channels, n_channels)
The adjacency matrix.
ch_names : list
The list of channel names present in adjacency matrix.
"""
from scipy.spatial import Delaunay
from .. import spatial_tris_adjacency
from ..channels.layout import _find_topomap_coords, _pair_grad_sensors
combine_grads = (ch_type == 'grad' and FIFF.FIFFV_COIL_VV_PLANAR_T1 in
np.unique([ch['coil_type'] for ch in info['chs']]))
picks = dict(_picks_by_type(info, exclude=[]))[ch_type]
ch_names = [info['ch_names'][pick] for pick in picks]
if combine_grads:
pairs = _pair_grad_sensors(info, topomap_coords=False, exclude=[])
if len(pairs) != len(picks):
raise RuntimeError('Cannot find a pair for some of the '
'gradiometers. Cannot compute adjacency '
'matrix.')
# only for one of the pair
xy = _find_topomap_coords(info, picks[::2], sphere=HEAD_SIZE_DEFAULT)
else:
xy = _find_topomap_coords(info, picks, sphere=HEAD_SIZE_DEFAULT)
tri = Delaunay(xy)
neighbors = spatial_tris_adjacency(tri.simplices)
if combine_grads:
ch_adjacency = np.eye(len(picks), dtype=bool)
for idx, neigbs in zip(neighbors.row, neighbors.col):
for ii in range(2): # make sure each pair is included
for jj in range(2):
ch_adjacency[idx * 2 + ii, neigbs * 2 + jj] = True
ch_adjacency[idx * 2 + ii, idx * 2 + jj] = True # pair
ch_adjacency = sparse.csr_matrix(ch_adjacency)
else:
ch_adjacency = sparse.lil_matrix(neighbors)
ch_adjacency.setdiag(np.repeat(1, ch_adjacency.shape[0]))
ch_adjacency = ch_adjacency.tocsr()
return ch_adjacency, ch_names
def fix_mag_coil_types(info, use_cal=False):
"""Fix magnetometer coil types.
Parameters
----------
info : dict
The info dict to correct. Corrections are done in-place.
use_cal : bool
If True, further refine the check for old coil types by checking
``info['chs'][ii]['cal']``.
Notes
-----
This function changes magnetometer coil types 3022 (T1: SQ20483N) and
3023 (T2: SQ20483-A) to 3024 (T3: SQ20950N) in the channel definition
records in the info structure.
Neuromag Vectorview systems can contain magnetometers with two
different coil sizes (3022 and 3023 vs. 3024). The systems
incorporating coils of type 3024 were introduced last and are used at
the majority of MEG sites. At some sites with 3024 magnetometers,
the data files have still defined the magnetometers to be of type
3022 to ensure compatibility with older versions of Neuromag software.
In the MNE software as well as in the present version of Neuromag
software coil type 3024 is fully supported. Therefore, it is now safe
to upgrade the data files to use the true coil type.
.. note:: The effect of the difference between the coil sizes on the
current estimates computed by the MNE software is very small.
Therefore the use of ``fix_mag_coil_types`` is not mandatory.
"""
old_mag_inds = _get_T1T2_mag_inds(info, use_cal)
for ii in old_mag_inds:
info['chs'][ii]['coil_type'] = FIFF.FIFFV_COIL_VV_MAG_T3
logger.info('%d of %d magnetometer types replaced with T3.' %
(len(old_mag_inds), len(pick_types(info, meg='mag'))))
info._check_consistency()
def _get_T1T2_mag_inds(info, use_cal=False):
"""Find T1/T2 magnetometer coil types."""
picks = pick_types(info, meg='mag')
old_mag_inds = []
# From email exchanges, systems with the larger T2 coil only use the cal
# value of 2.09e-11. Newer T3 magnetometers use 4.13e-11 or 1.33e-10
# (Triux). So we can use a simple check for > 3e-11.
for ii in picks:
ch = info['chs'][ii]
if ch['coil_type'] in (FIFF.FIFFV_COIL_VV_MAG_T1,
FIFF.FIFFV_COIL_VV_MAG_T2):
if use_cal:
if ch['cal'] > 3e-11:
old_mag_inds.append(ii)
else:
old_mag_inds.append(ii)
return old_mag_inds
def _get_ch_info(info):
"""Get channel info for inferring acquisition device."""
chs = info['chs']
# Only take first 16 bits, as higher bits store CTF comp order
coil_types = {ch['coil_type'] & 0xFFFF for ch in chs}
channel_types = {ch['kind'] for ch in chs}
has_vv_mag = any(k in coil_types for k in
[FIFF.FIFFV_COIL_VV_MAG_T1, FIFF.FIFFV_COIL_VV_MAG_T2,
FIFF.FIFFV_COIL_VV_MAG_T3])
has_vv_grad = any(k in coil_types for k in [FIFF.FIFFV_COIL_VV_PLANAR_T1,
FIFF.FIFFV_COIL_VV_PLANAR_T2,
FIFF.FIFFV_COIL_VV_PLANAR_T3])
has_neuromag_122_grad = any(k in coil_types
for k in [FIFF.FIFFV_COIL_NM_122])
is_old_vv = ' ' in chs[0]['ch_name']
has_4D_mag = FIFF.FIFFV_COIL_MAGNES_MAG in coil_types
ctf_other_types = (FIFF.FIFFV_COIL_CTF_REF_MAG,
FIFF.FIFFV_COIL_CTF_REF_GRAD,
FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD)
has_CTF_grad = (FIFF.FIFFV_COIL_CTF_GRAD in coil_types or
(FIFF.FIFFV_MEG_CH in channel_types and
any(k in ctf_other_types for k in coil_types)))
# hack due to MNE-C bug in IO of CTF
# only take first 16 bits, as higher bits store CTF comp order
n_kit_grads = sum(ch['coil_type'] & 0xFFFF == FIFF.FIFFV_COIL_KIT_GRAD
for ch in chs)
has_any_meg = any([has_vv_mag, has_vv_grad, has_4D_mag, has_CTF_grad,
n_kit_grads])
has_eeg_coils = (FIFF.FIFFV_COIL_EEG in coil_types and
FIFF.FIFFV_EEG_CH in channel_types)
has_eeg_coils_and_meg = has_eeg_coils and has_any_meg
has_eeg_coils_only = has_eeg_coils and not has_any_meg
has_csd_coils = (FIFF.FIFFV_COIL_EEG_CSD in coil_types and
FIFF.FIFFV_EEG_CH in channel_types)
return (has_vv_mag, has_vv_grad, is_old_vv, has_4D_mag, ctf_other_types,
has_CTF_grad, n_kit_grads, has_any_meg, has_eeg_coils,
has_eeg_coils_and_meg, has_eeg_coils_only, has_neuromag_122_grad,
has_csd_coils)
def make_1020_channel_selections(info, midline="z"):
"""Return dict mapping from ROI names to lists of picks for 10/20 setups.
This passes through all channel names, and uses a simple heuristic to
separate channel names into three Region of Interest-based selections:
Left, Midline and Right. The heuristic is that channels ending on any of
the characters in ``midline`` are filed under that heading, otherwise those
ending in odd numbers under "Left", those in even numbers under "Right".
Other channels are ignored. This is appropriate for 10/20 files, but not
for other channel naming conventions.
If an info object is provided, lists are sorted from posterior to anterior.
Parameters
----------
info : instance of Info
Where to obtain the channel names from. The picks will
be in relation to the position in ``info["ch_names"]``. If possible,
        these lists will be sorted by the y position of the channel locations,
i.e., from back to front.
midline : str
Names ending in any of these characters are stored under the
``Midline`` key. Defaults to 'z'. Note that capitalization is ignored.
Returns
-------
selections : dict
A dictionary mapping from ROI names to lists of picks (integers).
"""
_validate_type(info, "info")
try:
from .layout import find_layout
layout = find_layout(info)
pos = layout.pos
ch_names = layout.names
except RuntimeError: # no channel positions found
ch_names = info["ch_names"]
pos = None
selections = dict(Left=[], Midline=[], Right=[])
for pick, channel in enumerate(ch_names):
last_char = channel[-1].lower() # in 10/20, last char codes hemisphere
if last_char in midline:
selection = "Midline"
elif last_char.isdigit():
selection = "Left" if int(last_char) % 2 else "Right"
else: # ignore the channel
continue
selections[selection].append(pick)
if pos is not None:
        # sort channels from posterior to anterior
# (y-coordinate of the position info in the layout)
selections = {selection: np.array(picks)[pos[picks, 1].argsort()]
for selection, picks in selections.items()}
return selections
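# Hedged usage sketch (comment only): split a 10/20-style montage described by
# a hypothetical `info` into Left/Midline/Right picks.
#
# >>> selections = make_1020_channel_selections(info, midline='z') # doctest: +SKIP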
def combine_channels(inst, groups, method='mean', keep_stim=False,
drop_bad=False):
"""Combine channels based on specified channel grouping.
Parameters
----------
inst : instance of Raw, Epochs, or Evoked
An MNE-Python object to combine the channels for. The object can be of
type Raw, Epochs, or Evoked.
groups : dict
Specifies which channels are aggregated into a single channel, with
aggregation method determined by the ``method`` parameter. One new
pseudo-channel is made per dict entry; the dict values must be lists of
picks (integer indices of ``ch_names``). For example::
groups=dict(Left=[1, 2, 3, 4], Right=[5, 6, 7, 8])
Note that within a dict entry all channels must have the same type.
method : str | callable
Which method to use to combine channels. If a :class:`str`, must be one
of 'mean', 'median', or 'std' (standard deviation). If callable, the
callable must accept one positional input (data of shape ``(n_channels,
n_times)``, or ``(n_epochs, n_channels, n_times)``) and return an
:class:`array <numpy.ndarray>` of shape ``(n_times,)``, or ``(n_epochs,
n_times)``. For example with an instance of Raw or Evoked::
method = lambda data: np.mean(data, axis=0)
Another example with an instance of Epochs::
method = lambda data: np.median(data, axis=1)
Defaults to ``'mean'``.
keep_stim : bool
If ``True``, include stimulus channels in the resulting object.
Defaults to ``False``.
drop_bad : bool
If ``True``, drop channels marked as bad before combining. Defaults to
``False``.
Returns
-------
combined_inst : instance of Raw, Epochs, or Evoked
An MNE-Python object of the same type as the input ``inst``, containing
one virtual channel for each group in ``groups`` (and, if ``keep_stim``
is ``True``, also containing stimulus channels).
"""
from ..io import BaseRaw, RawArray
from .. import BaseEpochs, EpochsArray, Evoked, EvokedArray
ch_axis = 1 if isinstance(inst, BaseEpochs) else 0
ch_idx = list(range(inst.info['nchan']))
ch_names = inst.info['ch_names']
ch_types = inst.get_channel_types()
inst_data = inst.data if isinstance(inst, Evoked) else inst.get_data()
groups = OrderedDict(deepcopy(groups))
# Convert string values of ``method`` into callables
# XXX Possibly de-duplicate with _make_combine_callable of mne/viz/utils.py
if isinstance(method, str):
method_dict = {key: partial(getattr(np, key), axis=ch_axis)
for key in ('mean', 'median', 'std')}
try:
method = method_dict[method]
except KeyError:
raise ValueError('"method" must be a callable, or one of "mean", '
f'"median", or "std"; got "{method}".')
# Instantiate channel info and data
new_ch_names, new_ch_types, new_data = [], [], []
if not isinstance(keep_stim, bool):
raise TypeError('"keep_stim" must be of type bool, not '
f'{type(keep_stim)}.')
if keep_stim:
stim_ch_idx = list(pick_types(inst.info, meg=False, stim=True))
if stim_ch_idx:
new_ch_names = [ch_names[idx] for idx in stim_ch_idx]
new_ch_types = [ch_types[idx] for idx in stim_ch_idx]
new_data = [np.take(inst_data, idx, axis=ch_axis)
for idx in stim_ch_idx]
else:
warn('Could not find stimulus channels.')
# Get indices of bad channels
ch_idx_bad = []
if not isinstance(drop_bad, bool):
raise TypeError('"drop_bad" must be of type bool, not '
f'{type(drop_bad)}.')
if drop_bad and inst.info['bads']:
ch_idx_bad = pick_channels(ch_names, inst.info['bads'])
# Check correctness of combinations
for this_group, this_picks in groups.items():
# Check if channel indices are out of bounds
if not all(idx in ch_idx for idx in this_picks):
raise ValueError('Some channel indices are out of bounds.')
        # Check for heterogeneous sensor type combinations
this_ch_type = np.array(ch_types)[this_picks]
if len(set(this_ch_type)) > 1:
types = ', '.join(set(this_ch_type))
raise ValueError('Cannot combine sensors of different types; '
f'"{this_group}" contains types {types}.')
# Remove bad channels
these_bads = [idx for idx in this_picks if idx in ch_idx_bad]
this_picks = [idx for idx in this_picks if idx not in ch_idx_bad]
if these_bads:
logger.info('Dropped the following channels in group '
f'{this_group}: {these_bads}')
# Warn if combining fewer than 2 channels
if len(set(this_picks)) < 2:
warn(f'Less than 2 channels in group "{this_group}" when '
f'combining by method "{method}".')
# If all is good, create a more detailed dict without the bad channels
groups[this_group] = dict(picks=this_picks, ch_type=this_ch_type[0])
# Combine channels and add them to the new instance
for this_group, this_group_dict in groups.items():
new_ch_names.append(this_group)
new_ch_types.append(this_group_dict['ch_type'])
this_picks = this_group_dict['picks']
this_data = np.take(inst_data, this_picks, axis=ch_axis)
new_data.append(method(this_data))
new_data = np.swapaxes(new_data, 0, ch_axis)
info = create_info(sfreq=inst.info['sfreq'], ch_names=new_ch_names,
ch_types=new_ch_types)
if isinstance(inst, BaseRaw):
combined_inst = RawArray(new_data, info, first_samp=inst.first_samp,
verbose=inst.verbose)
elif isinstance(inst, BaseEpochs):
combined_inst = EpochsArray(new_data, info, events=inst.events,
tmin=inst.times[0], verbose=inst.verbose)
elif isinstance(inst, Evoked):
combined_inst = EvokedArray(new_data, info, tmin=inst.times[0],
verbose=inst.verbose)
return combined_inst
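# ------------------------------------------------------------------------
# Hedged usage sketch (illustrative only; it assumes this function is exposed
# as ``combine_channels`` and that ``raw`` is an already-loaded Raw instance
# with at least eight channels):
#
#     >>> groups = dict(Left=[1, 2, 3, 4], Right=[5, 6, 7, 8])
#     >>> combined = combine_channels(raw, groups, method='mean',
#     ...                             keep_stim=False, drop_bad=True)
#     >>> combined.ch_names
#     ['Left', 'Right']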
|
bsd-3-clause
|
simontorres/goodman
|
docs/conf.py
|
2
|
6720
|
# -*- coding: utf-8 -*-
#
# Goodman HTS Pipeline User Manual documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 30 18:30:57 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
__version__ = __import__('goodman_pipeline').__version__
extensions = ['sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.githubpages',
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.intersphinx',
'sphinxcontrib.napoleon']
intersphinx_mapping = {
'astropy': ('http://docs.astropy.org/en/latest/', None),
'ccdproc': ('https://ccdproc.readthedocs.io/en/latest/', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None),
'numpy': ('https://numpy.readthedocs.io/en/latest/', None),
'cython': ('http://docs.cython.org/en/latest/', None)}
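# With the mapping above, roles in the RST sources such as
# :class:`astropy.io.fits.HDUList` or :func:`numpy.median` resolve against the
# external inventories listed here (illustrative note, not part of the
# original configuration).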
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Goodman HTS Pipeline Documentation'
copyright = u'2017 NOAO/AURA, Inc. All rights reserved'
author = u'Simón Torres R., Bruno C. Quint, César Briceño'
license = 'bsd3'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '.'.join(__version__.split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
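# Illustrative note (not part of the original configuration): with the todo
# extension enabled above, a directive such as
#
#     .. todo:: double-check this section
#
# in the RST sources will be rendered in the built documentation.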
# -- Options for PDF output ----------------------------------------------
pdf_documents = [('user_manual',
u'user_manual_v{:s}'.format(release),
u'Goodman HTS Pipeline User Manual',
u'Simón Torres, César Briceño and Bruno Quint'), ]
# pdf_break_level = 0
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'bootstrap-astropy'
html_theme = 'sphinx_rtd_theme'
html_logo = '_static/soar_logo.png'
html_context = {'license': 'BSD 3-Clause License'}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {
# 'logotext1': 'Goodman',
# 'logotext2': 'Pipeline',
# 'logotext3': ':User Manual'}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# html_style = 'goodman.css'
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'GoodmanHTSPipelineUserManualdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'GoodmanHTSPipelineUserManual.tex', u'Goodman HTS Pipeline User Manual Documentation',
u'Simon Torres R.', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'goodmanhtspipelineusermanual', u'Goodman HTS Pipeline User Manual Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'GoodmanHTSPipelineUserManual', u'Goodman HTS Pipeline User Manual Documentation',
author, 'GoodmanHTSPipelineUserManual', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
|
bsd-3-clause
|
shikhardb/scikit-learn
|
sklearn/linear_model/tests/test_bayes.py
|
299
|
1770
|
# Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn import datasets
from sklearn.utils.testing import assert_array_almost_equal
def test_bayesian_on_diabetes():
# Test BayesianRidge on diabetes
raise SkipTest("XFailed Test")
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
clf = BayesianRidge(compute_score=True)
# Test with more samples than features
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
# Test with more features than samples
X = X[:5, :]
y = y[:5]
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_toy_bayesian_ridge_object():
# Test BayesianRidge on toy
X = np.array([[1], [2], [6], [8], [10]])
Y = np.array([1, 2, 6, 8, 10])
clf = BayesianRidge(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_toy_ard_object():
# Test BayesianRegression ARD classifier
X = np.array([[1], [2], [3]])
Y = np.array([1, 2, 3])
clf = ARDRegression(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
|
bsd-3-clause
|
jmontgom10/Mimir_pyPol
|
02b_buildStarAndNebulaMasks.py
|
1
|
19696
|
import os
import glob
import numpy as np
import warnings
from skimage import measure, morphology
from scipy import ndimage
from astropy.table import Table, Column
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.stats import gaussian_fwhm_to_sigma, sigma_clipped_stats
from astropy.convolution import convolve, convolve_fft, Gaussian2DKernel
from astroquery.vizier import Vizier
# For debugging
import matplotlib.pyplot as plt
# Add the AstroImage class
import astroimage as ai
# Add the header handler to the BaseImage class
from Mimir_header_handler import Mimir_header_handler
ai.set_instrument('2MASS')
# This script will read in the background level estimated for each on-target
# image in the previous step. The background level in dimmest parts of the
# on-target image will be directly computed, and the residual between the direct
# estimate and the interpolation will be stored. The distribution of these
# residual will be used to estimate which interpolated background levels can be
# trusted.
#==============================================================================
# *********************** CUSTOM USER CODE ************************************
# this is where the user specifies where the raw data is stored
# and some of the subdirectory structure to find the actual .FITS images
#==============================================================================
# This is the location of all PPOL reduction directory
PPOL_dir = 'C:\\Users\\Jordan\\FITS_data\\Mimir_data\\PPOL_Reduced\\201611'
# Build the path to the S3_Astrometry files
S3_dir = os.path.join(PPOL_dir, 'S3_Astrometry')
# This is the location where all pyPol data will be saved
pyPol_data = 'C:\\Users\\Jordan\\FITS_data\\Mimir_data\\pyPol_Reduced\\201611'
# This is the directory where the 2MASS tiles of the targets have been saved
# Go to "http://hachi.ipac.caltech.edu/" to download 2MASS tiles
TMASSdir = ".\\2MASSimages"
# Setup new directory for masks
maskDir = os.path.join(pyPol_data, 'Masks')
if (not os.path.isdir(maskDir)):
os.mkdir(maskDir, 0o755)
starMaskDir = os.path.join(maskDir, 'starMasks')
if (not os.path.isdir(starMaskDir)):
os.mkdir(starMaskDir, 0o755)
# Read in the indexFile data and select the filenames
indexFile = os.path.join(pyPol_data, 'reducedFileIndex.csv')
fileIndex = Table.read(indexFile, format='csv')
useRows = np.where(fileIndex['USE'])
fileIndex = fileIndex[useRows]
################################################################################
def find_2MASS_flux(array):
# Identify which pixels have acceptable "background" levels. Start by
# grabbing the image statistics
mean, median, stddev = sigma_clipped_stats(array)
# Identify pixels more than 2-sigma above the background
fgdThresh = median + 2.0*stddev
fgdRegion = array > fgdThresh
# Repeat the classification without the *definitely* nebular pixels
bkgPix = np.logical_not(fgdRegion)
mean, median, stddev = sigma_clipped_stats(array[bkgPix])
fgdThresh = median + 2.0*stddev
fgdRegion = array > fgdThresh
# Clean the foreground ID region
all_labels = measure.label(fgdRegion)
all_labels1 = morphology.remove_small_objects(all_labels, min_size=50)
fgdRegion = all_labels1 > 0
# Grab the part *only* connected to the central, nebular region
ny, nx = fgdRegion.shape
all_labels = measure.label(fgdRegion)
nebularLabel = all_labels[ny//2, nx//2]
nebularMask = all_labels == nebularLabel
starMask = np.logical_and(
all_labels > 0,
all_labels != nebularLabel
)
all_labels = measure.label(starMask)
all_labels1 = morphology.remove_small_objects(all_labels, min_size=50)
starMask = all_labels1 > 0
# Dilate generously to be conservative...
nebularSigma = 20.0 * gaussian_fwhm_to_sigma # FWHM = 20.0
# Build a kernel for detecting pixels above the threshold
nebularKernel = Gaussian2DKernel(nebularSigma, x_size=41, y_size=41)
nebularKernel.normalize()
nebularMask = convolve_fft(
nebularMask.astype(float),
nebularKernel.array
)
nebularMask = (nebularMask > 0.01)
# Expand a second time to be conservative
nebularMask = convolve_fft(
nebularMask.astype(float),
nebularKernel.array
)
nebularMask = (nebularMask > 0.01)
# Do a less aggressive dilation of the stellar mask
stellarSigma = 10.0 * gaussian_fwhm_to_sigma # FWHM = 10.0
# Build a kernel for detecting pixels above the threshold
stellarKernel = Gaussian2DKernel(stellarSigma, x_size=41, y_size=41)
stellarKernel.normalize()
stellarMask = convolve_fft(
fgdRegion.astype(float),
stellarKernel.array
)
stellarMask = (stellarMask > 0.01)
# Recombine the nebular and stellar components
fgdRegion = np.logical_or(nebularMask, stellarMask)
# Return the flux-bright pixels to the user
return fgdRegion
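# Hedged usage sketch for find_2MASS_flux (illustrative only, not executed by
# this script; the FITS path is hypothetical):
#
#     >>> from astropy.io import fits
#     >>> tileData = fits.getdata('.\\2MASSimages\\some_tile_H.fits')
#     >>> brightMask = find_2MASS_flux(tileData)
#     >>> brightMask.dtype, brightMask.shape == tileData.shape
#     (dtype('bool'), True)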
################################################################################
# Read in the Kokopelli Mask
kokopelliMask = ai.reduced.ReducedScience.read('kokopelliMask.fits')
# Dilate the mask in order to be more conservative.
kokopelliMask.data = ndimage.binary_dilation(kokopelliMask.data, iterations=8).astype(int)
# Construct the 2MASS masks and save to disk
# Read in all the 2MASS images and store them in a dictionary for quick reference
TMASS_Hfiles = np.array(glob.glob(os.path.join(TMASSdir, '*H.fits')))
TMASS_Kfiles = np.array(glob.glob(os.path.join(TMASSdir, '*Ks.fits')))
# Read in the 2MASS images
TMASS_HimgList = [ai.reduced.ReducedScience.read(f) for f in TMASS_Hfiles]
TMASS_KimgList = [ai.reduced.ReducedScience.read(f) for f in TMASS_Kfiles]
# Convert the images to "background masks"
for img in TMASS_HimgList:
# Construct the output name for this mask
base = os.path.basename(img.filename)
targetMask = base.split('.')[0] + '_mask.fits'
outFilename = os.path.join(TMASSdir, targetMask)
# Skip files that have already been done
if os.path.isfile(outFilename): continue
print('Building background mask for {}'.format(os.path.basename(img.filename)))
tmp = img.copy()
tmp.data = find_2MASS_flux(img.data).astype(int)
tmp.write(outFilename, dtype=np.uint8)
for img in TMASS_KimgList:
# Construct the output name for this mask
base = os.path.basename(img.filename)
targetMask = base.split('.')[0] + '_mask.fits'
outFilename = os.path.join(TMASSdir, targetMask)
# Skip files that have already been done
if os.path.isfile(outFilename): continue
print('Building background mask for {}'.format(os.path.basename(img.filename)))
tmp = img.copy()
tmp.data = find_2MASS_flux(img.data).astype(int)
tmp.write(outFilename, dtype=np.uint8)
# Now that the 2MASS files have been read in, it is safe to set the Mimir_header_handler
ai.set_instrument('Mimir')
ai.reduced.ReducedScience.set_header_handler(Mimir_header_handler)
# Set the Vizier download size to be unlimited
Vizier.ROW_LIMIT = -1
# Group by target HWP
groupedFileIndex = fileIndex.group_by(['GROUP_ID', 'HWP'])
#Loop through each file in the fileList variable
numberOfFiles = len(fileIndex)
bkgLevels = fileIndex['BACKGROUND']
print('{0:3.1%} complete'.format(0), end='\r')
iRow = 0
for group in groupedFileIndex.groups:
# Increment the row counter
iRow += len(group)
# Grab the relevant information for this group
thisTarget = np.unique(group['TARGET'])[0]
thisFilter = np.unique(group['FILTER'])[0]
# Re-group by dither pointing
ABBAsubGroups = group.group_by(['AB'])
for ABBAgroup in ABBAsubGroups.groups:
# Grab the relevant information for this subgroup
thisAB = np.unique(ABBAgroup['AB'])[0]
# If this is an on-target (A) subgroup, then skip it!
if thisAB == 'A': continue
# Grab the off-target files
Bfiles = []
maskFilenames = []
for thisFile in ABBAgroup['FILENAME']:
# Append the B-file to use
Bfiles.append(os.path.join(S3_dir, thisFile))
# Build the mask name
maskBasename = os.path.basename(thisFile)
maskFilenames.append(os.path.join(starMaskDir, maskBasename))
# Check if the file has already been written and skip those which have been
if all([os.path.isfile(f) for f in maskFilenames]): continue
# Read in the off-target frames
Bimgs = [ai.reduced.ReducedScience.read(f) for f in Bfiles]
numBimgs = len(Bimgs)
if numBimgs > 1:
# Combine the images to get a quick map of the region to download
BimgStack = ai.utilitywrappers.ImageStack(Bimgs, gobble=False)
BimgStack.align_images_with_wcs()
# Determine the boundaries of the region to download 2MASS data
referenceImage = BimgStack.imageList[0]
else:
referenceImage = Bimgs[0]
# Get the image shape and coordinates
ny, nx = referenceImage.shape
lfrt, bttp = referenceImage.wcs.wcs_pix2world([0, ny], [0, nx], 0)
lf, rt = lfrt
bt, tp = bttp
# Grab the maximum width and the central (RA, Dec)
RAcen, DecCen = 0.5*(lf + rt), 0.5*(bt + tp)
height = (tp - bt)*u.deg
width = (lf - rt)*np.cos(np.deg2rad(DecCen))*u.deg
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# Download the 2MASS point source catalog
tmassCatalog = Vizier.query_region(
SkyCoord(
ra=RAcen, dec=DecCen,
unit=(u.deg, u.deg),
frame='fk5'
),
width=width,
height=height,
catalog='II/246/out'
)[0]
# Cut off low SNR detections
tmassFilter = referenceImage.filter[0]
tmassSNR = tmassCatalog[tmassFilter+'snr']
goodDetections = np.logical_and(
tmassSNR.data.data > 5.0,
np.logical_not(tmassSNR.mask)
)
goodDetections = np.logical_and(
goodDetections,
np.logical_not(tmassCatalog[tmassFilter+'mag'].mask)
)
# Cull the bad data
tmassCatalog = tmassCatalog[goodDetections]
# Grab the RAs, Decs, and magnitudes
RAs, Decs = tmassCatalog['_RAJ2000'], tmassCatalog['_DEJ2000']
mags = tmassCatalog[tmassFilter+'mag']
# Loop through each file and build the preliminary mask
starMasks = []
for thisImg in Bimgs:
# # Read in the image and store it for possible later use
# thisImg = ai.reduced.ReducedScience.read(Bfile)
#
# # Attempt to recover a background estimate. If not, then just fill with -1e6
# # Locate the non-stellar pixels in this image
# photAnalyzer = ai.utilitywrappers.PhotometryAnalyzer(thisImg)
# try:
# _, psfParams = photAnalyzer.get_psf()
# FWHM = 2.355*np.sqrt(psfParams['sminor']*psfParams['smajor'])
# except:
# FWHM = 4.5
# xs, ys = thisImg.get_sources(FWHMguess = FWHM, minimumSNR = 3.5,
# satLimit = 1e20, edgeLimit = 21)
# starFluxes, fluxUncerts = photAnalyzer.aperture_photometry(
# xs, ys, FWHM, 24, 26, mask=(thisImg.data < -1e4)
# )
#
# # Catch bad stars
# kokopelliArtifacts = kokopelliMask.data[ys.round().astype(int), xs.round().astype(int)]
#
# # Look through the stars in Kokopelly and determine which are *real*
# realStars = (kokopelliArtifacts.astype(int)*starFluxes > 4e3)
# kokopelliArtifacts = np.logical_and(
# kokopelliArtifacts,
# np.logical_not(realStars)
# )
#
# # Only keep those stars which are not kokopilli artifacts
# goodInds = np.where(
# np.logical_and(
# starFluxes > 0,
# np.logical_not(kokopelliArtifacts)
# )
# )
# xs = xs[goodInds]
# ys = ys[goodInds]
# starFluxes = starFluxes[goodInds]
# Now simply mask *any* of the stars downloaded
xs, ys = thisImg.wcs.wcs_world2pix(RAs, Decs, 0)
starRadii = 35 - 1.5*mags
# Loop through each star and make its mask
ny, nx = thisImg.shape
yy, xx = np.mgrid[0:ny, 0:nx]
starMask = False
for xs1, ys1, rs in zip(xs, ys, starRadii):
if not np.isfinite(rs): import pdb; pdb.set_trace()  # drop into the debugger on a non-finite radius
# Compute the distances from this star
# Mask any pixels within 1 radius of this star
starMask = np.logical_or(
starMask,
np.sqrt((xx - xs1)**2 + (yy - ys1)**2) < rs
)
# Store the mask for later use
starMasks.append(starMask)
# # If more than one image exists in this group, then do a secondary pass to
# # locate the dimmer stars
# numBimgs = len(Bimgs)
# if numBimgs == 1:
# # Grab the available Bimg
# tmpImg = Bimgs[0].copy()
#
# # Smooth the data to look for lower SNR stars
# tmpImg.data = ndimage.median_filter(tmpImg.data, 3)
#
# # Construct a temporary image to do another pass as the low SNR stars
# tmpFWHM = np.sqrt(FWHM**2 + (0.5*3)**2)
# xs, ys = tmpImg.get_sources(FWHMguess = tmpFWHM, minimumSNR = 2.5,
# satLimit = 1e20, edgeLimit = 21)
# starFluxes, fluxUncerts = photAnalyzer.aperture_photometry(
# xs, ys, tmpFWHM, 24, 26, mask=(tmpImg.data < -1e4)
# )
#
# # Catch bad stars
# kokopelliArtifacts = kokopelliMask.data[ys.round().astype(int), xs.round().astype(int)]
#
# # Look through the stars in Kokopelly and determine which are *real*
# realStars = (kokopelliArtifacts.astype(int)*starFluxes > 4e3)
# kokopelliArtifacts = np.logical_and(
# kokopelliArtifacts,
# np.logical_not(realStars)
# )
#
# # Only keep those stars which are not kokopilli artifacts
# goodInds = np.where(
# np.logical_and(
# starFluxes > 0,
# np.logical_not(kokopelliArtifacts)
# )
# )
# xs = xs[goodInds]
# ys = ys[goodInds]
# starFluxes = starFluxes[goodInds]
# starRadii = 5*np.log10(starFluxes)
#
# # Loop through each star and make its mask
# ny, nx = thisImg.shape
# yy, xx = np.mgrid[0:ny, 0:nx]
# starMask = starMasks[0]
# for xs1, ys1, rs in zip(xs, ys, starRadii):
# if not np.isfinite(rs): import pdb;
# # Compute the distances from this star
# # Mask any pixels within 1 radius of this star
# starMask = np.logical_or(
# starMask,
# np.sqrt((xx - xs1)**2 + (yy - ys1)**2) < rs
# )
#
# # Store the mask for later use
# starMasks[0] = starMask
#
# elif numBimgs > 1:
# # Loop through each
# for iImg in range(numBimgs):
# # Determine which image is the primary image and which is secondary
# if iImg == 0:
# thisImg = Bimgs[0]
# otherImg = Bimgs[1]
# elif iImg == 1:
# thisImg = Bimgs[1]
# otherImg = Bimgs[0]
# else:
# print('What?! How did you even get here?!')
# import pdb; pdb.set_trace()
#
# # Grab the corresponding mask
# thisMask = starMasks[iImg]
#
# # Subtract the two images from eachother
# diffData = otherImg.data - thisImg.data
#
# # Smooth the difference image
# median9Data = ndimage.median_filter(diffData, 9)
#
# # LOOK FOR DIVOTS IN GENERAL MEDIAN FILTERED IMAGE
# # Locate pixels less than negative 2-sigma
# mean9, median9, stddev9 = sigma_clipped_stats(median9Data)
# starDivots = np.nan_to_num(median9Data) < (mean9 -4*stddev9)
#
# # Remove anything that is smaller than 20 pixels
# all_labels = measure.label(starDivots)
# all_labels1 = morphology.remove_small_objects(all_labels, min_size=20)
# label_hist, label_bins = np.histogram(
# all_labels1,
# bins=np.arange(all_labels1.max() - all_labels1.min())
# )
# label_mode = label_bins[label_hist.argmax()]
# starDivots = all_labels1 != label_mode
#
# # Remove any pixels along extreme top
# starDivots[ny-10:ny,:] = False
#
# # Dialate the starDivots mask
# stellarSigma = 5.0 * gaussian_fwhm_to_sigma # FWHM = 3.0
#
# # Build a kernel for detecting pixels above the threshold
# stellarKernel = Gaussian2DKernel(stellarSigma, x_size=41, y_size=41)
# stellarKernel.normalize()
# starDivots = convolve_fft(
# starDivots.astype(float),
# stellarKernel.array
# )
# starDivots = (starDivots > 0.01)
#
# # Compbine the divots mask and the original mask
# fullMask = np.logical_or(thisMask, starDivots)
#
# # Store the mask back in its list
# starMasks[iImg] = ai.reduced.ReducedScience(fullMask.astype(int))
#
# # Do a finel loop-through to make sure there is as much agreement between
# # the two masks as possible
# if numBimgs > 1:
# # Construct an image stack and compute image offsets
# BimgStack = ai.utilitywrappers.ImageStack(Bimgs)
# dx, dy = BimgStack.get_wcs_offsets(BimgStack)
#
# try:
# starMask0 = starMasks[0].copy()
# starMask1 = starMasks[1].copy()
# except:
# print('Why are there not 2 starMasks?')
# import pdb; pdb.set_trace()
#
# for iMask in range(numBimgs):
# # Determine which image is the primary image and which is secondary
# if iMask == 0:
# dx1 = dx[1] - dx[0]
# dy1 = dy[1] - dy[0]
# thisMask = starMask0
# otherMask = starMask1
# elif iMask == 1:
# dx1 = dx[0] - dx[1]
# dy1 = dy[0] - dy[1]
# thisMask = starMask1
# otherMask = starMask0
# else:
# print('What?! How did you even get here?!')
# import pdb; pdb.set_trace()
#
# # Shift the mask accordingly
# shiftedOtherMask = otherMask.shift(dx1, dy1)
#
# # Combine this mask and the shifted mask
# fullMask = np.logical_or(
# thisMask.data,
# shiftedOtherMask.data
# )
#
# # Store the mask for a final write-to-disk
# starMasks[iMask] = fullMask
# Look through the masks and write to disk
for maskFile, starMask in zip(maskFilenames, starMasks):
try:
# Write the mask to disk
maskImg = ai.reduced.ReducedScience(starMask.astype(int))
maskImg.write(maskFile, dtype=np.uint8)
except:
print('Failed to save file {}'.format(maskFile))
# Update on progress
print('{0:3.1%} complete'.format(iRow/numberOfFiles), end='\r')
# Alert the user that everything is complete
print('{0:3.1%} complete'.format(1), end='\n\n')
print('Done!')
|
mit
|
antoinecarme/pyaf
|
tests/perf_MedAE/test_ozone_MedAE.py
|
1
|
1207
|
from __future__ import absolute_import
import pandas as pd
import numpy as np
import pyaf.ForecastEngine as autof
import pyaf.Bench.TS_datasets as tsds
b1 = tsds.load_ozone()
df = b1.mPastData
#df.tail(10)
#df[:-10].tail()
#df[:-10:-1]
#df.describe()
lEngine = autof.cForecastEngine()
lEngine
H = b1.mHorizon;
# lEngine.mOptions.enable_slow_mode();
lEngine.mOptions.mModelSelection_Criterion = "MedAE"
lEngine.train(df , b1.mTimeVar , b1.mSignalVar, H);
lEngine.getModelInfo();
print(lEngine.mSignalDecomposition.mTrPerfDetails.head());
lEngine.mSignalDecomposition.mBestModel.mTimeInfo.mResolution
lEngine.standardPlots("outputs/my_ozone_MedAE");
dfapp_in = df.copy();
dfapp_in.tail()
#H = 12
dfapp_out = lEngine.forecast(dfapp_in, H);
#dfapp_out.to_csv("outputs/ozone_apply_out.csv")
dfapp_out.tail(2 * H)
print("Forecast Columns " , dfapp_out.columns);
Forecast_DF = dfapp_out[[b1.mTimeVar , b1.mSignalVar, b1.mSignalVar + '_Forecast']]
print(Forecast_DF.info())
print("Forecasts\n" , Forecast_DF.tail(H));
print("\n\n<ModelInfo>")
print(lEngine.to_json());
print("</ModelInfo>\n\n")
print("\n\n<Forecast>")
print(Forecast_DF.tail(2*H).to_json(date_format='iso'))
print("</Forecast>\n\n")
|
bsd-3-clause
|
andrewjrobinson/FreeCAD_sf_master
|
src/Mod/Plot/plotSeries/TaskPanel.py
|
1
|
12621
|
#***************************************************************************
#* *
#* Copyright (c) 2011, 2012 *
#* Jose Luis Cercos Pita <[email protected]> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
# FreeCAD modules
import FreeCAD as App
import FreeCADGui as Gui
# Qt library
from PyQt4 import QtGui,QtCore
# Module
import Plot
from plotUtils import Paths
# matplotlib
import matplotlib
from matplotlib.lines import Line2D
import matplotlib.colors as Colors
class TaskPanel:
def __init__(self):
self.ui = Paths.modulePath() + "/plotSeries/TaskPanel.ui"
self.skip = False
self.item = 0
self.plt = None
def accept(self):
return True
def reject(self):
return True
def clicked(self, index):
pass
def open(self):
pass
def needsFullSpace(self):
return True
def isAllowedAlterSelection(self):
return False
def isAllowedAlterView(self):
return True
def isAllowedAlterDocument(self):
return False
def helpRequested(self):
pass
def setupUi(self):
mw = self.getMainWindow()
form = mw.findChild(QtGui.QWidget, "TaskPanel")
form.items = form.findChild(QtGui.QListWidget, "items")
form.label = form.findChild(QtGui.QLineEdit, "label")
form.isLabel = form.findChild(QtGui.QCheckBox, "isLabel")
form.style = form.findChild(QtGui.QComboBox, "lineStyle")
form.marker = form.findChild(QtGui.QComboBox, "markers")
form.width = form.findChild(QtGui.QDoubleSpinBox, "lineWidth")
form.size = form.findChild(QtGui.QSpinBox, "markerSize")
form.color = form.findChild(QtGui.QPushButton, "color")
form.remove = form.findChild(QtGui.QPushButton, "remove")
self.form = form
self.retranslateUi()
self.fillStyles()
self.updateUI()
QtCore.QObject.connect(form.items, QtCore.SIGNAL("currentRowChanged(int)"),self.onItem)
QtCore.QObject.connect(form.label, QtCore.SIGNAL("editingFinished()"),self.onData)
QtCore.QObject.connect(form.isLabel,QtCore.SIGNAL("stateChanged(int)"),self.onData)
QtCore.QObject.connect(form.style, QtCore.SIGNAL("currentIndexChanged(int)"),self.onData)
QtCore.QObject.connect(form.marker, QtCore.SIGNAL("currentIndexChanged(int)"),self.onData)
QtCore.QObject.connect(form.width, QtCore.SIGNAL("valueChanged(double)"),self.onData)
QtCore.QObject.connect(form.size, QtCore.SIGNAL("valueChanged(int)"),self.onData)
QtCore.QObject.connect(form.color, QtCore.SIGNAL("pressed()"),self.onColor)
QtCore.QObject.connect(form.remove, QtCore.SIGNAL("pressed()"),self.onRemove)
QtCore.QObject.connect(Plot.getMdiArea(),QtCore.SIGNAL("subWindowActivated(QMdiSubWindow*)"),self.onMdiArea)
return False
def getMainWindow(self):
"returns the main window"
# using QtGui.qApp.activeWindow() isn't very reliable because if a widget
# other than the main window is active (e.g. a dialog), the wrong widget is
# returned
toplevel = QtGui.qApp.topLevelWidgets()
for i in toplevel:
if i.metaObject().className() == "Gui::MainWindow":
return i
raise Exception("No main window found")
def retranslateUi(self):
""" Set user interface locale strings.
"""
self.form.setWindowTitle(QtGui.QApplication.translate("plot_series", "Configure series",
None,QtGui.QApplication.UnicodeUTF8))
self.form.isLabel.setText(QtGui.QApplication.translate("plot_series", "No label",
None,QtGui.QApplication.UnicodeUTF8))
self.form.remove.setText(QtGui.QApplication.translate("plot_series", "Remove serie",
None,QtGui.QApplication.UnicodeUTF8))
self.form.findChild(QtGui.QLabel, "styleLabel").setText(QtGui.QApplication.translate("plot_series", "Line style",
None,QtGui.QApplication.UnicodeUTF8))
self.form.findChild(QtGui.QLabel, "markerLabel").setText(QtGui.QApplication.translate("plot_series", "Marker",
None,QtGui.QApplication.UnicodeUTF8))
self.form.items.setToolTip(QtGui.QApplication.translate("plot_series", "List of available series",
None,QtGui.QApplication.UnicodeUTF8))
self.form.label.setToolTip(QtGui.QApplication.translate("plot_series", "Line title",
None,QtGui.QApplication.UnicodeUTF8))
self.form.isLabel.setToolTip(QtGui.QApplication.translate("plot_series", "If checked serie will not be considered for legend",
None,QtGui.QApplication.UnicodeUTF8))
self.form.style.setToolTip(QtGui.QApplication.translate("plot_series", "Line style",
None,QtGui.QApplication.UnicodeUTF8))
self.form.marker.setToolTip(QtGui.QApplication.translate("plot_series", "Marker style",
None,QtGui.QApplication.UnicodeUTF8))
self.form.width.setToolTip(QtGui.QApplication.translate("plot_series", "Line width",
None,QtGui.QApplication.UnicodeUTF8))
self.form.size.setToolTip(QtGui.QApplication.translate("plot_series", "Marker size",
None,QtGui.QApplication.UnicodeUTF8))
self.form.color.setToolTip(QtGui.QApplication.translate("plot_series", "Line and marker color",
None,QtGui.QApplication.UnicodeUTF8))
self.form.remove.setToolTip(QtGui.QApplication.translate("plot_series", "Removes this serie",
None,QtGui.QApplication.UnicodeUTF8))
def fillStyles(self):
""" Fill style combo boxes. """
# Line styles
linestyles = Line2D.lineStyles.keys()
for i in range(0,len(linestyles)):
style = linestyles[i]
string = "\'" + str(style) + "\' (" + Line2D.lineStyles[style] + ")"
self.form.style.addItem(string)
# Markers
markers = Line2D.markers.keys()
for i in range(0,len(markers)):
marker = markers[i]
string = "\'" + str(marker) + "\' (" + Line2D.markers[marker] + ")"
self.form.marker.addItem(string)
def onItem(self, row):
""" Executed when selected item is modified. """
if not self.skip:
self.skip = True
# Get selected item
self.item = row
# Call to update
self.updateUI()
self.skip = False
def onData(self):
""" Executed when selected item data is modified. """
if not self.skip:
self.skip = True
plt = Plot.getPlot()
if not plt:
self.updateUI()
return
# Ensure that selected serie exist
if self.item >= len(Plot.series()):
self.updateUI()
return
# Set label
serie = Plot.series()[self.item]
if(self.form.isLabel.isChecked()):
serie.name = None
self.form.label.setEnabled(False)
else:
serie.name = self.form.label.text()
self.form.label.setEnabled(True)
# Set line style and marker
style = self.form.style.currentIndex()
linestyles = Line2D.lineStyles.keys()
serie.line.set_linestyle(linestyles[style])
marker = self.form.marker.currentIndex()
markers = Line2D.markers.keys()
serie.line.set_marker(markers[marker])
# Set line width and marker size
serie.line.set_linewidth(self.form.width.value())
serie.line.set_markersize(self.form.size.value())
plt.update()
# Regenerate series labels
self.setList()
self.skip = False
def onColor(self):
""" Executed when color pallete is requested. """
plt = Plot.getPlot()
if not plt:
self.updateUI()
return
# Ensure that selected serie exist
if self.item >= len(Plot.series()):
self.updateUI()
return
# Show widget to select color
col = QtGui.QColorDialog.getColor()
# Send color to widget and serie
if col.isValid():
serie = plt.series[self.item]
self.form.color.setStyleSheet("background-color: rgb(%d, %d, %d);" % (col.red(),
col.green(), col.blue()))
serie.line.set_color((col.redF(), col.greenF(), col.blueF()))
plt.update()
def onRemove(self):
""" Executed when data serie must be removed. """
plt = Plot.getPlot()
if not plt:
self.updateUI()
return
# Ensure that selected serie exist
if self.item >= len(Plot.series()):
self.updateUI()
return
# Remove serie
Plot.removeSerie(self.item)
self.setList()
self.updateUI()
plt.update()
def onMdiArea(self, subWin):
""" Executed when window is selected on mdi area.
@param subWin Selected window.
"""
plt = Plot.getPlot()
if plt != subWin:
self.updateUI()
def updateUI(self):
""" Setup UI controls values if possible """
plt = Plot.getPlot()
self.form.items.setEnabled(bool(plt))
self.form.label.setEnabled(bool(plt))
self.form.isLabel.setEnabled(bool(plt))
self.form.style.setEnabled(bool(plt))
self.form.marker.setEnabled(bool(plt))
self.form.width.setEnabled(bool(plt))
self.form.size.setEnabled(bool(plt))
self.form.color.setEnabled(bool(plt))
self.form.remove.setEnabled(bool(plt))
if not plt:
self.plt = plt
self.form.items.clear()
return
self.skip = True
# Refill list
if self.plt != plt or len(Plot.series()) != self.form.items.count():
self.plt = plt
self.setList()
# Ensure that have series
if not len(Plot.series()):
self.form.label.setEnabled(False)
self.form.isLabel.setEnabled(False)
self.form.style.setEnabled(False)
self.form.marker.setEnabled(False)
self.form.width.setEnabled(False)
self.form.size.setEnabled(False)
self.form.color.setEnabled(False)
self.form.remove.setEnabled(False)
return
# Set label
serie = Plot.series()[self.item]
if serie.name == None:
self.form.isLabel.setChecked(True)
self.form.label.setEnabled(False)
self.form.label.setText("")
else:
self.form.isLabel.setChecked(False)
self.form.label.setText(serie.name)
# Set line style and marker
self.form.style.setCurrentIndex(0)
linestyles = Line2D.lineStyles.keys()
for i in range(0,len(linestyles)):
style = linestyles[i]
if style == serie.line.get_linestyle():
self.form.style.setCurrentIndex(i)
self.form.marker.setCurrentIndex(0)
markers = Line2D.markers.keys()
for i in range(0,len(markers)):
marker = markers[i]
if marker == serie.line.get_marker():
self.form.marker.setCurrentIndex(i)
# Set line width and marker size
self.form.width.setValue(serie.line.get_linewidth())
self.form.size.setValue(serie.line.get_markersize())
# Set color
color = Colors.colorConverter.to_rgb(serie.line.get_color())
self.form.color.setStyleSheet("background-color: rgb(%d, %d, %d);" % (int(color[0]*255),
int(color[1]*255), int(color[2]*255)))
self.skip = False
def setList(self):
""" Setup UI controls values if possible """
self.form.items.clear()
series = Plot.series()
for i in range(0,len(series)):
serie = series[i]
string = 'serie ' + str(i) + ': '
if serie.name == None:
string = string + '\"No label\"'
else:
string = string + serie.name
self.form.items.addItem(string)
# Ensure that selected item is correct
if len(series) and self.item >= len(series):
self.item = len(series)-1
self.form.items.setCurrentIndex(self.item)
def createTask():
panel = TaskPanel()
Gui.Control.showDialog(panel)
if panel.setupUi():
Gui.Control.closeDialog(panel)
return None
return panel
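# Hedged usage sketch (illustrative only; the import path is assumed from the
# Plot workbench layout and may differ):
#
#     from plotSeries import TaskPanel
#     panel = TaskPanel.createTask()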
|
lgpl-2.1
|
JsNoNo/scikit-learn
|
sklearn/tests/test_calibration.py
|
213
|
12219
|
# Authors: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_greater, assert_almost_equal,
assert_greater_equal,
assert_array_equal,
assert_raises,
assert_warns_message)
from sklearn.datasets import make_classification, make_blobs
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.svm import LinearSVC
from sklearn.linear_model import Ridge
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.metrics import brier_score_loss, log_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.calibration import _sigmoid_calibration, _SigmoidCalibration
from sklearn.calibration import calibration_curve
def test_calibration():
"""Test calibration objects with isotonic and sigmoid"""
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test, y_test = X[n_samples:], y[n_samples:]
# Naive-Bayes
clf = MultinomialNB().fit(X_train, y_train, sample_weight=sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
pc_clf = CalibratedClassifierCV(clf, cv=y.size + 1)
assert_raises(ValueError, pc_clf.fit, X, y)
# Naive Bayes with calibration
for this_X_train, this_X_test in [(X_train, X_test),
(sparse.csr_matrix(X_train),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv=2)
# Note that this fit overwrites the fit on the entire training
# set
pc_clf.fit(this_X_train, y_train, sample_weight=sw_train)
prob_pos_pc_clf = pc_clf.predict_proba(this_X_test)[:, 1]
# Check that brier score has improved after calibration
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
# Check invariance against relabeling [0, 1] -> [1, 2]
pc_clf.fit(this_X_train, y_train + 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [-1, 1]
pc_clf.fit(this_X_train, 2 * y_train - 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [1, 0]
pc_clf.fit(this_X_train, (y_train + 1) % 2,
sample_weight=sw_train)
prob_pos_pc_clf_relabeled = \
pc_clf.predict_proba(this_X_test)[:, 1]
if method == "sigmoid":
assert_array_almost_equal(prob_pos_pc_clf,
1 - prob_pos_pc_clf_relabeled)
else:
# Isotonic calibration is not invariant against relabeling
# but should improve in both cases
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss((y_test + 1) % 2,
prob_pos_pc_clf_relabeled))
# check that calibration can also deal with regressors that have
# a decision_function
clf_base_regressor = CalibratedClassifierCV(Ridge())
clf_base_regressor.fit(X_train, y_train)
clf_base_regressor.predict(X_test)
# Check failure cases:
# only "isotonic" and "sigmoid" should be accepted as methods
clf_invalid_method = CalibratedClassifierCV(clf, method="foo")
assert_raises(ValueError, clf_invalid_method.fit, X_train, y_train)
# base-estimators should provide either decision_function or
# predict_proba (most regressors, for instance, should fail)
clf_base_regressor = \
CalibratedClassifierCV(RandomForestRegressor(), method="sigmoid")
assert_raises(RuntimeError, clf_base_regressor.fit, X_train, y_train)
def test_sample_weight_warning():
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=len(y))
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test = X[n_samples:]
for method in ['sigmoid', 'isotonic']:
base_estimator = LinearSVC(random_state=42)
calibrated_clf = CalibratedClassifierCV(base_estimator, method=method)
# LinearSVC does not currently support sample weights but they
# can still be used for the calibration step (with a warning)
msg = "LinearSVC does not support sample_weight."
assert_warns_message(
UserWarning, msg,
calibrated_clf.fit, X_train, y_train, sample_weight=sw_train)
probs_with_sw = calibrated_clf.predict_proba(X_test)
# As the weights are used for the calibration, they should still yield
# different predictions
calibrated_clf.fit(X_train, y_train)
probs_without_sw = calibrated_clf.predict_proba(X_test)
diff = np.linalg.norm(probs_with_sw - probs_without_sw)
assert_greater(diff, 0.1)
def test_calibration_multiclass():
"""Test calibration for multiclass """
# test multi-class setting with classifier that implements
# only decision function
clf = LinearSVC()
X, y_idx = make_blobs(n_samples=100, n_features=2, random_state=42,
centers=3, cluster_std=3.0)
# Use categorical labels to check that CalibratedClassifierCV supports
# them correctly
target_names = np.array(['a', 'b', 'c'])
y = target_names[y_idx]
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf.fit(X_train, y_train)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=2)
cal_clf.fit(X_train, y_train)
probas = cal_clf.predict_proba(X_test)
assert_array_almost_equal(np.sum(probas, axis=1), np.ones(len(X_test)))
# Check that log-loss of calibrated classifier is smaller than
# log-loss of naively turned OvR decision function to probabilities
# via softmax
def softmax(y_pred):
e = np.exp(-y_pred)
return e / e.sum(axis=1).reshape(-1, 1)
uncalibrated_log_loss = \
log_loss(y_test, softmax(clf.decision_function(X_test)))
calibrated_log_loss = log_loss(y_test, probas)
assert_greater_equal(uncalibrated_log_loss, calibrated_log_loss)
# Test that calibration of a multiclass classifier decreases log-loss
# for RandomForestClassifier
X, y = make_blobs(n_samples=100, n_features=2, random_state=42,
cluster_std=3.0)
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf = RandomForestClassifier(n_estimators=10, random_state=42)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
loss = log_loss(y_test, clf_probs)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=3)
cal_clf.fit(X_train, y_train)
cal_clf_probs = cal_clf.predict_proba(X_test)
cal_loss = log_loss(y_test, cal_clf_probs)
assert_greater(loss, cal_loss)
def test_calibration_prefit():
"""Test calibration for prefitted classifiers"""
n_samples = 50
X, y = make_classification(n_samples=3 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_calib, y_calib, sw_calib = \
X[n_samples:2 * n_samples], y[n_samples:2 * n_samples], \
sample_weight[n_samples:2 * n_samples]
X_test, y_test = X[2 * n_samples:], y[2 * n_samples:]
# Naive-Bayes
clf = MultinomialNB()
clf.fit(X_train, y_train, sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Naive Bayes with calibration
for this_X_calib, this_X_test in [(X_calib, X_test),
(sparse.csr_matrix(X_calib),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv="prefit")
for sw in [sw_calib, None]:
pc_clf.fit(this_X_calib, y_calib, sample_weight=sw)
y_prob = pc_clf.predict_proba(this_X_test)
y_pred = pc_clf.predict(this_X_test)
prob_pos_pc_clf = y_prob[:, 1]
assert_array_equal(y_pred,
np.array([0, 1])[np.argmax(y_prob, axis=1)])
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
def test_sigmoid_calibration():
"""Test calibration values with Platt sigmoid model"""
exF = np.array([5, -4, 1.0])
exY = np.array([1, -1, -1])
# computed from my python port of the C++ code in LibSVM
AB_lin_libsvm = np.array([-0.20261354391187855, 0.65236314980010512])
assert_array_almost_equal(AB_lin_libsvm,
_sigmoid_calibration(exF, exY), 3)
lin_prob = 1. / (1. + np.exp(AB_lin_libsvm[0] * exF + AB_lin_libsvm[1]))
sk_prob = _SigmoidCalibration().fit(exF, exY).predict(exF)
assert_array_almost_equal(lin_prob, sk_prob, 6)
# check that _SigmoidCalibration().fit only accepts 1d array or 2d column
# arrays
assert_raises(ValueError, _SigmoidCalibration().fit,
np.vstack((exF, exF)), exY)
def test_calibration_curve():
"""Check calibration_curve function"""
y_true = np.array([0, 0, 0, 1, 1, 1])
y_pred = np.array([0., 0.1, 0.2, 0.8, 0.9, 1.])
prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=2)
prob_true_unnormalized, prob_pred_unnormalized = \
calibration_curve(y_true, y_pred * 2, n_bins=2, normalize=True)
assert_equal(len(prob_true), len(prob_pred))
assert_equal(len(prob_true), 2)
assert_almost_equal(prob_true, [0, 1])
assert_almost_equal(prob_pred, [0.1, 0.9])
assert_almost_equal(prob_true, prob_true_unnormalized)
assert_almost_equal(prob_pred, prob_pred_unnormalized)
# probabilities outside [0, 1] should not be accepted when normalize
# is set to False
assert_raises(ValueError, calibration_curve, [1.1], [-0.1],
normalize=False)
def test_calibration_nan_imputer():
"""Test that calibration can accept nan"""
X, y = make_classification(n_samples=10, n_features=2,
n_informative=2, n_redundant=0,
random_state=42)
X[0, 0] = np.nan
clf = Pipeline(
[('imputer', Imputer()),
('rf', RandomForestClassifier(n_estimators=1))])
clf_c = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_c.fit(X, y)
clf_c.predict(X)
|
bsd-3-clause
|
phobson/wqio
|
wqio/viz.py
|
2
|
18243
|
from functools import partial
import numpy
from matplotlib import pyplot
from matplotlib import ticker
from pandas.api.types import CategoricalDtype
import seaborn
import probscale
from wqio import utils
from wqio import validate
def rotateTickLabels(ax, rotation, which, rotation_mode="anchor", ha="right"):
""" Rotates the ticklabels of a matplotlib Axes
Parameters
----------
ax : matplotlib Axes
The Axes object that will be modified.
rotation : float
The amount of rotation, in degrees, to be applied to the labels.
which : string
The axis whose ticklabels will be rotated. Valid values are 'x',
'y', or 'both'.
rotation_mode : string, optional
The rotation point for the ticklabels. Highly recommended to use
the default value ('anchor').
ha : string
The horizontal alignment of the ticks. Again, recommended to use
the default ('right').
Returns
-------
None
"""
if which == "both":
rotateTickLabels(ax, rotation, "x", rotation_mode=rotation_mode, ha=ha)
rotateTickLabels(ax, rotation, "y", rotation_mode=rotation_mode, ha=ha)
else:
if which == "x":
axis = ax.xaxis
elif which == "y":
axis = ax.yaxis
for t in axis.get_ticklabels():
t.set_horizontalalignment(ha)
t.set_rotation(rotation)
t.set_rotation_mode(rotation_mode)
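# Hedged usage sketch (illustrative only):
#
#     >>> fig, ax = pyplot.subplots()
#     >>> ax.set_xticks(range(3))
#     >>> ax.set_xticklabels(['first', 'second', 'third'])
#     >>> rotateTickLabels(ax, 30, 'x')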
def log_formatter(use_1x=True, threshold=5):
def _formatter(tick, pos=None, use_1x=True, threshold=3):
""" Formats log axes as `1 x 10^N` when N > 4 or N < -4. """
if 10 ** threshold >= tick > 1:
tick = "{:,d}".format(int(tick))
elif tick > 10 ** threshold or tick < 10 ** (-1 * threshold):
if use_1x:
tick = r"$1 \times 10 ^ {%d}$" % int(numpy.log10(tick))
else:
tick = r"$10 ^ {%d}$" % int(numpy.log10(tick))
return str(tick)
func = partial(_formatter, use_1x=use_1x, threshold=threshold)
return ticker.FuncFormatter(func)
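# Hedged usage sketch (illustrative only):
#
#     >>> fig, ax = pyplot.subplots()
#     >>> ax.set_xscale('log')
#     >>> ax.set_xlim(1e-6, 1e6)
#     >>> ax.xaxis.set_major_formatter(log_formatter(use_1x=False, threshold=4))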
def gridlines(
ax, xlabel=None, ylabel=None, xscale=None, yscale=None, xminor=True, yminor=True
):
""" Standard formatting for gridlines on a matplotlib Axes
Parameters
----------
ax : matplotlib Axes
The Axes object that will be modified.
xlabel, ylabel : string, optional
The labels of the x- and y-axis.
xscale, yscale : string, optional
The scale of each axis. Can be 'linear', 'log', or 'prob'.
xminor, yminor : bool, optional
Toggles the grid on minor ticks. Has no effect if minor ticks
are not present.
Returns
-------
None
"""
# set the scales
if xscale is not None:
ax.set_xscale(xscale)
if yscale is not None:
ax.set_yscale(yscale)
# set the labels
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
# major grids
ax.yaxis.grid(True, which="major", ls="-", alpha=0.35)
ax.xaxis.grid(True, which="major", ls="-", alpha=0.35)
# minor grids
if xminor:
ax.xaxis.grid(True, which="minor", ls="-", alpha=0.17)
if yminor:
ax.yaxis.grid(True, which="minor", ls="-", alpha=0.17)
def one2one(ax, set_limits=True, set_aspect=True, **kwargs):
label = kwargs.pop("label", "1:1 Line")
axis_limits = [
numpy.min([ax.get_xlim(), ax.get_ylim()]),
numpy.max([ax.get_xlim(), ax.get_ylim()]),
]
if set_limits:
ax.set_xlim(axis_limits)
ax.set_ylim(axis_limits)
if set_aspect:
ax.set_aspect("equal")
return ax.plot(axis_limits, axis_limits, label=label, **kwargs)
def jointplot(
x=None,
y=None,
data=None,
xlabel=None,
ylabel=None,
color=None,
zeromin=True,
one2one=True,
):
""" Plots the joint distribution of two variables via seaborn
Parameters
----------
x, y : array-like or string
Sequences of values or column names found within ``data``.
data : pandas DataFrame or None, optional
An optional DataFrame containing the data.
xlabel, ylabel : string, optional
Overrides the default x- and y-axis labels.
color : matplotlib color, optional
Color used for the plot elements.
zeromin : bool, optional
When True (default), force lower axes limits to 0.
one2one : bool, optional
When True (default), plots the 1:1 line on the axis and sets
the x- and y-axis limits to be equal.
Returns
-------
jg : seaborn.JointGrid
"""
jg = seaborn.jointplot(
x=x, y=y, color=color, data=data, marginal_kws=dict(rug=True, kde=True)
)
if xlabel is None:
xlabel = jg.ax_joint.get_xlabel()
if ylabel is None:
ylabel = jg.ax_joint.get_ylabel()
jg.set_axis_labels(xlabel=xlabel, ylabel=ylabel)
if zeromin:
jg.ax_joint.set_xlim(left=0)
jg.ax_joint.set_ylim(bottom=0)
if one2one:
ax_limit_min = numpy.min([jg.ax_joint.get_xlim(), jg.ax_joint.get_ylim()])
ax_limit_max = numpy.max([jg.ax_joint.get_xlim(), jg.ax_joint.get_ylim()])
jg.ax_joint.set_xlim(left=ax_limit_min, right=ax_limit_max)
jg.ax_joint.set_ylim(bottom=ax_limit_min, top=ax_limit_max)
jg.ax_joint.plot(
[ax_limit_min, ax_limit_max],
[ax_limit_min, ax_limit_max],
marker="None",
linestyle="-",
linewidth=1.75,
color=color or "k",
alpha=0.45,
label="1:1 line",
)
jg.ax_joint.legend(frameon=False, loc="upper left")
return jg
def whiskers_and_fliers(x, q1=None, q3=None, transformout=None):
""" Computes extent of whiskers and fliers on optionally transformed
data for box and whisker plots.
Parameters
----------
x : array-like
Sequence of optionally transformed data.
q1, q3 : floats, optional
First and third quartiles of the optionally transformed data.
transformout : callable, optional
Function to un-transform the results back into the original
space of the data.
Returns
-------
whiskers_and_fliers : dict
Dictionary of whisker and fliers values.
Examples
--------
>>> x = numpy.random.lognormal(size=37)
>>> whisk_fly = whiskers_and_fliers(numpy.log(x), transformout=numpy.exp)
See also
--------
wqio.utils.figutils.boxplot
"""
wnf = {}
if transformout is None:
def transformout(x):
return x
if q1 is None:
q1 = numpy.percentile(x, 25)
if q3 is None:
q3 = numpy.percentile(x, 75)
iqr = q3 - q1
# get low extreme
loval = q1 - (1.5 * iqr)
whislo = numpy.compress(x >= loval, x)
if len(whislo) == 0 or numpy.min(whislo) > q1:
whislo = q1
else:
whislo = numpy.min(whislo)
# get high extreme
hival = q3 + (1.5 * iqr)
whishi = numpy.compress(x <= hival, x)
if len(whishi) == 0 or numpy.max(whishi) < q3:
whishi = q3
else:
whishi = numpy.max(whishi)
wnf["fliers"] = numpy.hstack(
[
transformout(numpy.compress(x < whislo, x)),
transformout(numpy.compress(x > whishi, x)),
]
)
wnf["whishi"] = transformout(whishi)
wnf["whislo"] = transformout(whislo)
return wnf
def boxplot(
boxplot_stats,
ax=None,
position=None,
width=0.8,
shownotches=True,
color="b",
marker="o",
patch_artist=True,
showmean=False,
):
"""
Draws a boxplot on an axes
Parameters
----------
boxplot_stats : list of dicts
List of matplotlib boxplot-compatible statistics to be plotted
ax : matplotlib Axes, optional
The axis on which the boxplot will be drawn.
position : int or list of int, optional
Location on the x-axis where the boxplot will be drawn.
width : float, optional (default = 0.8)
Width of the boxplots.
shownotches : bool, optional (default = True)
Toggles notched boxplots where the notches show a confidence
interval around the median.
color : string, optional (default = 'b')
Matplotlib color used to plot the outliers, median or box, and
the optional mean.
marker : str, optional (default = 'o')
Matplotlib marker used for the outliers and optional mean.
patch_artist : bool, optional (default = True)
Toggles drawing the boxes as a patch filled in with ``color``
and a black median line, or as black outlines where the median line is drawn
in the ``color``.
showmean : bool, optional (default = False)
Toggles inclusion of the means in the boxplots.
Returns
-------
bp : dictionary of matplotlib artists
The graphical elements of the boxplot.
"""
fig, ax = validate.axes(ax)
if position is None:
position = numpy.arange(len(boxplot_stats)) + 1
elif numpy.isscalar(position):
position = [position]
meanprops = dict(
marker=marker, markersize=6, markerfacecolor=color, markeredgecolor="Black"
)
flierprops = dict(
marker=marker,
markersize=4,
zorder=4,
markerfacecolor="none",
markeredgecolor=color,
alpha=1,
)
whiskerprops = dict(linestyle="-", color="k", linewidth=0.75, zorder=4)
if patch_artist:
medianprops = dict(linewidth=1.00, color="k", linestyle="-", zorder=5)
boxprops = dict(
edgecolor="k", facecolor=color, linewidth=0.75, zorder=4, alpha=0.5
)
else:
medianprops = dict(linewidth=1.00, color=color, linestyle="-", zorder=3)
boxprops = dict(color="k", linewidth=0.75, zorder=4)
bp = ax.bxp(
boxplot_stats,
positions=position,
widths=width,
showmeans=showmean,
meanprops=meanprops,
flierprops=flierprops,
whiskerprops=whiskerprops,
medianprops=medianprops,
boxprops=boxprops,
shownotches=shownotches,
showcaps=False,
manage_ticks=False,
patch_artist=patch_artist,
)
return bp
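# Hedged usage sketch (illustrative only); ``matplotlib.cbook.boxplot_stats``
# builds the list of stat dictionaries that ``boxplot`` expects:
#
#     >>> from matplotlib import cbook
#     >>> data = numpy.random.lognormal(size=37)
#     >>> stats = cbook.boxplot_stats(data)
#     >>> fig, ax = pyplot.subplots()
#     >>> bp = boxplot(stats, ax=ax, position=1, shownotches=False)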
def probplot(
data,
ax=None,
axtype="prob",
yscale="log",
xlabel=None,
ylabel=None,
bestfit=False,
scatter_kws=None,
line_kws=None,
return_results=False,
):
""" Probability, percentile, and quantile plots.
Parameters
----------
data : sequence or array-like
1-dimensional data to be plotted
ax : optional matplotlib axes object or None (default).
The Axes on which to plot. If None is provided, one will be
created.
    axtype : string (default = 'prob')
        Type of plot to be created. Options are:
            - 'prob': probability plot
            - 'pp': percentile plot
            - 'qq': quantile plot
yscale : string (default = 'log')
Scale for the y-axis. Use 'log' for logarithmic (default) or
'linear'.
xlabel, ylabel : string or None (default)
Axis labels for the plot.
bestfit : bool, optional (default is False)
Specifies whether a best-fit line should be added to the
plot.
scatter_kws, line_kws : dictionary
Dictionary of keyword arguments passed directly to `pyplot.plot`
when drawing the scatter points and best-fit line, respectively.
return_results : bool (default = False)
        If True, a dictionary of results is returned along with the
figure. Keys are:
q - array of quantiles
x, y - arrays of data passed to function
xhat, yhat - arrays of modeled data plotted in best-fit line
res - a statsmodels Result object.
Returns
-------
fig : matplotlib.Figure
result : dictionary of linear fit results.
"""
output = probscale.viz.probplot(
data,
ax=ax,
plottype=axtype,
probax="x",
datalabel=ylabel,
problabel=xlabel,
datascale=yscale,
scatter_kws=scatter_kws,
line_kws=line_kws,
bestfit=bestfit,
return_best_fit_results=return_results,
)
return output
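# Illustrative sketch for ``probplot`` (assumptions: a log-normal sample stands
# in for real data; the axis labels are placeholders).
def _example_probplot():
    import numpy
    data = numpy.random.lognormal(size=50)
    fig = probplot(data, axtype="prob", yscale="log", bestfit=True,
                   xlabel="Non-exceedance probability", ylabel="Value")
    return fig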
def _connect_spines(left_ax, right_ax, left_y, right_y, linestyle="solid", **line_kwds):
""" Connects the y-spines between two Axes
Parameters
----------
left_ax, right_ax : matplotlib Axes objects
The Axes that need to be connected.
left_y, right_y : float
        Values on the spines that will be connected.
linestyle : string, optional (default = 'solid')
The line style to use. Valid values are 'solid', 'dashed',
'dashdot', 'dotted'.
**line_kwds : keyword arguments
        Additional options for styling the line.
Returns
-------
connector : BboxConnector
        The matplotlib artist (a ``BboxConnector``) that connects the spines.
"""
import matplotlib.transforms as mtrans
import mpl_toolkits.axes_grid1.inset_locator as inset
left_trans = mtrans.blended_transform_factory(left_ax.transData, left_ax.transAxes)
right_trans = mtrans.blended_transform_factory(
right_ax.transData, right_ax.transAxes
)
left_data_trans = left_ax.transScale + left_ax.transLimits
right_data_trans = right_ax.transScale + right_ax.transLimits
left_pos = left_data_trans.transform((0, left_y))[1]
right_pos = right_data_trans.transform((0, right_y))[1]
bbox = mtrans.Bbox.from_extents(0, left_pos, 0, right_pos)
right_bbox = mtrans.TransformedBbox(bbox, right_trans)
left_bbox = mtrans.TransformedBbox(bbox, left_trans)
# deal with the linestyle
connector = inset.BboxConnector(
left_bbox, right_bbox, loc1=3, loc2=2, linestyle=linestyle, **line_kwds
)
connector.set_clip_on(False)
left_ax.add_line(connector)
return connector
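# Illustrative sketch for ``_connect_spines`` (assumptions: matplotlib only);
# it links the value 5.0 on a 0-10 axis to the value 0.5 on a 0-1 axis.
def _example_connect_spines():
    from matplotlib import pyplot
    fig, (left_ax, right_ax) = pyplot.subplots(ncols=2)
    left_ax.set_ylim(0, 10)
    right_ax.set_ylim(0, 1)
    connector = _connect_spines(left_ax, right_ax, 5.0, 0.5, color="gray")
    return fig, connector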
def parallel_coordinates(
dataframe, hue, cols=None, palette=None, showlegend=True, **subplot_kws
):
""" Produce a parallel coordinates plot from a dataframe.
Parameters
----------
dataframe : pandas.DataFrame
The data to be plotted.
hue : string
        The column used to determine the lines' colors.
cols : list of strings, optional
The non-hue columns to include. If None, all other columns are
used.
palette : string, optional
Name of the seaborn color palette to use.
showlegend : bool (default = True)
Toggles including a legend on the plot.
**subplot_kws : keyword arguments
Options passed directly to pyplot.subplots()
Returns
-------
fig : matplotlib Figure
"""
# get the (non-hue) columns to plot
if cols is None:
cols = dataframe.columns.tolist()
cols.remove(hue)
# subset the data, putting the hue column last
    # the starred unpacking in the indexer requires Python >= 3.5
data = dataframe[[*cols, hue]]
# these plots look ridiculous in anything other than 'ticks'
with seaborn.axes_style("ticks"):
fig, axes = pyplot.subplots(ncols=len(cols), **subplot_kws)
hue_vals = dataframe[hue].unique()
colors = seaborn.color_palette(palette=palette, n_colors=len(hue_vals))
color_dict = {}
lines = []
for h, c in zip(hue_vals, colors):
lines.append(pyplot.Line2D([0], [0], linestyle="-", color=c, label=h))
color_dict[h] = c
for col, ax in zip(cols, axes):
data_limits = [(0, dataframe[col].min()), (0, dataframe[col].max())]
ax.set_xticks([0])
ax.update_datalim(data_limits)
ax.set_xticklabels([col])
ax.autoscale(axis="y")
ax.tick_params(axis="y", direction="inout")
ax.tick_params(axis="x", direction="in")
for row in data.values:
for n, (ax1, ax2) in enumerate(zip(axes[:-1], axes[1:])):
_connect_spines(ax1, ax2, row[n], row[n + 1], color=color_dict[row[-1]])
if showlegend:
fig.legend(lines, hue_vals)
fig.subplots_adjust(wspace=0)
seaborn.despine(fig=fig, bottom=True, trim=True)
return fig
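# Illustrative sketch for ``parallel_coordinates`` (assumptions: seaborn ships
# its 'iris' example dataset; extra keywords such as ``figsize`` are passed
# through to ``pyplot.subplots``).
def _example_parallel_coordinates():
    import seaborn
    iris = seaborn.load_dataset("iris")
    return parallel_coordinates(iris, hue="species", palette="deep",
                                figsize=(8, 4))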
def categorical_histogram(df, valuecol, bins, classifier=None, **factoropts):
""" Plot a faceted, categorical histogram.
Parameters
----------
df : pandas.DataFrame
Dataframe of storm information such as precipitation depth,
duration, presence of outflow, flow volume, etc.
valuecol : str
The name of the column that should be categorized and plotted.
bins : array-like
The right-edges of the histogram bins.
classifier : callable, optional
Function-like object that classifies the values in ``valuecol``.
Should accept a float scalar and return a string.
factoropts : keyword arguments, optional
Options passed directly to seaborn.factorplot
Returns
-------
fig : seaborn.FacetGrid
See also
--------
seaborn.factorplot
"""
def format_col(colname):
return colname.replace("_", " ").title()
def process_column(colname):
if colname is not None:
return format_col(colname)
if classifier is None:
classifier = partial(utils.classifier, bins=bins, units="mm")
cats = utils.unique_categories(classifier, bins)
cat_type = CategoricalDtype(cats, ordered=True)
aspect = factoropts.pop("aspect", 1.6)
display_col = format_col(valuecol)
processed_opts = dict(
row=process_column(factoropts.pop("row", None)),
col=process_column(factoropts.pop("col", None)),
hue=process_column(factoropts.pop("hue", None)),
kind="count",
aspect=aspect,
sharex=True,
)
final_opts = {**factoropts, **processed_opts}
fig = (
df.assign(display=df[valuecol].apply(classifier).astype(cat_type))
.drop([valuecol], axis=1)
.rename(columns={"display": valuecol})
.rename(columns=lambda c: format_col(c))
.pipe((seaborn.catplot, "data"), x=display_col, **final_opts)
.set_ylabels("Occurences")
)
return fig
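# Illustrative sketch for ``categorical_histogram`` (assumptions: the explicit
# ``depth_class`` function is a stand-in for ``utils.classifier``, and
# ``utils.unique_categories`` is assumed to derive labels by applying the
# classifier to ``bins``; the synthetic data stay below the top bin edge).
def _example_categorical_histogram():
    import numpy
    import pandas
    df = pandas.DataFrame({
        "storm_depth": numpy.random.uniform(0.5, 19.5, size=200),
        "season": numpy.random.choice(["winter", "summer"], size=200),
    })
    def depth_class(value):
        if value <= 5:
            return "<5 mm"
        elif value <= 10:
            return "5 - 10 mm"
        return "10 - 20 mm"
    return categorical_histogram(df, "storm_depth", bins=[5, 10, 20],
                                 classifier=depth_class, hue="season")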
|
bsd-3-clause
|
lepmik/miindio
|
miindio/miindcli.py
|
1
|
3554
|
import click
from miindio import MiindIO
import matplotlib
import matplotlib.pyplot as plt
import copy
import sys
@click.group()
def cli():
pass
@cli.command("submit", short_help='Generate a MIIND executable based on a ' +
'"xml" parameter file.')
@click.argument("xml_path", type=click.Path(exists=True))
@click.option("--directory", "-d", type=click.Path(exists=True))
@click.option("--run", "-r", is_flag=True)
def generate_(xml_path, **kwargs):
io = MiindIO(xml_path, kwargs['directory'])
io.submit()
if kwargs['run']:
io.run()
@cli.command("run", short_help='Run a MIIND executable based on a ' +
'"xml" parameter file.')
@click.argument("xml_path", type=click.Path(exists=True))
@click.option("--directory", "-d", type=click.Path(exists=True))
@click.option("--generate", "-g", is_flag=True)
def run_(xml_path, **kwargs):
io = MiindIO(xml_path, kwargs['directory'])
if kwargs['generate']:
io.submit()
io.run()
@cli.command("plot-marginal-density",
short_help='Plot marginal density of a model')
@click.argument("xml_path", type=click.Path(exists=True))
@click.argument("model_name", type=click.STRING)
@click.option("--directory", "-d", type=click.Path(exists=True))
@click.option("--n_bins_w", "-w", default=100, type=click.INT)
@click.option("--n_bins_v", "-v", default=100, type=click.INT)
def plot_marginal_density_(xml_path, model_name, **kwargs):
io = MiindIO(xml_path, kwargs['directory'])
marginal = io.marginal[model_name]
marginal.vn = kwargs['n_bins_v']
marginal.wn = kwargs['n_bins_w']
marginal.plot()
@cli.command("plot-density",
short_help='Plot 2D density of a model')
@click.argument("xml_path", type=click.Path(exists=True))
@click.argument("model_name", type=click.STRING)
@click.option("--directory", "-d", type=click.Path(exists=True))
@click.option("--timestep", "-t", type=click.INT)
def plot_density_(xml_path, model_name, **kwargs):
io = MiindIO(xml_path, kwargs['directory'])
density = io.density[model_name]
for fname in density.fnames[::kwargs['timestep']]:
density.plot_density(fname, save=True)
@cli.command("lost",
short_help='Plot .lost file')
@click.argument("lost_path", type=click.Path(exists=True))
def plot_lost_(lost_path, **kwargs):
from miindio.lost_tools import (add_fiducial, extract_base,
plot_lost, read_fiducial,
onclick, zoom_fun, onkey)
backend = matplotlib.get_backend().lower()
if backend not in ['qt4agg']:
        print('Warning: backend not recognized as working with "lost.py". ' +
              'If you do not encounter any issues with your current backend ' +
              '{}, please add it to this list.'.format(backend))
curr_points = []
fig = plt.figure()
ax = plot_lost(lost_path)
fid_fname = extract_base(lost_path) + '.fid'
patches = read_fiducial(fid_fname)
quads = copy.deepcopy(patches)
for patch in patches:
add_fiducial(ax, patch)
fig.canvas.mpl_connect('button_press_event',
lambda event: onclick(event, ax, fid_fname,
curr_points, quads))
fig.canvas.mpl_connect('scroll_event', lambda event: zoom_fun(event, ax))
fig.canvas.mpl_connect('key_press_event',
lambda event: onkey(event, ax, fid_fname, quads))
plt.show()
def main():
cli()
if __name__ == "__main__":
sys.exit(main())
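# Example invocations (illustrative only; file, directory and model names are
# placeholders, not shipped examples):
#
#   python miindcli.py submit model.xml -d results --run
#   python miindcli.py run model.xml -d results
#   python miindcli.py plot-marginal-density model.xml some_model -w 200 -v 200
#   python miindcli.py plot-density model.xml some_model -t 10 -d results
#   python miindcli.py lost results/grid.lost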
|
gpl-3.0
|
salsh/ROA-Analysis
|
scripts/plot_BGP_Impact_per_ASN.py
|
1
|
7517
|
#
# This file is part of ROA-Analysis
#
# Author: Samir Al-Sheikh (Freie Universitaet, Berlin)
# [email protected]
#
# MIT License
#
# Copyright (c) 2017 The ROA-Analysis authors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from __future__ import division
import os, matplotlib, calendar, types, pandas, warnings
from collections import Counter
from datetime import datetime
from utils.parse import parse_shell, parse_cfg
if parse_cfg(0).ssh_enabled: matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib as mpl
## Note: Warnings ignored (Type 3 font error if tex mode is active)
warnings.filterwarnings('ignore')
options = 'i:p:c:d:t:m'
def get_options():
return options
def main():
""" Dumps BGPReader output to file and analyses the different validation types """
## Parse shell arguments and config constants and build up file infrastructure
args = parse_shell(options); cfg = parse_cfg(args)
if isinstance(args.collector, types.StringTypes):
        exit('Error: at least two collectors must be given')
archive_an = cfg.archive_an + 'BGP_Impact_per_ASN/' + '_'.join(args.collector)
if not os.path.exists(archive_an): os.makedirs(archive_an)
## Convert argument interval to unix timestamps
start, end = args.interval.split(',')
if(start == '0' or end == '-1'): exit('Error: Open interval is not allowed')
start_unix = calendar.timegm(datetime.strptime(start, '%Y%m%d.%H%M').utctimetuple())
end_unix = calendar.timegm(datetime.strptime(end, '%Y%m%d.%H%M').utctimetuple())
inv_unix = str(start_unix) + ',' + str(end_unix)
export_df = archive_an + '/' + str(start_unix) + '_' + str(end_unix) + '_dataframe.csv'
## Get the bgpreader diff output
dump_diff_file = cfg.archive_an + 'BGP_Impact/' + '_'.join(args.collector) + '/' + \
cfg.bgp_project + '_' + cfg.bgp_vantage_point + '_' + inv_unix + '_diff.csv'
print 'BGP-Dump Diff: ' + dump_diff_file + '\n'
dump_file = '_'.join(dump_diff_file.split('_')[:-1]) + '.csv'
if not os.path.isfile(dump_diff_file):
exit('Error: BGP-Impact file does not exist, run BGP_Impact with w-mode')
## Support only plot mode for an existing dataframe
if(args.mode == 'a'):
if not os.path.isfile(export_df): exit('Error: Dataframe file does not exist')
df = pandas.read_csv(export_df, encoding='utf8', delimiter=',').set_index('Day')
start = pandas.to_datetime(df.index[0],format='%Y-%m-%d').strftime('%b %d ')
end = pandas.to_datetime(df.index[-1],format='%Y-%m-%d').strftime('until %b %d, %Y')
## Debug Print
print 'Dataframe:'; print df
plot(df, start, end, archive_an, start_unix, end_unix, args.collector, args.dpi, args.tex)
return
## Open the output diff file and count the diff occurrences of an ASN per timestamp
diff_df = pandas.read_csv(dump_diff_file, sep='|', dtype=str, index_col=False, \
header=None, names=list('ABCDEFGHIKL'))
diff_df = diff_df[['A','F']]; diff_df.columns = ['Timestamp', 'ASN']
diff_df['Timestamp'] = diff_df['Timestamp'].str.split(' ', expand=False).str[1]
diff_df['Timestamp'] = diff_df['Timestamp'].fillna(method='ffill')
diff_df = diff_df[diff_df['ASN'].notnull()]
diff_df = diff_df.groupby(['Timestamp', 'ASN']).size().reset_index(name='Diff-Count')
diff_df = diff_df.set_index(['Timestamp', 'ASN'])
## Open the output file and count the total occurrences of an ASN per timestamp
df = pandas.read_csv(dump_file, sep='|', dtype=str, index_col=False, \
header=None, names=list('ABCDEFGHIKL'))
df = df[['A','F']]; df.columns = ['Timestamp', 'ASN']
df['Timestamp'] = df['Timestamp'].str.split(' ', expand=False).str[1]
df['Timestamp'] = df['Timestamp'].fillna(method='ffill')
df = df[df['ASN'].notnull()]
df = df.groupby(['Timestamp', 'ASN']).size().reset_index(name='Total-Count')
df = df.set_index(['Timestamp', 'ASN'])
## Calculate the ratio for every timestamp and ASN and max count for ASN in a day
result = pandas.concat([diff_df, df], axis=1, join='inner')
result['Ratio'] = result['Diff-Count']/result['Total-Count']
result = result.reset_index()
result['Day'] = pandas.to_datetime(result['Timestamp'],unit='s').dt.strftime('%Y-%m-%d')
max_df = result.groupby(['Day', 'ASN'], sort=False)['Ratio'].max().reset_index(name='Max-Count')
max_df = max_df.pivot(index='Day', columns='ASN', values='Max-Count')
max_df = max_df.mul(100)
start = pandas.to_datetime(max_df.index[0],format='%Y-%m-%d').strftime('%b %d ')
end = pandas.to_datetime(max_df.index[-1],format='%Y-%m-%d').strftime('until %b %d, %Y')
## Store dataframe as csv for only plotting purposes
max_df.to_csv(export_df, sep=',', encoding='utf-8')
## Debug Print
print max_df
plot(max_df, start, end, archive_an, start_unix, end_unix, args.collector, args.dpi, args.tex)
return
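# Illustrative toy version of the ratio/pivot steps above (the counts are made
# up; it mirrors the Diff-Count / Total-Count computation on a two-ASN sample).
def _example_diff_ratio():
    import pandas
    idx = ['Timestamp', 'ASN']
    diff_df = pandas.DataFrame({'Timestamp': [1500000000, 1500000000, 1500086400],
                                'ASN': ['AS1', 'AS2', 'AS1'],
                                'Diff-Count': [2, 1, 4]}).set_index(idx)
    total_df = pandas.DataFrame({'Timestamp': [1500000000, 1500000000, 1500086400],
                                 'ASN': ['AS1', 'AS2', 'AS1'],
                                 'Total-Count': [10, 4, 8]}).set_index(idx)
    result = pandas.concat([diff_df, total_df], axis=1, join='inner')
    result['Ratio'] = result['Diff-Count'] / result['Total-Count']
    result = result.reset_index()
    result['Day'] = pandas.to_datetime(result['Timestamp'], unit='s').dt.strftime('%Y-%m-%d')
    max_df = result.groupby(['Day', 'ASN'], sort=False)['Ratio'].max().reset_index(name='Max-Count')
    return max_df.pivot(index='Day', columns='ASN', values='Max-Count').mul(100)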
def plot(df, start, end, archive_an, start_unix, end_unix, collector, dpi, tex):
""" Plot the graph for differences related to the ASN over time """
## Set figure properties
figsize = (20, 10) if not int(tex) else (20, 12)
mpl.rcParams['figure.figsize'] = figsize; mpl.rcParams['figure.dpi'] = dpi
mpl.rcParams['figure.facecolor'] = 'w'; mpl.rcParams['figure.edgecolor'] = 'k'
if int(tex): mpl.rc('font',family='serif',serif='Latin Modern Roman',size=24)
## Plotting
ax = df.T.plot(kind='box', showmeans=True, sym='g+',
color={'boxes': 'k', 'medians': 'orange', 'whiskers': 'k'})
y = 1.02 if int(tex) else 1
plt.title('Impact on BGP - Differences between validation status per BGP-ASN - ' + start + end,y=y)
dates_lab = [pandas.to_datetime(str(x)).strftime('%Y-%m-%d') for x in df.T.columns.values]
if int(tex): dates_lab = [d[-5:] for d in dates_lab]
d_x = -.02 if not int(tex) else -.04; d_y = -.04 if not int(tex) else -.06
ax.spines['top'].set_alpha(0.25); ax.set_xticklabels(dates_lab, ha='center');
for label in ax.xaxis.get_ticklabels()[1::2]: label.set_visible(False)
ax.set_ylabel('Max. Differences - #Differences / #ASN entries [%]')
ax.set_xlabel('Timestamp'); ax.yaxis.grid(alpha=0.75);
## Export Plot
if not os.path.exists(archive_an + '/Plots/'): os.makedirs(archive_an + '/Plots/')
dump_pdf = str(start_unix) + '_' + str(end_unix) + '.pdf'
print '\nPlot: '+ archive_an + '/Plots/plots_bgp_impact_per_asn_' + dump_pdf
plt.gcf().savefig(archive_an + '/Plots/plots_bgp_impact_per_asn_' + dump_pdf,
bbox_inches='tight')
return
if __name__ == '__main__':
main()
|
mit
|
cloudera/ibis
|
ibis/backends/tests/test_client.py
|
1
|
6027
|
import pandas as pd
import pytest
from pkg_resources import parse_version
import ibis
import ibis.expr.datatypes as dt
@pytest.fixture
def new_schema():
return ibis.schema([('a', 'string'), ('b', 'bool'), ('c', 'int32')])
@pytest.mark.xfail_unsupported
def test_load_data_sqlalchemy(backend, con, temp_table):
if not isinstance(
con.dialect(), ibis.backends.base_sqlalchemy.alchemy.AlchemyDialect
):
pytest.skip(f'{backend} is not a SQL Alchemy Client.')
sch = ibis.schema(
[
('first_name', 'string'),
('last_name', 'string'),
('department_name', 'string'),
('salary', 'float64'),
]
)
df = pd.DataFrame(
{
'first_name': ['A', 'B', 'C'],
'last_name': ['D', 'E', 'F'],
'department_name': ['AA', 'BB', 'CC'],
'salary': [100.0, 200.0, 300.0],
}
)
con.create_table(temp_table, schema=sch)
con.load_data(temp_table, df, if_exists='append')
result = con.table(temp_table).execute()
backend.assert_frame_equal(df, result)
@pytest.mark.xfail_unsupported
def test_version(backend, con):
expected_type = (
type(parse_version('1.0')),
type(parse_version('1.0-legacy')),
)
assert isinstance(con.version, expected_type)
@pytest.mark.parametrize(
('expr_fn', 'expected'),
[
(lambda t: t.string_col, [('string_col', dt.String)]),
(
lambda t: t[t.string_col, t.bigint_col],
[('string_col', dt.String), ('bigint_col', dt.Int64)],
),
],
)
def test_query_schema(backend, con, alltypes, expr_fn, expected):
if not hasattr(con, '_build_ast'):
pytest.skip(
'{} backend has no _build_ast method'.format(
type(backend).__name__
)
)
expr = expr_fn(alltypes)
# we might need a public API for it
ast = con._build_ast(expr, backend.make_context())
query = con.query_class(con, ast)
schema = query.schema()
    # clickhouse columns have been defined as non-nullable
# whereas other backends don't support non-nullable columns yet
expected = ibis.schema(
[
(name, dtype(nullable=schema[name].nullable))
for name, dtype in expected
]
)
assert query.schema().equals(expected)
@pytest.mark.parametrize(
'sql',
[
'select * from functional_alltypes limit 10',
'select * from functional_alltypes \nlimit 10\n',
],
)
@pytest.mark.xfail_backends(('bigquery',))
@pytest.mark.xfail_unsupported
def test_sql(backend, con, sql):
if not hasattr(con, 'sql') or not hasattr(con, '_get_schema_using_query'):
pytest.skip('Backend {} does not support sql method'.format(backend))
# execute the expression using SQL query
con.sql(sql).execute()
# test table
@pytest.mark.xfail_unsupported
def test_create_table_from_schema(con, backend, new_schema, temp_table):
if not hasattr(con, 'create_table') or not hasattr(con, 'drop_table'):
pytest.xfail(
            '{} backend doesn\'t have create_table or drop_table '
            'methods.'.format(backend)
)
con.create_table(temp_table, schema=new_schema)
t = con.table(temp_table)
for k, i_type in t.schema().items():
assert new_schema[k] == i_type
@pytest.mark.xfail_unsupported
def test_rename_table(con, backend, temp_table, new_schema):
if not hasattr(con, 'rename_table'):
        pytest.xfail(
            '{} backend doesn\'t have rename_table method.'.format(backend)
        )
temp_table_original = '{}_original'.format(temp_table)
con.create_table(temp_table_original, schema=new_schema)
t = con.table(temp_table_original)
t.rename(temp_table)
assert con.table(temp_table) is not None
assert temp_table in con.list_tables()
@pytest.mark.xfail_unsupported
@pytest.mark.xfail_backends(['impala', 'pyspark', 'spark'])
def test_nullable_input_output(con, backend, temp_table):
# - Impala, PySpark and Spark non-nullable issues #2138 and #2137
if not hasattr(con, 'create_table') or not hasattr(con, 'drop_table'):
pytest.xfail(
            '{} backend doesn\'t have create_table or drop_table '
            'methods.'.format(backend)
)
sch = ibis.schema(
[
('foo', 'int64'),
('bar', ibis.expr.datatypes.int64(nullable=False)),
('baz', 'boolean*'),
]
)
con.create_table(temp_table, schema=sch)
t = con.table(temp_table)
assert t.schema().types[0].nullable
assert not t.schema().types[1].nullable
assert t.schema().types[2].nullable
# view tests
@pytest.mark.xfail_unsupported
@pytest.mark.xfail_backends(['pyspark', 'spark'])
def test_create_drop_view(con, backend, temp_view):
    # pyspark and spark are skipped because the table is actually a temporary view
if not hasattr(con, 'create_view') or not hasattr(con, 'drop_view'):
pytest.xfail(
            '{} backend doesn\'t have create_view or drop_view '
            'methods.'.format(backend)
)
# setup
table_name = 'functional_alltypes'
expr = con.table(table_name).limit(1)
# create a new view
con.create_view(temp_view, expr)
# check if the view was created
assert temp_view in con.list_tables()
t_expr = con.table(table_name)
v_expr = con.table(temp_view)
# check if the view and the table has the same fields
assert set(t_expr.schema().names) == set(v_expr.schema().names)
@pytest.mark.only_on_backends(
['bigquery', 'clickhouse', 'impala', 'omniscidb', 'spark'],
reason="run only if backend is sql-based",
)
def test_separate_database(con, alternate_current_database, current_data_db):
    # using alternate_current_database switches the current database of
    # "con" to a temporary one until the test is over
tmp_db = con.database(alternate_current_database)
# verifying we can open another db which isn't equal to current
db = con.database(current_data_db)
assert db.name == current_data_db
assert tmp_db.name == alternate_current_database
|
apache-2.0
|
shahankhatch/scikit-learn
|
sklearn/utils/graph.py
|
289
|
6239
|
"""
Graph utilities and algorithms
Graphs are represented with their adjacency matrices, preferably using
sparse matrices.
"""
# Authors: Aric Hagberg <[email protected]>
# Gael Varoquaux <[email protected]>
# Jake Vanderplas <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from .validation import check_array
from .graph_shortest_path import graph_shortest_path
###############################################################################
# Path and connected component analysis.
# Code adapted from networkx
def single_source_shortest_path_length(graph, source, cutoff=None):
"""Return the shortest path length from source to all reachable nodes.
Returns a dictionary of shortest path lengths keyed by target.
Parameters
----------
    graph : sparse matrix or 2D array (preferably LIL matrix)
Adjacency matrix of the graph
source : node label
Starting node for path
cutoff : integer, optional
Depth to stop the search - only
paths of length <= cutoff are returned.
Examples
--------
>>> from sklearn.utils.graph import single_source_shortest_path_length
>>> import numpy as np
>>> graph = np.array([[ 0, 1, 0, 0],
... [ 1, 0, 1, 0],
... [ 0, 1, 0, 1],
... [ 0, 0, 1, 0]])
>>> single_source_shortest_path_length(graph, 0)
{0: 0, 1: 1, 2: 2, 3: 3}
>>> single_source_shortest_path_length(np.ones((6, 6)), 2)
{0: 1, 1: 1, 2: 0, 3: 1, 4: 1, 5: 1}
"""
if sparse.isspmatrix(graph):
graph = graph.tolil()
else:
graph = sparse.lil_matrix(graph)
seen = {} # level (number of hops) when seen in BFS
level = 0 # the current level
    next_level = [source]      # list of nodes to check at next level
while next_level:
this_level = next_level # advance to next level
        next_level = set()       # and start a new set (the fringe)
for v in this_level:
if v not in seen:
seen[v] = level # set the level of vertex v
next_level.update(graph.rows[v])
if cutoff is not None and cutoff <= level:
break
level += 1
return seen # return all path lengths as dictionary
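# Short sketch complementing the docstring examples: the same BFS works on a
# sparse (LIL) adjacency matrix, and ``cutoff`` bounds the search depth.
def _example_single_source_shortest_path_length():
    import numpy as np
    from scipy import sparse
    graph = sparse.lil_matrix(np.array([[0, 1, 0, 0],
                                        [1, 0, 1, 0],
                                        [0, 1, 0, 1],
                                        [0, 0, 1, 0]]))
    assert single_source_shortest_path_length(graph, 0) == {0: 0, 1: 1, 2: 2, 3: 3}
    assert single_source_shortest_path_length(graph, 0, cutoff=1) == {0: 0, 1: 1}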
if hasattr(sparse, 'connected_components'):
connected_components = sparse.connected_components
else:
from .sparsetools import connected_components
###############################################################################
# Graph laplacian
def graph_laplacian(csgraph, normed=False, return_diag=False):
""" Return the Laplacian matrix of a directed graph.
For non-symmetric graphs the out-degree is used in the computation.
Parameters
----------
csgraph : array_like or sparse matrix, 2 dimensions
compressed-sparse graph, with shape (N, N).
normed : bool, optional
If True, then compute normalized Laplacian.
return_diag : bool, optional
If True, then return diagonal as well as laplacian.
Returns
-------
lap : ndarray
The N x N laplacian matrix of graph.
diag : ndarray
The length-N diagonal of the laplacian matrix.
diag is returned only if return_diag is True.
Notes
-----
The Laplacian matrix of a graph is sometimes referred to as the
"Kirchoff matrix" or the "admittance matrix", and is useful in many
parts of spectral graph theory. In particular, the eigen-decomposition
of the laplacian matrix can give insight into many properties of the graph.
For non-symmetric directed graphs, the laplacian is computed using the
out-degree of each node.
"""
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
raise ValueError('csgraph must be a square matrix or array')
if normed and (np.issubdtype(csgraph.dtype, np.int)
or np.issubdtype(csgraph.dtype, np.uint)):
csgraph = check_array(csgraph, dtype=np.float64, accept_sparse=True)
if sparse.isspmatrix(csgraph):
return _laplacian_sparse(csgraph, normed=normed,
return_diag=return_diag)
else:
return _laplacian_dense(csgraph, normed=normed,
return_diag=return_diag)
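# Small sketch of ``graph_laplacian`` on a 3-node path graph: every row of the
# combinatorial Laplacian sums to zero and the diagonal carries the degrees.
def _example_graph_laplacian():
    import numpy as np
    adjacency = np.array([[0., 1., 0.],
                          [1., 0., 1.],
                          [0., 1., 0.]])
    lap, diag = graph_laplacian(adjacency, normed=False, return_diag=True)
    assert np.allclose(lap.sum(axis=1), 0)
    assert np.allclose(diag, [1., 2., 1.])
    return lap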
def _laplacian_sparse(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
if not graph.format == 'coo':
lap = (-graph).tocoo()
else:
lap = -graph.copy()
diag_mask = (lap.row == lap.col)
if not diag_mask.sum() == n_nodes:
# The sparsity pattern of the matrix has holes on the diagonal,
# we need to fix that
diag_idx = lap.row[diag_mask]
diagonal_holes = list(set(range(n_nodes)).difference(diag_idx))
new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])
new_row = np.concatenate([lap.row, diagonal_holes])
new_col = np.concatenate([lap.col, diagonal_holes])
lap = sparse.coo_matrix((new_data, (new_row, new_col)),
shape=lap.shape)
diag_mask = (lap.row == lap.col)
lap.data[diag_mask] = 0
w = -np.asarray(lap.sum(axis=1)).squeeze()
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap.data /= w[lap.row]
lap.data /= w[lap.col]
lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(
lap.data.dtype)
else:
lap.data[diag_mask] = w[lap.row[diag_mask]]
if return_diag:
return lap, w
return lap
def _laplacian_dense(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
lap = -np.asarray(graph) # minus sign leads to a copy
# set diagonal to zero
lap.flat[::n_nodes + 1] = 0
w = -lap.sum(axis=0)
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap /= w
lap /= w[:, np.newaxis]
lap.flat[::n_nodes + 1] = (1 - w_zeros).astype(lap.dtype)
else:
lap.flat[::n_nodes + 1] = w.astype(lap.dtype)
if return_diag:
return lap, w
return lap
|
bsd-3-clause
|