repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (15 classes)
---|---|---|---|---|---|
lifei96/Medium_Crawler | User_Crawler/util_graph.py | 2 | 21354 | # -*- coding: utf-8 -*-
import snap
import pandas as pd
import numpy as np
import os
import json
from operator import *
eps = 1e-10
date_list = ['20130101', '20130701', '20140101', '20140701', '20150101', '20150701', '20160101', '20160701']
def load_graph(file_path):
H = snap.TStrIntSH()
Graph = snap.LoadConnListStr(snap.PNGraph, file_path, H)
print "-----graph loaded"
return Graph, H
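# Hedged usage sketch (the username below is hypothetical):
#   Graph, H = load_graph('./data/graph/graph.dat')  # H maps username <-> integer node id
#   node_id = H.GetDat('some_username')              # username -> node id (as used further down)
#   username = H.GetKey(node_id)                     # node id -> username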
def load_username_map(file_path='./data/graph/graph.dat'):
Graph, H = load_graph(file_path)
return H
def load_graph_from_edge_list(input_path):
return snap.LoadEdgeList(snap.PNGraph, input_path, 0, 1)
def save_graph_to_edge_list(Graph, output_path):
snap.SaveEdgeList(Graph, output_path)
def convert_to_undirected(in_Graph):
return snap.ConvertGraph(snap.PUNGraph, in_Graph)
def load_user_attr_to_df(file_path):
return pd.read_csv(file_path, encoding='utf-8', engine='python')
def get_labeled_graph(file_path, output_path_graph, output_path_hash):
Graph, H = load_graph(file_path)
with open(output_path_graph, 'w') as f:
print '-----clear'
with open(output_path_hash, 'w') as f:
print '-----clear'
f_graph = open(output_path_graph, 'a')
f_hash = open(output_path_hash, 'a')
for NI in Graph.Nodes():
ID = NI.GetId()
f_hash.write('%d %s\n' % (ID, H.GetKey(ID)))
f_graph.write('%d' % ID)
for des in NI.GetOutEdges():
f_graph.write(' %d' % des)
f_graph.write('\n')
print ID
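# Format written by get_labeled_graph above: the graph file has one line per node,
# "<node_id> <dst_id> <dst_id> ...", and the hash file has "<node_id> <username>".
# The *_for_CC variant below also writes the out-degree right after the node id,
# and the *_for_louvain variant writes one "<src_id> <dst_id>" pair per edge.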
def get_labeled_graph_for_CC(file_path, output_path_graph, output_path_hash):
Graph, H = load_graph(file_path)
with open(output_path_graph, 'w') as f:
print '-----clear'
with open(output_path_hash, 'w') as f:
print '-----clear'
f_graph = open(output_path_graph, 'a')
f_hash = open(output_path_hash, 'a')
for NI in Graph.Nodes():
ID = NI.GetId()
f_hash.write('%d %s\n' % (ID, H.GetKey(ID)))
f_graph.write('%d %d' % (ID, NI.GetOutDeg()))
for des in NI.GetOutEdges():
f_graph.write(' %d' % des)
f_graph.write('\n')
print ID
def get_labeled_graph_for_louvain(file_path, output_path_graph, output_path_hash):
Graph, H = load_graph(file_path)
with open(output_path_graph, 'w') as f:
print '-----clear'
with open(output_path_hash, 'w') as f:
print '-----clear'
f_graph = open(output_path_graph, 'a')
f_hash = open(output_path_hash, 'a')
for NI in Graph.Nodes():
ID = NI.GetId()
f_hash.write('%d %s\n' % (ID, H.GetKey(ID)))
for des in NI.GetOutEdges():
f_graph.write('%d %d\n' % (ID, des))
print ID
def get_labeled_LSCC_for_paths(file_path, output_path_LSCC, output_path_hash):
Graph, H = load_graph(file_path)
MxScc = snap.GetMxScc(Graph)
with open(output_path_LSCC, 'w') as f:
print '-----clear'
with open(output_path_hash, 'w') as f:
print '-----clear'
f_graph = open(output_path_LSCC, 'a')
f_hash = open(output_path_hash, 'a')
for NI in MxScc.Nodes():
ID = NI.GetId()
f_hash.write('%d %s\n' % (ID, H.GetKey(ID)))
for des in NI.GetOutEdges():
f_graph.write('%d %d\n' % (ID, des))
print ID
def merge_CC_result(path_CC, path_hash, output_path):
CC = dict()
dataset = list()
with open(path_CC, 'r') as f:
raw_data = f.read().split('\n')
for line in raw_data:
if line != '':
ID, cc = map(eval, line.split())
CC[ID] = cc
with open(path_hash, 'r') as f:
raw_data = f.read().split('\n')
for line in raw_data:
if line != '':
ID, username = line.split()
ID = eval(ID)
dataset.append({'username': username, 'CC': CC[ID]})
dataset = pd.DataFrame(dataset)
dataset = dataset[['username', 'CC']]
dataset.to_csv(output_path, index=False, encoding='utf-8')
def get_graph_info(file_path, output_path):
Graph, H = load_graph(file_path)
snap.PrintInfo(Graph, 'Python type PNGraph', output_path, False)
def graph_cleaning(file_path):
Graph, H = load_graph(file_path)
Graph = snap.GetMxWcc(Graph)
snap.DelSelfEdges(Graph)
nodes_set = set()
for NI in Graph.Nodes():
nodes_set.add(NI.GetId())
with open(file_path, 'r') as f:
raw_list = f.read().split('\n')
edges_list = [edge_str.split() for edge_str in raw_list]
with open(file_path, 'w') as f:
print '-----clear'
with open(file_path, 'a') as f:
for edge in edges_list:
if len(edge) == 0:
continue
if H.GetKeyId(edge[0]) not in nodes_set:
continue
edge_cleaned = list()
for node in edge:
if H.GetKeyId(node) in nodes_set:
edge_cleaned.append(node)
f.write(' '.join(edge_cleaned) + '\n')
def get_pagerank_by_date(date_list=date_list):
for date in date_list:
get_pagerank_from_edge_list('./data/graph/Graph_%s.txt' % date, './data/graph/pr_%s.csv' % date)
def get_pagerank_from_edge_list(file_path, output_path):
Graph = load_graph_from_edge_list(file_path)
H = load_username_map()
_get_pagerank(Graph, H, output_path)
def get_pagerank(file_path, output_path):
Graph, H = load_graph(file_path)
_get_pagerank(Graph, H, output_path)
def _get_pagerank(Graph, H, output_path):
PRankH = snap.TIntFltH()
snap.GetPageRank(Graph, PRankH)
pr_list = list()
for ID in PRankH:
pr_list.append({'username': H.GetKey(ID), 'PR': PRankH[ID]})
dataset = pd.DataFrame(pr_list)
dataset = dataset[['username', 'PR']]
dataset.to_csv(output_path, index=False, encoding='utf-8')
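# Hedged example: compute PageRank for every snapshot in date_list, then read one back.
#   get_pagerank_by_date()                            # writes ./data/graph/pr_<date>.csv
#   df = pd.read_csv('./data/graph/pr_20130101.csv')  # columns: username, PR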
def get_degree_by_date(date_list=date_list):
for date in date_list:
get_degree_from_edge_list('./data/graph/Graph_%s.txt' % date, './data/graph/degree_%s.csv' % date)
def get_degree_from_edge_list(file_path, output_path):
Graph = load_graph_from_edge_list(file_path)
H = load_username_map()
_get_degree_in_graph(Graph, H, output_path)
def _get_degree_in_graph(Graph, H, output_path):
InDegV = snap.TIntPrV()
snap.GetNodeInDegV(Graph, InDegV)
InDeg_set = dict()
for item in InDegV:
username = H.GetKey(item.GetVal1())
InDeg = item.GetVal2()
InDeg_set[username] = InDeg
OutDegV = snap.TIntPrV()
snap.GetNodeOutDegV(Graph, OutDegV)
OutDeg_set = dict()
for item in OutDegV:
username = H.GetKey(item.GetVal1())
OutDeg = item.GetVal2()
OutDeg_set[username] = OutDeg
dataset = list()
tot = len(InDeg_set)
num = 0
for username in InDeg_set:
user_degree = dict()
user_degree['username'] = username
user_degree['in_degree'] = InDeg_set[username]
user_degree['out_degree'] = OutDeg_set[username]
profile_path = './data/Users/%s.json' % username
if not os.path.exists(profile_path):
continue
with open(profile_path, 'r') as f:
profile = json.load(f)
in_set = set(profile['followers'])
out_set = set(profile['following'])
if user_degree['out_degree'] == 0:
user_degree['balance'] = float(user_degree['in_degree']) / eps
else:
user_degree['balance'] = float(user_degree['in_degree']) / float(user_degree['out_degree'])
bi = 0
for out_username in out_set:
if out_username in in_set:
try:
ID = H.GetDat(out_username)
if ID != -1 and Graph.IsNode(ID):
bi += 1
except Exception as e:
print type(e)
print e.args
print e
if user_degree['out_degree'] == 0:
user_degree['reciprocity'] = float(bi) / eps
else:
user_degree['reciprocity'] = float(bi) / float(user_degree['out_degree'])
dataset.append(user_degree)
num += 1
print '%d/%d' % (num, tot)
dataset = pd.DataFrame(dataset)
dataset = dataset[['username', 'in_degree', 'out_degree', 'balance', 'reciprocity']]
dataset.to_csv(output_path, index=False, encoding='utf-8')
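# Columns written above: balance = in_degree / out_degree, and reciprocity =
# (number of followed users who follow back) / out_degree; when out_degree == 0
# both ratios are divided by eps (1e-10) instead, so they become very large
# rather than raising a ZeroDivisionError.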
def get_degree_in_graph(file_path, output_path):
Graph, H = load_graph(file_path)
_get_degree_in_graph(Graph, H, output_path)
def get_degree(file_path, output_path):
Graph, H = load_graph(file_path)
_get_degree(Graph, H, output_path)
def _get_degree(Graph, H, output_path):
InDegV = snap.TIntPrV()
snap.GetNodeInDegV(Graph, InDegV)
InDeg_set = dict()
for item in InDegV:
username = H.GetKey(item.GetVal1())
InDeg = item.GetVal2()
InDeg_set[username] = InDeg
OutDegV = snap.TIntPrV()
snap.GetNodeOutDegV(Graph, OutDegV)
OutDeg_set = dict()
for item in OutDegV:
username = H.GetKey(item.GetVal1())
OutDeg = item.GetVal2()
OutDeg_set[username] = OutDeg
dataset = list()
tot = len(InDeg_set)
num = 0
for username in InDeg_set:
user_degree = dict()
user_degree['username'] = username
user_degree['in_degree'] = InDeg_set[username]
user_degree['out_degree'] = OutDeg_set[username]
profile_path = './data/Users/%s.json' % username
if not os.path.exists(profile_path):
continue
with open(profile_path, 'r') as f:
profile = json.load(f)
if 'socialStats' in profile['profile']['user']:
user_degree['in_degree'] = max(user_degree['in_degree'], profile['profile']['user']['socialStats']['usersFollowedByCount'])
user_degree['out_degree'] = max(user_degree['out_degree'], profile['profile']['user']['socialStats']['usersFollowedCount'])
in_set = set(profile['followers'])
out_set = set(profile['following'])
user_degree['in_degree'] = max(user_degree['in_degree'], len(in_set))
user_degree['out_degree'] = max(user_degree['out_degree'], len(out_set))
if user_degree['out_degree'] == 0:
user_degree['balance'] = float(user_degree['in_degree']) / eps
else:
user_degree['balance'] = float(user_degree['in_degree']) / float(user_degree['out_degree'])
bi = 0
for out_username in out_set:
if out_username in in_set:
bi += 1
if user_degree['out_degree'] == 0:
user_degree['reciprocity'] = float(bi) / eps
else:
user_degree['reciprocity'] = float(bi) / float(user_degree['out_degree'])
dataset.append(user_degree)
num += 1
print '%d/%d' % (num, tot)
dataset = pd.DataFrame(dataset)
dataset = dataset[['username', 'in_degree', 'out_degree', 'balance', 'reciprocity']]
dataset.to_csv(output_path, index=False, encoding='utf-8')
def get_CC_by_date(date_list=date_list):
for date in date_list:
get_CC_from_edge_list('./data/graph/Graph_%s.txt' % date, './data/graph/cc_%s.csv' % date)
def get_CC_from_edge_list(file_path, output_path):
Graph = load_graph_from_edge_list(file_path)
H = load_username_map()
_get_CC(Graph, H, output_path)
def get_CC(file_path, output_path):
Graph, H = load_graph(file_path)
_get_CC(Graph, H, output_path)
def _get_CC(Graph, H, output_path):
NIdCCfH = snap.TIntFltH()
snap.GetNodeClustCf(Graph, NIdCCfH)
dataset = list()
for ID in NIdCCfH:
CC = dict()
CC['username'] = H.GetKey(ID)
CC['CC'] = NIdCCfH[ID]
dataset.append(CC)
dataset = pd.DataFrame(dataset)
dataset = dataset[['username', 'CC']]
dataset.to_csv(output_path, index=False, encoding='utf-8')
def get_SCC_by_date(date_list=date_list):
for date in date_list:
get_SCC_from_edge_list('./data/graph/Graph_%s.txt' % date, './data/graph/scc_%s.csv' % date)
def get_SCC_from_edge_list(file_path, output_path):
Graph = load_graph_from_edge_list(file_path)
H = load_username_map()
_get_SCC(Graph, H, output_path)
def get_SCC(file_path, output_path):
Graph, H = load_graph(file_path)
_get_SCC(Graph, H, output_path)
def _get_SCC(Graph, H, output_path):
ComponentDist = snap.TIntPrV()
snap.GetSccSzCnt(Graph, ComponentDist)
dataset = list()
for comp in ComponentDist:
scc = dict()
scc['size'] = comp.GetVal1()
scc['freq'] = comp.GetVal2()
dataset.append(scc)
dataset = pd.DataFrame(dataset)
dataset = dataset[['size', 'freq']]
dataset.sort_values(by='size', ascending=False, inplace=True)
dataset.to_csv(output_path, index=False, encoding='utf-8')
def get_shortest_path(file_path, output_path):
Graph, H = load_graph(file_path)
path_distr = dict()
MxScc = snap.GetMxScc(Graph)
tot = MxScc.GetNodes()
cnt = 0
for NI in MxScc.Nodes():
NIdToDistH = snap.TIntH()
shortestPath = snap.GetShortPath(MxScc, NI.GetId(), NIdToDistH, True)
for ID in NIdToDistH:
dist = NIdToDistH[ID]
if dist in path_distr:
path_distr[dist] += 1
else:
path_distr[dist] = 1
cnt += 1
print '%d/%d' % (cnt, tot)
dataset = list()
for dist in path_distr:
distr = dict()
distr['dist'] = dist
distr['freq'] = path_distr[dist]
dataset.append(distr)
dataset = pd.DataFrame(dataset)
dataset = dataset[['dist', 'freq']]
dataset.sort_values(by='dist', ascending=True, inplace=True)
dataset.to_csv(output_path, index=False, encoding='utf-8')
def get_comm(node2comm_path, hash_path, output_path_user2comm, output_path_comm_size):
with open(node2comm_path, 'r') as f:
raw_data = f.read().split('\n')
ID2comm = dict()
for line in raw_data:
if line == '':
continue
ID, comm_no = map(eval, line.split())
ID2comm[ID] = comm_no
user2comm = list()
comm = dict()
with open(hash_path, 'r') as f:
raw_data = f.read().split('\n')
for line in raw_data:
if line != '':
ID, username = line.split()
ID = eval(ID)
user2comm.append({'username': username, 'comm': ID2comm[ID]})
if ID2comm[ID] in comm:
comm[ID2comm[ID]] += 1
else:
comm[ID2comm[ID]] = 1
tot = len(user2comm)
comm_size = list()
for comm_no in comm:
comm_size.append({'comm': comm_no, 'size': comm[comm_no], 'perc': 1.0 * comm[comm_no] / tot})
user2comm = pd.DataFrame(user2comm)
user2comm = user2comm[['username', 'comm']]
user2comm.to_csv(output_path_user2comm, index=False, encoding='utf-8')
comm_size = pd.DataFrame(comm_size)
comm_size = comm_size[['comm', 'size', 'perc']]
comm_size.sort_values(by='size', ascending=False, inplace=True)
comm_size.to_csv(output_path_comm_size, index=False, encoding='utf-8')
def get_comm_edge(graph_path, node2comm_path, output_path):
comm_intra = dict()
comm_inter = dict()
node2comm = dict()
comm_size = dict()
with open(node2comm_path, 'r') as f:
raw_data = f.read().split('\n')
for line in raw_data:
if line == '':
continue
node, comm = map(eval, line.split())
node2comm[node] = comm
if comm in comm_size:
comm_size[comm] += 1
else:
comm_size[comm] = 1
with open(graph_path, 'r') as f:
raw_data = f.read().split('\n')
for line in raw_data:
if line == '':
continue
src, des = map(eval, line.split())
if node2comm[src] == node2comm[des]:
if node2comm[src] in comm_intra:
comm_intra[node2comm[src]] += 1
else:
comm_intra[node2comm[src]] = 1
else:
if node2comm[src] in comm_inter:
comm_inter[node2comm[src]] += 1
else:
comm_inter[node2comm[src]] = 1
if node2comm[des] in comm_inter:
comm_inter[node2comm[des]] += 1
else:
comm_inter[node2comm[des]] = 1
dataset = list()
for comm in comm_size:
dataset.append({'comm': comm, 'size': comm_size[comm], 'intra_avg': 1.0 * comm_intra[comm] / comm_size[comm], 'inter_avg': 1.0 * comm_inter[comm] / comm_size[comm]})
dataset = pd.DataFrame(dataset)
dataset = dataset[['comm', 'size', 'intra_avg', 'inter_avg']]
dataset.sort_values(by='size', ascending=False, inplace=True)
dataset.to_csv(output_path, index=False, encoding='utf-8')
def get_robustness(file_path, LSCC_output_path, LWCC_output_path):
frac_list = [0.0001, 0.001, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
Graph, H = load_graph(file_path)
InDegV = snap.TIntPrV()
snap.GetNodeInDegV(Graph, InDegV)
OutDegV = snap.TIntPrV()
snap.GetNodeOutDegV(Graph, OutDegV)
degree = dict()
for item in InDegV:
ID = item.GetVal1()
InDeg = item.GetVal2()
degree[ID] = InDeg
for item in OutDegV:
ID = item.GetVal1()
OutDeg = item.GetVal2()
degree[ID] += OutDeg
sorted_degree = sorted(degree.items(), key=itemgetter(1), reverse=True)
tot = len(sorted_degree)
pos = [int(tot * frac) for frac in frac_list]
print pos
cur = 0
LSCC_robust = list()
LWCC_robust = list()
for i in range(tot):
Graph.DelNode(sorted_degree[i][0])
if i == pos[cur] - 1:
LSCC_frac = snap.GetMxSccSz(Graph)
LWCC_frac = snap.GetMxWccSz(Graph)
singleton_frac = 1.0 - 1.0 * snap.CntNonZNodes(Graph) / Graph.GetNodes()
LSCC_robust.append({'removed': frac_list[cur], 'singleton': singleton_frac, 'middle': 1.0 - singleton_frac - LSCC_frac, 'LSCC': LSCC_frac})
LWCC_robust.append({'removed': frac_list[cur], 'singleton': singleton_frac, 'middle': 1.0 - singleton_frac - LWCC_frac, 'LWCC': LWCC_frac})
cur += 1
if cur >= len(pos):
break
LSCC_robust = pd.DataFrame(LSCC_robust)
LSCC_robust = LSCC_robust[['removed', 'singleton', 'middle', 'LSCC']]
LSCC_robust.to_csv(LSCC_output_path, index=False, encoding='utf-8')
LWCC_robust = pd.DataFrame(LWCC_robust)
LWCC_robust = LWCC_robust[['removed', 'singleton', 'middle', 'LWCC']]
LWCC_robust.to_csv(LWCC_output_path, index=False, encoding='utf-8')
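# Robustness procedure used above: nodes are deleted in decreasing order of total
# (in + out) degree; at every fraction in frac_list the script records the size of
# the largest SCC/WCC, the fraction of isolated nodes ("singleton"), and the rest
# ("middle" = 1 - singleton - LSCC/LWCC) of the remaining graph.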
def get_community_CNM(file_path, output_path):
Graph, H = load_graph(file_path)
Graph = convert_to_undirected(Graph)
CmtyV = snap.TCnComV()
modularity = snap.CommunityCNM(Graph, CmtyV)
output_str = 'Modularity: ' + str(modularity) + '\nNum of communities: ' + str(len(CmtyV)) + '\nCommunities:\n'
for Cmty in CmtyV:
output_str += str(len(Cmty)) + '\n'
with open(output_path, 'w') as f:
f.write(output_str)
def get_graph_by_month(graph_path, date_username_path):
date_username_df = load_user_attr_to_df(date_username_path)
date_username_df.sort_values(by='created_date', ascending=False, inplace=True)
print 'date_username loaded'
Graph, H = load_graph(graph_path)
cur_date = ''
print date_username_df['created_date']
for idx, row in date_username_df.iterrows():
if Graph.GetNodes() < 30000:
break
print row['created_date']
try:
if cur_date == '':
cur_date = str(row['created_date'])
if cur_date[-2:] == '01' and str(row['created_date']) != cur_date:
snap.SaveEdgeList(Graph, './data/graph/Graph_%s.txt' % cur_date)
cur_date = str(row['created_date'])
username = row['username']
Node_ID = H.GetDat(username)
Graph.DelNode(Node_ID)
except Exception as e:
print '!'
print e
def get_avg_cc_degree(cc_file_path, degree_file_path, output_path):
cc_df = load_user_attr_to_df(cc_file_path)
degree_df = load_user_attr_to_df(degree_file_path)
df = pd.DataFrame(pd.concat([cc_df, degree_df], axis=1, join='inner'))
cc_degree_dict = {}
for idx, row in df.iterrows():
if 0 <= row['CC'] < 1:
degree = row['in_degree'] + row['out_degree']
if degree in cc_degree_dict:
cc_degree_dict[degree].append(row['CC'])
else:
cc_degree_dict[degree] = []
cc_degree_dict[degree].append(row['CC'])
degree_list = []
avg_cc_list = []
for degree in cc_degree_dict:
degree_list.append(degree)
avg_cc_list.append(np.mean(cc_degree_dict[degree]))
avg_cc_df = pd.DataFrame()
avg_cc_df['degree'] = degree_list
avg_cc_df['avg_cc'] = avg_cc_list
avg_cc_df.sort_values(by='degree', ascending=True, inplace=True)
avg_cc_df.to_csv(output_path, index=False, encoding='utf-8')
def get_pagerank_percentage(percentage=0.01):
df = load_user_attr_to_df('./data/graph/pagerank.csv')
pr_list = df['PR'].tolist()
sorted_pr_list = sorted(pr_list, reverse=True)
print sorted_pr_list[int(percentage * len(sorted_pr_list))]
| mit |
ghislainv/deforestprob | forestatrisk/predict_raster.py | 1 | 6494 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
# author :Ghislain Vieilledent
# email :[email protected], [email protected]
# web :https://ecology.ghislainv.fr
# python_version :>=2.7
# license :GPLv3
# ==============================================================================
# Import
from __future__ import division, print_function # Python 3 compatibility
import os
import sys
from glob import glob
import numpy as np
import pandas as pd
from osgeo import gdal
from .miscellaneous import rescale
from .miscellaneous import progress_bar, makeblock
# predict_raster
def predict_raster(model, var_dir="data",
input_forest_raster="data/forest.tif",
output_file="predictions.tif",
blk_rows=128, **kwargs):
"""Predict the spatial probability of deforestation from a model.
This function predicts the spatial probability of deforestation
from a model_binomial_iCAR model. Computations are done by block and
can be performed on large geographical areas.
:param model: model_binomial_iCAR model to predict from.
:param var_dir: directory with rasters (.tif) of explanatory variables.
:param input_forest_raster: path to forest raster (1 for forest).
:param output_file: name of the output raster file for predictions.
:param blk_rows: if > 0, number of rows for computation by block.
:param **kwargs: additional arguments to be passed to model.predict().
:return: a raster of predictions.
"""
# Mask on forest
fmaskR = gdal.Open(input_forest_raster)
fmaskB = fmaskR.GetRasterBand(1)
# Landscape variables from forest raster
gt = fmaskR.GetGeoTransform()
ncol = fmaskR.RasterXSize
nrow = fmaskR.RasterYSize
Xmin = gt[0]
Xmax = gt[0] + gt[1] * ncol
Ymin = gt[3] + gt[5] * nrow
Ymax = gt[3]
# Raster list
var_tif = var_dir + "/*.tif"
raster_list = glob(var_tif)
raster_list.sort() # Sort names
raster_names = []
for i in range(len(raster_list)):
fname = os.path.basename(raster_list[i])
index_dot = fname.index(".")
raster_names.append(fname[:index_dot])
var_names = raster_names
var_names.extend(["X", "Y", "fmask"])
# Make vrt with gdalbuildvrt
print("Make virtual raster with variables as raster bands")
param = gdal.BuildVRTOptions(resolution="user",
outputBounds=(Xmin, Ymin, Xmax, Ymax),
xRes=gt[1], yRes=-gt[5],
separate=True)
gdal.BuildVRT("/vsimem/var.vrt", raster_list, options=param)
stack = gdal.Open("/vsimem/var.vrt")
nband = stack.RasterCount
proj = stack.GetProjection()
# List of nodata values
bandND = np.zeros(nband)
for k in range(nband):
band = stack.GetRasterBand(k + 1)
# Check the raw GetNoDataValue() return before storing it in the float array:
# comparing the stored float with `is None` / `is np.nan` can never succeed.
nodata_val = band.GetNoDataValue()
if (nodata_val is None) or np.isnan(nodata_val):
print("NoData value is not specified for"
" input raster file {}".format(k))
sys.exit(1)
bandND[k] = nodata_val
bandND = bandND.astype(np.float32)
# Make blocks
blockinfo = makeblock("/vsimem/var.vrt", blk_rows=blk_rows)
nblock = blockinfo[0]
nblock_x = blockinfo[1]
x = blockinfo[3]
y = blockinfo[4]
nx = blockinfo[5]
ny = blockinfo[6]
print("Divide region in {} blocks".format(nblock))
# Raster of predictions
print("Create a raster file on disk for projections")
driver = gdal.GetDriverByName("GTiff")
Pdrv = driver.Create(output_file, ncol, nrow, 1,
gdal.GDT_UInt16,
["COMPRESS=LZW", "PREDICTOR=2", "BIGTIFF=YES"])
Pdrv.SetGeoTransform(gt)
Pdrv.SetProjection(proj)
Pband = Pdrv.GetRasterBand(1)
Pband.SetNoDataValue(0)
# Predict by block
# Message
print("Predict deforestation probability by block")
# Loop on blocks of data
for b in range(nblock):
# Progress bar
progress_bar(nblock, b + 1)
# Position in 1D-arrays
px = b % nblock_x
py = b // nblock_x
# Number of pixels
npix = nx[px] * ny[py]
# Data for one block of the stack (shape = (nband,nrow,ncol))
data = stack.ReadAsArray(x[px], y[py], nx[px], ny[py])
# Replace ND values with -9999
for i in range(nband):
data[i][np.nonzero(data[i] == bandND[i])] = -9999
# Coordinates of the center of the pixels of the block
X_col = gt[0] + x[px] * gt[1] \
+ (np.arange(nx[px]) + 0.5) * gt[1] # +0.5 for center of pixels
X = np.repeat(X_col[np.newaxis, :], ny[py], axis=0)
X = X[np.newaxis, :, :]
Y_row = gt[3] + y[py] * gt[5] \
+ (np.arange(ny[py]) + 0.5) * gt[5] # +0.5 for center of pixels
Y = np.repeat(Y_row[:, np.newaxis], nx[px], axis=1)
Y = Y[np.newaxis, :, :]
# Forest mask
fmaskA = fmaskB.ReadAsArray(x[px], y[py], nx[px], ny[py])
fmaskA = fmaskA.astype(np.float32) # From uint to float
fmaskA[np.nonzero(fmaskA != 1)] = -9999
fmaskA = fmaskA[np.newaxis, :, :]
# Concatenate forest mask with stack
data = np.concatenate((data, X, Y, fmaskA), axis=0)
# Transpose and reshape to 2D array
data = data.transpose(1, 2, 0)
data = data.reshape(npix, nband + 3)
# Observations without NA
w = np.nonzero(~(data == -9999).any(axis=1))
# Remove observations with NA
data = data[w]
# Transform into a pandas DataFrame
df = pd.DataFrame(data)
df.columns = var_names
# Predict
pred = np.zeros(npix) # Initialize with nodata value (0)
if len(w[0]) > 0:
# Get predictions into an array
p = model.predict(df, **kwargs)
# Rescale and return to pred
pred[w] = rescale(p)
# Assign prediction to raster
pred = pred.reshape(ny[py], nx[px])
Pband.WriteArray(pred, x[px], y[py])
# Compute statistics
print("Compute statistics")
Pband.FlushCache() # Write cache data to disk
Pband.ComputeStatistics(False)
# Build overviews
print("Build overviews")
Pdrv.BuildOverviews("nearest", [4, 8, 16, 32])
# Dereference driver
Pband = None
del(Pdrv)
# End
| gpl-3.0 |
linebp/pandas | pandas/tests/dtypes/test_concat.py | 15 | 3058 | # -*- coding: utf-8 -*-
import pandas as pd
import pandas.core.dtypes.concat as _concat
class TestConcatCompat(object):
def check_concat(self, to_concat, exp):
for klass in [pd.Index, pd.Series]:
to_concat_klass = [klass(c) for c in to_concat]
res = _concat.get_dtype_kinds(to_concat_klass)
assert res == set(exp)
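# For example, check_concat([['a'], [1, 2]], ['i', 'object']) asserts that
# _concat.get_dtype_kinds returns {'i', 'object'} for both Index and Series inputs.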
def test_get_dtype_kinds(self):
to_concat = [['a'], [1, 2]]
self.check_concat(to_concat, ['i', 'object'])
to_concat = [[3, 4], [1, 2]]
self.check_concat(to_concat, ['i'])
to_concat = [[3, 4], [1, 2.1]]
self.check_concat(to_concat, ['i', 'f'])
def test_get_dtype_kinds_datetimelike(self):
to_concat = [pd.DatetimeIndex(['2011-01-01']),
pd.DatetimeIndex(['2011-01-02'])]
self.check_concat(to_concat, ['datetime'])
to_concat = [pd.TimedeltaIndex(['1 days']),
pd.TimedeltaIndex(['2 days'])]
self.check_concat(to_concat, ['timedelta'])
def test_get_dtype_kinds_datetimelike_object(self):
to_concat = [pd.DatetimeIndex(['2011-01-01']),
pd.DatetimeIndex(['2011-01-02'], tz='US/Eastern')]
self.check_concat(to_concat,
['datetime', 'datetime64[ns, US/Eastern]'])
to_concat = [pd.DatetimeIndex(['2011-01-01'], tz='Asia/Tokyo'),
pd.DatetimeIndex(['2011-01-02'], tz='US/Eastern')]
self.check_concat(to_concat,
['datetime64[ns, Asia/Tokyo]',
'datetime64[ns, US/Eastern]'])
# timedelta has single type
to_concat = [pd.TimedeltaIndex(['1 days']),
pd.TimedeltaIndex(['2 hours'])]
self.check_concat(to_concat, ['timedelta'])
to_concat = [pd.DatetimeIndex(['2011-01-01'], tz='Asia/Tokyo'),
pd.TimedeltaIndex(['1 days'])]
self.check_concat(to_concat,
['datetime64[ns, Asia/Tokyo]', 'timedelta'])
def test_get_dtype_kinds_period(self):
# because we don't have Period dtype (yet),
# Series results in object dtype
to_concat = [pd.PeriodIndex(['2011-01'], freq='M'),
pd.PeriodIndex(['2011-01'], freq='M')]
res = _concat.get_dtype_kinds(to_concat)
assert res == set(['period[M]'])
to_concat = [pd.Series([pd.Period('2011-01', freq='M')]),
pd.Series([pd.Period('2011-02', freq='M')])]
res = _concat.get_dtype_kinds(to_concat)
assert res == set(['object'])
to_concat = [pd.PeriodIndex(['2011-01'], freq='M'),
pd.PeriodIndex(['2011-01'], freq='D')]
res = _concat.get_dtype_kinds(to_concat)
assert res == set(['period[M]', 'period[D]'])
to_concat = [pd.Series([pd.Period('2011-01', freq='M')]),
pd.Series([pd.Period('2011-02', freq='D')])]
res = _concat.get_dtype_kinds(to_concat)
assert res == set(['object'])
| bsd-3-clause |
francesco-mannella/dmp-esn | DMP/stulp/src/bbo/plotting/plotEvolutionaryOptimization.py | 2 | 12262 | import sys
import numpy
import matplotlib.pyplot as plt
from pylab import *
import numpy as np
import os
import matplotlib.pyplot as pl
import time
#from matplotlib import animation
from plotUpdateSummary import plotUpdateSummaryFromDirectory
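# Command-line usage (see the __main__ block at the bottom of this file):
#   python plotEvolutionaryOptimization.py <directory> [directory2] [directory3] [etc]
# where each directory holds update00001, update00002, ... subdirectories produced by
# an optimization run (costs.txt, cost_eval.txt, distribution_covar*.txt, ...).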
def plotUpdateLines(n_samples_per_update,ax,y_limits=[]):
if (len(y_limits)==0):
y_limits = ax.get_ylim()
# Find good number of horizontal update lines to plot
updates = numpy.arange(0, len(n_samples_per_update))
while len(n_samples_per_update)>20:
n_samples_per_update = n_samples_per_update[0:-1:5]
updates = updates[0:-1:5]
ax.plot([n_samples_per_update, n_samples_per_update],y_limits,'-',color='#bbbbbb',linewidth=0.5,zorder=0)
for ii in range(len(n_samples_per_update)-1):
y = y_limits[0] + 0.9*(y_limits[1]-y_limits[0])
ax.text(n_samples_per_update[ii+1], y,str(updates[ii+1]),
horizontalalignment='center',verticalalignment='top',rotation='vertical')
y = y_limits[0] + 0.95*(y_limits[1]-y_limits[0])
ax.text(mean(ax.get_xlim()), y,'number of updates',
horizontalalignment='center', verticalalignment='top')
ax.set_ylim(y_limits)
def loadNumberOfUpdates(directory):
n_updates = 0;
dir_exists = True;
while (dir_exists):
n_updates+=1
cur_directory = '%s/update%05d' % (directory, n_updates)
dir_exists = os.path.isdir(cur_directory)
n_updates-=1
return n_updates
def loadLearningCurve(directory):
n_updates = loadNumberOfUpdates(directory)
costs_all = []
costs_eval = []
update_at_samples = []
has_noise_free_eval = False;
for update in range(n_updates):
cur_directory = '%s/update%05d' % (directory, update+1)
# Load costs of individual samples
cur_costs = np.loadtxt(cur_directory+"/costs.txt")
costs_all.extend(cur_costs)
update_at_samples.append(len(costs_all))
try:
# Load evaluation cost, if it exists
cur_cost_eval = np.loadtxt(cur_directory+"/cost_eval.txt")
costs_eval.append(np.atleast_1d(cur_cost_eval)[0])
has_noise_free_eval = True
except IOError:
# If there was no noise-free evaluation, take the mean of this epoch
costs_eval.append(mean(cur_costs))
if (has_noise_free_eval):
# The noise-free evaluations are done exactly at the updates
eval_at_samples = [0];
eval_at_samples.extend(update_at_samples[:-1])
else:
# The means are centered between two updates
n = [0]
n.extend(update_at_samples)
eval_at_samples = [0.5*(n[i] + n[i+1]) for i in range(len(n)-1)]
return (update_at_samples, costs_all, eval_at_samples, costs_eval)
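# loadLearningCurve returns, in order: the cumulative sample count at each update,
# the cost of every individual rollout, the sample counts at which evaluations were
# taken, and the (noise-free, or per-update mean) evaluation cost at those points.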
def loadExplorationCurve(directory,i_parallel=-1):
n_updates = loadNumberOfUpdates(directory)
suffix=""
if (i_parallel>=0):
suffix = '_%02d' % i_parallel
# Load all the covar matrices
covar_at_samples = [0]
covars_per_update = [];
for update in range(n_updates):
cur_directory = '%s/update%05d/' % (directory, update+1)
covar = np.loadtxt(cur_directory+"/distribution_covar"+suffix+".txt")
covars_per_update.append(covar)
cur_costs = np.loadtxt(cur_directory+"/costs.txt")
covar_at_samples.append(covar_at_samples[-1]+len(cur_costs))
# Load final covar matrix
covar = np.loadtxt(cur_directory+"/distribution_new_covar"+suffix+".txt")
covars_per_update.append(covar)
# Compute sqrt of max of eigenvalues
sqrt_max_eigvals = [];
for update in range(len(covars_per_update)):
if (isnan(covars_per_update[update]).any()):
print update
print covars_per_update[update]
print "Found nan in covariance matrix..."
eigvals, eigvecs = np.linalg.eig(covars_per_update[update])
sqrt_max_eigvals.append(sqrt(max(eigvals)))
return (covar_at_samples, sqrt_max_eigvals)
def computeMeanCostsPerUpdateDeprecated(n_samples_per_update,costs_per_sample):
# Take the mean of the costs per update
# (first, compute the center of each update)
n = n_samples_per_update
centers_n_samples_per_update = [0.5*(n[i] + n[i+1]) for i in range(len(n)-1)]
mean_samples_per_update = [ np.mean(costs_per_sample[n[i]:n[i+1]]) for i in range(len(n)-1)]
return (centers_n_samples_per_update, mean_samples_per_update)
def plotLearningCurves(all_eval_at_samples,all_costs_eval,ax):
# Check if all n_samples_per_update are the same. Otherwise averaging doesn't make sense.
std_samples = all_eval_at_samples.std(0)
if (sum(std_samples)>0.0001):
print "WARNING: n_samples_per_update must be the same across directories; averaging may be invalid"
eval_at_samples = all_eval_at_samples[0];
# Compute average and standard deviation for learning and exploration curves
mean_costs = all_costs_eval.mean(0)
std_costs = all_costs_eval.std(0)
# Plot costs of all individual samples
line_mean = ax.plot(eval_at_samples,mean_costs,'-',color='blue',linewidth=2)
line_std_plus = ax.plot(eval_at_samples,mean_costs+std_costs,'-',color='blue',linewidth=1)
line_std_min = ax.plot(eval_at_samples,mean_costs-std_costs,'-',color='blue',linewidth=1)
ax.set_xlabel('number of evaluations')
ax.set_ylabel('cost')
ax.set_title('Learning curve')
return (line_mean, line_std_plus, line_std_min)
def plotLearningCurve(samples_eval,costs_eval,ax,costs_all=[]):
# Plot costs of all individual samples
if (len(costs_all)>0):
ax.plot(costs_all,'.',color='gray')
# Plot costs at evaluations
line = ax.plot(samples_eval,costs_eval,'-',color='blue',linewidth=2)
ax.set_xlabel('number of evaluations')
ax.set_ylabel('cost')
ax.set_title('Learning curve')
y_limits = [0,1.2*max(costs_eval)];
ax.set_ylim(y_limits)
return line
def plotExplorationCurves(all_covar_at_samples,all_exploration_curves,ax):
# Check if all n_samples_per_update are the same. Otherwise averaging doesn't make sense.
std_samples = all_covar_at_samples.std(0)
if (sum(std_samples)>0.0001):
print "WARNING: n_samples_per_update must be the same across directories; averaging may be invalid"
covar_at_samples = all_covar_at_samples[0];
# Compute average and standard deviation for learning and exploration curves
mean_curve = all_exploration_curves.mean(0)
std_curve = all_exploration_curves.std(0)
x = covar_at_samples
line_mean = ax.plot(x,mean_curve,'-',color='green',linewidth=2)
line_std_plus = ax.plot(x,mean_curve+std_curve,'-',color='green',linewidth=1)
line_std_min = ax.plot(x,mean_curve-std_curve,'-',color='green',linewidth=1)
ax.set_xlabel('number of evaluations')
ax.set_ylabel('sqrt of max. eigval of covar')
ax.set_title('Exploration magnitude')
return (line_mean, line_std_plus, line_std_min)
def plotExplorationCurve(n_samples_per_update,exploration_curve,ax):
line = ax.plot(n_samples_per_update,exploration_curve,'-',color='green',linewidth=2)
ax.set_xlabel('number of evaluations')
ax.set_ylabel('sqrt of max. eigval of covar')
ax.set_title('Exploration magnitude')
return line
def plotEvolutionaryOptimization(directory,axs,plot_all_rollouts=False):
#################################
# Load and plot learning curve
(update_at_samples, costs_all, eval_at_samples, costs_eval) = loadLearningCurve(directory)
ax = (None if axs==None else axs[1])
plotLearningCurve(eval_at_samples,costs_eval,ax,costs_all)
#y_limits = [0,1.2*max(learning_curve)];
plotUpdateLines(update_at_samples,ax)
#################################
# Load and plot exploration curve
(covar_at_samples, sqrt_max_eigvals) = loadExplorationCurve(directory)
ax = (None if axs==None else axs[0])
plotExplorationCurve(covar_at_samples,sqrt_max_eigvals,ax)
plotUpdateLines(update_at_samples,ax)
n_updates = loadNumberOfUpdates(directory)
ax = (None if axs==None else axs[2])
if (ax!=None):
#################################
# Visualize the update in parameter space
for update in range(n_updates):
cur_directory = '%s/update%05d' % (directory, update+1)
plotUpdateSummaryFromDirectory(cur_directory,ax,False)
cur_directory = '%s/update%05d' % (directory, n_updates)
plotUpdateSummaryFromDirectory(cur_directory,ax,True)
ax.set_title('Search space')
if ( (axs!=None) and (len(axs)>3) ):
ax = axs[3];
rollouts_python_script = directory+'/plotRollouts.py'
if (os.path.isfile(rollouts_python_script)):
lib_path = os.path.abspath(directory)
sys.path.append(lib_path)
from plotRollouts import plotRollouts
for update in range(n_updates):
cur_directory = '%s/update%05d' % (directory, update+1)
filename = "/cost_vars_eval.txt"
if plot_all_rollouts:
filename = "/cost_vars.txt"
cost_vars = np.loadtxt(cur_directory+filename)
rollout_lines = plotRollouts(cost_vars,ax)
color_val = (1.0*update/n_updates)
#cur_color = [1.0-0.9*color_val,0.1+0.9*color_val,0.1]
cur_color = [0.0-0.0*color_val, 0.0+1.0*color_val, 0.0-0.0*color_val]
#print str(update)+" "+str(n_updates)+" "+str(cur_color)
plt.setp(rollout_lines,color=cur_color)
if (update==0):
plt.setp(rollout_lines,color='r',linewidth=2)
def plotEvolutionaryOptimizations(directories,axs):
n_updates = 10000000
for directory in directories:
cur_n_updates = loadNumberOfUpdates(directory)
n_updates = min([cur_n_updates, n_updates])
n_dirs = len(directories)
all_costs_eval = np.empty((n_dirs,n_updates), dtype=float)
all_eval_at_samples = np.empty((n_dirs,n_updates), dtype=float)
for dd in range(len(directories)):
(update_at_samples, tmp, eval_at_samples, costs_eval) = loadLearningCurve(directories[dd])
all_costs_eval[dd] = costs_eval
all_eval_at_samples[dd] = eval_at_samples
ax = axs[1]
lines_lc = plotLearningCurves(all_eval_at_samples,all_costs_eval,ax)
plotUpdateLines(update_at_samples,ax)
all_sqrt_max_eigvals = np.empty((n_dirs,n_updates+1), dtype=float)
all_covar_at_samples = np.empty((n_dirs,n_updates+1), dtype=float)
for dd in range(len(directories)):
(covar_at_samples, sqrt_max_eigvals) = loadExplorationCurve(directories[dd])
all_sqrt_max_eigvals[dd] = sqrt_max_eigvals[:n_updates+1]
all_covar_at_samples[dd] = covar_at_samples[:n_updates+1]
ax = axs[0]
lines_ec = plotExplorationCurves(all_covar_at_samples,all_sqrt_max_eigvals,ax)
plotUpdateLines(update_at_samples,ax)
return (lines_ec, lines_lc)
if __name__=='__main__':
# See if input directory was passed
if (len(sys.argv)<2):
print '\nUsage: '+sys.argv[0]+' <directory> [directory2] [directory3] [etc]\n';
sys.exit()
directories = []
for arg in sys.argv[1:]:
directories.append(str(arg))
if (len(directories)==1):
rollouts_python_script = directories[0]+'/plotRollouts.py'
if (os.path.isfile(rollouts_python_script)):
lib_path = os.path.abspath(directories[0])
sys.path.append(lib_path)
from plotRollouts import plotRollouts
fig = plt.figure(1,figsize=(12, 4))
axs = [ fig.add_subplot(143), fig.add_subplot(144), fig.add_subplot(142) , fig.add_subplot(141) ]
else:
fig = plt.figure(1,figsize=(9, 4))
axs = [ fig.add_subplot(132), fig.add_subplot(133), fig.add_subplot(131) ]
plotEvolutionaryOptimization(directories[0],axs)
else:
fig = plt.figure(1,figsize=(6, 4))
axs = [ fig.add_subplot(121), fig.add_subplot(122) ]
plotEvolutionaryOptimizations(directories,axs)
plt.show()
| gpl-2.0 |
RPGOne/Skynet | scikit-learn-0.18.1/sklearn/cross_validation.py | 7 | 72106 |
"""
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from math import ceil, floor, factorial
import numbers
import time
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import is_classifier, clone
from .utils import indexable, check_random_state, safe_indexing
from .utils.validation import (_is_arraylike, _num_samples,
column_or_1d)
from .utils.multiclass import type_of_target
from .utils.random import choice
from .externals.joblib import Parallel, delayed, logger
from .externals.six import with_metaclass
from .externals.six.moves import zip
from .metrics.scorer import check_scoring
from .utils.fixes import bincount
from .gaussian_process.kernels import Kernel as GPKernel
from .exceptions import FitFailedWarning
warnings.warn("This module was deprecated in version 0.18 in favor of the "
"model_selection module into which all the refactored classes "
"and functions are moved. Also note that the interface of the "
"new CV iterators are different from that of this module. "
"This module will be removed in 0.20.", DeprecationWarning)
__all__ = ['KFold',
'LabelKFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'LabelShuffleSplit',
'check_cv',
'cross_val_score',
'cross_val_predict',
'permutation_test_score',
'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
"""Base class for CV iterators where train_mask = ~test_mask
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
Parameters
----------
n : int
Total number of elements in dataset.
"""
def __init__(self, n):
if abs(n - int(n)) >= np.finfo('f').eps:
raise ValueError("n must be an integer")
self.n = int(n)
def __iter__(self):
ind = np.arange(self.n)
for test_index in self._iter_test_masks():
train_index = np.logical_not(test_index)
train_index = ind[train_index]
test_index = ind[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices()
"""
for test_index in self._iter_test_indices():
test_mask = self._empty_mask()
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
def _empty_mask(self):
return np.zeros(self.n, dtype=np.bool)
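# Hedged sketch (not part of scikit-learn): a minimal custom CV iterator only needs to
# subclass _PartitionIterator and define _iter_test_indices, e.g.
#   class FirstKOut(_PartitionIterator):
#       def __init__(self, n, k):
#           super(FirstKOut, self).__init__(n)
#           self.k = k
#       def _iter_test_indices(self):
#           yield np.arange(self.k)  # single split: the first k samples form the test set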
class LeaveOneOut(_PartitionIterator):
"""Leave-One-Out cross validation iterator.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.LeaveOneOut` instead.
Provides train/test indices to split data in train test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and
``LeavePOut(n, p=1)``.
Due to the high number of test sets (which is the same as the
number of samples) this cross validation method can be very costly.
For large datasets one should favor KFold, StratifiedKFold or
ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = cross_validation.LeaveOneOut(2)
>>> len(loo)
2
>>> print(loo)
sklearn.cross_validation.LeaveOneOut(n=2)
>>> for train_index, test_index in loo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def _iter_test_indices(self):
return range(self.n)
def __repr__(self):
return '%s.%s(n=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
)
def __len__(self):
return self.n
class LeavePOut(_PartitionIterator):
"""Leave-P-Out cross validation iterator
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.LeavePOut` instead.
Provides train/test indices to split data in train test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)``
which creates non-overlapping test sets.
Due to the high number of iterations which grows combinatorically with the
number of samples this cross validation method can be very costly. For
large datasets one should favor KFold, StratifiedKFold or ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
p : int
Size of the test sets.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = cross_validation.LeavePOut(4, 2)
>>> len(lpo)
6
>>> print(lpo)
sklearn.cross_validation.LeavePOut(n=4, p=2)
>>> for train_index, test_index in lpo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, n, p):
super(LeavePOut, self).__init__(n)
self.p = p
def _iter_test_indices(self):
for comb in combinations(range(self.n), self.p):
yield np.array(comb)
def __repr__(self):
return '%s.%s(n=%i, p=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.p,
)
def __len__(self):
return int(factorial(self.n) / factorial(self.n - self.p)
/ factorial(self.p))
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
"""Base class to validate KFold approaches"""
@abstractmethod
def __init__(self, n, n_folds, shuffle, random_state):
super(_BaseKFold, self).__init__(n)
if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
raise ValueError("n_folds must be an integer")
self.n_folds = n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
"k-fold cross validation requires at least one"
" train / test split by setting n_folds=2 or more,"
" got n_folds={0}.".format(n_folds))
if n_folds > self.n:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of samples: {1}.").format(n_folds, n))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.shuffle = shuffle
self.random_state = random_state
class KFold(_BaseKFold):
"""K-Folds cross validation iterator.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.KFold` instead.
Provides train/test indices to split data in train test sets. Split
dataset into k consecutive folds (without shuffling by default).
Each fold is then used as a validation set once while the k - 1 remaining
fold(s) form the training set.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.cross_validation import KFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = KFold(4, n_folds=2)
>>> len(kf)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.KFold(n=4, n_folds=2, shuffle=False,
random_state=None)
>>> for train_index, test_index in kf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first n % n_folds folds have size n // n_folds + 1, other folds have
size n // n_folds.
See also
--------
StratifiedKFold take label information into account to avoid building
folds with imbalanced class distributions (for binary or multiclass
classification tasks).
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, n, n_folds=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n, n_folds, shuffle, random_state)
self.idxs = np.arange(n)
if shuffle:
rng = check_random_state(self.random_state)
rng.shuffle(self.idxs)
def _iter_test_indices(self):
n = self.n
n_folds = self.n_folds
fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=np.int)
fold_sizes[:n % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield self.idxs[start:stop]
current = stop
def __repr__(self):
return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class LabelKFold(_BaseKFold):
"""K-fold iterator variant with non-overlapping labels.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.GroupKFold` instead.
The same label will not appear in two different folds (the number of
distinct labels has to be at least equal to the number of folds).
The folds are approximately balanced in the sense that the number of
distinct labels is approximately the same in each fold.
.. versionadded:: 0.17
Parameters
----------
labels : array-like with shape (n_samples, )
Contains a label for each sample.
The folds are built so that the same label does not appear in two
different folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
Examples
--------
>>> from sklearn.cross_validation import LabelKFold
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> labels = np.array([0, 0, 2, 2])
>>> label_kfold = LabelKFold(labels, n_folds=2)
>>> len(label_kfold)
2
>>> print(label_kfold)
sklearn.cross_validation.LabelKFold(n_labels=4, n_folds=2)
>>> for train_index, test_index in label_kfold:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
...
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [3 4]
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [3 4] [1 2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def __init__(self, labels, n_folds=3):
super(LabelKFold, self).__init__(len(labels), n_folds,
shuffle=False, random_state=None)
unique_labels, labels = np.unique(labels, return_inverse=True)
n_labels = len(unique_labels)
if n_folds > n_labels:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of labels: {1}.").format(n_folds,
n_labels))
# Weight labels by their number of occurrences
n_samples_per_label = np.bincount(labels)
# Distribute the most frequent labels first
indices = np.argsort(n_samples_per_label)[::-1]
n_samples_per_label = n_samples_per_label[indices]
# Total weight of each fold
n_samples_per_fold = np.zeros(n_folds)
# Mapping from label index to fold index
label_to_fold = np.zeros(len(unique_labels))
# Distribute samples by adding the largest weight to the lightest fold
for label_index, weight in enumerate(n_samples_per_label):
lightest_fold = np.argmin(n_samples_per_fold)
n_samples_per_fold[lightest_fold] += weight
label_to_fold[indices[label_index]] = lightest_fold
self.idxs = label_to_fold[labels]
def _iter_test_indices(self):
for f in range(self.n_folds):
yield np.where(self.idxs == f)[0]
def __repr__(self):
return '{0}.{1}(n_labels={2}, n_folds={3})'.format(
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
)
def __len__(self):
return self.n_folds
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross validation iterator
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.StratifiedKFold` instead.
Provides train/test indices to split data in train test sets.
This cross-validation object is a variation of KFold that
returns stratified folds. The folds are made by preserving
the percentage of samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array-like, [n_samples]
Samples to split in K folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = StratifiedKFold(y, n_folds=2)
>>> len(skf)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2,
shuffle=False, random_state=None)
>>> for train_index, test_index in skf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
All the folds have size trunc(n_samples / n_folds), the last one has the
complementary.
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, y, n_folds=3, shuffle=False,
random_state=None):
super(StratifiedKFold, self).__init__(
len(y), n_folds, shuffle, random_state)
y = np.asarray(y)
n_samples = y.shape[0]
unique_labels, y_inversed = np.unique(y, return_inverse=True)
label_counts = bincount(y_inversed)
min_labels = np.min(label_counts)
if np.all(self.n_folds > label_counts):
raise ValueError("All the n_labels for individual classes"
" are less than %d folds."
% (self.n_folds))
if self.n_folds > min_labels:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of labels for any class cannot"
" be less than n_folds=%d."
% (min_labels, self.n_folds)), Warning)
# don't want to use the same seed in each label's shuffle
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each label so as to respect the
# balance of labels
per_label_cvs = [
KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle,
random_state=rng) for c in label_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)):
for label, (_, test_split) in zip(unique_labels, per_label_splits):
label_test_folds = test_folds[y == label]
# the test split can be too big because we used
# KFold(max(c, self.n_folds), self.n_folds) instead of
# KFold(c, self.n_folds) to make it possible to not crash even
# if the data is not 100% stratifiable for all the labels
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(label_test_folds)]
label_test_folds[test_split] = test_fold_idx
test_folds[y == label] = label_test_folds
self.test_folds = test_folds
self.y = y
def _iter_test_masks(self):
for i in range(self.n_folds):
yield self.test_folds == i
def __repr__(self):
return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.y,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class LeaveOneLabelOut(_PartitionIterator):
"""Leave-One-Label_Out cross-validation iterator
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.LeaveOneGroupOut` instead.
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> labels = np.array([1, 1, 2, 2])
>>> lol = cross_validation.LeaveOneLabelOut(labels)
>>> len(lol)
2
>>> print(lol)
sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2])
>>> for train_index, test_index in lol:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, labels):
super(LeaveOneLabelOut, self).__init__(len(labels))
# We make a copy of labels to avoid side-effects during iteration
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
def _iter_test_masks(self):
for i in self.unique_labels:
yield self.labels == i
def __repr__(self):
return '%s.%s(labels=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
)
def __len__(self):
return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
"""Leave-P-Label_Out cross-validation iterator
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.LeavePGroupsOut` instead.
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LeaveOneLabelOut is that
the former builds the test sets with all the samples assigned to
``p`` different values of the labels while the latter uses samples
all assigned the same labels.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
p : int
Number of samples to leave out in the test split.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> labels = np.array([1, 2, 3])
>>> lpl = cross_validation.LeavePLabelOut(labels, p=2)
>>> len(lpl)
3
>>> print(lpl)
sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2)
>>> for train_index, test_index in lpl:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, labels, p):
# We make a copy of labels to avoid side-effects during iteration
super(LeavePLabelOut, self).__init__(len(labels))
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
self.p = p
def _iter_test_masks(self):
comb = combinations(range(self.n_unique_labels), self.p)
for idx in comb:
test_index = self._empty_mask()
idx = np.array(idx)
for l in self.unique_labels[idx]:
test_index[self.labels == l] = True
yield test_index
def __repr__(self):
return '%s.%s(labels=%s, p=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
self.p,
)
def __len__(self):
return int(factorial(self.n_unique_labels) /
factorial(self.n_unique_labels - self.p) /
factorial(self.p))
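# In other words, __len__ above is the binomial coefficient C(n_unique_labels, p);
# as a quick check (illustrative only), 3 unique labels with p=2 give
# 3! / (1! * 2!) = 3 splits, which matches the doctest output above.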
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
self.n = n
self.n_iter = n_iter
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self.n_train, self.n_test = _validate_shuffle_split(n, test_size,
train_size)
def __iter__(self):
for train, test in self._iter_indices():
yield train, test
return
@abstractmethod
def _iter_indices(self):
"""Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validation iterator.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.ShuffleSplit` instead.
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... test_size=.25, random_state=0)
>>> len(rs)
3
>>> print(rs)
... # doctest: +ELLIPSIS
ShuffleSplit(4, n_iter=3, test_size=0.25, ...)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... train_size=0.5, test_size=.25, random_state=0)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_test = permutation[:self.n_test]
ind_train = permutation[self.n_test:self.n_test + self.n_train]
yield ind_train, ind_test
def __repr__(self):
return ('%s(%d, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
if test_size is None and train_size is None:
raise ValueError(
'test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind == 'i':
if test_size >= n:
raise ValueError(
'test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n))
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif np.asarray(test_size).dtype.kind == 'f' and \
train_size + test_size > 1.:
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind == 'i':
if train_size >= n:
raise ValueError("train_size=%d should be smaller "
"than the number of samples %d" %
(train_size, n))
else:
raise ValueError("Invalid value for train_size: %r" % train_size)
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n - n_test
else:
if np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n)
else:
n_train = float(train_size)
if test_size is None:
n_test = n - n_train
if n_train + n_test > n:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n))
return int(n_train), int(n_test)
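# A small worked example of the size resolution above (a sketch, not part of
# the public API): with n=10, test_size=0.25 and train_size=None,
# n_test = ceil(0.25 * 10) = 3 and n_train = 10 - 3 = 7, so (7, 3) is returned.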
def _approximate_mode(class_counts, n_draws, rng):
"""Computes approximate mode of multivariate hypergeometric.
This is an approximation to the mode of the multivariate
hypergeometric given by class_counts and n_draws.
It shouldn't be off by more than one.
    It is the most likely outcome of drawing n_draws many
samples from the population given by class_counts.
Parameters
----------
class_counts : ndarray of int
Population per class.
n_draws : int
Number of draws (samples to draw) from the overall population.
rng : random state
Used to break ties.
Returns
-------
sampled_classes : ndarray of int
Number of samples drawn from each class.
np.sum(sampled_classes) == n_draws
"""
# this computes a bad approximation to the mode of the
# multivariate hypergeometric given by class_counts and n_draws
continuous = n_draws * class_counts / class_counts.sum()
# floored means we don't overshoot n_samples, but probably undershoot
floored = np.floor(continuous)
# we add samples according to how much "left over" probability
# they had, until we arrive at n_samples
need_to_add = int(n_draws - floored.sum())
if need_to_add > 0:
remainder = continuous - floored
values = np.sort(np.unique(remainder))[::-1]
# add according to remainder, but break ties
# randomly to avoid biases
for value in values:
inds, = np.where(remainder == value)
# if we need_to_add less than what's in inds
# we draw randomly from them.
# if we need to add more, we add them all and
# go to the next value
add_now = min(len(inds), need_to_add)
inds = choice(inds, size=add_now, replace=False, random_state=rng)
floored[inds] += 1
need_to_add -= add_now
if need_to_add == 0:
break
return floored.astype(np.int)
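# A small worked example of the allocation above (illustrative only): with
# class_counts = [5, 3, 2] and n_draws = 4, continuous = [2.0, 1.2, 0.8] and
# floored = [2, 1, 0], leaving one draw to add; the largest remainder (0.8)
# belongs to the last class, so the returned counts are [2, 1, 1].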
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross validation iterator
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.StratifiedShuffleSplit` instead.
Provides train/test indices to split data in train test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array, [n_samples]
Labels of samples.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0)
>>> len(sss)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...)
>>> for train_index, test_index in sss:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, y, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
len(y), n_iter, test_size, train_size, random_state)
self.y = np.array(y)
self.classes, self.y_indices = np.unique(y, return_inverse=True)
n_cls = self.classes.shape[0]
if np.min(bincount(self.y_indices)) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of labels for any class cannot"
" be less than 2.")
if self.n_train < n_cls:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_train, n_cls))
if self.n_test < n_cls:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_test, n_cls))
def _iter_indices(self):
rng = check_random_state(self.random_state)
cls_count = bincount(self.y_indices)
for n in range(self.n_iter):
# if there are ties in the class-counts, we want
# to make sure to break them anew in each iteration
n_i = _approximate_mode(cls_count, self.n_train, rng)
class_counts_remaining = cls_count - n_i
t_i = _approximate_mode(class_counts_remaining, self.n_test, rng)
train = []
test = []
for i, _ in enumerate(self.classes):
permutation = rng.permutation(cls_count[i])
perm_indices_class_i = np.where(
(i == self.y_indices))[0][permutation]
train.extend(perm_indices_class_i[:n_i[i]])
test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.y,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
class PredefinedSplit(_PartitionIterator):
"""Predefined split cross validation iterator
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.PredefinedSplit` instead.
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
test_fold : "array-like, shape (n_samples,)
test_fold[i] gives the test set fold of sample i. A value of -1
indicates that the corresponding sample is not part of any test set
folds, but will instead always be put into the training fold.
Examples
--------
>>> from sklearn.cross_validation import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> ps = PredefinedSplit(test_fold=[0, 1, -1, 1])
>>> len(ps)
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
sklearn.cross_validation.PredefinedSplit(test_fold=[ 0 1 -1 1])
>>> for train_index, test_index in ps:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
super(PredefinedSplit, self).__init__(len(test_fold))
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def _iter_test_indices(self):
for f in self.unique_folds:
yield np.where(self.test_fold == f)[0]
def __repr__(self):
return '%s.%s(test_fold=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.test_fold)
def __len__(self):
return len(self.unique_folds)
class LabelShuffleSplit(ShuffleSplit):
"""Shuffle-Labels-Out cross-validation iterator
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.GroupShuffleSplit` instead.
Provides randomized train/test indices to split data according to a
third-party provided label. This label information can be used to encode
arbitrary domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LabelShuffleSplit is that
the former generates splits using all subsets of size ``p`` unique labels,
whereas LabelShuffleSplit generates a user-determined number of random
test splits, each with a user-determined fraction of unique labels.
For example, a less computationally intensive alternative to
``LeavePLabelOut(labels, p=10)`` would be
``LabelShuffleSplit(labels, test_size=10, n_iter=100)``.
Note: The parameters ``test_size`` and ``train_size`` refer to labels, and
not to samples, as in ShuffleSplit.
.. versionadded:: 0.17
Parameters
----------
labels : array, [n_samples]
Labels of samples
n_iter : int (default 5)
Number of re-shuffling and splitting iterations.
test_size : float (default 0.2), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the labels to include in the test split. If
int, represents the absolute number of test labels. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the labels to include in the train split. If
int, represents the absolute number of train labels. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
"""
def __init__(self, labels, n_iter=5, test_size=0.2, train_size=None,
random_state=None):
classes, label_indices = np.unique(labels, return_inverse=True)
super(LabelShuffleSplit, self).__init__(
len(classes),
n_iter=n_iter,
test_size=test_size,
train_size=train_size,
random_state=random_state)
self.labels = labels
self.classes = classes
self.label_indices = label_indices
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.labels,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _iter_indices(self):
for label_train, label_test in super(LabelShuffleSplit,
self)._iter_indices():
# these are the indices of classes in the partition
# invert them into data indices
train = np.flatnonzero(np.in1d(self.label_indices, label_train))
test = np.flatnonzero(np.in1d(self.label_indices, label_test))
yield train, test
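# A minimal usage sketch for LabelShuffleSplit (illustrative; the exact
# indices depend on random_state, and test_size counts labels, not samples):
#
#     labels = [1, 1, 2, 2, 3, 3]
#     slo = LabelShuffleSplit(labels, n_iter=2, test_size=0.5, random_state=0)
#     for train_index, test_index in slo:
#         # each test set contains every sample from two of the three labels
#         print("TRAIN:", train_index, "TEST:", test_index)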
##############################################################################
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Generate cross-validated estimates for each input data point
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :func:`sklearn.model_selection.cross_val_predict` instead.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
preds : ndarray
This is the result of calling 'predict'
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.cross_validation import cross_val_predict
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
>>> y_pred = cross_val_predict(lasso, X, y)
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
preds_blocks = parallel(delayed(_fit_and_predict)(clone(estimator), X, y,
train, test, verbose,
fit_params)
for train, test in cv)
preds = [p for p, _ in preds_blocks]
locs = np.concatenate([loc for _, loc in preds_blocks])
if not _check_is_partition(locs, _num_samples(X)):
raise ValueError('cross_val_predict only works for partitions')
inv_locs = np.empty(len(locs), dtype=int)
inv_locs[locs] = np.arange(len(locs))
# Check for sparse predictions
if sp.issparse(preds[0]):
preds = sp.vstack(preds, format=preds[0].format)
else:
preds = np.concatenate(preds)
return preds[inv_locs]
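# A small worked example of the reordering above (illustrative): if the CV
# folds visit the samples in the order locs = [2, 0, 1], then
# inv_locs = [1, 2, 0], and preds[inv_locs] places every prediction back at
# the index of the sample it was made for.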
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params):
"""Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
Returns
-------
preds : sequence
Result of calling 'estimator.predict'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
preds = estimator.predict(X_test)
return preds, test
def _check_is_partition(locs, n):
"""Check whether locs is a reordering of the array np.arange(n)
Parameters
----------
locs : ndarray
integer array to test
n : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(locs) != n:
return False
hit = np.zeros(n, bool)
hit[locs] = True
if not np.all(hit):
return False
return True
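# For example (illustrative): _check_is_partition(np.array([2, 0, 1]), 3) is
# True, while _check_is_partition(np.array([0, 0, 2]), 3) is False because
# index 1 is never hit.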
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :func:`sklearn.model_selection.cross_val_score` instead.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.cross_validation import cross_val_score
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
>>> print(cross_val_score(lasso, X, y)) # doctest: +ELLIPSIS
[ 0.33150734 0.08022311 0.03531764]
See Also
---------
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv)
return np.array(scores)[:, 0]
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
        Return the parameters that have been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
scoring_time : float
Time spent for fitting and scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = ''
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)"
)
else:
test_score = _score(estimator, X_test, y_test, scorer)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if hasattr(estimator, 'kernel') and callable(estimator.kernel) \
and not isinstance(estimator.kernel, GPKernel):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[idx] for idx in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if hasattr(score, 'item'):
try:
# e.g. unwrap memmapped scalars
score = score.item()
except ValueError:
# non-scalar?
pass
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind]
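# For example (illustrative): with y = [1, 2, 3, 4] and labels = [0, 0, 1, 1],
# positions {0, 1} are only permuted among themselves and {2, 3} among
# themselves, so 1 and 2 may swap but never mix with 3 and 4.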
def check_cv(cv, X=None, y=None, classifier=False):
"""Input checker utility for building a CV in a user friendly way.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :func:`sklearn.model_selection.check_cv` instead.
Parameters
----------
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if classifier is True and ``y`` is binary or
multiclass, :class:`StratifiedKFold` is used. In all other cases,
:class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
X : array-like
The data the cross-val object will be applied on.
y : array-like
The target variable for a supervised learning problem.
classifier : boolean optional
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv: a cross-validation generator instance.
The return value is guaranteed to be a cv generator instance, whatever
the input type.
"""
is_sparse = sp.issparse(X)
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if classifier:
if type_of_target(y) in ['binary', 'multiclass']:
cv = StratifiedKFold(y, cv)
else:
cv = KFold(_num_samples(y), cv)
else:
if not is_sparse:
n_samples = len(X)
else:
n_samples = X.shape[0]
cv = KFold(n_samples, cv)
return cv
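# A minimal sketch of the dispatch above (illustrative): check_cv(3, X, y,
# classifier=True) yields StratifiedKFold(y, 3) when y is binary or
# multiclass, while classifier=False (or a continuous y) falls back to a
# plain 3-fold KFold over the samples.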
def permutation_test_score(estimator, X, y, cv=None,
n_permutations=100, n_jobs=1, labels=None,
random_state=0, verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :func:`sklearn.model_selection.permutation_test_score` instead.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
labels : array-like of shape [n_samples] (optional)
Labels constrain the permutation among groups of samples with
        the same label.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
        The scores obtained for each permutation.
pvalue : float
The returned value equals p-value if `scoring` returns bigger
numbers for better scores (e.g., accuracy_score). If `scoring` is
rather a loss function (i.e. when lower is better such as with
`mean_squared_error`) then this is actually the complement of the
p-value: 1 - p-value.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, labels, random_state), cv,
scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False # to avoid a pb with nosetests
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :func:`sklearn.model_selection.train_test_split` instead.
Quick utility that wraps input validation and
``next(iter(ShuffleSplit(n_samples)))`` and application to input
data into a single call for splitting (and optionally subsampling)
data in a oneliner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of indexables with same length / shape[0]
Allowed inputs are lists, numpy arrays, scipy-sparse
matrices or pandas dataframes.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
stratify : array-like or None (default is None)
If not None, data is split in a stratified fashion, using this as
the labels array.
.. versionadded:: 0.17
*stratify* splitting
Returns
-------
splitting : list, length = 2 * len(arrays),
List containing train-test split of inputs.
.. versionadded:: 0.16
If the input is sparse, the output will be a
``scipy.sparse.csr_matrix``. Else, output type is the same as the
input type.
Examples
--------
>>> import numpy as np
>>> from sklearn.cross_validation import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
stratify = options.pop('stratify', None)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
if stratify is not None:
cv = StratifiedShuffleSplit(stratify, test_size=test_size,
train_size=train_size,
random_state=random_state)
else:
n_samples = _num_samples(arrays[0])
cv = ShuffleSplit(n_samples, test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(iter(cv))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False # to avoid a pb with nosetests
| bsd-3-clause |
lin-credible/scikit-learn | examples/mixture/plot_gmm.py | 248 | 2817 | """
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians with EM
and variational Dirichlet process.
Both models have access to five components with which to fit the
data. Note that the EM model will necessarily use all five components
while the DP model will effectively only use as many as are needed for
a good fit. This is a property of the Dirichlet Process prior. Here we
can see that the EM model splits some components arbitrarily, because it
is trying to fit too many components, while the Dirichlet Process model
adapts its number of states automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are fewer examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
# Fit a mixture of Gaussians with EM using five components
gmm = mixture.GMM(n_components=5, covariance_type='full')
gmm.fit(X)
# Fit a Dirichlet process mixture of Gaussians using five components
dpgmm = mixture.DPGMM(n_components=5, covariance_type='full')
dpgmm.fit(X)
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([(gmm, 'GMM'),
(dpgmm, 'Dirichlet Process GMM')]):
splot = plt.subplot(2, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title(title)
plt.show()
| bsd-3-clause |
DonaldMcRonald/SketchRecognitionWithTensorFlow | src/main/python/gui/plotter.py | 1 | 1523 | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from recognition import sketch_utils
def draw_shape(plotter_instance, shape):
def stroke_func(points, stroke):
plot_stroke(plotter_instance, stroke)
return None
def shape_func(values, shape):
return None
sketch_utils.call_shape_recursively(shape_func=shape_func, stroke_func=stroke_func, srl_object=shape)
def plot_shape_as_points(plotter_instance, shape):
points = sketch_utils.create_points_from_shape(shape)
sketch_utils.strip_ids_from_points(points)
plot_point_list(plotter_instance, points)
def plot_stroke(plotter_instance, stroke):
points = sketch_utils.convert_points_to_array(stroke.points, stroke)
sketch_utils.strip_ids_from_points(points)
plot_point_list(plotter_instance=plotter_instance, points=points)
def plot_template(plotter_instance, template):
plot_shape_as_points(plotter_instance, template.shape)
def plot_point_list(plotter_instance, points):
if len(points) < 4:
return
plotter_instance.scatter(*zip(*points))
#plotter_instance.axis([minX, maxX, minY, maxY])
def save(plotter_instance, file, path=None):
#plotter_instance.ylim([0,20])
#plotter_instance.xlim([0,20])
if path is None:
path = 'test/'
plotter_instance.savefig(path + '/' + file)
plotter_instance.close()
def get_plotter_instance():
"""creates a new plot instance"""
plt.figure()
return plt
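# A hypothetical end-to-end sketch of this helper module (the point list,
# file name and 'test' output directory below are assumptions, not part of
# the file):
#
#     plt_inst = get_plotter_instance()
#     plot_point_list(plt_inst, [(0, 0), (1, 2), (2, 1), (3, 3)])
#     save(plt_inst, 'strokes.png', path='test')  # writes test/strokes.png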
| mit |
GuessWhoSamFoo/pandas | pandas/tests/frame/test_validate.py | 2 | 1063 | import pytest
from pandas.core.frame import DataFrame
@pytest.fixture
def dataframe():
return DataFrame({'a': [1, 2], 'b': [3, 4]})
class TestDataFrameValidate(object):
"""Tests for error handling related to data types of method arguments."""
@pytest.mark.parametrize("func", ["query", "eval", "set_index",
"reset_index", "dropna",
"drop_duplicates", "sort_values"])
@pytest.mark.parametrize("inplace", [1, "True", [1, 2, 3], 5.0])
def test_validate_bool_args(self, dataframe, func, inplace):
msg = "For argument \"inplace\" expected type bool"
kwargs = dict(inplace=inplace)
if func == "query":
kwargs["expr"] = "a > b"
elif func == "eval":
kwargs["expr"] = "a + b"
elif func == "set_index":
kwargs["keys"] = ["a"]
elif func == "sort_values":
kwargs["by"] = ["a"]
with pytest.raises(ValueError, match=msg):
getattr(dataframe, func)(**kwargs)
| bsd-3-clause |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/pandas/tests/groupby/test_transform.py | 7 | 22785 | """ test with the .transform """
import pytest
import numpy as np
import pandas as pd
from pandas.util import testing as tm
from pandas import Series, DataFrame, Timestamp, MultiIndex, concat, date_range
from pandas.core.dtypes.common import (
_ensure_platform_int, is_timedelta64_dtype)
from pandas.compat import StringIO
from pandas._libs import groupby
from .common import MixIn, assert_fp_equal
from pandas.util.testing import assert_frame_equal, assert_series_equal
from pandas.core.groupby import DataError
from pandas.core.config import option_context
class TestGroupBy(MixIn):
def test_transform(self):
data = Series(np.arange(9) // 3, index=np.arange(9))
index = np.arange(9)
np.random.shuffle(index)
data = data.reindex(index)
grouped = data.groupby(lambda x: x // 3)
transformed = grouped.transform(lambda x: x * x.sum())
assert transformed[7] == 12
# GH 8046
# make sure that we preserve the input order
df = DataFrame(
np.arange(6, dtype='int64').reshape(
3, 2), columns=["a", "b"], index=[0, 2, 1])
key = [0, 0, 1]
expected = df.sort_index().groupby(key).transform(
lambda x: x - x.mean()).groupby(key).mean()
result = df.groupby(key).transform(lambda x: x - x.mean()).groupby(
key).mean()
assert_frame_equal(result, expected)
def demean(arr):
return arr - arr.mean()
people = DataFrame(np.random.randn(5, 5),
columns=['a', 'b', 'c', 'd', 'e'],
index=['Joe', 'Steve', 'Wes', 'Jim', 'Travis'])
key = ['one', 'two', 'one', 'two', 'one']
result = people.groupby(key).transform(demean).groupby(key).mean()
expected = people.groupby(key).apply(demean).groupby(key).mean()
assert_frame_equal(result, expected)
# GH 8430
df = tm.makeTimeDataFrame()
g = df.groupby(pd.TimeGrouper('M'))
g.transform(lambda x: x - 1)
# GH 9700
df = DataFrame({'a': range(5, 10), 'b': range(5)})
result = df.groupby('a').transform(max)
expected = DataFrame({'b': range(5)})
tm.assert_frame_equal(result, expected)
def test_transform_fast(self):
df = DataFrame({'id': np.arange(100000) / 3,
'val': np.random.randn(100000)})
grp = df.groupby('id')['val']
values = np.repeat(grp.mean().values,
_ensure_platform_int(grp.count().values))
expected = pd.Series(values, index=df.index, name='val')
result = grp.transform(np.mean)
assert_series_equal(result, expected)
result = grp.transform('mean')
assert_series_equal(result, expected)
# GH 12737
df = pd.DataFrame({'grouping': [0, 1, 1, 3], 'f': [1.1, 2.1, 3.1, 4.5],
'd': pd.date_range('2014-1-1', '2014-1-4'),
'i': [1, 2, 3, 4]},
columns=['grouping', 'f', 'i', 'd'])
result = df.groupby('grouping').transform('first')
dates = [pd.Timestamp('2014-1-1'), pd.Timestamp('2014-1-2'),
pd.Timestamp('2014-1-2'), pd.Timestamp('2014-1-4')]
expected = pd.DataFrame({'f': [1.1, 2.1, 2.1, 4.5],
'd': dates,
'i': [1, 2, 2, 4]},
columns=['f', 'i', 'd'])
assert_frame_equal(result, expected)
# selection
result = df.groupby('grouping')[['f', 'i']].transform('first')
expected = expected[['f', 'i']]
assert_frame_equal(result, expected)
# dup columns
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['g', 'a', 'a'])
result = df.groupby('g').transform('first')
expected = df.drop('g', axis=1)
assert_frame_equal(result, expected)
def test_transform_broadcast(self):
grouped = self.ts.groupby(lambda x: x.month)
result = grouped.transform(np.mean)
tm.assert_index_equal(result.index, self.ts.index)
for _, gp in grouped:
assert_fp_equal(result.reindex(gp.index), gp.mean())
grouped = self.tsframe.groupby(lambda x: x.month)
result = grouped.transform(np.mean)
tm.assert_index_equal(result.index, self.tsframe.index)
for _, gp in grouped:
agged = gp.mean()
res = result.reindex(gp.index)
for col in self.tsframe:
assert_fp_equal(res[col], agged[col])
# group columns
grouped = self.tsframe.groupby({'A': 0, 'B': 0, 'C': 1, 'D': 1},
axis=1)
result = grouped.transform(np.mean)
tm.assert_index_equal(result.index, self.tsframe.index)
tm.assert_index_equal(result.columns, self.tsframe.columns)
for _, gp in grouped:
agged = gp.mean(1)
res = result.reindex(columns=gp.columns)
for idx in gp.index:
assert_fp_equal(res.xs(idx), agged[idx])
def test_transform_axis(self):
# make sure that we are setting the axes
# correctly when on axis=0 or 1
# in the presence of a non-monotonic indexer
# GH12713
base = self.tsframe.iloc[0:5]
r = len(base.index)
c = len(base.columns)
tso = DataFrame(np.random.randn(r, c),
index=base.index,
columns=base.columns,
dtype='float64')
# monotonic
ts = tso
grouped = ts.groupby(lambda x: x.weekday())
result = ts - grouped.transform('mean')
expected = grouped.apply(lambda x: x - x.mean())
assert_frame_equal(result, expected)
ts = ts.T
grouped = ts.groupby(lambda x: x.weekday(), axis=1)
result = ts - grouped.transform('mean')
expected = grouped.apply(lambda x: (x.T - x.mean(1)).T)
assert_frame_equal(result, expected)
# non-monotonic
ts = tso.iloc[[1, 0] + list(range(2, len(base)))]
grouped = ts.groupby(lambda x: x.weekday())
result = ts - grouped.transform('mean')
expected = grouped.apply(lambda x: x - x.mean())
assert_frame_equal(result, expected)
ts = ts.T
grouped = ts.groupby(lambda x: x.weekday(), axis=1)
result = ts - grouped.transform('mean')
expected = grouped.apply(lambda x: (x.T - x.mean(1)).T)
assert_frame_equal(result, expected)
def test_transform_dtype(self):
# GH 9807
# Check transform dtype output is preserved
df = DataFrame([[1, 3], [2, 3]])
result = df.groupby(1).transform('mean')
expected = DataFrame([[1.5], [1.5]])
assert_frame_equal(result, expected)
def test_transform_bug(self):
# GH 5712
# transforming on a datetime column
df = DataFrame(dict(A=Timestamp('20130101'), B=np.arange(5)))
result = df.groupby('A')['B'].transform(
lambda x: x.rank(ascending=False))
expected = Series(np.arange(5, 0, step=-1), name='B')
assert_series_equal(result, expected)
def test_transform_datetime_to_timedelta(self):
# GH 15429
# transforming a datetime to timedelta
df = DataFrame(dict(A=Timestamp('20130101'), B=np.arange(5)))
expected = pd.Series([
Timestamp('20130101') - Timestamp('20130101')] * 5, name='A')
# this does date math without changing result type in transform
base_time = df['A'][0]
result = df.groupby('A')['A'].transform(
lambda x: x.max() - x.min() + base_time) - base_time
assert_series_equal(result, expected)
# this does date math and causes the transform to return timedelta
result = df.groupby('A')['A'].transform(lambda x: x.max() - x.min())
assert_series_equal(result, expected)
def test_transform_datetime_to_numeric(self):
# GH 10972
# convert dt to float
df = DataFrame({
'a': 1, 'b': date_range('2015-01-01', periods=2, freq='D')})
result = df.groupby('a').b.transform(
lambda x: x.dt.dayofweek - x.dt.dayofweek.mean())
expected = Series([-0.5, 0.5], name='b')
assert_series_equal(result, expected)
# convert dt to int
df = DataFrame({
'a': 1, 'b': date_range('2015-01-01', periods=2, freq='D')})
result = df.groupby('a').b.transform(
lambda x: x.dt.dayofweek - x.dt.dayofweek.min())
expected = Series([0, 1], name='b')
assert_series_equal(result, expected)
def test_transform_casting(self):
# 13046
data = """
idx A ID3 DATETIME
0 B-028 b76cd912ff "2014-10-08 13:43:27"
1 B-054 4a57ed0b02 "2014-10-08 14:26:19"
2 B-076 1a682034f8 "2014-10-08 14:29:01"
3 B-023 b76cd912ff "2014-10-08 18:39:34"
4 B-023 f88g8d7sds "2014-10-08 18:40:18"
5 B-033 b76cd912ff "2014-10-08 18:44:30"
6 B-032 b76cd912ff "2014-10-08 18:46:00"
7 B-037 b76cd912ff "2014-10-08 18:52:15"
8 B-046 db959faf02 "2014-10-08 18:59:59"
9 B-053 b76cd912ff "2014-10-08 19:17:48"
10 B-065 b76cd912ff "2014-10-08 19:21:38"
"""
df = pd.read_csv(StringIO(data), sep='\s+',
index_col=[0], parse_dates=['DATETIME'])
result = df.groupby('ID3')['DATETIME'].transform(lambda x: x.diff())
assert is_timedelta64_dtype(result.dtype)
result = df[['ID3', 'DATETIME']].groupby('ID3').transform(
lambda x: x.diff())
assert is_timedelta64_dtype(result.DATETIME.dtype)
def test_transform_multiple(self):
grouped = self.ts.groupby([lambda x: x.year, lambda x: x.month])
grouped.transform(lambda x: x * 2)
grouped.transform(np.mean)
def test_dispatch_transform(self):
df = self.tsframe[::5].reindex(self.tsframe.index)
grouped = df.groupby(lambda x: x.month)
filled = grouped.fillna(method='pad')
fillit = lambda x: x.fillna(method='pad')
expected = df.groupby(lambda x: x.month).transform(fillit)
assert_frame_equal(filled, expected)
def test_transform_select_columns(self):
f = lambda x: x.mean()
result = self.df.groupby('A')['C', 'D'].transform(f)
selection = self.df[['C', 'D']]
expected = selection.groupby(self.df['A']).transform(f)
assert_frame_equal(result, expected)
def test_transform_exclude_nuisance(self):
# this also tests orderings in transform between
# series/frame to make sure it's consistent
expected = {}
grouped = self.df.groupby('A')
expected['C'] = grouped['C'].transform(np.mean)
expected['D'] = grouped['D'].transform(np.mean)
expected = DataFrame(expected)
result = self.df.groupby('A').transform(np.mean)
assert_frame_equal(result, expected)
def test_transform_function_aliases(self):
result = self.df.groupby('A').transform('mean')
expected = self.df.groupby('A').transform(np.mean)
assert_frame_equal(result, expected)
result = self.df.groupby('A')['C'].transform('mean')
expected = self.df.groupby('A')['C'].transform(np.mean)
assert_series_equal(result, expected)
def test_series_fast_transform_date(self):
# GH 13191
df = pd.DataFrame({'grouping': [np.nan, 1, 1, 3],
'd': pd.date_range('2014-1-1', '2014-1-4')})
result = df.groupby('grouping')['d'].transform('first')
dates = [pd.NaT, pd.Timestamp('2014-1-2'), pd.Timestamp('2014-1-2'),
pd.Timestamp('2014-1-4')]
expected = pd.Series(dates, name='d')
assert_series_equal(result, expected)
def test_transform_length(self):
# GH 9697
df = pd.DataFrame({'col1': [1, 1, 2, 2], 'col2': [1, 2, 3, np.nan]})
expected = pd.Series([3.0] * 4)
def nsum(x):
return np.nansum(x)
results = [df.groupby('col1').transform(sum)['col2'],
df.groupby('col1')['col2'].transform(sum),
df.groupby('col1').transform(nsum)['col2'],
df.groupby('col1')['col2'].transform(nsum)]
for result in results:
assert_series_equal(result, expected, check_names=False)
def test_transform_coercion(self):
# 14457
# when we are transforming be sure to not coerce
# via assignment
df = pd.DataFrame(dict(A=['a', 'a'], B=[0, 1]))
g = df.groupby('A')
expected = g.transform(np.mean)
result = g.transform(lambda x: np.mean(x))
assert_frame_equal(result, expected)
def test_groupby_transform_with_int(self):
# GH 3740, make sure that we might upcast on item-by-item transform
# floats
df = DataFrame(dict(A=[1, 1, 1, 2, 2, 2], B=Series(1, dtype='float64'),
C=Series(
[1, 2, 3, 1, 2, 3], dtype='float64'), D='foo'))
with np.errstate(all='ignore'):
result = df.groupby('A').transform(
lambda x: (x - x.mean()) / x.std())
expected = DataFrame(dict(B=np.nan, C=Series(
[-1, 0, 1, -1, 0, 1], dtype='float64')))
assert_frame_equal(result, expected)
# int case
df = DataFrame(dict(A=[1, 1, 1, 2, 2, 2], B=1,
C=[1, 2, 3, 1, 2, 3], D='foo'))
with np.errstate(all='ignore'):
result = df.groupby('A').transform(
lambda x: (x - x.mean()) / x.std())
expected = DataFrame(dict(B=np.nan, C=[-1, 0, 1, -1, 0, 1]))
assert_frame_equal(result, expected)
# int that needs float conversion
s = Series([2, 3, 4, 10, 5, -1])
df = DataFrame(dict(A=[1, 1, 1, 2, 2, 2], B=1, C=s, D='foo'))
with np.errstate(all='ignore'):
result = df.groupby('A').transform(
lambda x: (x - x.mean()) / x.std())
s1 = s.iloc[0:3]
s1 = (s1 - s1.mean()) / s1.std()
s2 = s.iloc[3:6]
s2 = (s2 - s2.mean()) / s2.std()
expected = DataFrame(dict(B=np.nan, C=concat([s1, s2])))
assert_frame_equal(result, expected)
# int downcasting
result = df.groupby('A').transform(lambda x: x * 2 / 2)
expected = DataFrame(dict(B=1, C=[2, 3, 4, 10, 5, -1]))
assert_frame_equal(result, expected)
def test_groupby_transform_with_nan_group(self):
# GH 9941
df = pd.DataFrame({'a': range(10),
'b': [1, 1, 2, 3, np.nan, 4, 4, 5, 5, 5]})
result = df.groupby(df.b)['a'].transform(max)
expected = pd.Series([1., 1., 2., 3., np.nan, 6., 6., 9., 9., 9.],
name='a')
assert_series_equal(result, expected)
def test_transform_mixed_type(self):
index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [1, 2, 3, 1, 2, 3]
])
df = DataFrame({'d': [1., 1., 1., 2., 2., 2.],
'c': np.tile(['a', 'b', 'c'], 2),
'v': np.arange(1., 7.)}, index=index)
def f(group):
group['g'] = group['d'] * 2
return group[:1]
grouped = df.groupby('c')
result = grouped.apply(f)
assert result['d'].dtype == np.float64
# this is by definition a mutating operation!
with option_context('mode.chained_assignment', None):
for key, group in grouped:
res = f(group)
assert_frame_equal(res, result.loc[key])
def test_cython_group_transform_algos(self):
# GH 4095
dtypes = [np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint32,
np.uint64, np.float32, np.float64]
ops = [(groupby.group_cumprod_float64, np.cumproduct, [np.float64]),
(groupby.group_cumsum, np.cumsum, dtypes)]
is_datetimelike = False
for pd_op, np_op, dtypes in ops:
for dtype in dtypes:
data = np.array([[1], [2], [3], [4]], dtype=dtype)
ans = np.zeros_like(data)
labels = np.array([0, 0, 0, 0], dtype=np.int64)
pd_op(ans, data, labels, is_datetimelike)
tm.assert_numpy_array_equal(np_op(data), ans[:, 0],
check_dtype=False)
# with nans
labels = np.array([0, 0, 0, 0, 0], dtype=np.int64)
data = np.array([[1], [2], [3], [np.nan], [4]], dtype='float64')
actual = np.zeros_like(data)
actual.fill(np.nan)
groupby.group_cumprod_float64(actual, data, labels, is_datetimelike)
expected = np.array([1, 2, 6, np.nan, 24], dtype='float64')
tm.assert_numpy_array_equal(actual[:, 0], expected)
actual = np.zeros_like(data)
actual.fill(np.nan)
groupby.group_cumsum(actual, data, labels, is_datetimelike)
expected = np.array([1, 3, 6, np.nan, 10], dtype='float64')
tm.assert_numpy_array_equal(actual[:, 0], expected)
# timedelta
is_datetimelike = True
data = np.array([np.timedelta64(1, 'ns')] * 5, dtype='m8[ns]')[:, None]
actual = np.zeros_like(data, dtype='int64')
groupby.group_cumsum(actual, data.view('int64'), labels,
is_datetimelike)
expected = np.array([np.timedelta64(1, 'ns'), np.timedelta64(
2, 'ns'), np.timedelta64(3, 'ns'), np.timedelta64(4, 'ns'),
np.timedelta64(5, 'ns')])
tm.assert_numpy_array_equal(actual[:, 0].view('m8[ns]'), expected)
def test_cython_transform(self):
# GH 4095
ops = [(('cumprod',
()), lambda x: x.cumprod()), (('cumsum', ()),
lambda x: x.cumsum()),
(('shift', (-1, )),
lambda x: x.shift(-1)), (('shift',
(1, )), lambda x: x.shift())]
s = Series(np.random.randn(1000))
s_missing = s.copy()
s_missing.iloc[2:10] = np.nan
labels = np.random.randint(0, 50, size=1000).astype(float)
# series
for (op, args), targop in ops:
for data in [s, s_missing]:
# print(data.head())
expected = data.groupby(labels).transform(targop)
tm.assert_series_equal(expected,
data.groupby(labels).transform(op,
*args))
tm.assert_series_equal(expected, getattr(
data.groupby(labels), op)(*args))
strings = list('qwertyuiopasdfghjklz')
strings_missing = strings[:]
strings_missing[5] = np.nan
df = DataFrame({'float': s,
'float_missing': s_missing,
'int': [1, 1, 1, 1, 2] * 200,
'datetime': pd.date_range('1990-1-1', periods=1000),
'timedelta': pd.timedelta_range(1, freq='s',
periods=1000),
'string': strings * 50,
'string_missing': strings_missing * 50})
df['cat'] = df['string'].astype('category')
df2 = df.copy()
df2.index = pd.MultiIndex.from_product([range(100), range(10)])
# DataFrame - Single and MultiIndex,
# group by values, index level, columns
for df in [df, df2]:
for gb_target in [dict(by=labels), dict(level=0), dict(by='string')
]: # dict(by='string_missing')]:
# dict(by=['int','string'])]:
gb = df.groupby(**gb_target)
# whitelisted methods set the selection before applying
                # bit of a hack to make sure the cythonized shift
# is equivalent to pre 0.17.1 behavior
if op == 'shift':
gb._set_group_selection()
for (op, args), targop in ops:
if op != 'shift' and 'int' not in gb_target:
# numeric apply fastpath promotes dtype so have
                        # to apply separately and concat
i = gb[['int']].apply(targop)
f = gb[['float', 'float_missing']].apply(targop)
expected = pd.concat([f, i], axis=1)
else:
expected = gb.apply(targop)
expected = expected.sort_index(axis=1)
tm.assert_frame_equal(expected,
gb.transform(op, *args).sort_index(
axis=1))
tm.assert_frame_equal(expected, getattr(gb, op)(*args))
# individual columns
for c in df:
if c not in ['float', 'int', 'float_missing'
] and op != 'shift':
pytest.raises(DataError, gb[c].transform, op)
pytest.raises(DataError, getattr(gb[c], op))
else:
expected = gb[c].apply(targop)
expected.name = c
tm.assert_series_equal(expected,
gb[c].transform(op, *args))
tm.assert_series_equal(expected,
getattr(gb[c], op)(*args))
def test_transform_with_non_scalar_group(self):
# GH 10165
cols = pd.MultiIndex.from_tuples([
('syn', 'A'), ('mis', 'A'), ('non', 'A'),
('syn', 'C'), ('mis', 'C'), ('non', 'C'),
('syn', 'T'), ('mis', 'T'), ('non', 'T'),
('syn', 'G'), ('mis', 'G'), ('non', 'G')])
df = pd.DataFrame(np.random.randint(1, 10, (4, 12)),
columns=cols,
index=['A', 'C', 'G', 'T'])
tm.assert_raises_regex(ValueError, 'transform must return '
'a scalar value for each '
'group.*',
df.groupby(axis=1, level=1).transform,
lambda z: z.div(z.sum(axis=1), axis=0))
| mit |
equialgo/scikit-learn | examples/linear_model/lasso_dense_vs_sparse_data.py | 348 | 1862 | """
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that in the case of sparse data the speed is improved.
"""
print(__doc__)
from time import time
from scipy import sparse
from scipy import linalg
from sklearn.datasets.samples_generator import make_regression
from sklearn.linear_model import Lasso
###############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)
alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
###############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()
print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))
alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(Xs.toarray(), y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
| bsd-3-clause |
kashif/scikit-learn | sklearn/datasets/svmlight_format.py | 29 | 16073 | """This module implements a loader and dumper for the svmlight format
This format is a text-based format, with one sample per line. It does
not store zero-valued features, and hence is suitable for sparse datasets.
The first element of each line can be used to store a target variable to
predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
"""
# Authors: Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from contextlib import closing
import io
import os.path
import numpy as np
import scipy.sparse as sp
from ._svmlight_format import _load_svmlight_file
from .. import __version__
from ..externals import six
from ..externals.six import u, b
from ..externals.six.moves import range, zip
from ..utils import check_array
from ..utils.fixes import frombuffer_empty
def load_svmlight_file(f, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load datasets in the svmlight / libsvm format into sparse CSR matrix
    This format is a text-based format, with one sample per line. It does
    not store zero-valued features, and hence is suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
    Parsing a text-based source can be expensive. When working
    repeatedly on the same dataset, it is recommended to wrap this
loader with joblib.Memory.cache to store a memmapped backup of the
CSR results of the first call and benefit from the near instantaneous
loading of memmapped structures for the subsequent calls.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
    constraints can be used to constrain the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
This implementation is written in Cython and is reasonably fast.
However, a faster API-compatible loader is also available at:
https://github.com/mblondel/svmlight-loader
Parameters
----------
f : {str, file-like, int}
(Path to) a file to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. A file-like or file descriptor will not be closed
by this function. A file-like object must be opened in binary mode.
n_features : int or None
The number of features to use. If None, it will be inferred. This
argument is useful to load several files that are subsets of a
bigger sliced dataset: each subset might not have examples of
every feature, hence the inferred shape might vary from one
slice to another.
multilabel : boolean, optional, default False
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based : boolean or "auto", optional, default "auto"
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id : boolean, default False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
X: scipy.sparse matrix of shape (n_samples, n_features)
    y: ndarray of shape (n_samples,), or, in the multilabel case, a list of
        tuples of length n_samples.
query_id: array of shape (n_samples,)
query_id for each sample. Only returned when query_id is set to
True.
See also
--------
load_svmlight_files: similar function for loading multiple files in this
format, enforcing the same number of features/columns on all of them.
Examples
--------
To use joblib.Memory to cache the svmlight file::
from sklearn.externals.joblib import Memory
from sklearn.datasets import load_svmlight_file
mem = Memory("./mycache")
@mem.cache
def get_data():
data = load_svmlight_file("mysvmlightfile")
return data[0], data[1]
X, y = get_data()
"""
return tuple(load_svmlight_files([f], n_features, dtype, multilabel,
zero_based, query_id))
def _gen_open(f):
if isinstance(f, int): # file descriptor
return io.open(f, "rb", closefd=False)
elif not isinstance(f, six.string_types):
raise TypeError("expected {str, int, file-like}, got %s" % type(f))
_, ext = os.path.splitext(f)
if ext == ".gz":
import gzip
return gzip.open(f, "rb")
elif ext == ".bz2":
from bz2 import BZ2File
return BZ2File(f, "rb")
else:
return open(f, "rb")
def _open_and_load(f, dtype, multilabel, zero_based, query_id):
if hasattr(f, "read"):
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# XXX remove closing when Python 2.7+/3.1+ required
else:
with closing(_gen_open(f)) as f:
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# convert from array.array, give data the right dtype
if not multilabel:
labels = frombuffer_empty(labels, np.float64)
data = frombuffer_empty(data, actual_dtype)
indices = frombuffer_empty(ind, np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc) # never empty
query = frombuffer_empty(query, np.intc)
data = np.asarray(data, dtype=dtype) # no-op for float{32,64}
return data, indices, indptr, labels, query
def load_svmlight_files(files, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load dataset from multiple files in SVMlight format
This function is equivalent to mapping load_svmlight_file over a list of
files, except that the results are concatenated into a single, flat list
and the samples vectors are constrained to all have the same number of
features.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
    constraints can be used to constrain the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
Parameters
----------
files : iterable over {str, file-like, int}
(Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. File-likes and file descriptors will not be
closed by this function. File-like objects must be opened in binary
mode.
n_features: int or None
The number of features to use. If None, it will be inferred from the
maximum column index occurring in any of the files.
This can be set to a higher value than the actual number of features
in any of the input files, but setting it to a lower value will cause
an exception to be raised.
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based: boolean or "auto", optional
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id: boolean, defaults to False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
[X1, y1, ..., Xn, yn]
where each (Xi, yi) pair is the result from load_svmlight_file(files[i]).
If query_id is set to True, this will return instead [X1, y1, q1,
..., Xn, yn, qn] where (Xi, yi, qi) is the result from
load_svmlight_file(files[i])
Notes
-----
When fitting a model to a matrix X_train and evaluating it against a
matrix X_test, it is essential that X_train and X_test have the same
number of features (X_train.shape[1] == X_test.shape[1]). This may not
be the case if you load the files individually with load_svmlight_file.
See also
--------
load_svmlight_file
"""
r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id))
for f in files]
if (zero_based is False
or zero_based == "auto" and all(np.min(tmp[1]) > 0 for tmp in r)):
for ind in r:
indices = ind[1]
indices -= 1
n_f = max(ind[1].max() for ind in r) + 1
if n_features is None:
n_features = n_f
elif n_features < n_f:
raise ValueError("n_features was set to {},"
" but input file contains {} features"
.format(n_features, n_f))
result = []
for data, indices, indptr, y, query_values in r:
shape = (indptr.shape[0] - 1, n_features)
X = sp.csr_matrix((data, indices, indptr), shape)
X.sort_indices()
result += X, y
if query_id:
result.append(query_values)
return result
def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id):
is_sp = int(hasattr(X, "tocsr"))
if X.dtype.kind == 'i':
value_pattern = u("%d:%d")
else:
value_pattern = u("%d:%.16g")
if y.dtype.kind == 'i':
label_pattern = u("%d")
else:
label_pattern = u("%.16g")
line_pattern = u("%s")
if query_id is not None:
line_pattern += u(" qid:%d")
line_pattern += u(" %s\n")
if comment:
f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n"
% __version__))
f.write(b("# Column indices are %s-based\n"
% ["zero", "one"][one_based]))
f.write(b("#\n"))
f.writelines(b("# %s\n" % line) for line in comment.splitlines())
for i in range(X.shape[0]):
if is_sp:
span = slice(X.indptr[i], X.indptr[i + 1])
row = zip(X.indices[span], X.data[span])
else:
nz = X[i] != 0
row = zip(np.where(nz)[0], X[i, nz])
s = " ".join(value_pattern % (j + one_based, x) for j, x in row)
if multilabel:
nz_labels = np.where(y[i] != 0)[0]
labels_str = ",".join(label_pattern % j for j in nz_labels)
else:
labels_str = label_pattern % y[i]
if query_id is not None:
feat = (labels_str, query_id[i], s)
else:
feat = (labels_str, s)
f.write((line_pattern % feat).encode('ascii'))
def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None,
multilabel=False):
"""Dump the dataset in svmlight / libsvm file format.
    This format is a text-based format, with one sample per line. It does
    not store zero-valued features, and hence is suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_labels]
Target values. Class labels must be an integer or float, or array-like
objects of integer or float for multilabel classifications.
f : string or file-like in binary mode
If string, specifies the path that will contain the data.
If file-like, data will be written to f. f should be opened in binary
mode.
zero_based : boolean, optional
Whether column indices should be written zero-based (True) or one-based
(False).
comment : string, optional
Comment to insert at the top of the file. This should be either a
Unicode string, which will be encoded as UTF-8, or an ASCII byte
string.
If a comment is given, then it will be preceded by one that identifies
the file as having been dumped by scikit-learn. Note that not all
tools grok comments in SVMlight files.
query_id : array-like, shape = [n_samples]
Array containing pairwise preference constraints (qid in svmlight
format).
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
.. versionadded:: 0.17
parameter *multilabel* to support multilabel datasets.
"""
if comment is not None:
# Convert comment string to list of lines in UTF-8.
# If a byte string is passed, then check whether it's ASCII;
# if a user wants to get fancy, they'll have to decode themselves.
# Avoid mention of str and unicode types for Python 3.x compat.
if isinstance(comment, bytes):
comment.decode("ascii") # just for the exception
else:
comment = comment.encode("utf-8")
if six.b("\0") in comment:
raise ValueError("comment string contains NUL byte")
y = np.asarray(y)
if y.ndim != 1 and not multilabel:
raise ValueError("expected y of shape (n_samples,), got %r"
% (y.shape,))
Xval = check_array(X, accept_sparse='csr')
if Xval.shape[0] != y.shape[0]:
raise ValueError("X.shape[0] and y.shape[0] should be the same, got"
" %r and %r instead." % (Xval.shape[0], y.shape[0]))
# We had some issues with CSR matrices with unsorted indices (e.g. #1501),
# so sort them here, but first make sure we don't modify the user's X.
# TODO We can do this cheaper; sorted_indices copies the whole matrix.
if Xval is X and hasattr(Xval, "sorted_indices"):
X = Xval.sorted_indices()
else:
X = Xval
if hasattr(X, "sort_indices"):
X.sort_indices()
if query_id is not None:
query_id = np.asarray(query_id)
if query_id.shape[0] != y.shape[0]:
raise ValueError("expected query_id of shape (n_samples,), got %r"
% (query_id.shape,))
one_based = not zero_based
if hasattr(f, "write"):
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
else:
with open(f, "wb") as f:
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
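# A minimal round-trip sketch of the public API described above.  It assumes
# scikit-learn is installed as a package and goes through the
# ``sklearn.datasets`` entry points rather than this file's relative imports;
# the tiny matrix below is made up purely for illustration.
if __name__ == '__main__':
    import os
    import tempfile
    import numpy as np
    from sklearn.datasets import dump_svmlight_file, load_svmlight_file
    X_demo = np.array([[0.0, 1.5, 0.0],
                       [2.0, 0.0, 3.0]])
    y_demo = np.array([0, 1])
    fd, path = tempfile.mkstemp(suffix=".svmlight")
    os.close(fd)
    try:
        # zero-valued entries are simply omitted from the dumped file...
        dump_svmlight_file(X_demo, y_demo, path, zero_based=True)
        # ...but the reconstructed dense view matches the original matrix
        X_back, y_back = load_svmlight_file(path, n_features=3)
        assert np.allclose(X_back.toarray(), X_demo)
        assert np.array_equal(y_back, y_demo)
        print("round-trip OK, X_back has shape", X_back.shape)
    finally:
        os.remove(path)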
| bsd-3-clause |
Adai0808/scikit-learn | sklearn/decomposition/__init__.py | 147 | 1421 | """
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""
from .nmf import NMF, ProjectedGradientNMF
from .pca import PCA, RandomizedPCA
from .incremental_pca import IncrementalPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .truncated_svd import TruncatedSVD
from .fastica_ import FastICA, fastica
from .dict_learning import (dict_learning, dict_learning_online, sparse_encode,
DictionaryLearning, MiniBatchDictionaryLearning,
SparseCoder)
from .factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
from .online_lda import LatentDirichletAllocation
__all__ = ['DictionaryLearning',
'FastICA',
'IncrementalPCA',
'KernelPCA',
'MiniBatchDictionaryLearning',
'MiniBatchSparsePCA',
'NMF',
'PCA',
'ProjectedGradientNMF',
'RandomizedPCA',
'SparseCoder',
'SparsePCA',
'dict_learning',
'dict_learning_online',
'fastica',
'randomized_svd',
'sparse_encode',
'FactorAnalysis',
'TruncatedSVD',
'LatentDirichletAllocation']
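# A minimal usage sketch for one of the exported estimators.  It assumes
# scikit-learn is installed as a package (public ``sklearn.decomposition``
# import path); the random data below is made up purely for illustration.
if __name__ == '__main__':
    import numpy as np
    from sklearn.decomposition import PCA
    rng = np.random.RandomState(0)
    X_demo = rng.randn(100, 5)                     # 100 samples, 5 features
    X_reduced = PCA(n_components=2).fit_transform(X_demo)
    print(X_reduced.shape)                         # -> (100, 2)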
| bsd-3-clause |
abhishekraok/LayeredNeuralNetwork | layeredneuralnetwork/school/frequency.py | 1 | 1297 | """
Teaches frequency domain calculations.
"""
from layeredneuralnetwork.classifier_interface import ClassifierInterface
import math
import numpy as np
from sklearn.model_selection import train_test_split
class Frequency:
@staticmethod
def teach_all_frequency(classifier):
"""
        Will teach the classifier to identify sin(x) and cos(x) waves of all frequencies.
:type classifier: ClassifierInterface
"""
N = classifier.input_dimension
positive_sample_count = 1000
X = np.random.randn(2 * positive_sample_count, N)
Y = np.random.randint(low=0, high=1, size=[2 * positive_sample_count])
scores = []
base_labels = ['cos_', 'sin_']
for base_label in base_labels:
for f in range(N):
label = base_label + str(f)
wave = np.sin([f * i * (2 * math.pi) / N for i in range(N)])
X[:positive_sample_count, :] = np.array([wave, ] * positive_sample_count)
Y[:positive_sample_count] = np.ones(shape=positive_sample_count)
x_train, x_test, y_train, y_test = train_test_split(X, Y)
classifier.fit(x_train, y_train, label)
scores.append(classifier.score(x_test, y_test, label))
return np.mean(scores)
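# A minimal usage sketch for ``teach_all_frequency``.  It only needs an object
# exposing ``input_dimension``, ``fit(X, y, label)`` and ``score(X, y, label)``;
# the toy classifier below (one logistic regression per label) is an assumption
# made purely for illustration and is not the project's ClassifierInterface
# implementation.  Running it also assumes the layeredneuralnetwork package and
# scikit-learn are importable.
if __name__ == '__main__':
    from sklearn.linear_model import LogisticRegression
    class _ToyClassifier(object):
        def __init__(self, input_dimension):
            self.input_dimension = input_dimension
            self._models = {}
        def fit(self, X, y, label):
            model = LogisticRegression()
            model.fit(X, y)
            self._models[label] = model
        def score(self, X, y, label):
            return self._models[label].score(X, y)
    mean_score = Frequency.teach_all_frequency(_ToyClassifier(input_dimension=16))
    print('mean score over all frequency labels: %.3f' % mean_score)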
| mit |
Obus/scikit-learn | examples/model_selection/plot_underfitting_overfitting.py | 230 | 2649 | """
============================
Underfitting vs. Overfitting
============================
This example demonstrates the problems of underfitting and overfitting and
how we can use linear regression with polynomial features to approximate
nonlinear functions. The plot shows the function that we want to approximate,
which is a part of the cosine function. In addition, the samples from the
real function and the approximations of different models are displayed. The
models have polynomial features of different degrees. We can see that a
linear function (polynomial with degree 1) is not sufficient to fit the
training samples. This is called **underfitting**. A polynomial of degree 4
approximates the true function almost perfectly. However, for higher degrees
the model will **overfit** the training data, i.e. it learns the noise of the
training data.
We evaluate quantitatively **overfitting** / **underfitting** by using
cross-validation. We calculate the mean squared error (MSE) on the validation
set, the higher, the less likely the model generalizes correctly from the
training data.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn import cross_validation
np.random.seed(0)
n_samples = 30
degrees = [1, 4, 15]
true_fun = lambda X: np.cos(1.5 * np.pi * X)
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
# Evaluate the models using crossvalidation
scores = cross_validation.cross_val_score(pipeline,
X[:, np.newaxis], y, scoring="mean_squared_error", cv=10)
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
degrees[i], -scores.mean(), scores.std()))
plt.show()
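# Note: ``sklearn.cross_validation`` is the pre-0.18 API.  On newer
# scikit-learn releases the same scores can be computed with the sketch below
# (kept as a comment so the script above still runs unchanged against the
# module it imports); the negation mirrors the ``-scores.mean()`` in the title.
#
# from sklearn.model_selection import cross_val_score
# neg_mse = cross_val_score(pipeline, X[:, np.newaxis], y,
#                           scoring="neg_mean_squared_error", cv=10)
# mse = -neg_mse.mean()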
| bsd-3-clause |
cybernet14/scikit-learn | sklearn/manifold/locally_linear.py | 206 | 25061 | """Locally Linear Embedding"""
# Author: Fabian Pedregosa -- <[email protected]>
# Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
from scipy.linalg import eigh, svd, qr, solve
from scipy.sparse import eye, csr_matrix
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.arpack import eigsh
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..neighbors import NearestNeighbors
def barycenter_weights(X, Z, reg=1e-3):
"""Compute barycenter weights of X from Y along the first axis
We estimate the weights to assign to each point in Y[i] to recover
the point X[i]. The barycenter weights sum to 1.
Parameters
----------
X : array-like, shape (n_samples, n_dim)
Z : array-like, shape (n_samples, n_neighbors, n_dim)
reg: float, optional
amount of regularization to add for the problem to be
well-posed in the case of n_neighbors > n_dim
Returns
-------
B : array-like, shape (n_samples, n_neighbors)
Notes
-----
See developers note for more information.
"""
X = check_array(X, dtype=FLOAT_DTYPES)
Z = check_array(Z, dtype=FLOAT_DTYPES, allow_nd=True)
n_samples, n_neighbors = X.shape[0], Z.shape[1]
B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
v = np.ones(n_neighbors, dtype=X.dtype)
# this might raise a LinalgError if G is singular and has trace
# zero
for i, A in enumerate(Z.transpose(0, 2, 1)):
C = A.T - X[i] # broadcasting
G = np.dot(C, C.T)
trace = np.trace(G)
if trace > 0:
R = reg * trace
else:
R = reg
G.flat[::Z.shape[1] + 1] += R
w = solve(G, v, sym_pos=True)
B[i, :] = w / np.sum(w)
return B
def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3):
"""Computes the barycenter weighted graph of k-Neighbors for points in X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : int
Number of neighbors for each sample.
reg : float, optional
Amount of regularization when solving the least-squares
problem. Only relevant if mode='barycenter'. If None, use the
default.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
See also
--------
sklearn.neighbors.kneighbors_graph
sklearn.neighbors.radius_neighbors_graph
"""
knn = NearestNeighbors(n_neighbors + 1).fit(X)
X = knn._fit_X
n_samples = X.shape[0]
ind = knn.kneighbors(X, return_distance=False)[:, 1:]
data = barycenter_weights(X, X[ind], reg=reg)
indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
return csr_matrix((data.ravel(), ind.ravel(), indptr),
shape=(n_samples, n_samples))
def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100,
random_state=None):
"""
Find the null space of a matrix M.
Parameters
----------
M : {array, matrix, sparse matrix, LinearOperator}
Input covariance matrix: should be symmetric positive semi-definite
k : integer
Number of eigenvalues/vectors to return
k_skip : integer, optional
Number of low eigenvalues to skip.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method.
Not used if eigen_solver=='dense'.
max_iter : maximum number of iterations for 'arpack' method
not used if eigen_solver=='dense'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
"""
if eigen_solver == 'auto':
if M.shape[0] > 200 and k + k_skip < 10:
eigen_solver = 'arpack'
else:
eigen_solver = 'dense'
if eigen_solver == 'arpack':
random_state = check_random_state(random_state)
v0 = random_state.rand(M.shape[0])
try:
eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
tol=tol, maxiter=max_iter,
v0=v0)
except RuntimeError as msg:
raise ValueError("Error in determining null-space with ARPACK. "
"Error message: '%s'. "
"Note that method='arpack' can fail when the "
"weight matrix is singular or otherwise "
"ill-behaved. method='dense' is recommended. "
"See online documentation for more information."
% msg)
return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
elif eigen_solver == 'dense':
if hasattr(M, 'toarray'):
M = M.toarray()
eigen_values, eigen_vectors = eigh(
M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
index = np.argsort(np.abs(eigen_values))
return eigen_vectors[:, index], np.sum(eigen_values)
else:
raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
def locally_linear_embedding(
X, n_neighbors, n_components, reg=1e-3, eigen_solver='auto', tol=1e-6,
max_iter=100, method='standard', hessian_tol=1E-4, modified_tol=1E-12,
random_state=None):
"""Perform a Locally Linear Embedding analysis on the data.
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold.
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
method : {'standard', 'hessian', 'modified', 'ltsa'}
standard : use the standard locally linear embedding algorithm.
see reference [1]_
hessian : use the Hessian eigenmap method. This method requires
            n_neighbors > n_components * (1 + (n_components + 1) / 2).
see reference [2]_
modified : use the modified locally linear embedding algorithm.
see reference [3]_
ltsa : use local tangent space alignment algorithm
see reference [4]_
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if method == 'hessian'
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if method == 'modified'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Returns
-------
Y : array-like, shape [n_samples, n_components]
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
if eigen_solver not in ('auto', 'arpack', 'dense'):
raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)
if method not in ('standard', 'hessian', 'modified', 'ltsa'):
raise ValueError("unrecognized method '%s'" % method)
nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1)
nbrs.fit(X)
X = nbrs._fit_X
N, d_in = X.shape
if n_components > d_in:
raise ValueError("output dimension must be less than or equal "
"to input dimension")
if n_neighbors >= N:
raise ValueError("n_neighbors must be less than number of points")
if n_neighbors <= 0:
raise ValueError("n_neighbors must be positive")
M_sparse = (eigen_solver != 'dense')
if method == 'standard':
W = barycenter_kneighbors_graph(
nbrs, n_neighbors=n_neighbors, reg=reg)
# we'll compute M = (I-W)'(I-W)
# depending on the solver, we'll do this differently
if M_sparse:
M = eye(*W.shape, format=W.format) - W
M = (M.T * M).tocsr()
else:
M = (W.T * W - W.T - W).toarray()
            M.flat[::M.shape[0] + 1] += 1  # now M == (I - W)' (I - W)
elif method == 'hessian':
dp = n_components * (n_components + 1) // 2
if n_neighbors <= n_components + dp:
raise ValueError("for method='hessian', n_neighbors must be "
"greater than "
"[n_components * (n_components + 3) / 2]")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float)
Yi[:, 0] = 1
M = np.zeros((N, N), dtype=np.float)
use_svd = (n_neighbors > d_in)
for i in range(N):
Gi = X[neighbors[i]]
Gi -= Gi.mean(0)
#build Hessian estimator
if use_svd:
U = svd(Gi, full_matrices=0)[0]
else:
Ci = np.dot(Gi, Gi.T)
U = eigh(Ci)[1][:, ::-1]
Yi[:, 1:1 + n_components] = U[:, :n_components]
j = 1 + n_components
for k in range(n_components):
Yi[:, j:j + n_components - k] = (U[:, k:k + 1]
* U[:, k:n_components])
j += n_components - k
Q, R = qr(Yi)
w = Q[:, n_components + 1:]
S = w.sum(0)
S[np.where(abs(S) < hessian_tol)] = 1
w /= S
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(w, w.T)
if M_sparse:
M = csr_matrix(M)
elif method == 'modified':
if n_neighbors < n_components:
raise ValueError("modified LLE requires "
"n_neighbors >= n_components")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
#find the eigenvectors and eigenvalues of each local covariance
# matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
# where the columns are eigenvectors
V = np.zeros((N, n_neighbors, n_neighbors))
nev = min(d_in, n_neighbors)
evals = np.zeros([N, nev])
#choose the most efficient way to find the eigenvectors
use_svd = (n_neighbors > d_in)
if use_svd:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
V[i], evals[i], _ = svd(X_nbrs,
full_matrices=True)
evals **= 2
else:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
C_nbrs = np.dot(X_nbrs, X_nbrs.T)
evi, vi = eigh(C_nbrs)
evals[i] = evi[::-1]
V[i] = vi[:, ::-1]
#find regularized weights: this is like normal LLE.
# because we've already computed the SVD of each covariance matrix,
# it's faster to use this rather than np.linalg.solve
reg = 1E-3 * evals.sum(1)
tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
tmp[:, :nev] /= evals + reg[:, None]
tmp[:, nev:] /= reg[:, None]
w_reg = np.zeros((N, n_neighbors))
for i in range(N):
w_reg[i] = np.dot(V[i], tmp[i])
w_reg /= w_reg.sum(1)[:, None]
#calculate eta: the median of the ratio of small to large eigenvalues
# across the points. This is used to determine s_i, below
rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
eta = np.median(rho)
#find s_i, the size of the "almost null space" for each point:
# this is the size of the largest set of eigenvalues
# such that Sum[v; v in set]/Sum[v; v not in set] < eta
s_range = np.zeros(N, dtype=int)
evals_cumsum = np.cumsum(evals, 1)
eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
for i in range(N):
s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
s_range += n_neighbors - nev # number of zero eigenvalues
#Now calculate M.
# This is the [N x N] matrix whose null space is the desired embedding
M = np.zeros((N, N), dtype=np.float)
for i in range(N):
s_i = s_range[i]
#select bottom s_i eigenvectors and calculate alpha
Vi = V[i, :, n_neighbors - s_i:]
alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)
#compute Householder matrix which satisfies
# Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
# using prescription from paper
h = alpha_i * np.ones(s_i) - np.dot(Vi.T, np.ones(n_neighbors))
norm_h = np.linalg.norm(h)
if norm_h < modified_tol:
h *= 0
else:
h /= norm_h
#Householder matrix is
# >> Hi = np.identity(s_i) - 2*np.outer(h,h)
#Then the weight matrix is
# >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
#We do this much more efficiently:
Wi = (Vi - 2 * np.outer(np.dot(Vi, h), h)
+ (1 - alpha_i) * w_reg[i, :, None])
#Update M as follows:
# >> W_hat = np.zeros( (N,s_i) )
# >> W_hat[neighbors[i],:] = Wi
# >> W_hat[i] -= 1
# >> M += np.dot(W_hat,W_hat.T)
#We can do this much more efficiently:
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
Wi_sum1 = Wi.sum(1)
M[i, neighbors[i]] -= Wi_sum1
M[neighbors[i], i] -= Wi_sum1
M[i, i] += s_i
if M_sparse:
M = csr_matrix(M)
elif method == 'ltsa':
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
M = np.zeros((N, N))
use_svd = (n_neighbors > d_in)
for i in range(N):
Xi = X[neighbors[i]]
Xi -= Xi.mean(0)
# compute n_components largest eigenvalues of Xi * Xi^T
if use_svd:
v = svd(Xi, full_matrices=True)[0]
else:
Ci = np.dot(Xi, Xi.T)
v = eigh(Ci)[1][:, ::-1]
Gi = np.zeros((n_neighbors, n_components + 1))
Gi[:, 1:] = v[:, :n_components]
Gi[:, 0] = 1. / np.sqrt(n_neighbors)
GiGiT = np.dot(Gi, Gi.T)
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] -= GiGiT
M[neighbors[i], neighbors[i]] += 1
return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
tol=tol, max_iter=max_iter, random_state=random_state)
class LocallyLinearEmbedding(BaseEstimator, TransformerMixin):
"""Locally Linear Embedding
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
Not used if eigen_solver=='dense'.
method : string ('standard', 'hessian', 'modified' or 'ltsa')
standard : use the standard locally linear embedding algorithm. see
reference [1]
hessian : use the Hessian eigenmap method. This method requires
        ``n_neighbors > n_components * (1 + (n_components + 1) / 2)``
see reference [2]
modified : use the modified locally linear embedding algorithm.
see reference [3]
ltsa : use local tangent space alignment algorithm
see reference [4]
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if ``method == 'hessian'``
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if ``method == 'modified'``
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Attributes
----------
embedding_vectors_ : array-like, shape [n_components, n_samples]
Stores the embedding vectors
reconstruction_error_ : float
Reconstruction error associated with `embedding_vectors_`
nbrs_ : NearestNeighbors object
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
def __init__(self, n_neighbors=5, n_components=2, reg=1E-3,
eigen_solver='auto', tol=1E-6, max_iter=100,
method='standard', hessian_tol=1E-4, modified_tol=1E-12,
neighbors_algorithm='auto', random_state=None):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.reg = reg
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.method = method
self.hessian_tol = hessian_tol
self.modified_tol = modified_tol
self.random_state = random_state
self.neighbors_algorithm = neighbors_algorithm
def _fit_transform(self, X):
self.nbrs_ = NearestNeighbors(self.n_neighbors,
algorithm=self.neighbors_algorithm)
random_state = check_random_state(self.random_state)
X = check_array(X)
self.nbrs_.fit(X)
self.embedding_, self.reconstruction_error_ = \
locally_linear_embedding(
self.nbrs_, self.n_neighbors, self.n_components,
eigen_solver=self.eigen_solver, tol=self.tol,
max_iter=self.max_iter, method=self.method,
hessian_tol=self.hessian_tol, modified_tol=self.modified_tol,
random_state=random_state, reg=self.reg)
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Compute the embedding vectors for data X and transform X.
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""
Transform new points into embedding space.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
X_new : array, shape = [n_samples, n_components]
Notes
-----
Because of scaling performed by this method, it is discouraged to use
it together with methods that are not scale-invariant (like SVMs)
"""
check_is_fitted(self, "nbrs_")
X = check_array(X)
ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors,
return_distance=False)
weights = barycenter_weights(X, self.nbrs_._fit_X[ind],
reg=self.reg)
X_new = np.empty((X.shape[0], self.n_components))
for i in range(X.shape[0]):
X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
return X_new
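# A minimal usage sketch: unrolling a swiss roll with the standard LLE method.
# It assumes scikit-learn is installed as a package and goes through the public
# ``sklearn`` import paths rather than this file's relative imports.
if __name__ == '__main__':
    from sklearn.datasets import make_swiss_roll
    from sklearn.manifold import LocallyLinearEmbedding as _LLE
    X_demo, _ = make_swiss_roll(n_samples=500, random_state=0)
    lle = _LLE(n_neighbors=12, n_components=2, method='standard',
               random_state=0)
    X_2d = lle.fit_transform(X_demo)
    print('embedded shape:', X_2d.shape)                      # -> (500, 2)
    print('reconstruction error: %.3e' % lle.reconstruction_error_)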
| bsd-3-clause |
CforED/Machine-Learning | examples/cluster/plot_digits_linkage.py | 369 | 2959 | """
=============================================================================
Various Agglomerative Clustering on a 2D embedding of digits
=============================================================================
An illustration of various linkage options for agglomerative clustering on
a 2D embedding of the digits dataset.
The goal of this example is to show intuitively how the metrics behave, and
not to find good clusters for the digits. This is why the example works on a
2D embedding.
What this example shows us is the "rich getting richer" behavior of
agglomerative clustering, which tends to create uneven cluster sizes.
This behavior is especially pronounced for the average linkage strategy,
which ends up with a couple of singleton clusters.
"""
# Authors: Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2014
print(__doc__)
from time import time
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
digits = datasets.load_digits(n_class=10)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
np.random.seed(0)
def nudge_images(X, y):
# Having a larger dataset shows more clearly the behavior of the
# methods, but we multiply the size of the dataset only by 2, as the
# cost of the hierarchical clustering methods are strongly
# super-linear in n_samples
shift = lambda x: ndimage.shift(x.reshape((8, 8)),
.3 * np.random.normal(size=2),
mode='constant',
).ravel()
X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])
Y = np.concatenate([y, y], axis=0)
return X, Y
X, y = nudge_images(X, y)
#----------------------------------------------------------------------
# Visualize the clustering
def plot_clustering(X_red, X, labels, title=None):
x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
X_red = (X_red - x_min) / (x_max - x_min)
plt.figure(figsize=(6, 4))
for i in range(X_red.shape[0]):
plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
color=plt.cm.spectral(labels[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
plt.xticks([])
plt.yticks([])
if title is not None:
plt.title(title, size=17)
plt.axis('off')
plt.tight_layout()
#----------------------------------------------------------------------
# 2D embedding of the digits dataset
print("Computing embedding")
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
print("Done.")
from sklearn.cluster import AgglomerativeClustering
for linkage in ('ward', 'average', 'complete'):
clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
t0 = time()
clustering.fit(X_red)
print("%s : %.2fs" % (linkage, time() - t0))
plot_clustering(X_red, X, clustering.labels_, "%s linkage" % linkage)
plt.show()
| bsd-3-clause |
Edu-Glez/Bank_sentiment_analysis | env/lib/python3.6/site-packages/IPython/core/shellapp.py | 7 | 16199 | # encoding: utf-8
"""
A mixin for :class:`~IPython.core.application.Application` classes that
launch InteractiveShell instances, load extensions, etc.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import absolute_import
from __future__ import print_function
import glob
import os
import sys
from traitlets.config.application import boolean_flag
from traitlets.config.configurable import Configurable
from traitlets.config.loader import Config
from IPython.core import pylabtools
from IPython.utils import py3compat
from IPython.utils.contexts import preserve_keys
from IPython.utils.path import filefind
from traitlets import (
Unicode, Instance, List, Bool, CaselessStrEnum, observe,
)
from IPython.terminal import pt_inputhooks
#-----------------------------------------------------------------------------
# Aliases and Flags
#-----------------------------------------------------------------------------
gui_keys = tuple(sorted(pt_inputhooks.backends) + sorted(pt_inputhooks.aliases))
backend_keys = sorted(pylabtools.backends.keys())
backend_keys.insert(0, 'auto')
shell_flags = {}
addflag = lambda *args: shell_flags.update(boolean_flag(*args))
addflag('autoindent', 'InteractiveShell.autoindent',
'Turn on autoindenting.', 'Turn off autoindenting.'
)
addflag('automagic', 'InteractiveShell.automagic',
"""Turn on the auto calling of magic commands. Type %%magic at the
IPython prompt for more information.""",
'Turn off the auto calling of magic commands.'
)
addflag('pdb', 'InteractiveShell.pdb',
"Enable auto calling the pdb debugger after every exception.",
"Disable auto calling the pdb debugger after every exception."
)
addflag('pprint', 'PlainTextFormatter.pprint',
"Enable auto pretty printing of results.",
"Disable auto pretty printing of results."
)
addflag('color-info', 'InteractiveShell.color_info',
"""IPython can display information about objects via a set of functions,
and optionally can use colors for this, syntax highlighting
source code and various other elements. This is on by default, but can cause
problems with some pagers. If you see such problems, you can disable the
colours.""",
"Disable using colors for info related things."
)
nosep_config = Config()
nosep_config.InteractiveShell.separate_in = ''
nosep_config.InteractiveShell.separate_out = ''
nosep_config.InteractiveShell.separate_out2 = ''
shell_flags['nosep']=(nosep_config, "Eliminate all spacing between prompts.")
shell_flags['pylab'] = (
{'InteractiveShellApp' : {'pylab' : 'auto'}},
"""Pre-load matplotlib and numpy for interactive use with
the default matplotlib backend."""
)
shell_flags['matplotlib'] = (
{'InteractiveShellApp' : {'matplotlib' : 'auto'}},
"""Configure matplotlib for interactive use with
the default matplotlib backend."""
)
# it's possible we don't want short aliases for *all* of these:
shell_aliases = dict(
autocall='InteractiveShell.autocall',
colors='InteractiveShell.colors',
logfile='InteractiveShell.logfile',
logappend='InteractiveShell.logappend',
c='InteractiveShellApp.code_to_run',
m='InteractiveShellApp.module_to_run',
ext='InteractiveShellApp.extra_extension',
gui='InteractiveShellApp.gui',
pylab='InteractiveShellApp.pylab',
matplotlib='InteractiveShellApp.matplotlib',
)
shell_aliases['cache-size'] = 'InteractiveShell.cache_size'
#-----------------------------------------------------------------------------
# Main classes and functions
#-----------------------------------------------------------------------------
class InteractiveShellApp(Configurable):
"""A Mixin for applications that start InteractiveShell instances.
Provides configurables for loading extensions and executing files
as part of configuring a Shell environment.
The following methods should be called by the :meth:`initialize` method
of the subclass:
- :meth:`init_path`
- :meth:`init_shell` (to be implemented by the subclass)
- :meth:`init_gui_pylab`
- :meth:`init_extensions`
- :meth:`init_code`
"""
extensions = List(Unicode(),
help="A list of dotted module names of IPython extensions to load."
).tag(config=True)
extra_extension = Unicode('',
help="dotted module name of an IPython extension to load."
).tag(config=True)
reraise_ipython_extension_failures = Bool(False,
help="Reraise exceptions encountered loading IPython extensions?",
).tag(config=True)
# Extensions that are always loaded (not configurable)
default_extensions = List(Unicode(), [u'storemagic']).tag(config=False)
hide_initial_ns = Bool(True,
help="""Should variables loaded at startup (by startup files, exec_lines, etc.)
be hidden from tools like %who?"""
).tag(config=True)
exec_files = List(Unicode(),
help="""List of files to run at IPython startup."""
).tag(config=True)
exec_PYTHONSTARTUP = Bool(True,
help="""Run the file referenced by the PYTHONSTARTUP environment
variable at IPython startup."""
).tag(config=True)
file_to_run = Unicode('',
help="""A file to be run""").tag(config=True)
exec_lines = List(Unicode(),
help="""lines of code to run at IPython startup."""
).tag(config=True)
code_to_run = Unicode('',
help="Execute the given command string."
).tag(config=True)
module_to_run = Unicode('',
help="Run the module as a script."
).tag(config=True)
gui = CaselessStrEnum(gui_keys, allow_none=True,
help="Enable GUI event loop integration with any of {0}.".format(gui_keys)
).tag(config=True)
matplotlib = CaselessStrEnum(backend_keys, allow_none=True,
help="""Configure matplotlib for interactive use with
the default matplotlib backend."""
).tag(config=True)
pylab = CaselessStrEnum(backend_keys, allow_none=True,
help="""Pre-load matplotlib and numpy for interactive use,
selecting a particular matplotlib backend and loop integration.
"""
).tag(config=True)
pylab_import_all = Bool(True,
help="""If true, IPython will populate the user namespace with numpy, pylab, etc.
and an ``import *`` is done from numpy and pylab, when using pylab mode.
When False, pylab mode should not import any names into the user namespace.
"""
).tag(config=True)
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
allow_none=True)
# whether interact-loop should start
interact = Bool(True)
user_ns = Instance(dict, args=None, allow_none=True)
@observe('user_ns')
def _user_ns_changed(self, change):
if self.shell is not None:
self.shell.user_ns = change['new']
self.shell.init_user_ns()
def init_path(self):
"""Add current working directory, '', to sys.path"""
if sys.path[0] != '':
sys.path.insert(0, '')
def init_shell(self):
raise NotImplementedError("Override in subclasses")
def init_gui_pylab(self):
"""Enable GUI event loop integration, taking pylab into account."""
enable = False
shell = self.shell
if self.pylab:
enable = lambda key: shell.enable_pylab(key, import_all=self.pylab_import_all)
key = self.pylab
elif self.matplotlib:
enable = shell.enable_matplotlib
key = self.matplotlib
elif self.gui:
enable = shell.enable_gui
key = self.gui
if not enable:
return
try:
r = enable(key)
except ImportError:
self.log.warning("Eventloop or matplotlib integration failed. Is matplotlib installed?")
self.shell.showtraceback()
return
except Exception:
self.log.warning("GUI event loop or pylab initialization failed")
self.shell.showtraceback()
return
if isinstance(r, tuple):
gui, backend = r[:2]
self.log.info("Enabling GUI event loop integration, "
"eventloop=%s, matplotlib=%s", gui, backend)
if key == "auto":
print("Using matplotlib backend: %s" % backend)
else:
gui = r
self.log.info("Enabling GUI event loop integration, "
"eventloop=%s", gui)
def init_extensions(self):
"""Load all IPython extensions in IPythonApp.extensions.
This uses the :meth:`ExtensionManager.load_extensions` to load all
the extensions listed in ``self.extensions``.
"""
try:
self.log.debug("Loading IPython extensions...")
extensions = self.default_extensions + self.extensions
if self.extra_extension:
extensions.append(self.extra_extension)
for ext in extensions:
try:
self.log.info("Loading IPython extension: %s" % ext)
self.shell.extension_manager.load_extension(ext)
except:
if self.reraise_ipython_extension_failures:
raise
msg = ("Error in loading extension: {ext}\n"
"Check your config files in {location}".format(
ext=ext,
location=self.profile_dir.location
))
self.log.warning(msg, exc_info=True)
except:
if self.reraise_ipython_extension_failures:
raise
self.log.warning("Unknown error in loading extensions:", exc_info=True)
def init_code(self):
"""run the pre-flight code, specified via exec_lines"""
self._run_startup_files()
self._run_exec_lines()
self._run_exec_files()
# Hide variables defined here from %who etc.
if self.hide_initial_ns:
self.shell.user_ns_hidden.update(self.shell.user_ns)
# command-line execution (ipython -i script.py, ipython -m module)
# should *not* be excluded from %whos
self._run_cmd_line_code()
self._run_module()
        # flush output, so it won't be attached to the first cell
sys.stdout.flush()
sys.stderr.flush()
def _run_exec_lines(self):
"""Run lines of code in IPythonApp.exec_lines in the user's namespace."""
if not self.exec_lines:
return
try:
self.log.debug("Running code from IPythonApp.exec_lines...")
for line in self.exec_lines:
try:
self.log.info("Running code in user namespace: %s" %
line)
self.shell.run_cell(line, store_history=False)
except:
self.log.warning("Error in executing line in user "
"namespace: %s" % line)
self.shell.showtraceback()
except:
self.log.warning("Unknown error in handling IPythonApp.exec_lines:")
self.shell.showtraceback()
def _exec_file(self, fname, shell_futures=False):
try:
full_filename = filefind(fname, [u'.', self.ipython_dir])
except IOError:
self.log.warning("File not found: %r"%fname)
return
# Make sure that the running script gets a proper sys.argv as if it
# were run from a system shell.
save_argv = sys.argv
sys.argv = [full_filename] + self.extra_args[1:]
# protect sys.argv from potential unicode strings on Python 2:
if not py3compat.PY3:
sys.argv = [ py3compat.cast_bytes(a) for a in sys.argv ]
try:
if os.path.isfile(full_filename):
self.log.info("Running file in user namespace: %s" %
full_filename)
# Ensure that __file__ is always defined to match Python
# behavior.
with preserve_keys(self.shell.user_ns, '__file__'):
self.shell.user_ns['__file__'] = fname
if full_filename.endswith('.ipy'):
self.shell.safe_execfile_ipy(full_filename,
shell_futures=shell_futures)
else:
# default to python, even without extension
self.shell.safe_execfile(full_filename,
self.shell.user_ns,
shell_futures=shell_futures,
raise_exceptions=True)
finally:
sys.argv = save_argv
def _run_startup_files(self):
"""Run files from profile startup directory"""
startup_dir = self.profile_dir.startup_dir
startup_files = []
if self.exec_PYTHONSTARTUP and os.environ.get('PYTHONSTARTUP', False) and \
not (self.file_to_run or self.code_to_run or self.module_to_run):
python_startup = os.environ['PYTHONSTARTUP']
self.log.debug("Running PYTHONSTARTUP file %s...", python_startup)
try:
self._exec_file(python_startup)
except:
self.log.warning("Unknown error in handling PYTHONSTARTUP file %s:", python_startup)
self.shell.showtraceback()
startup_files += glob.glob(os.path.join(startup_dir, '*.py'))
startup_files += glob.glob(os.path.join(startup_dir, '*.ipy'))
if not startup_files:
return
self.log.debug("Running startup files from %s...", startup_dir)
try:
for fname in sorted(startup_files):
self._exec_file(fname)
except:
self.log.warning("Unknown error in handling startup files:")
self.shell.showtraceback()
def _run_exec_files(self):
"""Run files from IPythonApp.exec_files"""
if not self.exec_files:
return
self.log.debug("Running files in IPythonApp.exec_files...")
try:
for fname in self.exec_files:
self._exec_file(fname)
except:
self.log.warning("Unknown error in handling IPythonApp.exec_files:")
self.shell.showtraceback()
def _run_cmd_line_code(self):
"""Run code or file specified at the command-line"""
if self.code_to_run:
line = self.code_to_run
try:
self.log.info("Running code given at command line (c=): %s" %
line)
self.shell.run_cell(line, store_history=False)
except:
self.log.warning("Error in executing line in user namespace: %s" %
line)
self.shell.showtraceback()
if not self.interact:
self.exit(1)
# Like Python itself, ignore the second if the first of these is present
elif self.file_to_run:
fname = self.file_to_run
if os.path.isdir(fname):
fname = os.path.join(fname, "__main__.py")
try:
self._exec_file(fname, shell_futures=True)
except:
self.shell.showtraceback(tb_offset=4)
if not self.interact:
self.exit(1)
def _run_module(self):
"""Run module specified at the command-line."""
if self.module_to_run:
# Make sure that the module gets a proper sys.argv as if it were
# run using `python -m`.
save_argv = sys.argv
sys.argv = [sys.executable] + self.extra_args
try:
self.shell.safe_run_module(self.module_to_run,
self.shell.user_ns)
finally:
sys.argv = save_argv
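# --- Editor-added usage sketch (not part of the original module) ---
# A hedged illustration of how the traits and hooks above are typically driven
# from an IPython configuration file; it assumes the enclosing class is
# IPython's InteractiveShellApp, and the startup file name below is hypothetical.
#
#   c = get_config()
#   c.InteractiveShellApp.exec_lines = ["import numpy as np"]
#   c.InteractiveShellApp.exec_files = ["my_startup_helpers.py"]
#   c.InteractiveShellApp.extensions = ["autoreload"]
#   c.InteractiveShellApp.matplotlib = "auto"   # or: c.InteractiveShellApp.pylab = "auto"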
| apache-2.0 |
manashmndl/scikit-learn | examples/decomposition/plot_sparse_coding.py | 247 | 3846 | """
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It can
therefore be seen how much adding atoms of different widths matters, which in
turn motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size; heavier subsampling
is performed in order to stay on the same order of magnitude.
"""
print(__doc__)
import numpy as np
import matplotlib.pylab as pl
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (Mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
    x = ((2 / ((np.sqrt(3 * width) * np.pi ** 0.25)))
* (1 - ((x - center) ** 2 / width ** 2))
* np.exp((-(x - center) ** 2) / (2 * width ** 2)))
return x
def ricker_matrix(width, resolution, n_components):
"""Dictionary of Ricker (Mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
return D
resolution = 1024
subsampling = 3 # subsampling factor
width = 100
n_components = resolution // subsampling
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
n_components=n_components)
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
                                    n_components=n_components // 5)
for w in (10, 50, 100, 500, 1000))]
# Generate a signal
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nonzero_coefs)
estimators = [('OMP', 'omp', None, 15), ('Lasso', 'lasso_cd', 2, None), ]
pl.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
('fixed width', 'multiple widths'))):
pl.subplot(1, 2, subplot + 1)
pl.title('Sparse coding against %s dictionary' % title)
pl.plot(y, ls='dotted', label='Original signal')
# Do a wavelet approximation
for title, algo, alpha, n_nonzero in estimators:
coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
transform_alpha=alpha, transform_algorithm=algo)
x = coder.transform(y)
density = len(np.flatnonzero(x))
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x, label='%s: %s nonzero coefs,\n%.2f error'
% (title, density, squared_error))
# Soft thresholding debiasing
coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
transform_alpha=20)
x = coder.transform(y)
_, idx = np.where(x != 0)
x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x,
label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error' %
(len(idx), squared_error))
pl.axis('tight')
pl.legend()
pl.subplots_adjust(.04, .07, .97, .90, .09, .2)
pl.show()
| bsd-3-clause |
davidwaroquiers/pymatgen | pymatgen/analysis/interface_reactions.py | 5 | 22139 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides class to generate and analyze interfacial reactions.
"""
import warnings
import matplotlib.pylab as plt
import numpy as np
from pymatgen.core.composition import Composition
from pymatgen.analysis.phase_diagram import GrandPotentialPhaseDiagram
from pymatgen.analysis.reaction_calculator import Reaction
__author__ = "Yihan Xiao"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Yihan Xiao"
__email__ = "[email protected]"
__status__ = "Production"
__date__ = "Aug 15 2017"
class InterfacialReactivity:
"""
An object encompassing all relevant data for interface reactions.
"""
EV_TO_KJ_PER_MOL = 96.4853
def __init__(
self,
c1,
c2,
pd,
norm=True,
include_no_mixing_energy=False,
pd_non_grand=None,
use_hull_energy=False,
):
"""
Args:
c1 (Composition): Composition object for reactant 1.
c2 (Composition): Composition object for reactant 2.
pd (PhaseDiagram): PhaseDiagram object or GrandPotentialPhaseDiagram
object built from all elements in composition c1 and c2.
norm (bool): Whether or not the total number of atoms in composition
of reactant will be normalized to 1.
include_no_mixing_energy (bool): No_mixing_energy for a reactant is the
opposite number of its energy above grand potential convex hull. In
cases where reactions involve elements reservoir, this param
determines whether no_mixing_energy of reactants will be included
in the final reaction energy calculation. By definition, if pd is
not a GrandPotentialPhaseDiagram object, this param is False.
pd_non_grand (PhaseDiagram): PhaseDiagram object but not
GrandPotentialPhaseDiagram object built from elements in c1 and c2.
use_hull_energy (bool): Whether or not use the convex hull energy for
a given composition for reaction energy calculation. If false,
the energy of ground state structure will be used instead.
Note that in case when ground state can not be found for a
composition, convex hull energy will be used associated with a
warning message.
"""
self.grand = isinstance(pd, GrandPotentialPhaseDiagram)
# if include_no_mixing_energy is True, pd should be a
# GrandPotentialPhaseDiagram object and pd_non_grand should be given.
if include_no_mixing_energy and not self.grand:
raise ValueError("Please provide grand phase diagram to compute" " no_mixing_energy!")
if include_no_mixing_energy and not pd_non_grand:
raise ValueError("Please provide non-grand phase diagram to " "compute no_mixing_energy!")
if self.grand and use_hull_energy and not pd_non_grand:
raise ValueError("Please provide non-grand phase diagram if" " you want to use convex hull energy.")
# Keeps copy of original compositions.
self.c1_original = c1
self.c2_original = c2
# Two sets of composition attributes for two processing conditions:
        # normalization with and without excluding element(s) from reservoir.
self.c1 = c1
self.c2 = c2
self.comp1 = c1
self.comp2 = c2
self.norm = norm
self.pd = pd
self.pd_non_grand = pd_non_grand
self.use_hull_energy = use_hull_energy
# Factor is the compositional ratio between composition self.c1 and
# processed composition self.comp1. E.g., factor for
# Composition('SiO2') and Composition('O') is 2.0. This factor will
# be used to convert mixing ratio in self.comp1 - self.comp2
# tie line to that in self.c1 - self.c2 tie line.
self.factor1 = 1
self.factor2 = 1
if self.grand:
# Excludes element(s) from reservoir.
self.comp1 = Composition({k: v for k, v in c1.items() if k not in pd.chempots})
self.comp2 = Composition({k: v for k, v in c2.items() if k not in pd.chempots})
# Calculate the factors in case where self.grand = True and
# self.norm = True.
factor1 = self.comp1.num_atoms / c1.num_atoms
factor2 = self.comp2.num_atoms / c2.num_atoms
if self.norm:
self.c1 = c1.fractional_composition
self.c2 = c2.fractional_composition
self.comp1 = self.comp1.fractional_composition
self.comp2 = self.comp2.fractional_composition
if self.grand:
# Only when self.grand = True and self.norm = True
# will self.factor be updated.
self.factor1 = factor1
self.factor2 = factor2
# Computes energies for reactants in different scenarios.
if not self.grand:
if self.use_hull_energy:
self.e1 = self.pd.get_hull_energy(self.comp1)
self.e2 = self.pd.get_hull_energy(self.comp2)
else:
# Use entry energy as reactant energy if no reservoir
# is present.
self.e1 = InterfacialReactivity._get_entry_energy(self.pd, self.comp1)
self.e2 = InterfacialReactivity._get_entry_energy(self.pd, self.comp2)
else:
if include_no_mixing_energy:
# Computing grand potentials needs compositions containing
# element(s) from reservoir, so self.c1 and self.c2 are used.
self.e1 = self._get_grand_potential(self.c1)
self.e2 = self._get_grand_potential(self.c2)
else:
self.e1 = self.pd.get_hull_energy(self.comp1)
self.e2 = self.pd.get_hull_energy(self.comp2)
@staticmethod
def _get_entry_energy(pd, composition):
"""
Finds the lowest entry energy for entries matching the composition.
Entries with non-negative formation energies are excluded. If no
entry is found, use the convex hull energy for the composition.
Args:
pd (PhaseDiagram): PhaseDiagram object.
composition (Composition): Composition object that the target
entry should match.
Returns:
The lowest entry energy among entries matching the composition.
"""
candidate = [
i.energy_per_atom
for i in pd.qhull_entries
if i.composition.fractional_composition == composition.fractional_composition
]
if not candidate:
warnings.warn(
"The reactant " + composition.reduced_formula + " has no matching entry with negative formation"
" energy, instead convex hull energy for this"
" composition will be used for reaction energy "
"calculation. "
)
return pd.get_hull_energy(composition)
min_entry_energy = min(candidate)
return min_entry_energy * composition.num_atoms
def _get_grand_potential(self, composition):
"""
Computes the grand potential Phi at a given composition and
chemical potential(s).
Args:
composition (Composition): Composition object.
Returns:
Grand potential at a given composition at chemical potential(s).
"""
if self.use_hull_energy:
grand_potential = self.pd_non_grand.get_hull_energy(composition)
else:
grand_potential = InterfacialReactivity._get_entry_energy(self.pd_non_grand, composition)
grand_potential -= sum([composition[e] * mu for e, mu in self.pd.chempots.items()])
if self.norm:
# Normalizes energy to the composition excluding element(s)
# from reservoir.
grand_potential /= sum([composition[el] for el in composition if el not in self.pd.chempots])
return grand_potential
def _get_energy(self, x):
"""
Computes reaction energy in eV/atom at mixing ratio x : (1-x) for
self.comp1 : self.comp2.
Args:
x (float): Mixing ratio x of reactants, a float between 0 and 1.
Returns:
Reaction energy.
"""
return self.pd.get_hull_energy(self.comp1 * x + self.comp2 * (1 - x)) - self.e1 * x - self.e2 * (1 - x)
def _get_reaction(self, x):
"""
Generates balanced reaction at mixing ratio x : (1-x) for
self.comp1 : self.comp2.
Args:
x (float): Mixing ratio x of reactants, a float between 0 and 1.
Returns:
Reaction object.
"""
mix_comp = self.comp1 * x + self.comp2 * (1 - x)
decomp = self.pd.get_decomposition(mix_comp)
# Uses original composition for reactants.
if np.isclose(x, 0):
reactant = [self.c2_original]
elif np.isclose(x, 1):
reactant = [self.c1_original]
else:
reactant = list(set([self.c1_original, self.c2_original]))
if self.grand:
reactant += [Composition(e.symbol) for e, v in self.pd.chempots.items()]
product = [Composition(k.name) for k, v in decomp.items()]
reaction = Reaction(reactant, product)
x_original = self._get_original_composition_ratio(reaction)
if np.isclose(x_original, 1):
reaction.normalize_to(self.c1_original, x_original)
else:
reaction.normalize_to(self.c2_original, 1 - x_original)
return reaction
def _get_elmt_amt_in_rxt(self, rxt):
"""
Computes total number of atoms in a reaction formula for elements
not in external reservoir. This method is used in the calculation
of reaction energy per mol of reaction formula.
Args:
rxt (Reaction): a reaction.
Returns:
Total number of atoms for non_reservoir elements.
"""
return sum([rxt.get_el_amount(e) for e in self.pd.elements])
def get_products(self):
"""
List of formulas of potential products. E.g., ['Li','O2','Mn'].
"""
products = set()
for _, _, _, react, _ in self.get_kinks():
products = products.union({k.reduced_formula for k in react.products})
return list(products)
@staticmethod
def _convert(x, factor1, factor2):
"""
Converts mixing ratio x in comp1 - comp2 tie line to that in
c1 - c2 tie line.
Args:
x (float): Mixing ratio x in comp1 - comp2 tie line, a float
between 0 and 1.
factor1 (float): Compositional ratio between composition c1 and
processed composition comp1. E.g., factor for
Composition('SiO2') and Composition('O') is 2.0.
factor2 (float): Compositional ratio between composition c2 and
processed composition comp2.
Returns:
Mixing ratio in c1 - c2 tie line, a float between 0 and 1.
"""
return x * factor2 / ((1 - x) * factor1 + x * factor2)
@staticmethod
def _reverse_convert(x, factor1, factor2):
"""
Converts mixing ratio x in c1 - c2 tie line to that in
comp1 - comp2 tie line.
Args:
x (float): Mixing ratio x in c1 - c2 tie line, a float between
0 and 1.
factor1 (float): Compositional ratio between composition c1 and
processed composition comp1. E.g., factor for
Composition('SiO2') and Composition('O') is 2.
factor2 (float): Compositional ratio between composition c2 and
processed composition comp2.
Returns:
Mixing ratio in comp1 - comp2 tie line, a float between 0 and 1.
"""
return x * factor1 / ((1 - x) * factor2 + x * factor1)
def get_kinks(self):
"""
Finds all the kinks in mixing ratio where reaction products changes
along the tie line of composition self.c1 and composition self.c2.
Returns:
Zip object of tuples (index, mixing ratio,
reaction energy per atom in eV/atom,
reaction formula,
reaction energy per mol of reaction
formula in kJ/mol).
"""
c1_coord = self.pd.pd_coords(self.comp1)
c2_coord = self.pd.pd_coords(self.comp2)
n1 = self.comp1.num_atoms
n2 = self.comp2.num_atoms
critical_comp = self.pd.get_critical_compositions(self.comp1, self.comp2)
x_kink, energy_kink, react_kink, energy_per_rxt_formula = [], [], [], []
if all(c1_coord == c2_coord):
x_kink = [0, 1]
energy_kink = [self._get_energy(x) for x in x_kink]
react_kink = [self._get_reaction(x) for x in x_kink]
num_atoms = [(x * self.comp1.num_atoms + (1 - x) * self.comp2.num_atoms) for x in x_kink]
energy_per_rxt_formula = [
energy_kink[i]
* self._get_elmt_amt_in_rxt(react_kink[i])
/ num_atoms[i]
* InterfacialReactivity.EV_TO_KJ_PER_MOL
for i in range(2)
]
else:
for i in reversed(critical_comp):
# Gets mixing ratio x at kinks.
c = self.pd.pd_coords(i)
x = np.linalg.norm(c - c2_coord) / np.linalg.norm(c1_coord - c2_coord)
# Modifies mixing ratio in case compositions self.comp1 and
# self.comp2 are not normalized.
x = x * n2 / (n1 + x * (n2 - n1))
n_atoms = x * self.comp1.num_atoms + (1 - x) * self.comp2.num_atoms
# Converts mixing ratio in comp1 - comp2 tie line to that in
# c1 - c2 tie line.
x_converted = InterfacialReactivity._convert(x, self.factor1, self.factor2)
x_kink.append(x_converted)
# Gets reaction energy at kinks
normalized_energy = self._get_energy(x)
energy_kink.append(normalized_energy)
# Gets balanced reaction at kinks
rxt = self._get_reaction(x)
react_kink.append(rxt)
rxt_energy = normalized_energy * self._get_elmt_amt_in_rxt(rxt) / n_atoms
energy_per_rxt_formula.append(rxt_energy * InterfacialReactivity.EV_TO_KJ_PER_MOL)
index_kink = range(1, len(critical_comp) + 1)
return zip(index_kink, x_kink, energy_kink, react_kink, energy_per_rxt_formula)
def get_critical_original_kink_ratio(self):
"""
Returns a list of molar mixing ratio for each kink between ORIGINAL
(instead of processed) reactant compositions. This is the
same list as mixing ratio obtained from get_kinks method
if self.norm = False.
Returns:
A list of floats representing molar mixing ratios between
the original reactant compositions for each kink.
"""
ratios = []
if self.c1_original == self.c2_original:
return [0, 1]
reaction_kink = [k[3] for k in self.get_kinks()]
for rxt in reaction_kink:
ratios.append(abs(self._get_original_composition_ratio(rxt)))
return ratios
def _get_original_composition_ratio(self, reaction):
"""
Returns the molar mixing ratio between the reactants with ORIGINAL (
instead of processed) compositions for a reaction.
Args:
reaction (Reaction): Reaction object that contains the original
reactant compositions.
Returns:
The molar mixing ratio between the original reactant
compositions for a reaction.
"""
if self.c1_original == self.c2_original:
return 1
c1_coeff = reaction.get_coeff(self.c1_original) if self.c1_original in reaction.reactants else 0
c2_coeff = reaction.get_coeff(self.c2_original) if self.c2_original in reaction.reactants else 0
return c1_coeff * 1.0 / (c1_coeff + c2_coeff)
def labels(self):
"""
Returns a dictionary containing kink information:
{index: 'x= mixing_ratio energy= reaction_energy reaction_equation'}.
E.g., {1: 'x= 0.0 energy = 0.0 Mn -> Mn',
2: 'x= 0.5 energy = -15.0 O2 + Mn -> MnO2',
3: 'x= 1.0 energy = 0.0 O2 -> O2'}.
"""
return {
j: "x= " + str(round(x, 4)) + " energy in eV/atom = " + str(round(energy, 4)) + " " + str(reaction)
for j, x, energy, reaction, _ in self.get_kinks()
}
def plot(self):
"""
Plots reaction energy as a function of mixing ratio x in
self.c1 - self.c2 tie line using pylab.
Returns:
Pylab object that plots reaction energy as a function of
mixing ratio x.
"""
plt.rcParams["xtick.major.pad"] = "6"
plt.rcParams["ytick.major.pad"] = "6"
plt.rcParams["axes.linewidth"] = 2
npoint = 1000
xs = np.linspace(0, 1, npoint)
# Converts sampling points in self.c1 - self.c2 tie line to those in
# self.comp1 - self.comp2 tie line.
xs_reverse_converted = InterfacialReactivity._reverse_convert(xs, self.factor1, self.factor2)
energies = [self._get_energy(x) for x in xs_reverse_converted]
plt.plot(xs, energies, "k-")
# Marks kinks and minimum energy point.
kinks = self.get_kinks()
_, x_kink, energy_kink, _, _ = zip(*kinks)
plt.scatter(x_kink, energy_kink, marker="o", c="blue", s=20)
plt.scatter(self.minimum()[0], self.minimum()[1], marker="*", c="red", s=300)
# Labels kinks with indices. Labels are made draggable
# in case of overlapping.
for index, x, energy, _, _ in kinks:
plt.annotate(
index,
xy=(x, energy),
xytext=(5, 30),
textcoords="offset points",
ha="right",
va="bottom",
arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=0"),
).draggable()
plt.xlim([-0.05, 1.05])
if self.norm:
plt.ylabel("Energy (eV/atom)")
else:
plt.ylabel("Energy (eV/f.u.)")
plt.xlabel("$x$ in $x$ {} + $(1-x)$ {}".format(self.c1.reduced_formula, self.c2.reduced_formula))
return plt
def minimum(self):
"""
Finds the minimum reaction energy E_min and corresponding
mixing ratio x_min.
Returns:
Tuple (x_min, E_min).
"""
return min([(x, energy) for _, x, energy, _, _ in self.get_kinks()], key=lambda i: i[1])
def get_no_mixing_energy(self):
"""
Generates the opposite number of energy above grand potential
convex hull for both reactants.
Returns:
[(reactant1, no_mixing_energy1),(reactant2,no_mixing_energy2)].
"""
assert self.grand == 1, "Please provide grand potential phase diagram for computing no_mixing_energy!"
energy1 = self.pd.get_hull_energy(self.comp1) - self._get_grand_potential(self.c1)
energy2 = self.pd.get_hull_energy(self.comp2) - self._get_grand_potential(self.c2)
unit = "eV/f.u."
if self.norm:
unit = "eV/atom"
return [
(self.c1_original.reduced_formula + " ({0})".format(unit), energy1),
(self.c2_original.reduced_formula + " ({0})".format(unit), energy2),
]
@staticmethod
def get_chempot_correction(element, temp, pres):
"""
Get the normalized correction term Δμ for chemical potential of a gas
phase consisting of element at given temperature and pressure,
referenced to that in the standard state (T_std = 298.15 K,
        P_std = 1 bar). The gas phase is limited to be one of O2, N2, Cl2,
F2, H2. Calculation formula can be found in the documentation of
Materials Project website.
Args:
element (string): The string representing the element.
temp (float): The temperature of the gas phase.
pres (float): The pressure of the gas phase.
Returns:
The correction of chemical potential in eV/atom of the gas
phase at given temperature and pressure.
"""
if element not in ["O", "N", "Cl", "F", "H"]:
return 0
std_temp = 298.15
std_pres = 1e5
ideal_gas_const = 8.3144598
# Cp and S at standard state in J/(K.mol). Data from
# https://janaf.nist.gov/tables/O-029.html
# https://janaf.nist.gov/tables/N-023.html
# https://janaf.nist.gov/tables/Cl-073.html
# https://janaf.nist.gov/tables/F-054.html
# https://janaf.nist.gov/tables/H-050.html
Cp_dict = {"O": 29.376, "N": 29.124, "Cl": 33.949, "F": 31.302, "H": 28.836}
S_dict = {"O": 205.147, "N": 191.609, "Cl": 223.079, "F": 202.789, "H": 130.680}
Cp_std = Cp_dict[element]
S_std = S_dict[element]
PV_correction = ideal_gas_const * temp * np.log(pres / std_pres)
TS_correction = (
-Cp_std * (temp * np.log(temp) - std_temp * np.log(std_temp))
+ Cp_std * (temp - std_temp) * (1 + np.log(std_temp))
- S_std * (temp - std_temp)
)
dG = PV_correction + TS_correction
# Convert to eV/molecule unit.
dG /= 1000 * InterfacialReactivity.EV_TO_KJ_PER_MOL
# Normalize by number of atoms in the gas molecule. For elements
# considered, the gas molecules are all diatomic.
dG /= 2
return dG
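# --- Editor-added usage sketch (not part of the original module) ---
# A minimal, hedged illustration of the API defined above; the entries and
# energies below are fictitious and chosen only so the script runs self-contained.
if __name__ == "__main__":  # pragma: no cover
    from pymatgen.analysis.phase_diagram import PDEntry, PhaseDiagram
    demo_entries = [
        PDEntry(Composition("Li"), 0.0),
        PDEntry(Composition("O2"), 0.0),
        PDEntry(Composition("Li2O"), -6.0),   # made-up total energy (eV)
        PDEntry(Composition("Li2O2"), -6.5),  # made-up total energy (eV)
    ]
    demo_pd = PhaseDiagram(demo_entries)
    ir = InterfacialReactivity(Composition("Li"), Composition("O2"), demo_pd,
                               norm=True, use_hull_energy=True)
    for _, x, e_per_atom, rxn, e_per_formula in ir.get_kinks():
        print("x = %.3f, %.3f eV/atom, %.1f kJ/mol: %s"
              % (x, e_per_atom, e_per_formula, rxn))
    # Gas-phase chemical potential correction for O at 1000 K and 1 bar:
    print(InterfacialReactivity.get_chempot_correction("O", 1000, 1e5))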
| mit |
lancezlin/ml_template_py | lib/python2.7/site-packages/matplotlib/delaunay/triangulate.py | 8 | 10238 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import zip
import warnings
import numpy as np
from matplotlib._delaunay import delaunay
from .interpolate import LinearInterpolator, NNInterpolator
from matplotlib.cbook import warn_deprecated
warn_deprecated('1.4',
name='matplotlib.delaunay',
alternative='matplotlib.tri.Triangulation',
obj_type='module')
__all__ = ['Triangulation', 'DuplicatePointWarning']
class DuplicatePointWarning(RuntimeWarning):
"""Duplicate points were passed in to the triangulation routine.
"""
class Triangulation(object):
"""A Delaunay triangulation of points in a plane.
Triangulation(x, y)
x, y -- the coordinates of the points as 1-D arrays of floats
Let us make the following definitions:
npoints = number of points input
nedges = number of edges in the triangulation
ntriangles = number of triangles in the triangulation
point_id = an integer identifying a particular point (specifically, an
index into x and y), range(0, npoints)
edge_id = an integer identifying a particular edge, range(0, nedges)
triangle_id = an integer identifying a particular triangle
range(0, ntriangles)
Attributes: (all should be treated as read-only to maintain consistency)
x, y -- the coordinates of the points as 1-D arrays of floats.
circumcenters -- (ntriangles, 2) array of floats giving the (x,y)
coordinates of the circumcenters of each triangle (indexed by a
triangle_id).
edge_db -- (nedges, 2) array of point_id's giving the points forming
each edge in no particular order; indexed by an edge_id.
triangle_nodes -- (ntriangles, 3) array of point_id's giving the points
forming each triangle in counter-clockwise order; indexed by a
triangle_id.
triangle_neighbors -- (ntriangles, 3) array of triangle_id's giving the
neighboring triangle; indexed by a triangle_id.
The value can also be -1 meaning that that edge is on the convex hull
of the points and there is no neighbor on that edge. The values are
ordered such that triangle_neighbors[tri, i] corresponds with the edge
*opposite* triangle_nodes[tri, i]. As such, these neighbors are also
in counter-clockwise order.
hull -- list of point_id's giving the nodes which form the convex hull
of the point set. This list is sorted in counter-clockwise order.
Duplicate points.
If there are no duplicate points, Triangulation stores the specified
x and y arrays and there is no difference between the client's and
Triangulation's understanding of point indices used in edge_db,
triangle_nodes and hull.
If there are duplicate points, they are removed from the stored
self.x and self.y as the underlying delaunay code cannot deal with
duplicates. len(self.x) is therefore equal to len(x) minus the
number of duplicate points. Triangulation's edge_db, triangle_nodes
and hull refer to point indices in self.x and self.y, for internal
consistency within Triangulation and the corresponding Interpolator
classes. Client code must take care to deal with this in one of
two ways:
1. Ignore the x,y it specified in Triangulation's constructor and
use triangulation.x and triangulation.y instead, as these are
consistent with edge_db, triangle_nodes and hull.
2. If using the x,y the client specified then edge_db,
triangle_nodes and hull should be passed through the function
to_client_point_indices() first.
"""
def __init__(self, x, y):
self.x = np.asarray(x, dtype=np.float64)
self.y = np.asarray(y, dtype=np.float64)
if self.x.shape != self.y.shape or len(self.x.shape) != 1:
raise ValueError("x,y must be equal-length 1-D arrays")
self.old_shape = self.x.shape
duplicates = self._get_duplicate_point_indices()
if len(duplicates) > 0:
warnings.warn(
"Input data contains duplicate x,y points; some values are "
"ignored.",
DuplicatePointWarning,
)
# self.j_unique is the array of non-duplicate indices, in
# increasing order.
self.j_unique = np.delete(np.arange(len(self.x)), duplicates)
self.x = self.x[self.j_unique]
self.y = self.y[self.j_unique]
else:
self.j_unique = None
# If there are duplicate points, need a map of point indices used
# by delaunay to those used by client. If there are no duplicate
# points then the map is not needed. Either way, the map is
# conveniently the same as j_unique, so share it.
self._client_point_index_map = self.j_unique
self.circumcenters, self.edge_db, self.triangle_nodes, \
self.triangle_neighbors = delaunay(self.x, self.y)
self.hull = self._compute_convex_hull()
def _get_duplicate_point_indices(self):
"""Return array of indices of x,y points that are duplicates of
previous points. Indices are in no particular order.
"""
# Indices of sorted x,y points.
j_sorted = np.lexsort(keys=(self.x, self.y))
mask_duplicates = np.hstack([
False,
(np.diff(self.x[j_sorted]) == 0) &
(np.diff(self.y[j_sorted]) == 0),
])
# Array of duplicate point indices, in no particular order.
return j_sorted[mask_duplicates]
def _compute_convex_hull(self):
"""Extract the convex hull from the triangulation information.
The output will be a list of point_id's in counter-clockwise order
forming the convex hull of the data set.
"""
border = (self.triangle_neighbors == -1)
edges = {}
edges.update(dict(zip(self.triangle_nodes[border[:, 0]][:, 1],
self.triangle_nodes[border[:, 0]][:, 2])))
edges.update(dict(zip(self.triangle_nodes[border[:, 1]][:, 2],
self.triangle_nodes[border[:, 1]][:, 0])))
edges.update(dict(zip(self.triangle_nodes[border[:, 2]][:, 0],
self.triangle_nodes[border[:, 2]][:, 1])))
# Take an arbitrary starting point and its subsequent node
hull = list(edges.popitem())
while edges:
hull.append(edges.pop(hull[-1]))
# hull[-1] == hull[0], so remove hull[-1]
hull.pop()
return hull
def to_client_point_indices(self, array):
"""Converts any array of point indices used within this class to
refer to point indices within the (x,y) arrays specified in the
constructor before duplicates were removed.
"""
if self._client_point_index_map is not None:
return self._client_point_index_map[array]
else:
return array
def linear_interpolator(self, z, default_value=np.nan):
"""Get an object which can interpolate within the convex hull by
assigning a plane to each triangle.
z -- an array of floats giving the known function values at each point
in the triangulation.
"""
z = np.asarray(z, dtype=np.float64)
if z.shape != self.old_shape:
raise ValueError("z must be the same shape as x and y")
if self.j_unique is not None:
z = z[self.j_unique]
return LinearInterpolator(self, z, default_value)
def nn_interpolator(self, z, default_value=np.nan):
"""Get an object which can interpolate within the convex hull by
the natural neighbors method.
z -- an array of floats giving the known function values at each point
in the triangulation.
"""
z = np.asarray(z, dtype=np.float64)
if z.shape != self.old_shape:
raise ValueError("z must be the same shape as x and y")
if self.j_unique is not None:
z = z[self.j_unique]
return NNInterpolator(self, z, default_value)
def prep_extrapolator(self, z, bbox=None):
if bbox is None:
bbox = (self.x[0], self.x[0], self.y[0], self.y[0])
minx, maxx, miny, maxy = np.asarray(bbox, np.float64)
minx = min(minx, np.minimum.reduce(self.x))
miny = min(miny, np.minimum.reduce(self.y))
maxx = max(maxx, np.maximum.reduce(self.x))
maxy = max(maxy, np.maximum.reduce(self.y))
M = max((maxx - minx) / 2, (maxy - miny) / 2)
midx = (minx + maxx) / 2.0
midy = (miny + maxy) / 2.0
xp, yp = np.array([[midx + 3 * M, midx, midx - 3 * M],
[midy, midy + 3 * M, midy - 3 * M]])
x1 = np.hstack((self.x, xp))
y1 = np.hstack((self.y, yp))
newtri = self.__class__(x1, y1)
# do a least-squares fit to a plane to make pseudo-data
xy1 = np.ones((len(self.x), 3), np.float64)
xy1[:, 0] = self.x
xy1[:, 1] = self.y
from numpy.dual import lstsq
c, res, rank, s = lstsq(xy1, z)
zp = np.hstack((z, xp * c[0] + yp * c[1] + c[2]))
return newtri, zp
def nn_extrapolator(self, z, bbox=None, default_value=np.nan):
newtri, zp = self.prep_extrapolator(z, bbox)
return newtri.nn_interpolator(zp, default_value)
def linear_extrapolator(self, z, bbox=None, default_value=np.nan):
newtri, zp = self.prep_extrapolator(z, bbox)
return newtri.linear_interpolator(zp, default_value)
def node_graph(self):
"""Return a graph of node_id's pointing to node_id's.
The arcs of the graph correspond to the edges in the triangulation.
{node_id: set([node_id, ...]), ...}
"""
g = {}
for i, j in self.edge_db:
s = g.setdefault(i, set())
s.add(j)
s = g.setdefault(j, set())
s.add(i)
return g
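# --- Editor-added usage sketch (not part of the original module) ---
# A hedged, minimal illustration of the Triangulation attributes documented in
# the class docstring; it assumes a matplotlib old enough to still ship the
# (deprecated) matplotlib.delaunay package.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    xs = rng.rand(20)
    ys = rng.rand(20)
    tri = Triangulation(xs, ys)
    print("convex hull (CCW point ids):", tri.hull)
    print("number of edges:", len(tri.edge_db))
    print("number of triangles:", len(tri.triangle_nodes))
    # Natural-neighbor interpolator for z = x*y defined on the input points.
    interp = tri.nn_interpolator(xs * ys)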
| mit |
janhahne/nest-simulator | pynest/nest/tests/test_visualization.py | 5 | 6999 | # -*- coding: utf-8 -*-
#
# test_visualization.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Tests for visualization functions.
"""
import os
import unittest
import nest
import numpy as np
try:
import matplotlib.pyplot as plt
tmp_fig = plt.figure() # make sure we can open a window; DISPLAY may not be set
plt.close(tmp_fig)
PLOTTING_POSSIBLE = True
except:
PLOTTING_POSSIBLE = False
try:
import pydot
HAVE_PYDOT = True
except ImportError:
HAVE_PYDOT = False
try:
import pandas
HAVE_PANDAS = True
except ImportError:
HAVE_PANDAS = False
class VisualizationTestCase(unittest.TestCase):
def nest_tmpdir(self):
"""Returns temp dir path from environment, current dir otherwise."""
if 'NEST_DATA_PATH' in os.environ:
return os.environ['NEST_DATA_PATH']
else:
return '.'
def setUp(self):
self.filenames = []
def tearDown(self):
for filename in self.filenames:
# Cleanup temporary datafiles
os.remove(filename)
@unittest.skipIf(not HAVE_PYDOT, 'pydot not found')
def test_plot_network(self):
"""Test plot_network"""
import nest.visualization as nvis
nest.ResetKernel()
sources = nest.Create('iaf_psc_alpha', 10)
targets = nest.Create('iaf_psc_alpha', 10)
nest.Connect(sources, targets)
filename = os.path.join(self.nest_tmpdir(), 'network_plot.png')
self.filenames.append(filename)
nvis.plot_network(sources + targets, filename)
self.assertTrue(os.path.isfile(filename), 'Plot was not created or not saved')
def voltage_trace_verify(self, device):
self.assertIsNotNone(plt._pylab_helpers.Gcf.get_active(), 'No active figure')
ax = plt.gca()
vm = device.get('events', 'V_m')
for ref_vm, line in zip((vm[::2], vm[1::2]), ax.lines):
x_data, y_data = line.get_data()
# Check that times are correct
self.assertEqual(list(x_data), list(np.unique(device.get('events', 'times'))))
# Check that voltmeter data corresponds to the lines in the plot
self.assertTrue(all(np.isclose(ref_vm, y_data)))
plt.close(ax.get_figure())
@unittest.skipIf(not PLOTTING_POSSIBLE, 'Plotting impossible because matplotlib or display missing')
def test_voltage_trace_from_device(self):
"""Test voltage_trace from device"""
import nest.voltage_trace as nvtrace
nest.ResetKernel()
nodes = nest.Create('iaf_psc_alpha', 2)
pg = nest.Create('poisson_generator', 1, {'rate': 1000.})
device = nest.Create('voltmeter')
nest.Connect(pg, nodes)
nest.Connect(device, nodes)
nest.Simulate(100)
# Test with data from device
nest.voltage_trace.from_device(device)
self.voltage_trace_verify(device)
        # Test with data from file
vm = device.get('events')
data = np.zeros([len(vm['senders']), 3])
data[:, 0] = vm['senders']
data[:, 1] = vm['times']
data[:, 2] = vm['V_m']
filename = os.path.join(self.nest_tmpdir(), 'voltage_trace.txt')
self.filenames.append(filename)
np.savetxt(filename, data)
nest.voltage_trace.from_file(filename)
self.voltage_trace_verify(device)
def spike_detector_data_setup(self, to_file=False):
nest.ResetKernel()
pg = nest.Create('poisson_generator', {'rate': 1000.})
sd = nest.Create('spike_detector')
if to_file:
parrot = nest.Create('parrot_neuron')
sd_to_file = nest.Create('spike_detector')
sd_to_file.record_to = 'ascii'
nest.Connect(pg, parrot)
nest.Connect(parrot, sd)
nest.Connect(parrot, sd_to_file)
nest.Simulate(100)
return sd, sd_to_file
else:
nest.Simulate(100)
return sd
def spike_detector_raster_verify(self, sd_ref):
self.assertIsNotNone(plt._pylab_helpers.Gcf.get_active(), 'No active figure')
fig = plt.gcf()
axs = fig.get_axes()
x_data, y_data = axs[0].lines[0].get_data()
plt.close(fig)
# Have to use isclose() because of round-off errors
self.assertEqual(x_data.shape, sd_ref.shape)
self.assertTrue(all(np.isclose(x_data, sd_ref)))
@unittest.skipIf(not PLOTTING_POSSIBLE, 'Plotting impossible because matplotlib or display missing')
def test_raster_plot(self):
"""Test raster_plot"""
import nest.raster_plot as nraster
sd, sd_to_file = self.spike_detector_data_setup(to_file=True)
spikes = sd.get('events')
sd_ref = spikes['times']
# Test from_device
nest.raster_plot.from_device(sd)
self.spike_detector_raster_verify(sd_ref)
# Test from_data
data = np.zeros([len(spikes['senders']), 2])
data[:, 0] = spikes['senders']
data[:, 1] = spikes['times']
nest.raster_plot.from_data(data)
self.spike_detector_raster_verify(sd_ref)
# Test from_file
filename = sd_to_file.filenames[0]
self.filenames.append(filename)
nest.raster_plot.from_file(filename)
self.spike_detector_raster_verify(sd_ref)
# Test from_file_numpy
nest.raster_plot.from_file_numpy([filename])
self.spike_detector_raster_verify(sd_ref)
if HAVE_PANDAS:
# Test from_file_pandas
nest.raster_plot.from_file_pandas([filename])
self.spike_detector_raster_verify(sd_ref)
# Test extract_events
all_extracted = nest.raster_plot.extract_events(data)
times_30_to_40_extracted = nest.raster_plot.extract_events(data, time=[30., 40.], sel=[3])
source_2_extracted = nest.raster_plot.extract_events(data, sel=[2])
self.assertTrue(np.array_equal(all_extracted, data))
self.assertTrue(np.all(times_30_to_40_extracted[:, 1] >= 30.))
self.assertTrue(np.all(times_30_to_40_extracted[:, 1] < 40.))
self.assertEqual(len(source_2_extracted), 0)
def suite():
suite = unittest.makeSuite(VisualizationTestCase, 'test')
return suite
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
| gpl-2.0 |
dvspirito/pymeasure | pymeasure/experiment/config.py | 1 | 3003 | #
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2017 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import configparser
import logging
import os
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
def set_file(filename):
os.environ['CONFIG'] = filename
def get_config(filename='default_config.ini'):
if 'CONFIG' in os.environ.keys():
filename = os.environ['CONFIG']
config = configparser.ConfigParser()
config.read(filename)
return config
# noinspection PyProtectedMember
def set_mpl_rcparams(config):
if 'matplotlib.rcParams' in config._sections.keys():
import matplotlib
for key in config._sections['matplotlib.rcParams']:
matplotlib.rcParams[key] = eval(config._sections['matplotlib.rcParams'][key])
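# --- Editor-added usage sketch (not part of the original package) ---
# A hedged example of an INI file consumed by the helpers above; values under
# [matplotlib.rcParams] are eval()'d, so Python literals are expected.
#
#   [matplotlib.rcParams]
#   figure.figsize = (6, 4)
#   font.size = 10
#
# and, in code (the file name is hypothetical):
#   set_file('my_experiment_config.ini')
#   config = get_config()
#   set_mpl_rcparams(config)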
| mit |
ryanjmccall/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_cocoaagg.py | 70 | 8970 | from __future__ import division
"""
backend_cocoaagg.py
A native Cocoa backend via PyObjC in OSX.
Author: Charles Moad ([email protected])
Notes:
- Requires PyObjC (currently testing v1.3.7)
- The Tk backend works nicely on OSX. This code
primarily serves as an example of embedding a
matplotlib rendering context into a cocoa app
using a NSImageView.
"""
import os, sys
try:
import objc
except:
print >>sys.stderr, 'The CococaAgg backend required PyObjC to be installed!'
print >>sys.stderr, ' (currently testing v1.3.7)'
sys.exit()
from Foundation import *
from AppKit import *
from PyObjCTools import NibClassBuilder, AppHelper
import matplotlib
from matplotlib.figure import Figure
from matplotlib.backend_bases import FigureManagerBase
from backend_agg import FigureCanvasAgg
from matplotlib._pylab_helpers import Gcf
mplBundle = NSBundle.bundleWithPath_(os.path.dirname(__file__))
def new_figure_manager(num, *args, **kwargs):
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass( *args, **kwargs )
canvas = FigureCanvasCocoaAgg(thisFig)
return FigureManagerCocoaAgg(canvas, num)
def show():
for manager in Gcf.get_all_fig_managers():
manager.show()
def draw_if_interactive():
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.show()
class FigureCanvasCocoaAgg(FigureCanvasAgg):
def draw(self):
FigureCanvasAgg.draw(self)
def blit(self, bbox):
pass
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
NibClassBuilder.extractClasses('Matplotlib.nib', mplBundle)
class MatplotlibController(NibClassBuilder.AutoBaseClass):
# available outlets:
# NSWindow plotWindow
# PlotView plotView
def awakeFromNib(self):
# Get a reference to the active canvas
NSApp().setDelegate_(self)
self.app = NSApp()
self.canvas = Gcf.get_active().canvas
self.plotView.canvas = self.canvas
self.canvas.plotView = self.plotView
self.plotWindow.setAcceptsMouseMovedEvents_(True)
self.plotWindow.makeKeyAndOrderFront_(self)
self.plotWindow.setDelegate_(self)#.plotView)
self.plotView.setImageFrameStyle_(NSImageFrameGroove)
self.plotView.image_ = NSImage.alloc().initWithSize_((0,0))
self.plotView.setImage_(self.plotView.image_)
# Make imageview first responder for key events
self.plotWindow.makeFirstResponder_(self.plotView)
# Force the first update
self.plotView.windowDidResize_(self)
def windowDidResize_(self, sender):
self.plotView.windowDidResize_(sender)
def windowShouldClose_(self, sender):
#NSApplication.sharedApplication().stop_(self)
self.app.stop_(self)
return objc.YES
def saveFigure_(self, sender):
p = NSSavePanel.savePanel()
if(p.runModal() == NSFileHandlingPanelOKButton):
self.canvas.print_figure(p.filename())
def printFigure_(self, sender):
op = NSPrintOperation.printOperationWithView_(self.plotView)
op.runOperation()
class PlotWindow(NibClassBuilder.AutoBaseClass):
pass
class PlotView(NibClassBuilder.AutoBaseClass):
def updatePlot(self):
w,h = self.canvas.get_width_height()
# Remove all previous images
for i in xrange(self.image_.representations().count()):
self.image_.removeRepresentation_(self.image_.representations().objectAtIndex_(i))
self.image_.setSize_((w,h))
brep = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(
(self.canvas.buffer_rgba(0,0),'','','',''), # Image data
w, # width
h, # height
8, # bits per pixel
4, # components per pixel
True, # has alpha?
False, # is planar?
NSCalibratedRGBColorSpace, # color space
w*4, # row bytes
32) # bits per pixel
self.image_.addRepresentation_(brep)
self.setNeedsDisplay_(True)
def windowDidResize_(self, sender):
w,h = self.bounds().size
dpi = self.canvas.figure.dpi
self.canvas.figure.set_size_inches(w / dpi, h / dpi)
self.canvas.draw()
self.updatePlot()
def mouseDown_(self, event):
loc = self.convertPoint_fromView_(event.locationInWindow(), None)
type = event.type()
if (type == NSLeftMouseDown):
button = 1
else:
print >>sys.stderr, 'Unknown mouse event type:', type
button = -1
self.canvas.button_press_event(loc.x, loc.y, button)
self.updatePlot()
def mouseDragged_(self, event):
loc = self.convertPoint_fromView_(event.locationInWindow(), None)
self.canvas.motion_notify_event(loc.x, loc.y)
self.updatePlot()
def mouseUp_(self, event):
loc = self.convertPoint_fromView_(event.locationInWindow(), None)
type = event.type()
if (type == NSLeftMouseUp):
button = 1
else:
print >>sys.stderr, 'Unknown mouse event type:', type
button = -1
self.canvas.button_release_event(loc.x, loc.y, button)
self.updatePlot()
def keyDown_(self, event):
self.canvas.key_press_event(event.characters())
self.updatePlot()
def keyUp_(self, event):
self.canvas.key_release_event(event.characters())
self.updatePlot()
class MPLBootstrap(NSObject):
# Loads the nib containing the PlotWindow and PlotView
def startWithBundle_(self, bundle):
#NSApplicationLoad()
if not bundle.loadNibFile_externalNameTable_withZone_('Matplotlib.nib', {}, None):
print >>sys.stderr, 'Unable to load Matplotlib Cocoa UI!'
sys.exit()
class FigureManagerCocoaAgg(FigureManagerBase):
def __init__(self, canvas, num):
FigureManagerBase.__init__(self, canvas, num)
try:
WMEnable('Matplotlib')
except:
# MULTIPLE FIGURES ARE BUGGY!
pass # If there are multiple figures we only need to enable once
#self.bootstrap = MPLBootstrap.alloc().init().performSelectorOnMainThread_withObject_waitUntilDone_(
# 'startWithBundle:',
# mplBundle,
# False)
def show(self):
# Load a new PlotWindow
self.bootstrap = MPLBootstrap.alloc().init().performSelectorOnMainThread_withObject_waitUntilDone_(
'startWithBundle:',
mplBundle,
False)
NSApplication.sharedApplication().run()
FigureManager = FigureManagerCocoaAgg
#### Everything below taken from PyObjC examples
#### This is a hack to allow python scripts to access
#### the window manager without running pythonw.
def S(*args):
return ''.join(args)
OSErr = objc._C_SHT
OUTPSN = 'o^{ProcessSerialNumber=LL}'
INPSN = 'n^{ProcessSerialNumber=LL}'
FUNCTIONS=[
# These two are public API
( u'GetCurrentProcess', S(OSErr, OUTPSN) ),
( u'SetFrontProcess', S(OSErr, INPSN) ),
# This is undocumented SPI
( u'CPSSetProcessName', S(OSErr, INPSN, objc._C_CHARPTR) ),
( u'CPSEnableForegroundOperation', S(OSErr, INPSN) ),
]
def WMEnable(name='Python'):
if isinstance(name, unicode):
name = name.encode('utf8')
mainBundle = NSBundle.mainBundle()
bPath = os.path.split(os.path.split(os.path.split(sys.executable)[0])[0])[0]
if mainBundle.bundlePath() == bPath:
return True
bndl = NSBundle.bundleWithPath_(objc.pathForFramework('/System/Library/Frameworks/ApplicationServices.framework'))
if bndl is None:
print >>sys.stderr, 'ApplicationServices missing'
return False
d = {}
objc.loadBundleFunctions(bndl, d, FUNCTIONS)
for (fn, sig) in FUNCTIONS:
if fn not in d:
print >>sys.stderr, 'Missing', fn
return False
err, psn = d['GetCurrentProcess']()
if err:
print >>sys.stderr, 'GetCurrentProcess', (err, psn)
return False
err = d['CPSSetProcessName'](psn, name)
if err:
print >>sys.stderr, 'CPSSetProcessName', (err, psn)
return False
err = d['CPSEnableForegroundOperation'](psn)
if err:
#print >>sys.stderr, 'CPSEnableForegroundOperation', (err, psn)
return False
err = d['SetFrontProcess'](psn)
if err:
print >>sys.stderr, 'SetFrontProcess', (err, psn)
return False
return True
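# --- Editor-added usage note (not part of the original module) ---
# On a matplotlib old enough to ship this backend, it would be selected before
# importing pyplot, e.g.:
#   import matplotlib
#   matplotlib.use('CocoaAgg')
#   import matplotlib.pyplot as plt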
| gpl-3.0 |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/sklearn/cluster/__init__.py | 364 | 1228 | """
The :mod:`sklearn.cluster` module gathers popular unsupervised clustering
algorithms.
"""
from .spectral import spectral_clustering, SpectralClustering
from .mean_shift_ import (mean_shift, MeanShift,
estimate_bandwidth, get_bin_seeds)
from .affinity_propagation_ import affinity_propagation, AffinityPropagation
from .hierarchical import (ward_tree, AgglomerativeClustering, linkage_tree,
FeatureAgglomeration)
from .k_means_ import k_means, KMeans, MiniBatchKMeans
from .dbscan_ import dbscan, DBSCAN
from .bicluster import SpectralBiclustering, SpectralCoclustering
from .birch import Birch
__all__ = ['AffinityPropagation',
'AgglomerativeClustering',
'Birch',
'DBSCAN',
'KMeans',
'FeatureAgglomeration',
'MeanShift',
'MiniBatchKMeans',
'SpectralClustering',
'affinity_propagation',
'dbscan',
'estimate_bandwidth',
'get_bin_seeds',
'k_means',
'linkage_tree',
'mean_shift',
'spectral_clustering',
'ward_tree',
'SpectralBiclustering',
'SpectralCoclustering']
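# --- Editor-added usage sketch (not part of the original module) ---
# Minimal illustration of one estimator re-exported above.
if __name__ == "__main__":
    import numpy as np
    X = np.array([[1.0, 2.0], [1.5, 1.8], [5.0, 8.0],
                  [8.0, 8.0], [1.0, 0.6], [9.0, 11.0]])
    km = KMeans(n_clusters=2, random_state=0).fit(X)
    print(km.labels_)
    print(km.cluster_centers_)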
| mit |
phdowling/scikit-learn | sklearn/tests/test_learning_curve.py | 225 | 10791 | # Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
# training score becomes worse (2 -> 1), test error better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n=30, n_folds=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
| bsd-3-clause |
macks22/scikit-learn | sklearn/neighbors/regression.py | 106 | 10572 | """Nearest Neighbor Regression"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from .base import _get_weights, _check_weights, NeighborsBase, KNeighborsMixin
from .base import RadiusNeighborsMixin, SupervisedFloatMixin
from ..base import RegressorMixin
from ..utils import check_array
class KNeighborsRegressor(NeighborsBase, KNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on k-nearest neighbors.
The target is predicted by local interpolation of the targets
    associated with the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`k_neighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsRegressor
>>> neigh = KNeighborsRegressor(n_neighbors=2)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
RadiusNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
       neighbors, neighbor `k+1` and `k`, have identical distances but
       different labels, the results will depend on the ordering of the
training data.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array or matrix, shape = [n_samples, n_features]
Returns
-------
        y : array of float, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.mean(_y[neigh_ind], axis=1)
else:
y_pred = np.empty((X.shape[0], _y.shape[1]), dtype=np.float)
denom = np.sum(weights, axis=1)
for j in range(_y.shape[1]):
num = np.sum(_y[neigh_ind, j] * weights, axis=1)
y_pred[:, j] = num / denom
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
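# Illustrative sketch (not part of the upstream module): the ``weights``
# parameter documented above also accepts a user-defined callable that maps an
# array of distances to an array of weights of the same shape, e.g.
#
#     def inverse_distance(distances):
#         # small constant avoids division by zero for exact matches
#         return 1.0 / (distances + 1e-6)
#
#     reg = KNeighborsRegressor(n_neighbors=2, weights=inverse_distance)
#     reg.fit([[0], [1], [2], [3]], [0, 0, 1, 1])
#     reg.predict([[1.5]])
#
# The helper name and the 1e-6 constant are arbitrary choices for this sketch.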
class RadiusNeighborsRegressor(NeighborsBase, RadiusNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on neighbors within a fixed radius.
The target is predicted by local interpolation of the targets
    associated with the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsRegressor
>>> neigh = RadiusNeighborsRegressor(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
KNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
p=p, metric=metric, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array or matrix, shape = [n_samples, n_features]
Returns
-------
        y : array of float, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.radius_neighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.array([np.mean(_y[ind, :], axis=0)
for ind in neigh_ind])
else:
y_pred = np.array([(np.average(_y[ind, :], axis=0,
weights=weights[i]))
for (i, ind) in enumerate(neigh_ind)])
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
| bsd-3-clause |
sagivba/MachineLearningUtils | Tests/UnitTests/ModelUtils_UnitTest.py | 1 | 6284 | import unittest
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from MachineLearningUtils.DatasetTools import DatasetsTools
from MachineLearningUtils.ModelsUtils import ModelUtils
class ModelUtilsTestCase(unittest.TestCase):
def setUp(self):
ds = DatasetsTools(datasets.load_iris)
self.iris_df = ds.data_as_df(target_column_name="IrisClass")
self.boton_df = DatasetsTools(datasets.load_boston).data_as_df()
self.tree_clf = DecisionTreeClassifier(max_depth=5, min_samples_split=10, min_samples_leaf=10)
self.prd_lbl = "PrdictedIrisClass"
self.actl_lbl = "IrisClass"
self.columns_lst = list(self.iris_df)
self.columns_lst.pop(-1)
self.mu = ModelUtils(df=self.iris_df, model=self.tree_clf, columns_lst=self.columns_lst,
predicted_lbl=self.prd_lbl, actual_lbl=self.actl_lbl)
def test__set_df(self):
mu = self.mu
df = mu._set_df(None)
iris_df = self.iris_df
self.assertEquals(list(df), list(iris_df))
boton_df = self.boton_df
df = mu._set_df(boton_df)
self.assertEquals(list(df), list(boton_df))
def test__set_train_df(self):
mu = self.mu
df = mu._set_train_df(None)
iris_df = self.iris_df
self.assertEquals(list(df), list(iris_df))
boton_df = DatasetsTools(datasets.load_boston).data_as_df()
df = mu._set_train_df(boton_df)
self.assertEquals(list(df), list(boton_df))
def test__set_test_df(self):
mu = self.mu
df = mu._set_test_df(None)
iris_df = self.iris_df
self.assertEquals(list(df), list(iris_df))
boton_df = DatasetsTools(datasets.load_boston).data_as_df()
df = mu._set_test_df(boton_df)
self.assertEquals(list(df), list(boton_df))
def test_init(self):
# df == None
self.assertRaises(ValueError,
lambda: ModelUtils(df=None, model=self.tree_clf, columns_lst=self.columns_lst,
predicted_lbl=self.prd_lbl,
actual_lbl=self.actl_lbl)
)
# # clf == None
self.assertRaises(ValueError,
lambda: ModelUtils(df=self.iris_df, model=None, columns_lst=self.columns_lst,
predicted_lbl=self.prd_lbl,
actual_lbl=self.actl_lbl),
)
# clf missing
self.assertRaises(ValueError,
lambda: ModelUtils(df=self.iris_df, predicted_lbl=self.prd_lbl, actual_lbl=self.actl_lbl)
)
mu = ModelUtils(df=self.iris_df, model=self.tree_clf, predicted_lbl=self.prd_lbl, actual_lbl=self.actl_lbl)
self.assertIsInstance(mu, ModelUtils)
def test_train_test_split(self):
mu = self.mu
self.assertIsInstance(mu, ModelUtils)
        # default split
train_df, test_df = mu.split_data_to_train_test()
train_shape = train_df.shape
self.assertTrue(train_shape[0] > 70)
self.assertTrue(train_shape[1] == 5)
# split another df
train_df, test_df = mu.split_data_to_train_test(self.boton_df)
train_shape = train_df.shape
self.assertTrue(train_shape[0] > 350)
self.assertTrue(train_shape[1] == 14)
# split another test size
train_df, test_df = mu.split_data_to_train_test(self.boton_df, test_size=0.5)
train_shape = train_df.shape
self.assertTrue(train_shape[0] == list(self.boton_df.shape)[0] * 0.5)
self.assertTrue(train_shape[1] == 14)
def test_train_model_simple(self):
mu = self.mu
self.assertIsInstance(mu, ModelUtils)
train_df, test_df = mu.split_data_to_train_test()
# simple train
trained_df = mu.train_model()
trained_shape = trained_df.shape
self.assertEqual(trained_shape[0] * 1.0, list(self.iris_df.shape)[0] * 0.7)
expexted_columns = ['sepal_length_cm', 'sepal_width_cm', 'petal_length_cm', 'petal_width_cm', 'IrisClass',
'PrdictedIrisClass']
self.assertEquals(list(trained_df), expexted_columns)
    def test_train_model_different_df(self):
        # train the model on a different df
mu = self.mu
self.assertIsInstance(mu, ModelUtils)
train_df, test_df = mu.split_data_to_train_test()
train_df1, test_df1 = mu.split_data_to_train_test(test_size=0.8)
trained_df = mu.train_model(train_df=train_df1)
trained_shape = trained_df.shape
self.assertEqual(trained_shape[0], list(self.iris_df.shape)[0] * 0.2)
def test_train_model_chosen_columns(self):
        # train the model using only a chosen subset of columns
mu = self.mu
chosen_columns = ['sepal_length_cm', 'sepal_width_cm']
self.assertIsInstance(mu, ModelUtils)
train_df, test_df = mu.split_data_to_train_test()
# choose columns
trained_df = mu.train_model(columns_lst=chosen_columns)
        trained_shape = trained_df.shape  # trained shape is the original shape plus the predicted column
self.assertEqual(trained_shape[1], len(list(self.iris_df)) + 1)
pass
# def test_test_model(self):
# pass
#
def test_get_X_df_and_y_s_simple(self):
mu = self.mu
self.assertIsInstance(mu, ModelUtils)
# simple split
train_df, test_df = mu.split_data_to_train_test()
train_shape = train_df.shape
self.assertEqual(train_shape[0], list(self.iris_df.shape)[0] * 0.7)
def test_get_X_df_and_y_s_test_size(self):
mu = self.mu
self.assertIsInstance(mu, ModelUtils)
train_df, test_df = mu.split_data_to_train_test(test_size=0.8)
train_shape = train_df.shape
self.assertEqual(train_shape[0], list(self.iris_df.shape)[0] * 0.2)
def test_get_X_df_and_y_s_other_df(self):
mu = self.mu
self.assertIsInstance(mu, ModelUtils)
train_df, test_df = mu.split_data_to_train_test(df=self.boton_df)
train_shape = train_df.shape
self.assertEqual(train_shape[0], round(list(self.boton_df.shape)[0] * 0.7))
| mit |
TomAugspurger/pandas | pandas/tests/frame/test_sort_values_level_as_str.py | 2 | 2550 | import numpy as np
import pytest
from pandas.errors import PerformanceWarning
from pandas import DataFrame
import pandas._testing as tm
@pytest.fixture
def df_none():
return DataFrame(
{
"outer": ["a", "a", "a", "b", "b", "b"],
"inner": [1, 2, 2, 2, 1, 1],
"A": np.arange(6, 0, -1),
("B", 5): ["one", "one", "two", "two", "one", "one"],
}
)
@pytest.fixture(params=[["outer"], ["outer", "inner"]])
def df_idx(request, df_none):
levels = request.param
return df_none.set_index(levels)
@pytest.fixture(
params=[
"inner", # index level
["outer"], # list of index level
"A", # column
[("B", 5)], # list of column
["inner", "outer"], # two index levels
[("B", 5), "outer"], # index level and column
["A", ("B", 5)], # Two columns
["inner", "outer"], # two index levels and column
]
)
def sort_names(request):
return request.param
@pytest.fixture(params=[True, False])
def ascending(request):
return request.param
def test_sort_index_level_and_column_label(df_none, df_idx, sort_names, ascending):
# GH 14353
# Get index levels from df_idx
levels = df_idx.index.names
# Compute expected by sorting on columns and the setting index
expected = df_none.sort_values(
by=sort_names, ascending=ascending, axis=0
).set_index(levels)
# Compute result sorting on mix on columns and index levels
result = df_idx.sort_values(by=sort_names, ascending=ascending, axis=0)
tm.assert_frame_equal(result, expected)
def test_sort_column_level_and_index_label(df_none, df_idx, sort_names, ascending):
# GH 14353
# Get levels from df_idx
levels = df_idx.index.names
# Compute expected by sorting on axis=0, setting index levels, and then
# transposing. For some cases this will result in a frame with
# multiple column levels
expected = (
df_none.sort_values(by=sort_names, ascending=ascending, axis=0)
.set_index(levels)
.T
)
# Compute result by transposing and sorting on axis=1.
result = df_idx.T.sort_values(by=sort_names, ascending=ascending, axis=1)
if len(levels) > 1:
# Accessing multi-level columns that are not lexsorted raises a
# performance warning
with tm.assert_produces_warning(PerformanceWarning, check_stacklevel=False):
tm.assert_frame_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
zooniverse/aggregation | blog/whales.py | 1 | 6020 | __author__ = 'ggdhines'
from aggregation_api import AggregationAPI
import matplotlib.cbook as cbook
import matplotlib.pyplot as plt
import cv2
import numpy as np
from scipy.spatial import ConvexHull
from shapely.ops import cascaded_union, polygonize
import shapely.geometry as geometry
from scipy.spatial import Delaunay
import numpy as np
import math
import shapely
from descartes import PolygonPatch
def plot_polygon(ax,polygon):
# fig = plt.figure(figsize=(10,10))
# ax = fig.add_subplot(111)
# margin = .3
# x_min, y_min, x_max, y_max = polygon.bounds
# ax.set_xlim([x_min-margin, x_max+margin])
# ax.set_ylim([y_min-margin, y_max+margin])
patch = PolygonPatch(polygon, fc='#999999',
ec='#000000', fill=True,
zorder=-1)
ax.add_patch(patch)
return
def alpha_shape(points, alpha):
"""
Compute the alpha shape (concave hull) of a set
of points.
@param points: Iterable container of points.
@param alpha: alpha value to influence the
gooeyness of the border. Smaller numbers
don't fall inward as much as larger numbers.
Too large, and you lose everything!
"""
if len(points) < 4:
# When you have a triangle, there is no sense
# in computing an alpha shape.
return geometry.MultiPoint(list(points)).convex_hull
def add_edge(edges, edge_points, coords, i, j):
"""
Add a line between the i-th and j-th points,
if not in the list already
"""
if (i, j) in edges or (j, i) in edges:
# already added
return
edges.add( (i, j) )
edge_points.append(coords[ [i, j] ])
coords = np.array([point.coords[0] for point in points])
tri = Delaunay(coords)
edges = set()
edge_points = []
# loop over triangles:
# ia, ib, ic = indices of corner points of the
# triangle
for ia, ib, ic in tri.vertices:
pa = coords[ia]
pb = coords[ib]
pc = coords[ic]
# Lengths of sides of triangle
a = math.sqrt((pa[0]-pb[0])**2 + (pa[1]-pb[1])**2)
b = math.sqrt((pb[0]-pc[0])**2 + (pb[1]-pc[1])**2)
c = math.sqrt((pc[0]-pa[0])**2 + (pc[1]-pa[1])**2)
# Semiperimeter of triangle
s = (a + b + c)/2.0
# Area of triangle by Heron's formula
area = math.sqrt(s*(s-a)*(s-b)*(s-c))
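        # circumradius of the triangle: R = a*b*c / (4 * Area)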
circum_r = a*b*c/(4.0*area)
# Here's the radius filter.
#print circum_r
if circum_r < 1.0/alpha:
add_edge(edges, edge_points, coords, ia, ib)
add_edge(edges, edge_points, coords, ib, ic)
add_edge(edges, edge_points, coords, ic, ia)
m = geometry.MultiLineString(edge_points)
triangles = list(polygonize(m))
return cascaded_union(triangles), edge_points
with AggregationAPI(11,"development") as whales:
whales.__setup__()
postgres_cursor = whales.postgres_session.cursor()
select = "SELECT classification_subjects.subject_id,annotations from classifications INNER JOIN classification_subjects ON classification_subjects.classification_id = classifications.id where workflow_id = 84"
postgres_cursor.execute(select)
for subject_id,annotations in postgres_cursor.fetchall():
f_name = whales.__image_setup__(subject_id)
image_file = cbook.get_sample_data(f_name[0])
image = plt.imread(image_file)
fig, ax1 = plt.subplots(1, 1)
ax1.imshow(image)
plt.show()
inds_0 = image[:,:,0] >= 175
inds_1 = image[:,:,1] >= 175
inds_2 = image[:,:,2] >= 175
inds_white = inds_0 & inds_1 & inds_2
inds = image[:,:,2] >= 50
image[inds] = [255,255,255]
# image[inds_white] = [0,0,0]
# imgray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
# ret,thresh = cv2.threshold(imgray,127,255,0)
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
(thresh, im_bw) = cv2.threshold(gray_image, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
im_bw = cv2.threshold(gray_image, thresh, 255, cv2.THRESH_BINARY)[1]
fig, ax1 = plt.subplots(1, 1)
ax1.imshow(im_bw)
plt.show()
fig, ax1 = plt.subplots(1, 1)
image_file = cbook.get_sample_data(f_name[0])
image = plt.imread(image_file)
ax1.imshow(image)
# edges = cv2.Canny(image,50,400)
im2, contours, hierarchy = cv2.findContours(im_bw,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
for ii,cnt in enumerate(contours):
if cnt.shape[0] > 20:
cnt = np.reshape(cnt,(cnt.shape[0],cnt.shape[2]))
cnt_list = cnt.tolist()
X,Y = zip(*cnt_list)
# plt.plot(X,Y)
hull = ConvexHull(cnt)
# plt.plot(cnt[hull.vertices,0], cnt[hull.vertices,1], 'r--', lw=2)
shapely_points = [shapely.geometry.shape({"type": "Point", "coordinates": (x,y)}) for (x,y) in zip(X,Y)]
concave_hull, edge_points = alpha_shape(shapely_points,alpha=0.01)
# print edge_points
if isinstance(concave_hull,shapely.geometry.Polygon):
# plot_polygon(ax1,concave_hull)
X,Y = zip(*list(concave_hull.exterior.coords))
plt.plot(X,Y)
else:
for p in concave_hull:
X,Y = zip(*list(p.exterior.coords))
plt.plot(X,Y)
# else:
# for p in concave_hull:
# plot_polygon(ax1,p)
# hull_y = [Y[simplex[0]] for simplex in hull.simplices]
# plt.plot(hull_x,hull_y)
# if cv2.contourArea(cnt) > 0:
# print cv2.contourArea(cnt)
# cv2.drawContours(image, contours, ii, (0,255,0), 3)
plt.ylim((image.shape[0],0))
plt.xlim((0,image.shape[1]))
plt.show()
| apache-2.0 |
luo66/scikit-learn | examples/linear_model/lasso_dense_vs_sparse_data.py | 348 | 1862 | """
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that in the case of sparse data the speed is improved.
"""
print(__doc__)
from time import time
from scipy import sparse
from scipy import linalg
from sklearn.datasets.samples_generator import make_regression
from sklearn.linear_model import Lasso
###############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)
alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
###############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()
print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))
alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(Xs.toarray(), y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
| bsd-3-clause |
janmtl/pypsych | tests/data/generate.py | 1 | 1800 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Script for generating mock test data
"""
import pandas as pd
pd.set_option('display.max_rows', 50)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
from pkg_resources import resource_filename
from generators.begaze import generate_mock_begaze_data, save_mock_begaze_data
from generators.biopac import generate_mock_biopac_data, save_mock_biopac_data
from generators.eprime import generate_mock_eprime_data, save_mock_eprime_data
OUTPUT_PATH = 'tests/data/'
N_EVENTS = 12
CONFIG_PATH = resource_filename('tests.config', 'config.yaml')
SCHED_PATH = resource_filename('tests.schedule', 'schedule.yaml')
if __name__ == '__main__':
for subject_id, task_order, task_name in [(101, 1, 'Mock1'),
(101, 2, 'Mock2'),
(102, 1, 'Mock1'),
(102, 2, 'Mock2')]:
# Generate mock datas (beware of order)
BG_DATA = generate_mock_begaze_data(CONFIG_PATH, task_name, N_EVENTS,
SCHED_PATH)
BP_DATA = generate_mock_biopac_data(CONFIG_PATH, task_name, BG_DATA,
SCHED_PATH)
EP_DATA = generate_mock_eprime_data(CONFIG_PATH, task_name, BG_DATA,
SCHED_PATH)
# Save mock datas
save_mock_begaze_data(OUTPUT_PATH, BG_DATA, subject_id, task_order,
task_name)
save_mock_biopac_data(OUTPUT_PATH, BP_DATA, subject_id, task_order,
task_name)
save_mock_eprime_data(OUTPUT_PATH, EP_DATA, subject_id, task_order,
task_name)
| bsd-3-clause |
stanford-ppl/spatial-lang | bin/graph.py | 1 | 10600 | #!/usr/bin/env python
### Plot results
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.ticker as ticker
import os, sys
import math
benchmarks = ['BlackScholes'] #['OutProd', 'DotProduct'] #['BlackScholes', 'DotProduct', 'GDA', 'Kmeans', 'GEMM', 'OutProd', 'TPCHQ6']
args = {"DotProduct": [4512, 6, 12, True],
"OutProd": [192, 4, 96, True],
"TPCHQ6": [18720, 3, 48, True],
"BlackScholes": [14496, 1, 7, True],
"GEMM": [50, 96, 864, 2, 2, 12, True],
"Kmeans": [400, 1, 8, 3, 3, 1, True],
"GDA": [960, 1, 16, 16, 2, 1, True, True]}
labels = ['A', 'B', 'C', 'D', 'E', 'F', 'G']
plotInvalid = True
numCols = 1
numRows = len(benchmarks)/numCols
fig, axes = plt.subplots(len(benchmarks)/numCols, numCols*3, sharex='col', sharey='row')
fig.subplots_adjust(wspace=0.12)
fig.subplots_adjust(hspace=0.1)
font_size = 8
marker_iv = '.'
marker_v = '.'
marker_pd = '*'
marker_comp = '*'
color_iv = '#A9A9A9'
color_spd = '#006600'
color_mpd = '#33CC33'
color_seq = '#FFA500'
color_met = '#1E90FF'
color_comp = 'r'
dotSize = 20
compSize = 50
ano_loc = (0.9, 0.85)
for (idx, bm) in enumerate(benchmarks):
f = open('./results/' + bm + '_data.csv')
s = f.read()
f.close()
lines = s.split('\n')
superHeader = lines[0].split(',')
header = lines[1].split(',')
ALMS = 0
for i in range(0, len(superHeader)):
if 'OUTPUTS' in superHeader[i]: ALMS = i
DSPS = ALMS + 1
BRAM = ALMS + 2
CYCL = ALMS + 3
VLID = ALMS + 4
alms = []
dsps = []
bram = []
cycl = []
vlid = []
line = []
pare = []
for i in range(2,len(lines)-1):
ln = lines[i].split(',')
alms.append( 100 * float(ln[ALMS].rstrip()) / 262400 )
dsps.append( 100 * float(ln[DSPS].rstrip()) / 1963 )
bram.append( 100 * float(ln[BRAM].rstrip()) / 2567 )
cyc = float(ln[CYCL].rstrip())
if cyc > 0: cycl.append( math.log10(cyc) )
else: cycl.append(-1)
vlid.append('true' in ln[VLID])
line.append(ln)
N = len(vlid)
almsG = [[],[],[],[],[]]
dspsG = [[],[],[],[],[]]
bramG = [[],[],[],[],[]]
cyclG = [[],[],[],[],[]]
compP = []
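    # The loop below (a) marks a valid design as Pareto-optimal when no other
    # valid design matches or beats it in both ALM usage and cycle count,
    # (b) bins designs into the groups plotted later (almsG/dspsG/bramG/cyclG),
    # and (c) stores in compP the design whose arguments equal args[bm], i.e.
    # the point highlighted below as 'Compared Design'.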
for i in range(0,N):
pareto = vlid[i]
if i == 0: j = 1
else: j = 0
while pareto and j < N:
pareto = (not vlid[j]) or alms[i] < alms[j] or cycl[i] < cycl[j] or (alms[i] <= alms[j] and cycl[i] < cycl[j]) or (alms[i] < alms[j] and cycl[i] <= cycl[j])
j = j + 1
if (j == i): j = j + 1
pare.append(pareto)
#if pareto: print line[i]
if vlid[i]:
if pareto and meta[i]: g = 0
elif pareto: g = 1
elif meta[i]: g = 2
else: g = 3
else: g = 4
if alms[i] > 0 and cycl[i] > 0:
almsG[g].append(alms[i])
bramG[g].append(bram[i])
dspsG[g].append(dsps[i])
cyclG[g].append(cycl[i])
match = True
#print bm
for (argIdx, arg) in enumerate(args[bm]):
currArg = line[i][argIdx].rstrip()
if type(arg) is int:
currArg = int(currArg)
if type(arg) is bool:
currArg = 'true' in currArg
if (currArg!=arg): match=False
#print str(argIdx) + " " + str(arg) + " " + str(currArg) + " " + str(currArg==arg)
if match:
if len(compP)!=0: print 'Error! already find the comp point!'; exit()
#if not vlid[i]: print 'Error! comp point found is not vaid!'; exit()
compP = [alms[i], dsps[i], bram[i], cycl[i]]
#print str(line[i]) + str(compP)+ " match!!!"
#if len(compP)==0: compP = [3,2,3,5]
if len(compP)==0: compP = [0, 0, 0, 0] #print 'Error! did not find comp!'; exit()
#### Start plotting
rowIdx = idx%numRows
colIdx = (idx-rowIdx)/numRows
ax1 = axes[rowIdx][colIdx*3+0]
ax2 = axes[rowIdx][colIdx*3+1]
ax3 = axes[rowIdx][colIdx*3+2]
####### ALMs
#ax.set_title("{0} Performance/Area Tradeoff (ALMs)".format(sys.argv[1]))
# Add some axis labels.
if (rowIdx==(numRows-1)):
ax1.set_xlabel("ALM", fontsize=font_size)
ax1.set_ylabel(bm, fontsize=font_size)
ax1.tick_params(axis='both', which='major', labelsize=font_size)
ax1.get_yaxis().set_major_locator(ticker.MaxNLocator(integer=True))
if plotInvalid:
iv = ax1.scatter(almsG[4], cyclG[4], c = color_iv, s = dotSize, marker = marker_iv, edgecolors='none', label = 'Invalid')
s = ax1.scatter(almsG[3], cyclG[3], c = color_met, s = dotSize, marker = marker_v,
edgecolors='none', label = 'Sequential')
m = ax1.scatter(almsG[2], cyclG[2], c = color_seq, s = dotSize, marker = marker_v,
edgecolors='none', label = 'CoarsePipe', alpha = 0.3)
sp = ax1.scatter(almsG[1], cyclG[1], c = color_spd, s = dotSize, marker = marker_pd, edgecolors=color_spd, label = 'Sequential Pareto')
mp = ax1.scatter(almsG[0], cyclG[0], c = color_mpd, s = dotSize, marker = marker_pd, edgecolors=color_mpd, label = 'CoarsePipe Pareto')
comp = ax1.scatter(compP[0], compP[3], c = color_comp, s=compSize, marker =
marker_comp, label = 'Compared Design')
#print bm + " mp" + str(almsG[0]) + str(cyclG[0])
iv.set_rasterized(True)
s.set_rasterized(True)
m.set_rasterized(True)
sp.set_rasterized(True)
mp.set_rasterized(True)
comp.set_rasterized(True)
ax1.grid()
ax1.set_xlim([-1,120])
ax1.annotate(chr(65+idx*3+0), ano_loc, fontsize=font_size, xycoords='axes fraction', ha='center',
va='center', weight='bold')
######### DSPs
#ax.set_title("{0} Performance/Area Tradeoff (DSPs)".format(sys.argv[1]))
if (rowIdx==(numRows-1)):
ax2.set_xlabel("DSP", fontsize=font_size)
ax2.tick_params(axis='both', which='major', labelsize=font_size)
ax2.get_yaxis().set_major_locator(ticker.MaxNLocator(integer=True))
if plotInvalid:
iv = ax2.scatter(dspsG[4], cyclG[4], c = color_iv, s = dotSize, marker = marker_iv, edgecolors='none', label = 'Invalid')
s = ax2.scatter(dspsG[3], cyclG[3], c = color_met, s = dotSize, marker = marker_v,
edgecolors='none',label = 'Sequential')
m = ax2.scatter(dspsG[2], cyclG[2], c = color_seq, s = dotSize, marker = marker_v,
edgecolors='none',label = 'CoarsePipe', alpha = 0.3)
sp = ax2.scatter(dspsG[1], cyclG[1], c = color_spd, s = dotSize, marker = marker_pd, edgecolors='none',label = 'Sequential Pareto')
mp = ax2.scatter(dspsG[0], cyclG[0], c = color_mpd, s = dotSize, marker = marker_pd,edgecolors='none',label = 'CoarsePipe Pareto')
comp = ax2.scatter(compP[1], compP[3], c = color_comp, s=compSize, marker =
marker_comp, label = 'Compared Design')
#plt.legend([m, mp], ['Metapipeline', 'Metapipeline + Pareto'])
ax2.grid()
ax2.set_xlim([-1,120])
ax2.annotate(chr(65+idx*3+1), ano_loc, fontsize=font_size, xycoords='axes fraction', ha='center',
va='center', weight='bold')
iv.set_rasterized(True)
s.set_rasterized(True)
m.set_rasterized(True)
sp.set_rasterized(True)
mp.set_rasterized(True)
comp.set_rasterized(True)
######## BRAM
#ax.set_title("{0} Performance/Area Tradeoff (BRAMs)".format(sys.argv[1]))
if (rowIdx==(numRows-1)):
ax3.set_xlabel("BRAM", fontsize=font_size)
ax3.tick_params(axis='both', which='major', labelsize=font_size)
ax3.get_yaxis().set_major_locator(ticker.MaxNLocator(integer=True))
if plotInvalid:
iv = ax3.scatter(bramG[4], cyclG[4], c = color_iv, s = dotSize, marker = marker_iv, edgecolors='none', label = 'Invalid')
s = ax3.scatter(bramG[3], cyclG[3], c = color_met, s = dotSize, marker =
marker_v,edgecolors='none',label = 'Sequential')
m = ax3.scatter(bramG[2], cyclG[2], c = color_seq, s = dotSize, marker =
marker_v,edgecolors='none', label = 'CoarsePipe', alpha = 0.3)
sp = ax3.scatter(bramG[1], cyclG[1], c = color_spd, s = dotSize, marker = marker_pd,edgecolors='none', label = 'Sequential Pareto')
mp = ax3.scatter(bramG[0], cyclG[0], c = color_mpd, s = dotSize, marker =
marker_pd,edgecolors='none', label = 'CoarsePipe Pareto')
comp = ax3.scatter(compP[2], compP[3], c = color_comp, s=compSize, marker =
marker_comp, label = 'Compared Design')
ax3.grid()
ax3.set_xlim([-1,120])
ax3.tick_params(axis='x', which='major', labelsize=font_size-1)
ax3.annotate(chr(65+idx*3+2), ano_loc, fontsize=font_size, xycoords='axes fraction', ha='center',
va='center', weight='bold')
iv.set_rasterized(True)
s.set_rasterized(True)
m.set_rasterized(True)
sp.set_rasterized(True)
mp.set_rasterized(True)
comp.set_rasterized(True)
if (rowIdx==0):
ax3.legend([m, s, iv, mp, sp, comp], ['Designs with CoarsePipe only',
'Designs with at least one Sequential',
'Invalid design using more than 100% resource',
'Pareto points that have CoarsePipe only',
'Pareto points that have at least one Sequential',
'Design compared with CPU'],
bbox_to_anchor=(1.0, 1.6), ncol=2, fontsize=font_size-1)
fig.text(0.5, 0.04, 'Usage (% of maximum)', ha='center')
fig.text(0.04, 0.5, 'Cycles (Log Scale)', va='center', rotation='vertical')
fig.set_size_inches(7,9)
#plt.show()
plt.savefig('tradeoff.png', format='png', dpi=900)
def onclick(event):
print 'button=%d, x=%d, y=%d, xdata=%f, ydata=%f'%(event.button, event.x, event.y, event.xdata, event.ydata)
nalms = event.xdata
cycles = event.ydata
minIndex = 0
minDist = -1
minDSP = -1
minBRAM = -1
for i in range(0,N):
if vlid[i]:
dist = abs((alms[i] - nalms)/nalms)*abs((cycl[i] - cycles)/cycles)
if dist < minDist or minDist < 0:
minDist = dist
minIndex = i
closestALM = alms[minIndex]
closestCycles = cycl[minIndex]
minBRAM = bram[minIndex]
print lines[0]
print 'Closest:', line[minIndex], pare[minIndex]
for i in range(0,N):
if vlid[i] and abs((alms[i] - closestALM)/closestALM) < 0.05 and abs((cycl[i] - closestCycles)/closestCycles) < 0.05 and bram[i] < minBRAM:
minBRAM = bram[i]
minIndex = i
print 'Smallest BRAM nearby: ', line[minIndex], pare[minIndex]
#
#cid = fig.canvas.mpl_connect('button_press_event', onclick)
| mit |
ningchi/scikit-learn | examples/model_selection/plot_roc_crossval.py | 247 | 3253 | """
=============================================================
Receiver Operating Characteristic (ROC) with cross validation
=============================================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality using cross-validation.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
This example shows the ROC response of different datasets, created from K-fold
cross-validation. Taking all of these curves, it is possible to calculate the
mean area under curve, and see the variance of the curve when the
training set is split into different subsets. This roughly shows how the
classifier output is affected by changes in the training data, and how
different the splits generated by K-fold cross-validation are from one another.
.. note::
See also :func:`sklearn.metrics.auc_score`,
:func:`sklearn.cross_validation.cross_val_score`,
:ref:`example_model_selection_plot_roc.py`,
"""
print(__doc__)
import numpy as np
from scipy import interp
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import StratifiedKFold
###############################################################################
# Data IO and generation
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
X, y = X[y != 2], y[y != 2]
n_samples, n_features = X.shape
# Add noisy features
random_state = np.random.RandomState(0)
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
###############################################################################
# Classification and ROC analysis
# Run classifier with cross-validation and plot ROC curves
cv = StratifiedKFold(y, n_folds=6)
classifier = svm.SVC(kernel='linear', probability=True,
random_state=random_state)
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
all_tpr = []
for i, (train, test) in enumerate(cv):
probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
    # Compute ROC curve and area under the curve
fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
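    # Interpolate this fold's TPR onto the common mean_fpr grid so the curves
    # from all folds can be averaged point-wise below.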
mean_tpr += interp(mean_fpr, fpr, tpr)
mean_tpr[0] = 0.0
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=1, label='ROC fold %d (area = %0.2f)' % (i, roc_auc))
plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='Luck')
mean_tpr /= len(cv)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
plt.plot(mean_fpr, mean_tpr, 'k--',
label='Mean ROC (area = %0.2f)' % mean_auc, lw=2)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
henriquemiranda/yambopy | scripts/analyse_gw.py | 4 | 4423 | import matplotlib
#matplotlib.use('Agg') # prevents crashes if no X server present (clusters)
from yambopy import *
import matplotlib.pyplot as plt
import sys
import argparse
import numpy as np
import operator
"""
Study the convergence of GW calculations by looking at the change in band-gap value.
The script reads from <folder> all results from <variable> calculations and displays them.
Use the band and k-point options (or change default values) according to the size of your k-grid and
the location of the band extrema.
"""
parser = argparse.ArgumentParser(description='Study GW convergence with regards to the band-gap value.')
parser.add_argument('folder' , help='Folder containing SAVE and convergence runs.')
parser.add_argument('variable' , help='Variable tested (e.g. FFTGvecs)' )
parser.add_argument('-bc','--bandc' , help='Lowest conduction band number' , default=53, type=int)
parser.add_argument('-kc','--kpointc' , help='K-point index for conduction band', default=19, type=int)
parser.add_argument('-bv','--bandv' , help='Highest valence band number' , default=52, type=int)
parser.add_argument('-kv','--kpointv' , help='K-point index for valence band' , default=1, type=int)
parser.add_argument('-np','--nopack' , help='Skips packing o- files into .json files', action='store_false')
parser.add_argument('-t' ,'--text' , help='Also print a text file for reference' , action='store_true')
args = parser.parse_args()
folder = args.folder
var = args.variable
bandc = args.bandc
kpointc= args.kpointc
bandv = args.bandv
kpointv= args.kpointv
nopack = args.nopack
text = args.text
print 'Valence band: ',bandv,'conduction band: ',bandc
print 'K-point VB: ',kpointv, ' k-point CB: ',kpointc
# Packing results (o-* files) from the calculations into yambopy-friendly .json files
if nopack: # True by default, False if -np used
print 'Packing ...'
pack_files_in_folder(folder,mask=var)
pack_files_in_folder(folder,mask='reference')
print 'Packing done.'
else:
print 'Packing skipped.'
# importing data from .json files in <folder>
print 'Importing...'
data = YamboAnalyser(folder)
# extract data according to relevant variable
outvars = data.get_data(var)
invars = data.get_inputfiles_tag(var)
tags = data.get_tags(var)
# Get only files related to the convergence study of the variable,
# ordered to have a smooth plot
keys=[]
sorted_invars = sorted(invars.items(), key=operator.itemgetter(1))
for i in range(0,len(sorted_invars)):
key=sorted_invars[i][0]
if key.startswith(var) or key=='reference.json':
keys.append(key)
print 'Files detected: ',keys
print 'Preparing output...'
### Output
# Unit of the variable :
unit = invars[keys[0]]['variables'][var][1]
# The following variables are used to make the script compatible with both short and extended output
kpindex = tags[keys[0]].tolist().index('K-point')
bdindex = tags[keys[0]].tolist().index('Band')
e0index = tags[keys[0]].tolist().index('Eo')
gwindex = tags[keys[0]].tolist().index('E-Eo')
array = np.zeros((len(keys),2))
for i,key in enumerate(keys):
# input value
# GbndRnge and BndsRnX_ are special cases
if var.startswith('GbndRng') or var.startswith('BndsRnX'):
# format : [1, nband, ...]
array[i][0] = invars[key]['variables'][var][0][1]
else:
array[i][0] = invars[key]['variables'][var][0]
# Output value (gap energy)
# First the relevant lines are identified
valence=[]
conduction=[]
for j in range(len(outvars[key]+1)):
if outvars[key][j][kpindex]==kpointc and outvars[key][j][bdindex]==bandc:
conduction=outvars[key][j]
elif outvars[key][j][kpindex]==kpointv and outvars[key][j][bdindex]==bandv:
valence = outvars[key][j]
# Then the gap can be calculated
array[i][1] = conduction[e0index]+conduction[gwindex]-(valence[e0index]+valence[gwindex])
if text:
filename = folder+'_'+var+'.dat'
    header = var+'('+str(unit)+'), gap'
np.savetxt(filename,array,delimiter='\t',header=header)
print filename
plt.plot(array[:,0],array[:,1],'o-')
plt.xlabel(var+' ('+unit+')')
plt.ylabel('E_gw = E_lda + \Delta E')
plt.show()
#plt.savefig(folder+'_'+var+'.png')
# Plot all of the different GW bandstructures in the same plot
#ya = YamboAnalyser(folder)
#ya.plot_gw('qp',cols=(lambda x: x[2],lambda x: x[3]+x[4]))
| bsd-3-clause |
aurelieladier/openturns | validation/src/optimal_lhs/validate_SA_big.py | 7 | 2430 | #! /usr/bin/env python
import openturns as ot
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from openturns.viewer import View
import time
ot.RandomGenerator.SetSeed(0)
ot.Log.Show(ot.Log.INFO)
# Bounds are [0,1]^dimension
dimension = 50
# Size of sample
size = 100
# Factory: lhs generates
lhsDesign = ot.LHSExperiment(ot.ComposedDistribution([ot.Uniform(0.0, 1.0)] * dimension), size)
lhsDesign.setAlwaysShuffle(True) # randomized
geomProfile = ot.GeometricProfile(10.0, 0.999, 50000)
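# Geometric cooling schedule for the annealing: start at temperature 10.0 and
# shrink it by a factor of 0.999 over 50000 iterations (the (T0, c, iMax)
# argument order is assumed from the values used here).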
c2 = ot.SpaceFillingC2()
sa = ot.SimulatedAnnealingLHS(lhsDesign, geomProfile, c2)
tic = time.time()
design = sa.generate()
result = sa.getResult()
toc = time.time()
dt1 = toc-tic
print("time=%f"%dt1)
print("dimension=%d, size=%d,sa=%s"%(dimension, size, sa))
print(str(result.getOptimalValue())+" c2="+str(result.getC2())+" phiP="+str(result.getPhiP())+" minDist="+str(result.getMinDist()))
crit = result.drawHistoryCriterion()
proba = result.drawHistoryProbability()
temp = result.drawHistoryTemperature()
pp = PdfPages('large_OTLHS.pdf')
# Criterion
fig = View(crit, plot_kwargs={'color':'blue'}).getFigure()
fig.savefig("otlhs_c2_crit_big.png")
pp.savefig(fig)
plt.close(fig)
# Proba
fig = View(proba, plot_kwargs={'marker': 'o', 'ms': 0.6}, axes_kwargs={'ylim': [-0.05, 1.05]}).getFigure()
fig.savefig("lhs_c2_proba_big.png")
pp.savefig(fig)
plt.close(fig)
# Temperature
fig = View(temp).getFigure()
pp.savefig(fig)
plt.close(fig)
minDist = ot.SpaceFillingMinDist()
sa = ot.SimulatedAnnealingLHS(lhsDesign, geomProfile, minDist)
tic = time.time()
design = sa.generate()
result = sa.getResult()
toc = time.time()
dt2 = toc-tic
print("time=%f"%dt2)
print("dimension=%d, size=%d,sa=%s"%(dimension, size, sa))
print(str(result.getOptimalValue())+" c2="+str(result.getC2())+" phiP="+str(result.getPhiP())+" minDist="+str(result.getMinDist()))
crit = result.drawHistoryCriterion()
proba = result.drawHistoryProbability()
temp = result.drawHistoryTemperature()
# Criterion
fig = View(crit, plot_kwargs={'color':'blue'}).getFigure()
fig.savefig("otlhs_mindist_crit_big.png")
pp.savefig(fig)
plt.close(fig)
# Proba
fig = View(proba, plot_kwargs={'marker': 'o', 'ms': 0.6}, axes_kwargs={'ylim': [-0.05, 1.05]}).getFigure()
fig.savefig("lhs_mindist_proba_big.png")
pp.savefig(fig)
plt.close(fig)
# Temperature
fig = View(temp).getFigure()
pp.savefig(fig)
plt.close(fig)
pp.close()
| lgpl-3.0 |
steffengraber/nest-simulator | pynest/examples/sinusoidal_gamma_generator.py | 8 | 11885 | # -*- coding: utf-8 -*-
#
# sinusoidal_gamma_generator.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
#
"""
Sinusoidal gamma generator example
----------------------------------
This script demonstrates the use of the ``sinusoidal_gamma_generator`` and its
different parameters and modes. The source code of the model can be found in
``models/sinusoidal_gamma_generator.h``.
The script is structured into two parts, each of which generates its own
figure. In part 1A, two generators are created with different orders of the
underlying gamma process and their resulting PST (peristimulus time) and ISI
(Inter-spike interval) histograms are plotted. Part 1B illustrates the effect
of the ``individual_spike_trains`` switch. In Part 2, the effects of
different settings for rate, phase and frequency are demonstrated.
"""
###############################################################################
# First, we import all necessary modules to simulate, analyze and
# plot this example.
import nest
import matplotlib.pyplot as plt
import numpy as np
nest.ResetKernel() # in case we run the script multiple times from iPython
###############################################################################
# We first create a figure for the plot and set the resolution of NEST.
plt.figure()
nest.SetKernelStatus({'resolution': 0.01})
###############################################################################
# Then we create two instances of the ``sinusoidal_gamma_generator`` with two
# different orders of the underlying gamma process using ``Create``. Moreover,
# we create devices to record firing rates (``multimeter``) and spikes
# (``spike_recorder``) and connect them to the generators using ``Connect``.
num_nodes = 2
g = nest.Create('sinusoidal_gamma_generator', n=num_nodes,
params={'rate': 10000.0,
'amplitude': 5000.0,
'frequency': 10.0,
'phase': 0.0,
'order': [2.0, 10.0]}) # note the syntax for different order parameter of the two nodes
m = nest.Create('multimeter', num_nodes, {'interval': 0.1, 'record_from': ['rate']})
s = nest.Create('spike_recorder', num_nodes)
nest.Connect(m, g, 'one_to_one')
nest.Connect(g, s, 'one_to_one')
nest.Simulate(200)
###############################################################################
# After simulating, the spikes are extracted from the ``spike_recorder`` and
# plots are created with panels for the PST and ISI histograms.
colors = ['b', 'g']
for j in range(num_nodes):
ev = m[j].events
t = ev['times']
r = ev['rate']
spike_times = s[j].events['times']
plt.subplot(221)
h, e = np.histogram(spike_times, bins=np.arange(0., 201., 5.))
plt.plot(t, r, color=colors[j])
plt.step(e[:-1], h * 1000 / 5., color=colors[j], where='post')
plt.title('PST histogram and firing rates')
plt.ylabel('Spikes per second')
plt.subplot(223)
plt.hist(np.diff(spike_times), bins=np.arange(0., 0.505, 0.01),
histtype='step', color=colors[j])
plt.title('ISI histogram')
###############################################################################
# The kernel is reset and the number of threads set to 4.
nest.ResetKernel()
nest.SetKernelStatus({'local_num_threads': 4})
###############################################################################
# First, a ``sinusoidal_gamma_generator`` with ``individual_spike_trains`` set to
# `True` is created and connected to 20 parrot neurons whose spikes are
# recorded by a spike recorder. After simulating, a raster plot of the spikes
# is created.
g = nest.Create('sinusoidal_gamma_generator',
params={'rate': 100.0, 'amplitude': 50.0,
'frequency': 10.0, 'phase': 0.0, 'order': 3.,
'individual_spike_trains': True})
p = nest.Create('parrot_neuron', 20)
s = nest.Create('spike_recorder')
nest.Connect(g, p)
nest.Connect(p, s)
nest.Simulate(200)
ev = s.events
plt.subplot(222)
plt.plot(ev['times'], ev['senders'] - min(ev['senders']), 'o')
plt.ylim([-0.5, 19.5])
plt.yticks([])
plt.title('Individual spike trains for each target')
#################################################################################
# The kernel is reset again and the whole procedure is repeated for a
# ``sinusoidal_gamma_generator`` with ``individual_spike_trains`` set to `False`.
# The plot shows that in this case, all neurons receive the same spike train
# from the ``sinusoidal_gamma_generator``.
nest.ResetKernel()
nest.SetKernelStatus({'local_num_threads': 4})
g = nest.Create('sinusoidal_gamma_generator',
params={'rate': 100.0, 'amplitude': 50.0,
'frequency': 10.0, 'phase': 0.0, 'order': 3.,
'individual_spike_trains': False})
p = nest.Create('parrot_neuron', 20)
s = nest.Create('spike_recorder')
nest.Connect(g, p)
nest.Connect(p, s)
nest.Simulate(200)
ev = s.events
plt.subplot(224)
plt.plot(ev['times'], ev['senders'] - min(ev['senders']), 'o')
plt.ylim([-0.5, 19.5])
plt.yticks([])
plt.title('One spike train for all targets')
###############################################################################
# In part 2, multiple generators are created with different settings for rate,
# phase and frequency. First, we define an auxiliary function, which simulates
# `n` generators for `t` ms. After `t/2`, the parameter dictionary of the
# generators is changed from initial to after.
def step(t, n, initial, after, seed=1, dt=0.05):
nest.ResetKernel()
nest.SetKernelStatus({"resolution": dt, "rng_seed": seed})
g = nest.Create('sinusoidal_gamma_generator', n, params=initial)
sr = nest.Create('spike_recorder')
nest.Connect(g, sr)
nest.Simulate(t / 2)
g.set(after)
nest.Simulate(t / 2)
return sr.events
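# Note: the events dictionary returned above holds the recorded spike 'times'
# (and 'senders'); plot_hist below only uses the 'times' array.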
###############################################################################
# This function serves to plot a histogram of the emitted spikes.
def plot_hist(spikes):
plt.hist(spikes['times'],
bins=np.arange(0., max(spikes['times']) + 1.5, 1.),
histtype='step')
t = 1000
n = 1000
dt = 1.0
steps = int(t / dt)
offset = t / 1000. * 2 * np.pi
# We create a figure with a 2x3 grid.
grid = (2, 3)
fig = plt.figure(figsize=(15, 10))
###############################################################################
# We simulate a ``sinusoidal_gamma_generator`` with default parameter values,
# i.e. ``ac=0`` and the DC value being changed from 20 to 50 after `t/2` and
# plot the number of spikes per second over time.
plt.subplot(grid[0], grid[1], 1)
spikes = step(t, n,
{'rate': 20.0},
{'rate': 50.0, },
seed=123, dt=dt)
plot_hist(spikes)
exp = np.ones(int(steps))
exp[:int(steps / 2)] *= 20
exp[int(steps / 2):] *= 50
plt.plot(exp, 'r')
plt.title('DC rate: 20 -> 50')
plt.ylabel('Spikes per second')
###############################################################################
# We simulate a ``sinusoidal_gamma_generator`` with the DC value being changed
# from 80 to 40 after `t/2` and plot the number of spikes per second over
# time.
plt.subplot(grid[0], grid[1], 2)
spikes = step(t, n,
{'order': 6.0, 'rate': 80.0, 'amplitude': 0.,
'frequency': 0., 'phase': 0.},
{'order': 6.0, 'rate': 40.0, 'amplitude': 0.,
'frequency': 0., 'phase': 0.},
seed=123, dt=dt)
plot_hist(spikes)
exp = np.ones(int(steps))
exp[:int(steps / 2)] *= 80
exp[int(steps / 2):] *= 40
plt.plot(exp, 'r')
plt.title('DC rate: 80 -> 40')
###############################################################################
# Next, we simulate a ``sinusoidal_gamma_generator`` with the AC value being
# changed from 40 to 20 after `t/2` and plot the number of spikes per
# second over time.
plt.subplot(grid[0], grid[1], 3)
spikes = step(t, n,
{'order': 3.0, 'rate': 40.0, 'amplitude': 40.,
'frequency': 10., 'phase': 0.},
{'order': 3.0, 'rate': 40.0, 'amplitude': 20.,
'frequency': 10., 'phase': 0.},
seed=123, dt=dt)
plot_hist(spikes)
exp = np.zeros(int(steps))
exp[:int(steps / 2)] = (40. + 40. * np.sin(np.arange(
0, t / 1000. * np.pi * 10, t / 1000. * np.pi * 10. / (steps / 2))))
exp[int(steps / 2):] = (40. + 20. * np.sin(np.arange(
0, t / 1000. * np.pi * 10, t / 1000. * np.pi * 10. / (steps / 2)) + offset))
plt.plot(exp, 'r')
plt.title('Rate Modulation: 40 -> 20')
##################################################################################
# Finally, we simulate a ``sinusoidal_gamma_generator`` with a non-zero AC value
# and the DC value being changed from 80 to 40 after `t/2` and plot the
# number of spikes per second over time.
plt.subplot(grid[0], grid[1], 4)
spikes = step(t, n,
{'order': 6.0, 'rate': 20.0, 'amplitude': 20.,
'frequency': 10., 'phase': 0.},
{'order': 6.0, 'rate': 50.0, 'amplitude': 50.,
'frequency': 10., 'phase': 0.},
seed=123, dt=dt)
plot_hist(spikes)
exp = np.zeros(int(steps))
exp[:int(steps / 2)] = (20. + 20. * np.sin(np.arange(
0, t / 1000. * np.pi * 10, t / 1000. * np.pi * 10. / (steps / 2))))
exp[int(steps / 2):] = (50. + 50. * np.sin(np.arange(
0, t / 1000. * np.pi * 10, t / 1000. * np.pi * 10. / (steps / 2)) + offset))
plt.plot(exp, 'r')
plt.title('DC Rate and Rate Modulation: 20 -> 50')
plt.ylabel('Spikes per second')
plt.xlabel('Time [ms]')
###############################################################################
# Simulate a ``sinusoidal_gamma_generator`` with the AC value being
# changed from 0 to 40 after `t/2` and plot the number of spikes per
# second over time.
plt.subplot(grid[0], grid[1], 5)
spikes = step(t, n,
{'rate': 40.0, },
{'amplitude': 40.0, 'frequency': 20.},
seed=123, dt=1.)
plot_hist(spikes)
exp = np.zeros(int(steps))
exp[:int(steps / 2)] = 40. * np.ones(int(steps / 2))
exp[int(steps / 2):] = (40. + 40. * np.sin(np.arange(
0, t / 1000. * np.pi * 20, t / 1000. * np.pi * 20. / (steps / 2))))
plt.plot(exp, 'r')
plt.title('Rate Modulation: 0 -> 40')
plt.xlabel('Time [ms]')
###############################################################################
# Simulate a ``sinusoidal_gamma_generator`` with a phase shift at
# `t/2` and plot the number of spikes per second over time.
# Phase shift
plt.subplot(grid[0], grid[1], 6)
spikes = step(t, n,
{'order': 6.0, 'rate': 60.0, 'amplitude': 60.,
'frequency': 10., 'phase': 0.},
{'order': 6.0, 'rate': 60.0, 'amplitude': 60.,
'frequency': 10., 'phase': 180.},
seed=123, dt=1.)
plot_hist(spikes)
exp = np.zeros(int(steps))
exp[:int(steps / 2)] = (60. + 60. * np.sin(np.arange(
0, t / 1000. * np.pi * 10, t / 1000. * np.pi * 10. / (steps / 2))))
exp[int(steps / 2):] = (60. + 60. * np.sin(np.arange(
0, t / 1000. * np.pi * 10, t / 1000. * np.pi * 10. / (steps / 2)) + offset + np.pi))
plt.plot(exp, 'r')
plt.title('Modulation Phase: 0 -> Pi')
plt.xlabel('Time [ms]')
plt.show()
| gpl-2.0 |
Hanuman26/DeepPy | kmeans.py | 1 | 1206 | from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.preprocessing import scale
np.random.seed()
digits = load_digits()
data = scale(digits.data)
n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target
sample_size = 300
print("n_digits: %d, \t n_samples: %d, \t n_features %d" %(n_digits, n_samples,n_features))
print(79 * '_')
print('% 9s' % 'init'' time inertia homo compl v-meas ARI AMI silhouette')
def bench_k_means(estimator, name, data):
t0 = time()
estimator.fit(data)
print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f'
%(name, (time() - t0), estimator.inertia_,
metrics.homogeneity_score(labels, estimator.labels_),
metrics.completeness_score(labels, estimator.labels_),
metrics.v_measure_score(labels, estimator.labels_),
metrics.adjusted_rand_score(labels, estimator.labels_),
metrics.adjusted_mutual_info_score(labels, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean',
sample_size=sample_size)))
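# The helper above is defined but never invoked in this file; a minimal,
# illustrative driver (an addition for this document, not part of the
# original script) could benchmark the two standard initialisations,
# mirroring the scikit-learn example this code is based on:
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
              name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
              name="random", data=data)
print(79 * '_')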
| apache-2.0 |
iamfullofspam/hep_ml | hep_ml/tree.py | 5 | 2085 | from __future__ import division, print_function, absolute_import
from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree.tree import DTYPE
import numpy
"""
A wrapper over regression trees is presented here.
This one isn't actually needed by itself, but is an important part of gradient boosting.
GBDT uses (attention!) **transform** method, which returns not
predictions, but indices of leaves for all samples.
"""
__author__ = 'Alex Rogozhnikov'
class ClusteringTree(TransformerMixin):
"""
Trivial wrapper over different decision trees
"""
def transform(self, X):
"""
Return indices of leaves, to which each event belongs.
:param X: numpy.array of shape [n_samples, n_features]
:return: [n_samples] with indices
"""
raise NotImplementedError('should be overridden in descendant')
def predict(self, X):
"""
Predict values, separately for each leaf.
"""
raise NotImplementedError('should be overridden in descendant')
def get_leaf_values(self):
"""
Return the values the tree predicts for each of its leaves.
:return: numpy.array with one value per leaf
"""
raise NotImplementedError('should be overridden in descendant')
@staticmethod
def prepare_data(X):
"""Convert dataset to the way when no additional work is needed inside fitting or predicting.
This method is called once to transform dataset.
"""
raise NotImplementedError('should be overridden in descendant')
class SklearnClusteringTree(DecisionTreeRegressor, ClusteringTree):
"""
RegressionTree from scikit-learn, which provides transforming interface.
"""
def transform(self, X):
return self.tree_.apply(X)
def get_leaf_values(self):
return self.tree_.value.flatten()
@staticmethod
def prepare_data(X):
"""Converting to the type needed during fitting sklearn trees."""
return numpy.array(X, dtype=DTYPE)
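# Illustrative sketch (not part of the library): how gradient boosting is
# expected to use this wrapper -- fit a shallow tree, then call transform()
# to obtain leaf indices rather than predictions. The random data and
# parameter choices below are arbitrary assumptions.
if __name__ == '__main__':
    rng = numpy.random.RandomState(0)
    X = SklearnClusteringTree.prepare_data(rng.normal(size=(200, 3)))
    y = rng.normal(size=200)
    tree = SklearnClusteringTree(max_depth=3)
    tree.fit(X, y)
    leaf_indices = tree.transform(X)      # index of the leaf for each sample
    leaf_values = tree.get_leaf_values()  # one value per tree node
    print(leaf_indices[:10], leaf_values.shape)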
| apache-2.0 |
massmutual/scikit-learn | sklearn/manifold/locally_linear.py | 206 | 25061 | """Locally Linear Embedding"""
# Author: Fabian Pedregosa -- <[email protected]>
# Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
from scipy.linalg import eigh, svd, qr, solve
from scipy.sparse import eye, csr_matrix
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.arpack import eigsh
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..neighbors import NearestNeighbors
def barycenter_weights(X, Z, reg=1e-3):
"""Compute barycenter weights of X from Y along the first axis
We estimate the weights to assign to each point in Y[i] to recover
the point X[i]. The barycenter weights sum to 1.
Parameters
----------
X : array-like, shape (n_samples, n_dim)
Z : array-like, shape (n_samples, n_neighbors, n_dim)
reg: float, optional
amount of regularization to add for the problem to be
well-posed in the case of n_neighbors > n_dim
Returns
-------
B : array-like, shape (n_samples, n_neighbors)
Notes
-----
See developers note for more information.
"""
X = check_array(X, dtype=FLOAT_DTYPES)
Z = check_array(Z, dtype=FLOAT_DTYPES, allow_nd=True)
n_samples, n_neighbors = X.shape[0], Z.shape[1]
B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
v = np.ones(n_neighbors, dtype=X.dtype)
# this might raise a LinalgError if G is singular and has trace
# zero
for i, A in enumerate(Z.transpose(0, 2, 1)):
C = A.T - X[i] # broadcasting
G = np.dot(C, C.T)
trace = np.trace(G)
if trace > 0:
R = reg * trace
else:
R = reg
G.flat[::Z.shape[1] + 1] += R
w = solve(G, v, sym_pos=True)
B[i, :] = w / np.sum(w)
return B
def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3):
"""Computes the barycenter weighted graph of k-Neighbors for points in X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : int
Number of neighbors for each sample.
reg : float, optional
Amount of regularization when solving the least-squares
problem. Only relevant if mode='barycenter'. If None, use the
default.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
See also
--------
sklearn.neighbors.kneighbors_graph
sklearn.neighbors.radius_neighbors_graph
"""
knn = NearestNeighbors(n_neighbors + 1).fit(X)
X = knn._fit_X
n_samples = X.shape[0]
ind = knn.kneighbors(X, return_distance=False)[:, 1:]
data = barycenter_weights(X, X[ind], reg=reg)
indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
return csr_matrix((data.ravel(), ind.ravel(), indptr),
shape=(n_samples, n_samples))
def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100,
random_state=None):
"""
Find the null space of a matrix M.
Parameters
----------
M : {array, matrix, sparse matrix, LinearOperator}
Input covariance matrix: should be symmetric positive semi-definite
k : integer
Number of eigenvalues/vectors to return
k_skip : integer, optional
Number of low eigenvalues to skip.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method.
Not used if eigen_solver=='dense'.
max_iter : maximum number of iterations for 'arpack' method
not used if eigen_solver=='dense'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
"""
if eigen_solver == 'auto':
if M.shape[0] > 200 and k + k_skip < 10:
eigen_solver = 'arpack'
else:
eigen_solver = 'dense'
if eigen_solver == 'arpack':
random_state = check_random_state(random_state)
v0 = random_state.rand(M.shape[0])
try:
eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
tol=tol, maxiter=max_iter,
v0=v0)
except RuntimeError as msg:
raise ValueError("Error in determining null-space with ARPACK. "
"Error message: '%s'. "
"Note that method='arpack' can fail when the "
"weight matrix is singular or otherwise "
"ill-behaved. method='dense' is recommended. "
"See online documentation for more information."
% msg)
return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
elif eigen_solver == 'dense':
if hasattr(M, 'toarray'):
M = M.toarray()
eigen_values, eigen_vectors = eigh(
M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
index = np.argsort(np.abs(eigen_values))
return eigen_vectors[:, index], np.sum(eigen_values)
else:
raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
def locally_linear_embedding(
X, n_neighbors, n_components, reg=1e-3, eigen_solver='auto', tol=1e-6,
max_iter=100, method='standard', hessian_tol=1E-4, modified_tol=1E-12,
random_state=None):
"""Perform a Locally Linear Embedding analysis on the data.
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold.
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
method : {'standard', 'hessian', 'modified', 'ltsa'}
standard : use the standard locally linear embedding algorithm.
see reference [1]_
hessian : use the Hessian eigenmap method. This method requires
n_neighbors > n_components * (1 + (n_components + 1) / 2).
see reference [2]_
modified : use the modified locally linear embedding algorithm.
see reference [3]_
ltsa : use local tangent space alignment algorithm
see reference [4]_
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if method == 'hessian'
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if method == 'modified'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Returns
-------
Y : array-like, shape [n_samples, n_components]
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
if eigen_solver not in ('auto', 'arpack', 'dense'):
raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)
if method not in ('standard', 'hessian', 'modified', 'ltsa'):
raise ValueError("unrecognized method '%s'" % method)
nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1)
nbrs.fit(X)
X = nbrs._fit_X
N, d_in = X.shape
if n_components > d_in:
raise ValueError("output dimension must be less than or equal "
"to input dimension")
if n_neighbors >= N:
raise ValueError("n_neighbors must be less than number of points")
if n_neighbors <= 0:
raise ValueError("n_neighbors must be positive")
M_sparse = (eigen_solver != 'dense')
if method == 'standard':
W = barycenter_kneighbors_graph(
nbrs, n_neighbors=n_neighbors, reg=reg)
# we'll compute M = (I-W)'(I-W)
# depending on the solver, we'll do this differently
if M_sparse:
M = eye(*W.shape, format=W.format) - W
M = (M.T * M).tocsr()
else:
M = (W.T * W - W.T - W).toarray()
M.flat[::M.shape[0] + 1] += 1 # add the identity on the diagonal: M = (I - W)' (I - W)
elif method == 'hessian':
dp = n_components * (n_components + 1) // 2
if n_neighbors <= n_components + dp:
raise ValueError("for method='hessian', n_neighbors must be "
"greater than "
"[n_components * (n_components + 3) / 2]")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float)
Yi[:, 0] = 1
M = np.zeros((N, N), dtype=np.float)
use_svd = (n_neighbors > d_in)
for i in range(N):
Gi = X[neighbors[i]]
Gi -= Gi.mean(0)
#build Hessian estimator
if use_svd:
U = svd(Gi, full_matrices=0)[0]
else:
Ci = np.dot(Gi, Gi.T)
U = eigh(Ci)[1][:, ::-1]
Yi[:, 1:1 + n_components] = U[:, :n_components]
j = 1 + n_components
for k in range(n_components):
Yi[:, j:j + n_components - k] = (U[:, k:k + 1]
* U[:, k:n_components])
j += n_components - k
Q, R = qr(Yi)
w = Q[:, n_components + 1:]
S = w.sum(0)
S[np.where(abs(S) < hessian_tol)] = 1
w /= S
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(w, w.T)
if M_sparse:
M = csr_matrix(M)
elif method == 'modified':
if n_neighbors < n_components:
raise ValueError("modified LLE requires "
"n_neighbors >= n_components")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
#find the eigenvectors and eigenvalues of each local covariance
# matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
# where the columns are eigenvectors
V = np.zeros((N, n_neighbors, n_neighbors))
nev = min(d_in, n_neighbors)
evals = np.zeros([N, nev])
#choose the most efficient way to find the eigenvectors
use_svd = (n_neighbors > d_in)
if use_svd:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
V[i], evals[i], _ = svd(X_nbrs,
full_matrices=True)
evals **= 2
else:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
C_nbrs = np.dot(X_nbrs, X_nbrs.T)
evi, vi = eigh(C_nbrs)
evals[i] = evi[::-1]
V[i] = vi[:, ::-1]
#find regularized weights: this is like normal LLE.
# because we've already computed the SVD of each covariance matrix,
# it's faster to use this rather than np.linalg.solve
reg = 1E-3 * evals.sum(1)
tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
tmp[:, :nev] /= evals + reg[:, None]
tmp[:, nev:] /= reg[:, None]
w_reg = np.zeros((N, n_neighbors))
for i in range(N):
w_reg[i] = np.dot(V[i], tmp[i])
w_reg /= w_reg.sum(1)[:, None]
#calculate eta: the median of the ratio of small to large eigenvalues
# across the points. This is used to determine s_i, below
rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
eta = np.median(rho)
#find s_i, the size of the "almost null space" for each point:
# this is the size of the largest set of eigenvalues
# such that Sum[v; v in set]/Sum[v; v not in set] < eta
s_range = np.zeros(N, dtype=int)
evals_cumsum = np.cumsum(evals, 1)
eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
for i in range(N):
s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
s_range += n_neighbors - nev # number of zero eigenvalues
#Now calculate M.
# This is the [N x N] matrix whose null space is the desired embedding
M = np.zeros((N, N), dtype=np.float)
for i in range(N):
s_i = s_range[i]
#select bottom s_i eigenvectors and calculate alpha
Vi = V[i, :, n_neighbors - s_i:]
alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)
#compute Householder matrix which satisfies
# Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
# using prescription from paper
h = alpha_i * np.ones(s_i) - np.dot(Vi.T, np.ones(n_neighbors))
norm_h = np.linalg.norm(h)
if norm_h < modified_tol:
h *= 0
else:
h /= norm_h
#Householder matrix is
# >> Hi = np.identity(s_i) - 2*np.outer(h,h)
#Then the weight matrix is
# >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
#We do this much more efficiently:
Wi = (Vi - 2 * np.outer(np.dot(Vi, h), h)
+ (1 - alpha_i) * w_reg[i, :, None])
#Update M as follows:
# >> W_hat = np.zeros( (N,s_i) )
# >> W_hat[neighbors[i],:] = Wi
# >> W_hat[i] -= 1
# >> M += np.dot(W_hat,W_hat.T)
#We can do this much more efficiently:
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
Wi_sum1 = Wi.sum(1)
M[i, neighbors[i]] -= Wi_sum1
M[neighbors[i], i] -= Wi_sum1
M[i, i] += s_i
if M_sparse:
M = csr_matrix(M)
elif method == 'ltsa':
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
M = np.zeros((N, N))
use_svd = (n_neighbors > d_in)
for i in range(N):
Xi = X[neighbors[i]]
Xi -= Xi.mean(0)
# compute n_components largest eigenvalues of Xi * Xi^T
if use_svd:
v = svd(Xi, full_matrices=True)[0]
else:
Ci = np.dot(Xi, Xi.T)
v = eigh(Ci)[1][:, ::-1]
Gi = np.zeros((n_neighbors, n_components + 1))
Gi[:, 1:] = v[:, :n_components]
Gi[:, 0] = 1. / np.sqrt(n_neighbors)
GiGiT = np.dot(Gi, Gi.T)
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] -= GiGiT
M[neighbors[i], neighbors[i]] += 1
return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
tol=tol, max_iter=max_iter, random_state=random_state)
class LocallyLinearEmbedding(BaseEstimator, TransformerMixin):
"""Locally Linear Embedding
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
Not used if eigen_solver=='dense'.
method : string ('standard', 'hessian', 'modified' or 'ltsa')
standard : use the standard locally linear embedding algorithm. see
reference [1]
hessian : use the Hessian eigenmap method. This method requires
``n_neighbors > n_components * (1 + (n_components + 1) / 2)``
see reference [2]
modified : use the modified locally linear embedding algorithm.
see reference [3]
ltsa : use local tangent space alignment algorithm
see reference [4]
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if ``method == 'hessian'``
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if ``method == 'modified'``
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Attributes
----------
embedding_vectors_ : array-like, shape [n_components, n_samples]
Stores the embedding vectors
reconstruction_error_ : float
Reconstruction error associated with `embedding_vectors_`
nbrs_ : NearestNeighbors object
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
def __init__(self, n_neighbors=5, n_components=2, reg=1E-3,
eigen_solver='auto', tol=1E-6, max_iter=100,
method='standard', hessian_tol=1E-4, modified_tol=1E-12,
neighbors_algorithm='auto', random_state=None):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.reg = reg
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.method = method
self.hessian_tol = hessian_tol
self.modified_tol = modified_tol
self.random_state = random_state
self.neighbors_algorithm = neighbors_algorithm
def _fit_transform(self, X):
self.nbrs_ = NearestNeighbors(self.n_neighbors,
algorithm=self.neighbors_algorithm)
random_state = check_random_state(self.random_state)
X = check_array(X)
self.nbrs_.fit(X)
self.embedding_, self.reconstruction_error_ = \
locally_linear_embedding(
self.nbrs_, self.n_neighbors, self.n_components,
eigen_solver=self.eigen_solver, tol=self.tol,
max_iter=self.max_iter, method=self.method,
hessian_tol=self.hessian_tol, modified_tol=self.modified_tol,
random_state=random_state, reg=self.reg)
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Compute the embedding vectors for data X and transform X.
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""
Transform new points into embedding space.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
X_new : array, shape = [n_samples, n_components]
Notes
-----
Because of scaling performed by this method, it is discouraged to use
it together with methods that are not scale-invariant (like SVMs)
"""
check_is_fitted(self, "nbrs_")
X = check_array(X)
ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors,
return_distance=False)
weights = barycenter_weights(X, self.nbrs_._fit_X[ind],
reg=self.reg)
X_new = np.empty((X.shape[0], self.n_components))
for i in range(X.shape[0]):
X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
return X_new
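# A short illustrative sketch (an addition for this document, not part of
# scikit-learn itself): unroll a swiss roll with the estimator defined above.
if __name__ == '__main__':
    from sklearn.datasets import make_swiss_roll
    X, color = make_swiss_roll(n_samples=1000, random_state=0)
    lle = LocallyLinearEmbedding(n_neighbors=12, n_components=2,
                                 method='standard', random_state=0)
    X_embedded = lle.fit_transform(X)
    print("embedding shape: %s, reconstruction error: %g"
          % (X_embedded.shape, lle.reconstruction_error_))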
| bsd-3-clause |
wkerzendorf/chiantipy | chiantipy/chianti/gui_qt/gui.py | 1 | 4296 | '''PyQt4 widget selection dialogs'''
import sys, os
from PyQt4 import QtCore, QtGui
import chianti
from chianti.gui_qt.ui import *
def chpicker(dir, filter='*.*', label='ChiantiPy'):
'''Select a filename using a Qt gui dialog.'''
app=QtGui.QApplication(sys.argv)
a=QtGui.QFileDialog()
a.setDirectory(dir)
a.setFilter(filter)
# mylabel=QtCore.QString('some label')
# a.setLabelText(mylabel)
a.setWindowTitle(label)
a.setModal(True)
a.exec_()
qfilename=a.selectedFiles()
return str(qfilename[0])
#
#
class selectorDialog(QtGui.QDialog):
'''Make a single or multiple selection from a list of items.
expects the input of an array of items, will select one or more'''
def __init__(self, items, label=None , parent=None):
# if using the Qt4Agg backend for matplotlib, the following line needs to be commented out
# app=QtGui.QApplication(sys.argv)
QtGui.QDialog.__init__(self)
self.ui = Ui_selectorDialogForm()
self.ui.setupUi(self)
self.ui.listWidget.setSelectionMode(QtGui.QListWidget.MultiSelection)
if label == None:
self.setWindowTitle('ChiantiPy')
else:
self.setWindowTitle('ChiantiPy - '+label)
imagefile = os.path.join(chianti.__path__[0], "images/chianti2.png")
self.setWindowIcon(QtGui.QIcon(imagefile))
for anitem in items:
# print ' item = ', anitem, QtCore.QString(anitem)
self.ui.listWidget.addItem(str(anitem))
self.exec_()
def accept(self):
# print ' selector button pushed'
nitems = self.ui.listWidget.count()
# print ' nitems = ', nitems
self.selectedIndex=[]
self.selectedText=[]
for i in range(nitems):
anitem = self.ui.listWidget.item(i)
# print 'selected? = ', anitem.isSelected()
if anitem.isSelected():
# print ' item = ' , str(anitem.text())
self.selectedText.append(str(anitem.text()))
self.selectedIndex.append(i)
self.done(1)
def reject(self):
# print ' cancel button pushed'
self.selectedIndex = None
self.selectedText = None
self.done(1)
#
#from choice2DialogForm import *
#
class choice2Dialog(QtGui.QDialog):
'''Make a single or multiple selection from a list of items and another
single or multiple selection from the same list.
Useful for picking numerators and denominators.
expects the input of an array of items, will select one or more from both widgets.'''
def __init__(self, items, label=None , parent=None):
# if using the Qt4Agg backend for matplotlib, the following line needs to be commented out
# app=QtGui.QApplication(sys.argv)
QtGui.QDialog.__init__(self)
# app=QtGui.QApplication(sys.argv)
self.ui = Ui_choice2DialogForm()
self.ui.setupUi(self)
if label == None:
self.setWindowTitle('ChiantiPy')
else:
self.setWindowTitle('ChiantiPy - '+label)
self.setWindowIcon(QtGui.QIcon('images/chianti2.png'))
for anitem in items:
# print ' item = ', anitem, QtCore.QString(anitem)
self.ui.numListWidget.addItem(str(anitem))
self.ui.denListWidget.addItem(str(anitem))
self.exec_()
#
def accept(self):
nitems = self.ui.numListWidget.count()
self.numIndex=[]
self.numText=[]
for i in range(nitems):
anitem = self.ui.numListWidget.item(i)
# print 'selected? = ', anitem.isSelected()
if anitem.isSelected():
# print ' item = ' , str(anitem.text())
self.numText.append(str(anitem.text()))
self.numIndex.append(i)
self.denIndex=[]
self.denText=[]
for i in range(nitems):
anitem = self.ui.denListWidget.item(i)
# print 'selected? = ', anitem.isSelected()
if anitem.isSelected():
# print ' item = ' , str(anitem.text())
self.denText.append(str(anitem.text()))
self.denIndex.append(i)
self.done(1)
def reject(self):
print ' cancel button pushed'
self.done(1)
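#
# Illustrative usage sketch (not part of the module): run as a script to pick
# one or more items from a list via the Qt dialog. The item strings are
# placeholders; a QApplication must exist because the dialogs themselves only
# call exec_().
if __name__ == '__main__':
    app = QtGui.QApplication(sys.argv)
    dialog = selectorDialog(['item one', 'item two', 'item three'], label='demo')
    print ' selected indices = ', dialog.selectedIndex
    print ' selected text = ', dialog.selectedText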
| gpl-3.0 |
capoe/espressopp.soap | doc/ug/conf.py | 1 | 8126 | # -*- coding: utf-8 -*-
#
# ESPResSo++ documentation build configuration file, created by
# sphinx-quickstart on Sat Jan 23 13:11:32 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import datetime
# Gets version directly from the code
try:
import espressopp
ver = espressopp.Version()
ESPP_VERSION = '{}.{}.{}'.format(
ver.major, ver.minor, ver.patchlevel)
except ImportError:
ESPP_VERSION = '1.9.3'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest',
# 'sphinx.ext.intersphinx', 'sphinx.ext.coverage',
# 'sphinx.ext.pngmath', 'sphinx.ext.autosummary', 'matplotlib.sphinxext.mathmpl',
# 'matplotlib.sphinxext.only_directives', 'matplotlib.sphinxext.plot_directive']
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.append('../..')
sys.path.append(os.path.abspath('sphinxext'))
#sys.path.append(os.path.abspath('/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python/matplotlib/sphinxext'))
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.pngmath',
'matplotlib.sphinxext.only_directives',
'matplotlib.sphinxext.plot_directive',
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.autosummary',
'ipython_console_highlighting',
'sphinxtogithub'
]
# Not yet: numpydoc
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
#master_doc = 'index'
# General information about the project.
project = u'ESPResSo++'
copyright = u'2013-{}, Max Planck Institute for Polymer Research'.format(
datetime.date.today().year)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ESPP_VERSION
# The full version, including alpha/beta/rc tags.
release = ESPP_VERSION
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
#html_theme = 'basic'
#html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = '{} v{}'.format(project, version)
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = 'Logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_style = 'default.css'
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': ['globaltoc.html', 'custom_links_sidebar.html', 'searchbox.html'], }
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
# html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'ESPResSodoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
latex_paper_size = 'a4'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index_latex', 'ESPResSo++.tex', u'ESPResSo++ Documentation',
u'Torsten Stuehn', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = '_static/espp_logo.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| gpl-3.0 |
e-sensing/wtss.py | examples/time-series-plot3.py | 1 | 1448 | #
# Copyright (C) 2014 National Institute For Space Research (INPE) - Brazil.
#
# This file is part of Python Client API for Web Time Series Service.
#
# Web Time Series Service for Python is free software: you can
# redistribute it and/or modify it under the terms of the
# GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# Web Time Series Service for Python is distributed in the hope that
# it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Web Time Series Service for Python. See LICENSE. If not, write to
# e-sensing team at <[email protected]>.
#
import matplotlib.pyplot as pyplot
import matplotlib.dates as mdates
from wtss import wtss
# The WTSS service is at: http://www.dpi.inpe.br/tws
w = wtss("http://www.dpi.inpe.br/tws")
# retrieve the time series for location (-54, -12)
ts = w.time_series("mod13q1_512", "red", -12.0, -54.0, start_date="2001-01-01", end_date="2001-12-31")
fig, ax = pyplot.subplots()
ax.plot(ts.timeline, ts["red"], 'o-')
xfmt = mdates.DateFormatter('%d-%m-%Y')
ax.xaxis.set_major_formatter(xfmt)
fig.autofmt_xdate()
pyplot.show()
| lgpl-3.0 |
tectronics/dicom-sr-qi | unported scripts/simulate_data.py | 2 | 4847 | import os, sys
#allow imports of standard srqi modules
srqi_containing_dir = os.path.dirname(os.path.dirname(os.getcwd()))
sys.path.append(srqi_containing_dir)
from srqi.core import my_utils
import heapq
import numpy as np
import random
import csv
NUM_CPTS = 3
WINDOW_SIZE = 400
def get_improvement_pattern(most_rad1, least_rad1, common_cpts, simulate_procs):
"""
Parameters:
most_rad1 - string identifying the rad1 (operator) who uses the most
fluoro on average
least_rad1 - string identifying the rad1 (operator) who uses the least
fluoro on average
common_cpts - a dictionary of dictionaries.
common_cpts[cpt_string][rad1] -> list of Syngo objects
simulate_procs - a list of procedures which we will use as a basis
for the simulated procedures. we will just replace the fluoro times
of these procedures in order to generate the simulated procs
Returns:
None. Just modifies the procedures in simulate_procs
"""
total_procs = len(simulate_procs)
simulate_procs.sort(key = lambda p: p.get_start_date())
for i,proc in enumerate(simulate_procs):
proc.rad1 = "Simulant"
cpt = proc.get_cpts_as_string()
prob = float(i)/total_procs
rand = random.random()
# choose which pool of procedures to draw from according to the
# random number. over time the chances of drawing from the pool that
# uses less fluoro increases
if rand > prob:
if i % 100 ==0:
print ("Most", rand, prob)
proc.fluoro = random.choice(common_cpts[cpt][most_rad1]).fluoro
else:
if i % 100 == 0:
print ("Least", rand, prob)
proc.fluoro = random.choice(common_cpts[cpt][least_rad1]).fluoro
def simulate_from_real_data(simulate_procs, syngo_procs):
# get the most common cpt codes as common_cpts[cpt_string] -> list of Syngo objects
sprocs_by_cpt = my_utils.organize(syngo_procs, lambda p:p.get_cpts_as_string())
common_cpts = heapq.nlargest(NUM_CPTS,
sprocs_by_cpt.iteritems(),
key =lambda x:len(x[1]))
#most_common_cpt = common_cpts[0][0]
common_cpts = dict(common_cpts)
# now make common_cpts[cpt_string][rad1] -> list of Syngo objects
for k in common_cpts.keys():
common_cpts[k] = my_utils.organize(common_cpts[k], lambda p:p.rad1)
# find the physician who use the most and least fluoro on average in the
# most common procedure
least_rad1 = "PICUS, D."
#least_rad1 = min(common_cpts[most_common_cpt].keys(),
# key = lambda rad1: np.mean([p.fluoro for p in common_cpts[most_common_cpt][rad1]]))
# take the second most, since the most hasn't done any of some common procedures
#_, most_rad1 = heapq.nlargest(2,common_cpts[most_common_cpt].keys(),
# key = lambda rad1: np.mean([p.fluoro for p in common_cpts[most_common_cpt][rad1]]))
most_rad1 = "MANI, N."
simulate_procs = [p for p in simulate_procs if p.get_cpts_as_string() in common_cpts]
get_improvement_pattern(most_rad1, least_rad1, common_cpts, simulate_procs)
most_rad1_procs = [p for p in syngo_procs if p.get_cpts_as_string() in common_cpts and p.rad1 == most_rad1]
least_rad1_procs = [p for p in syngo_procs if p.get_cpts_as_string() in common_cpts and p.rad1 == least_rad1]
return simulate_procs, most_rad1_procs, least_rad1_procs
def main():
# get all the Syngo objects
procs, extra_procs = my_utils.get_procs_from_files(["C:\\Users\\mcstrother\\Documents\\Duncan Research\\srqi\\Data\\BJH\\NEW_____Combined months_IR_Syngo_KAR4_All-Exams.xls"])
for p in procs:
if p.has_syngo():
extra_procs.append(p.get_syngo())
syngo_procs = [p for p in extra_procs if not p.fluoro is None]
simulate_procs = [p for p in syngo_procs if p.rad1 == 'MANI, N.']
# manipulate the procedures
simulate_procs, most_rad1_procs, least_rad1_procs = simulate_from_real_data(simulate_procs,
syngo_procs)
# analyze using inquiry
from srqi.inquiries.operator_improvement import Operator_Improvement
import matplotlib.pyplot as plt
oi_cls = Operator_Improvement
oi_cls.MIN_REPS.set_value(100)
oi_cls.PROCS_PER_WINDOW.set_value(WINDOW_SIZE)
oi = oi_cls([], [], simulate_procs + syngo_procs)
# write tables
writer = csv.writer(open('sim_out_'+str(WINDOW_SIZE)+'.csv', 'wb'))
for t in oi.get_tables():
writer.writerows(t)
# plot
oi.get_figures()
#plt.show()
if __name__ == '__main__':
main()
| bsd-2-clause |
anurag313/scikit-learn | examples/cluster/plot_dict_face_patches.py | 337 | 2747 | """
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
images patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of the scikit-learn to process a very large
dataset by chunks. The way we proceed is that we load an image at a time
and extract randomly 50 patches from this image. Once we have accumulated
500 of these patches (using 10 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
partial-fit. This is because the number of patches that they represent
has become too low, and it is better to choose a random new
cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
###############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
index = 1
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
for img in faces.images:
data = extract_patches_2d(img, patch_size, max_patches=50,
random_state=rng)
data = np.reshape(data, (len(data), -1))
buffer.append(data)
index += 1
if index % 10 == 0:
data = np.concatenate(buffer, axis=0)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
kmeans.partial_fit(data)
buffer = []
if index % 100 == 0:
print('Partial fit of %4i out of %i'
% (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
plt.subplot(9, 9, i + 1)
plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
(dt, 8 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
Arcanemagus/plexpy | lib/tqdm/__init__.py | 8 | 1268 | from ._tqdm import tqdm
from ._tqdm import trange
from ._tqdm_gui import tqdm_gui
from ._tqdm_gui import tgrange
from ._tqdm_pandas import tqdm_pandas
from ._main import main
from ._monitor import TMonitor, TqdmSynchronisationWarning
from ._version import __version__ # NOQA
from ._tqdm import TqdmTypeError, TqdmKeyError, TqdmWarning, \
TqdmDeprecationWarning, TqdmExperimentalWarning, \
TqdmMonitorWarning
__all__ = ['tqdm', 'tqdm_gui', 'trange', 'tgrange', 'tqdm_pandas',
'tqdm_notebook', 'tnrange', 'main', 'TMonitor',
'TqdmTypeError', 'TqdmKeyError',
'TqdmWarning', 'TqdmDeprecationWarning',
'TqdmExperimentalWarning',
'TqdmMonitorWarning', 'TqdmSynchronisationWarning',
'__version__']
def tqdm_notebook(*args, **kwargs): # pragma: no cover
"""See tqdm._tqdm_notebook.tqdm_notebook for full documentation"""
from ._tqdm_notebook import tqdm_notebook as _tqdm_notebook
return _tqdm_notebook(*args, **kwargs)
def tnrange(*args, **kwargs): # pragma: no cover
"""
A shortcut for tqdm_notebook(xrange(*args), **kwargs).
On Python3+ range is used instead of xrange.
"""
from ._tqdm_notebook import tnrange as _tnrange
return _tnrange(*args, **kwargs)
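# A minimal usage sketch (illustrative only; not part of tqdm's public API):
def _demo(): # pragma: no cover
    """Wrap any iterable with tqdm() to get a progress bar."""
    from time import sleep
    for _ in tqdm(range(100), desc='demo', unit='items'):
        sleep(0.01)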
| gpl-3.0 |
pyspeckit/pyspeckit | pyspeckit/spectrum/interactive.py | 4 | 23628 | """
==================
Window Interaction
==================
A general module for selecting regions and inputting guesses via the
interactive window.
"""
from __future__ import print_function
import numpy
from . import units
from astropy import log
from six.moves import xrange
from six import iteritems
class Interactive(object):
def __init__(self, Spectrum, guesses=None,
interactive_help_message="Replace this message"):
"""
Declare interactive variables.
Must have a parent Spectrum class
**Must declare button2action and button3action**
"""
self.Spectrum = Spectrum
self.interactive_help_message = interactive_help_message
# includemask should not be a masked array even if data is
# masked arrays are apparently full of bugs...
self.includemask = numpy.ones(self.Spectrum.data.size, dtype='bool')
self.xclicks = []
self.yclicks = []
self.event_history = []
self.guesses = guesses
# Click counters
self.nclicks_b1 = 0 # button 1
self.nclicks_b2 = 0 # button 2
# Temporary storage (for left, right clicking)
self._xclick1 = None
self._xclick2 = None
# Set min/max of range
self.xmin = 0
self.xmax = self.Spectrum.xarr.shape[0]
# Init button 1/2 plots
self.button1plot = []
self.button2plot = []
self.use_window_limits = None
# initialization: Glue can activate fitters without start_interactive,
# so these need to be declared
self.click = None
self.keyclick = None
self._debug = False
@property
def xmin(self):
return self._xmin
@xmin.setter
def xmin(self, value):
self._xmin = int(value)
@property
def xmax(self):
return self._xmax
@xmax.setter
def xmax(self, value):
self._xmax = int(value)
def event_manager(self, event, force_over_toolbar=False, debug=False):
"""
Decide what to do given input (click, keypress, etc.)
"""
if hasattr(self.Spectrum.plotter.figure.canvas.manager, 'toolbar'):
toolbar = self.Spectrum.plotter.figure.canvas.manager.toolbar
toolmode = toolbar.mode
else:
# If interactivity isn't possible, we don't really care what tool is 'active'
toolmode = ''
self.event_history.append(event)
#DEBUG print("toolmode = {0} force_over_toolbar={1}".format(toolmode, force_over_toolbar))
if (toolmode == '' or force_over_toolbar) and self.Spectrum.plotter.axis in event.canvas.figure.axes:
if hasattr(event,'button'):
button = event.button
elif hasattr(event,'key'):
button = event.key
#DEBUG print("Event: {0}".format(event))
if event.xdata is None or event.ydata is None:
return
if debug or self._debug:
log.debug("button: {0} x,y: {1},{2} "
" nclicks 1: {3:f} 2: {4:f}".format
(button, event.xdata, event.ydata, self.nclicks_b1,
self.nclicks_b2,))
if button in ('p','P','1',1,'i','a'): # p for... parea? a for area. i for include
# button one is always region selection
#DEBUG print("Button is {0}".format(button))
self._selectregion_interactive(event,debug=debug)
elif button in ('c','C'):
self.clear_highlights()
self.clear_all_connections()
self.Spectrum.plotter()
elif button in ('e','x','E','X'): # e for exclude, x for x-clude
# exclude/delete/remove
self._selectregion_interactive(event, mark_include=False, debug=debug)
elif button in ('m','M','2',2): # m for mark
if debug or self._debug:
log.debug("Button 2 action")
self.button2action(event,debug=debug)
elif button in ('d','D','3',3): # d for done
if debug or self._debug:
log.debug("Button 3 action")
self.button3action(event,debug=debug)
elif button in ('?'):
# print statement: we really want this to go to the terminal
print(self.interactive_help_message)
elif hasattr(self,'Registry') and button in self.Registry.fitkeys:
fittername = self.Registry.fitkeys[button]
if fittername in self.Registry.multifitters:
self.fitter = self.Registry.multifitters[fittername]
self.fittype = fittername
print("Selected multi-fitter %s" % fittername)
else:
print("ERROR: Did not find fitter %s" % fittername)
if self.Spectrum.plotter.autorefresh: self.Spectrum.plotter.refresh()
elif debug or self._debug:
print("Button press not acknowledged. event={0}, toolmode={1}".format(event,
toolmode))
def _selectregion_interactive(self, event, mark_include=True, debug=False, **kwargs):
"""
select regions for baseline fitting
"""
xpix = self.Spectrum.xarr.x_to_pix(event.xdata)
if self.xclicks == []:
self._firstclick_selection(not mark_include)
if self.nclicks_b1 == 0:
self.nclicks_b1 = 1
self._xclick1 = xpix
self.xclicks.append(xpix)
if debug or self._debug:
print("Click 1: clickx=%i xmin=%i, xmax=%i" % (xpix,self.xmin,self.xmax))
elif self.nclicks_b1 == 1:
self._xclick2 = xpix
self.nclicks_b1 = 0
self.xclicks.append(xpix)
# force click1 to be left (swap)
if self._xclick1 > self._xclick2:
self._xclick1,self._xclick2 = self._xclick2,self._xclick1
# ensure that the fit/plot range is at least as large as the click range
if self.xmin > self._xclick1: self.xmin = self._xclick1
if self.xmax < self._xclick2: self.xmax = self._xclick2
# change the includemask
self.includemask[self._xclick1:self._xclick2] = mark_include
if mark_include:
self.highlight_fitregion(**kwargs)
else: # mark include=False -> mark_exclude=True
for highlight_line in self.button1plot:
hlx,hly = highlight_line.get_data()
hide = ((hlx > self.Spectrum.xarr[self._xclick1]) *
(hlx < self.Spectrum.xarr[self._xclick2]))
hly[hide] = numpy.nan
highlight_line.set_ydata(hly)
self.Spectrum.plotter.refresh()
if debug or self._debug:
print("Click 2: clickx=%i xmin=%i, xmax=%i" % (xpix,self.xmin,self.xmax))
self._update_xminmax()
def highlight_fitregion(self, drawstyle='steps-mid', color=(0,0.8,0,0.5),
linewidth=2, alpha=0.5, clear_highlights=True,
**kwargs):
"""
Re-highlight the fitted region
kwargs are passed to `matplotlib.plot`
"""
if clear_highlights:
self.clear_highlights()
bad = self.Spectrum.data*0
bad[~self.includemask] = numpy.nan
self.button1plot += self.Spectrum.plotter.axis.plot(
self.Spectrum.xarr,
# +bad adds nans to points that are not to be included
self.Spectrum.data+self.Spectrum.plotter.offset+bad,
drawstyle=drawstyle, color=color,
linewidth=linewidth,
alpha=alpha,
**kwargs)
self.Spectrum.plotter.refresh()
def _firstclick_selection(self, include_all=False):
"""
Initialize the include/exclude mask
"""
self.Spectrum.plotter.axis.set_autoscale_on(False)
if include_all:
# default to including everything
self.includemask = numpy.array(self.Spectrum.data, dtype='bool') + True
else:
# default to including nothing
self.includemask = numpy.array(self.Spectrum.data, dtype='bool') * False
def guesspeakwidth(self,event,debug=False,nwidths=1,**kwargs):
"""
Interactively guess the peak height and width from user input
Width is assumed to be half-width-half-max
"""
modnum = 1+nwidths
if debug or self._debug: print("nclicks: %i nwidths: %i modnum: %i" % (self.nclicks_b2,nwidths,modnum))
if self.nclicks_b2 == 0:
self.firstclick_guess()
if self.nclicks_b2 % modnum == 0:
# even clicks are peaks
if self.Spectrum.baseline.subtracted:
peakguess = event.ydata
else:
peakguess = event.ydata - self.Spectrum.baseline.basespec[self.Spectrum.xarr.x_to_pix(event.xdata)]
self.guesses += [peakguess,event.xdata] + [1]*nwidths
self.npeaks += 1
self.nclicks_b2 += 1
if debug or self._debug:
print("Peak %i click %i at x,y %g,%g" % (self.npeaks,self.nclicks_b2,event.xdata,event.ydata))
self.button2plot += [self.Spectrum.plotter.axis.scatter(event.xdata,event.ydata,marker='x',c='r')]
#self.Spectrum.plotter.refresh() #plot(**self.Spectrum.plotter.plotkwargs)
elif self.nclicks_b2 % modnum >= 1:
# odd clicks are widths
whichwidth = self.nclicks_b2 % modnum
widthguess = (abs(event.xdata-self.guesses[-1-nwidths]) /
numpy.sqrt(2*numpy.log(2)))
if numpy.isnan(widthguess) or widthguess <= 0:
newwidthguess = numpy.abs(self.Spectrum.xarr.diff()).min().value
if newwidthguess <= 0:
raise ValueError("A width guess could not be determined.")
log.exception("Error: width guess was {0}. It is being forced to {1}."
.format(widthguess, newwidthguess))
widthguess = newwidthguess
self.guesses[-whichwidth] = widthguess
if debug or self._debug:
print("Width %i whichwidth %i click %i at x,y %g,%g width: %g" % (self.npeaks,whichwidth,self.nclicks_b2,event.xdata,event.ydata,self.guesses[-whichwidth]))
self.button2plot += self.Spectrum.plotter.axis.plot([event.xdata,
2*self.guesses[-1-nwidths]-event.xdata],
[event.ydata]*2,
color='r')
#self.Spectrum.plotter.refresh() #plot(**self.Spectrum.plotter.plotkwargs)
if self.nclicks_b2 / (1+nwidths) > self.npeaks:
print("There have been %i middle-clicks but there are only %i features" % (self.nclicks_b2,self.npeaks))
self.npeaks += 1
self.nclicks_b2 += 1
else:
raise ValueError("Bug in guesspeakwidth: somehow, the number of clicks doesn't make sense.")
if debug or self._debug:
print("Guesses: ",self.guesses)
def firstclick_guess(self):
"""
Initialize self.guesses
"""
self.Spectrum.plotter.axis.set_autoscale_on(False)
if self.guesses is None:
self.guesses = []
elif len(self.guesses) > 0:
for ii in xrange(len(self.guesses)):
self.guesses.pop()
def clear_all_connections(self, debug=False):
"""
Prevent overlapping interactive sessions
"""
# this is really ugly, but needs to be done in order to prevent multiple overlapping calls...
cids_to_remove = []
if not hasattr(self.Spectrum.plotter.figure,'canvas'):
# just quit out; saves a tab...
if debug or self._debug:
print("Didn't find a canvas, quitting.")
# just in case? This should be *very* unreachable...
self.Spectrum.plotter._active_gui = None
return
for eventtype in ('button_press_event','key_press_event'):
for key,val in iteritems(self.Spectrum.plotter.figure.canvas.callbacks.callbacks[eventtype]):
if hasattr(val, 'func') and "event_manager" in val.func.__name__:
cids_to_remove.append(key)
if debug or self._debug: print("Removing CID #%i with attached function %s" % (key,val.func.__name__))
for cid in cids_to_remove:
self.Spectrum.plotter.figure.canvas.mpl_disconnect(cid)
self.Spectrum.plotter._reconnect_matplotlib_keys()
# Click counters - should always be reset!
self.nclicks_b1 = 0 # button 1
self.nclicks_b2 = 0 # button 2
self.Spectrum.plotter._active_gui = None
def start_interactive(self, debug=False, LoudDebug=False,
reset_selection=False, print_message=True,
clear_all_connections=True, **kwargs):
"""
Initialize the interactive session
Parameters
----------
print_message : bool
Print the interactive help message?
clear_all_connections : bool
Clear all matplotlib event connections?
(calls :func:`self.clear_all_connections`)
reset_selection : bool
Reset the include mask to be empty, so that you're setting up a
fresh region.
"""
if reset_selection:
self.includemask[:] = False
if print_message:
print(self.interactive_help_message)
if clear_all_connections:
self.clear_all_connections()
self.Spectrum.plotter._disconnect_matplotlib_keys()
global_kwargs = kwargs
def key_manager(x, *args, **kwargs):
kwargs.update(global_kwargs)
return self.event_manager(x, *args, debug=debug, **kwargs)
def click_manager(x, *args, **kwargs):
kwargs.update(global_kwargs)
return self.event_manager(x, *args, debug=debug, **kwargs)
key_manager.__name__ = "event_manager"
click_manager.__name__ = "event_manager"
self.click = self.Spectrum.plotter.axis.figure.canvas.mpl_connect('button_press_event',click_manager)
self.keyclick = self.Spectrum.plotter.axis.figure.canvas.mpl_connect('key_press_event',key_manager)
self._callbacks = self.Spectrum.plotter.figure.canvas.callbacks.callbacks
assert self._check_connections()
self.Spectrum.plotter._active_gui = self
def _check_connections(self, verbose=True):
"""
Make sure the interactive session accepts user input
"""
# check for connections
OKclick = False
OKkey = False
for cb in self._callbacks.values():
if self.click in cb.keys():
OKclick = True
if self.keyclick in cb.keys():
OKkey = True
if self.keyclick == self.click:
OKkey = False
if verbose and not OKkey:
print("Interactive session failed to connect keyboard. Key presses will not be accepted.")
if verbose and not OKclick:
print("Interactive session failed to connect mouse. Mouse clicks will not be accepted.")
return OKkey and OKclick
def clear_highlights(self):
"""
Hide and remove "highlight" colors from the plot indicating the
selected region
"""
for p in self.button1plot:
p.set_visible(False)
if self.Spectrum.plotter.axis and p in self.Spectrum.plotter.axis.lines:
self.Spectrum.plotter.axis.lines.remove(p)
self.button1plot=[] # I should be able to just remove from the list... but it breaks the loop...
self.Spectrum.plotter.refresh()
def selectregion(self, xmin=None, xmax=None, xtype='wcs', highlight=False,
fit_plotted_area=True, reset=False, verbose=False,
debug=False, use_window_limits=None, exclude=None,
**kwargs):
"""
Pick a fitting region in either WCS units or pixel units
Parameters
----------
*xmin / xmax* : [ float ]
The min/max X values to use in X-axis units (or pixel units if xtype is set).
            TAKES PRECEDENCE OVER ALL OTHER BOOLEAN OPTIONS
*xtype* : [ string ]
A string specifying the xtype that xmin/xmax are specified in. It can be either
'wcs' or any valid xtype from :class:`pyspeckit.spectrum.units`
*reset* : [ bool ]
Reset the selected region to the full spectrum? Only takes effect
if xmin and xmax are not (both) specified.
            TAKES PRECEDENCE OVER ALL SUBSEQUENT BOOLEAN OPTIONS
*fit_plotted_area* : [ bool ]
Use the plot limits *as specified in :class:`pyspeckit.spectrum.plotters`*?
Note that this is not necessarily the same as the window plot limits!
*use_window_limits* : [ bool ]
Use the plot limits *as displayed*. Defaults to self.use_window_limits
(:attr:`pyspeckit.spectrum.interactive.use_window_limits`).
Overwrites xmin,xmax set by plotter
exclude: {list of length 2n,'interactive', None}
* interactive: start an interactive session to select the
include/exclude regions
* list: parsed as a series of (startpoint, endpoint) in the
spectrum's X-axis units. Will exclude the regions between
startpoint and endpoint
* None: No exclusion
"""
if debug or self._debug:
log.info("".join(map(str, ("selectregion kwargs: ",kwargs," use_window_limits: ",use_window_limits," reset: ",reset," xmin: ",xmin, " xmax: ",xmax))))
if reset:
if verbose or debug or self._debug:
print("Resetting xmin/xmax to full limits of data")
self.xmin = 0
# End-inclusive!
self.xmax = self.Spectrum.data.shape[0]
self.includemask[self.xmin:self.xmax] = True
#raise ValueError("Need to input xmin and xmax, or have them set by plotter, for selectregion.")
if xmin is not None and xmax is not None:
if verbose or debug or self._debug:
log.info("Setting xmin,xmax from keywords %g,%g" % (xmin,xmax))
if xtype.lower() in ('wcs',) or xtype in units.xtype_dict:
self.xmin = numpy.floor(self.Spectrum.xarr.x_to_pix(xmin))
# End-inclusive!
self.xmax = numpy.ceil(self.Spectrum.xarr.x_to_pix(xmax))+1
else:
self.xmin = xmin
# NOT end-inclusive! This is PYTHON indexing
self.xmax = xmax
self.includemask[self.xmin:self.xmax] = True
elif (self.Spectrum.plotter.xmin is not None and
self.Spectrum.plotter.xmax is not None and fit_plotted_area):
if use_window_limits or (use_window_limits is None and self.use_window_limits):
if debug or self._debug:
print("Resetting plotter xmin,xmax and ymin,ymax to the currently visible region")
self.Spectrum.plotter.set_limits_from_visible_window(debug=debug)
self.xmin = numpy.floor(self.Spectrum.xarr.x_to_pix(self.Spectrum.plotter.xmin))
self.xmax = numpy.ceil(self.Spectrum.xarr.x_to_pix(self.Spectrum.plotter.xmax))
if self.xmin>self.xmax:
self.xmin,self.xmax = self.xmax,self.xmin
# End-inclusive! Note that this must be done after the min/max swap!
# this feels sketchy to me, but if you don't do this the plot will not be edge-inclusive
# that means you could do this reset operation N times to continuously shrink the plot
self.xmax += 1
if debug or self._debug:
log.debug("Including all plotted area (as defined by "
"[plotter.xmin={0}, plotter.xmax={1}]) for "
"fit".format(self.Spectrum.plotter.xmin,
self.Spectrum.plotter.xmax))
log.debug("Including self.xmin:self.xmax = {0}:{1}"
" (and excluding the rest)".format(self.xmin,
self.xmax))
self.includemask[self.xmin:self.xmax] = True
else:
if verbose:
log.info("Left region selection unchanged."
" xminpix, xmaxpix: %i,%i" % (self.xmin,self.xmax))
if self.xmin == self.xmax:
# Reset if there is no fitting region
self.xmin = 0
# End-inclusive
self.xmax = self.Spectrum.data.shape[0]
log.debug("Reset to full range because the endpoints were equal")
elif self.xmin>self.xmax:
# Swap endpoints if the axis has a negative delta-X
self.xmin,self.xmax = self.xmax,self.xmin
log.debug("Swapped endpoints because the left end was greater than the right")
self.includemask[:self.xmin] = False
self.includemask[self.xmax:] = False
# Exclude keyword-specified excludes. Assumes exclusion in current X array units
log.debug("Exclude: {0}".format(exclude))
if (isinstance(exclude, str) and (exclude == 'interactive')):
self.start_interactive()
elif exclude is not None and len(exclude) % 2 == 0:
for x1,x2 in zip(exclude[::2],exclude[1::2]):
if xtype.lower() in ('wcs',) or xtype in units.xtype_dict:
x1 = self.Spectrum.xarr.x_to_pix(x1)
# WCS units should be end-inclusive
x2 = self.Spectrum.xarr.x_to_pix(x2)+1
# correct for order if WCS units are used
# if pixel units are being used, we assume the user has
# done so intentionally
# TODO: if xarr, data go opposite directions, this swap
# doesn't work.
if x1 > x2:
x1,x2 = x2,x1
log.debug("Exclusion pixels: {0} to {1}".format(x1,x2))
self.includemask[x1:x2] = False
elif exclude is not None:
log.error("An 'exclude' keyword was specified with an odd number "
"of parameters, which is not permitted.")
if highlight:
self.highlight_fitregion()
self._update_xminmax()
if debug or self._debug:
log.debug("At the end of selectregion, xmin, xmax = {0},{1}"
" and includemask.sum() == {2}"
.format(self.xmin, self.xmax, self.includemask.sum()))
def _update_xminmax(self):
try:
whinclude = numpy.where(self.includemask)
self.xmin = whinclude[0][0]
# MUST be end-inclusive!
self.xmax = whinclude[0][-1]+1
except IndexError:
pass
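if __name__ == '__main__':
    # Minimal usage sketch of the region-selection API defined above, assuming
    # pyspeckit is installed and that 'spectrum.fits' (a hypothetical filename)
    # points at a real spectrum.  selectregion()/start_interactive() are the
    # methods implemented in this module, reached through the interactive
    # mixin used by Spectrum.specfit.
    import matplotlib.pyplot as plt
    import pyspeckit

    sp = pyspeckit.Spectrum('spectrum.fits')  # hypothetical input file
    sp.plotter()
    # Non-interactive selection: restrict the fit to 4850-4900 in the
    # spectrum's x-axis units and highlight the chosen pixels.
    sp.specfit.selectregion(xmin=4850, xmax=4900, highlight=True)
    # Interactive selection: connect the mouse/keyboard event managers.
    sp.specfit.start_interactive()
    plt.show()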
| mit |
dhruv13J/scikit-learn | sklearn/linear_model/tests/test_randomized_l1.py | 214 | 4690 | # Authors: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.linear_model.randomized_l1 import (lasso_stability_path,
RandomizedLasso,
RandomizedLogisticRegression)
from sklearn.datasets import load_diabetes, load_iris
from sklearn.feature_selection import f_regression, f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model.base import center_data
diabetes = load_diabetes()
X = diabetes.data
y = diabetes.target
X = StandardScaler().fit_transform(X)
X = X[:, [2, 3, 6, 7, 8]]
# test that the feature score of the best features
F, _ = f_regression(X, y)
def test_lasso_stability_path():
# Check lasso stability path
# Load diabetes data and add noisy features
scaling = 0.3
coef_grid, scores_path = lasso_stability_path(X, y, scaling=scaling,
random_state=42,
n_resampling=30)
assert_array_equal(np.argsort(F)[-3:],
np.argsort(np.sum(scores_path, axis=1))[-3:])
def test_randomized_lasso():
# Check randomized lasso
scaling = 0.3
selection_threshold = 0.5
# or with 1 alpha
clf = RandomizedLasso(verbose=False, alpha=1, random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
# or with many alphas
clf = RandomizedLasso(verbose=False, alpha=[1, 0.8], random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_equal(clf.all_scores_.shape, (X.shape[1], 2))
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
X_r = clf.transform(X)
X_full = clf.inverse_transform(X_r)
assert_equal(X_r.shape[1], np.sum(feature_scores > selection_threshold))
assert_equal(X_full.shape, X.shape)
clf = RandomizedLasso(verbose=False, alpha='aic', random_state=42,
scaling=scaling)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(feature_scores, X.shape[1] * [1.])
clf = RandomizedLasso(verbose=False, scaling=-0.1)
assert_raises(ValueError, clf.fit, X, y)
clf = RandomizedLasso(verbose=False, scaling=1.1)
assert_raises(ValueError, clf.fit, X, y)
def test_randomized_logistic():
# Check randomized sparse logistic regression
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
X_orig = X.copy()
feature_scores = clf.fit(X, y).scores_
assert_array_equal(X, X_orig) # fit does not modify X
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
clf = RandomizedLogisticRegression(verbose=False, C=[1., 0.5],
random_state=42, scaling=scaling,
n_resampling=50, tol=1e-3)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
def test_randomized_logistic_sparse():
# Check randomized sparse logistic regression on sparse data
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
# center here because sparse matrices are usually not centered
X, y, _, _, _ = center_data(X, y, True, True)
X_sp = sparse.csr_matrix(X)
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores = clf.fit(X, y).scores_
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores_sp = clf.fit(X_sp, y).scores_
assert_array_equal(feature_scores, feature_scores_sp)
| bsd-3-clause |
LeSam/avoplot | src/avoplot/gui/widgets.py | 3 | 9629 | #Copyright (C) Nial Peters 2013
#
#This file is part of AvoPlot.
#
#AvoPlot is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#AvoPlot is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with AvoPlot. If not, see <http://www.gnu.org/licenses/>.
"""
The widgets module contains a set of convenience widgets for building
control panels for elements.
"""
import wx
import wx.combo
from avoplot.gui import text
class SettingBase(wx.BoxSizer):
"""
Base class for settings controls.
"""
def __init__(self, parent, label):
wx.BoxSizer.__init__(self, wx.HORIZONTAL)
if label:
text = wx.StaticText(parent, -1, label)
self.Add(text, 0, wx.ALIGN_CENTRE_VERTICAL|wx.ALIGN_LEFT)
class ColourSetting(SettingBase):
"""
A text label next to a wx colour picker control.
"""
def __init__(self, parent, label, default_colour, callback):
SettingBase.__init__(self, parent, label)
cp = wx.ColourPickerCtrl(parent, -1, default_colour)
self.Add(cp, 0 , wx.ALIGN_CENTRE_VERTICAL|wx.ALIGN_LEFT)
wx.EVT_COLOURPICKER_CHANGED(parent,cp.GetId(), callback)
class EditableCheckBox(wx.BoxSizer):
def __init__(self, parent, label, edit_label='edit'):
"""
A wx.Checkbox which displays a hyperlink next to it when checked. This
should be subclassed and the on_checkbox and on_edit_link methods should
be overridden to handle their respective events.
"""
self.parent = parent
wx.BoxSizer.__init__(self, wx.HORIZONTAL)
self.checkbox = wx.CheckBox(parent, -1, label+" ")
self.edit_link = wx.HyperlinkCtrl(parent, wx.ID_ANY, edit_label, "",
style=wx.HL_ALIGN_CENTRE|wx.BORDER_NONE)
self.edit_link_parentheses = [wx.StaticText(parent, wx.ID_ANY, "("),
wx.StaticText(parent, wx.ID_ANY, ")")]
f = self.edit_link.GetFont()
f.SetUnderlined(False)
self.edit_link.SetFont(f)
self.edit_link.SetVisitedColour(self.edit_link.GetNormalColour())
#gridlines editing
self.edit_link.Show(False)
self.edit_link_parentheses[0].Show(False)
self.edit_link_parentheses[1].Show(False)
wx.EVT_HYPERLINK(parent, self.edit_link.GetId(), self.on_edit_link)
self.Add(self.checkbox,0,wx.ALIGN_LEFT|wx.ALIGN_CENTRE_VERTICAL)
self.Add(self.edit_link_parentheses[0],0,wx.ALIGN_LEFT|wx.ALIGN_CENTRE_VERTICAL|wx.RESERVE_SPACE_EVEN_IF_HIDDEN)
self.Add(self.edit_link,0,wx.ALIGN_LEFT|wx.ALIGN_CENTRE_VERTICAL|wx.RESERVE_SPACE_EVEN_IF_HIDDEN)
self.Add(self.edit_link_parentheses[1],0,wx.ALIGN_LEFT|wx.ALIGN_CENTRE_VERTICAL|wx.RESERVE_SPACE_EVEN_IF_HIDDEN)
wx.EVT_CHECKBOX(parent, self.checkbox.GetId(), self._on_checkbox)
def set_checked(self, value):
"""
Sets the checkbox to be either checked (value = True), or unchecked
(value = False).
"""
self.checkbox.SetValue(value)
        self.edit_link.Show(value)
        self.edit_link_parentheses[0].Show(value)
        self.edit_link_parentheses[1].Show(value)
def _on_checkbox(self, evnt):
"""
Event handler for the gridlines checkbox.
"""
self.edit_link.Show(evnt.IsChecked())
self.edit_link_parentheses[0].Show(evnt.IsChecked())
self.edit_link_parentheses[1].Show(evnt.IsChecked())
self.on_checkbox(evnt)
def on_checkbox(self, evnt):
"""
Event handler for checkbox events. This should be overridden in the
subclass.
"""
pass
def on_edit_link(self, evnt):
"""
Event handler for clicks on the hyperlink. This should be overridden in
the subclass.
"""
pass
class TextSetting(SettingBase, text.AnimatedText):
"""
A text label next to a wx text entry control. The matplotlib Text object
associated with the control is automatically animated to provide fast
redraws when the text is changed.
A font properties button is displayed next to the text control (if the text
entry box is not empty) which opens a font properties dialog.
"""
def __init__(self, parent, label, text_obj):
SettingBase.__init__(self, parent, label)
text.AnimatedText.__init__(self, text_obj)
self.text_obj = text_obj
self.parent = parent
self.mpl_figure = text_obj.get_figure()
self.__bkgd_region = None
self.tc = wx.TextCtrl(parent, -1, value=text_obj.get_text(),
style=wx.TE_PROCESS_ENTER)
wx.EVT_TEXT(parent, self.tc.GetId(), self.on_text_change)
self.Add(self.tc, 1, wx.ALIGN_CENTRE_VERTICAL)
prop_bmp = wx.ArtProvider.GetBitmap("avoplot_text_prop",wx.ART_BUTTON)
self.prop_button = wx.BitmapButton(parent, wx.ID_ANY, prop_bmp)
self.prop_button.SetToolTip(wx.ToolTip("Edit font properties"))
self.Add(self.prop_button, 0, wx.ALIGN_CENTER_VERTICAL |
wx.RESERVE_SPACE_EVEN_IF_HIDDEN)
wx.EVT_BUTTON(parent, self.prop_button.GetId(), self.on_text_prop_button)
wx.EVT_SET_FOCUS(self.tc,self.on_focus)
wx.EVT_KILL_FOCUS(self.tc,self.on_unfocus)
#hide the button if it is an empty string
if not self.text_obj.get_text():
self.prop_button.Show(False)
def on_focus(self, evnt):
"""
Event handler for when the control gets focus. Starts the text animation
(i.e. caches the background).
"""
self.start_text_animation()
evnt.Skip()
def on_unfocus(self, evnt):
"""
Event handler for when the control loses focus. Stops the text animation
"""
self.stop_text_animation()
evnt.Skip()
def on_text_change(self, evnt):
"""
Event handler for text change events. Updates the text on the figure and
redraws it.
"""
self.text_obj.set_text(evnt.GetString())
self.redraw_text()
if evnt.GetString():
if not self.prop_button.IsShown():
self.prop_button.Show(True)
#need to explicitly redraw the window area containing the button
#otherwise it remains hidden (only a problem on Windows)
self.parent.Refresh(rect=self.prop_button.GetRect())
else:
self.prop_button.Show(False)
def on_text_prop_button(self, evnt):
"""
Event handler for clicks on the "font properties" button. Opens a font
properties dialog.
"""
text.TextPropertiesEditor(self.parent, self.text_obj)
class ChoiceSetting(SettingBase):
"""
A text label next to a wx choice control.
"""
def __init__(self, parent, label, current_selection, selections, callback):
SettingBase.__init__(self, parent, label)
lb = wx.Choice(parent, -1, choices=selections)
self.Add(lb, 0, wx.ALIGN_CENTRE_VERTICAL|wx.ALIGN_LEFT)
lb.SetStringSelection(current_selection)
wx.EVT_CHOICE(parent, lb.GetId(), callback)
class BitmapChoice(wx.combo.OwnerDrawnComboBox):
def __init__(self, parent, id=-1, value=wx.EmptyString,
pos=wx.DefaultPosition, size=wx.DefaultSize,
choices=[], style=0, validator=wx.DefaultValidator, name=wx.ComboBoxNameStr, bitmaps=[]):
"""
BitmapChoice widget modified from the example posted by Torsten
in this thread: http://markmail.org/thread/rb3c7377nuvnjfph
"""
wx.combo.OwnerDrawnComboBox.__init__(self, parent, id, value,
pos, size, choices, style, validator, name )
self.bitmaps = bitmaps[:]
# Overridden from OwnerDrawnComboBox, called to draw each
# item in the list
def OnDrawItem(self, dc, rect, item, flags):
if item == wx.NOT_FOUND:
# painting the control, but there is no valid item selected yet
return
r = wx.Rect(*rect) # make a copy
#r.Deflate(3, 5)
if flags & wx.combo.ODCB_PAINTING_CONTROL:
# for painting the control itself
dc.DrawBitmap( self.bitmaps[item], r.x+12, r.y+2, True)
else:
# for painting the items in the popup
dc.DrawBitmap( self.bitmaps[item], r.x+12, r.y+2, True )
# Overridden from OwnerDrawnComboBox, should return the height
# needed to display an item in the popup, or -1 for default
def OnMeasureItem(self, item):
# Simply demonstrate the ability to have variable-height items
return 20
# Overridden from OwnerDrawnComboBox. Callback for item width, or
# -1 for default/undetermined
def OnMeasureItemWidth(self, item):
        return 50  # default - will be measured from text width
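if __name__ == '__main__':
    # Minimal control-panel sketch built from the setting widgets above,
    # assuming classic wxPython (2.x/3.0), which is what the wx.combo module
    # and the wx.EVT_* call style used in this file target.  The labels and
    # choices are illustrative only.
    def _on_colour(evnt):
        print("colour changed")

    def _on_choice(evnt):
        print("choice changed")

    app = wx.App(False)
    frame = wx.Frame(None, title="AvoPlot widgets demo")
    panel = wx.Panel(frame)
    vsizer = wx.BoxSizer(wx.VERTICAL)
    vsizer.Add(ColourSetting(panel, "Line colour: ", wx.Colour(0, 0, 255),
                             _on_colour), 0, wx.ALL, 5)
    vsizer.Add(ChoiceSetting(panel, "Line style: ", "solid",
                             ["solid", "dashed", "dotted"], _on_choice),
               0, wx.ALL, 5)
    panel.SetSizer(vsizer)
    frame.Show()
    app.MainLoop()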
| gpl-3.0 |
bgris/ODL_bgris | lib/python3.5/site-packages/ipykernel/kernelapp.py | 5 | 19344 | """An Application for launching a kernel"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import atexit
import os
import sys
import signal
import traceback
import logging
from tornado import ioloop
import zmq
from zmq.eventloop import ioloop as zmq_ioloop
from zmq.eventloop.zmqstream import ZMQStream
from IPython.core.application import (
BaseIPythonApplication, base_flags, base_aliases, catch_config_error
)
from IPython.core.profiledir import ProfileDir
from IPython.core.shellapp import (
InteractiveShellApp, shell_flags, shell_aliases
)
from IPython.utils import io
from ipython_genutils.path import filefind, ensure_dir_exists
from traitlets import (
Any, Instance, Dict, Unicode, Integer, Bool, DottedObjectName, Type, default
)
from ipython_genutils.importstring import import_item
from jupyter_core.paths import jupyter_runtime_dir
from jupyter_client import write_connection_file
from jupyter_client.connect import ConnectionFileMixin
# local imports
from .iostream import IOPubThread
from .heartbeat import Heartbeat
from .ipkernel import IPythonKernel
from .parentpoller import ParentPollerUnix, ParentPollerWindows
from jupyter_client.session import (
Session, session_flags, session_aliases,
)
from .zmqshell import ZMQInteractiveShell
#-----------------------------------------------------------------------------
# Flags and Aliases
#-----------------------------------------------------------------------------
kernel_aliases = dict(base_aliases)
kernel_aliases.update({
'ip' : 'IPKernelApp.ip',
'hb' : 'IPKernelApp.hb_port',
'shell' : 'IPKernelApp.shell_port',
'iopub' : 'IPKernelApp.iopub_port',
'stdin' : 'IPKernelApp.stdin_port',
'control' : 'IPKernelApp.control_port',
'f' : 'IPKernelApp.connection_file',
'transport': 'IPKernelApp.transport',
})
kernel_flags = dict(base_flags)
kernel_flags.update({
'no-stdout' : (
{'IPKernelApp' : {'no_stdout' : True}},
"redirect stdout to the null device"),
'no-stderr' : (
{'IPKernelApp' : {'no_stderr' : True}},
"redirect stderr to the null device"),
'pylab' : (
{'IPKernelApp' : {'pylab' : 'auto'}},
"""Pre-load matplotlib and numpy for interactive use with
the default matplotlib backend."""),
})
# inherit flags&aliases for any IPython shell apps
kernel_aliases.update(shell_aliases)
kernel_flags.update(shell_flags)
# inherit flags&aliases for Sessions
kernel_aliases.update(session_aliases)
kernel_flags.update(session_flags)
_ctrl_c_message = """\
NOTE: When using the `ipython kernel` entry point, Ctrl-C will not work.
To exit, you will have to explicitly quit this process, by either sending
"quit" from a client, or using Ctrl-\\ in UNIX-like environments.
To read more about this, see https://github.com/ipython/ipython/issues/2049
"""
#-----------------------------------------------------------------------------
# Application class for starting an IPython Kernel
#-----------------------------------------------------------------------------
class IPKernelApp(BaseIPythonApplication, InteractiveShellApp,
ConnectionFileMixin):
name='ipython-kernel'
aliases = Dict(kernel_aliases)
flags = Dict(kernel_flags)
classes = [IPythonKernel, ZMQInteractiveShell, ProfileDir, Session]
# the kernel class, as an importstring
kernel_class = Type('ipykernel.ipkernel.IPythonKernel',
klass='ipykernel.kernelbase.Kernel',
help="""The Kernel subclass to be used.
This should allow easy re-use of the IPKernelApp entry point
to configure and launch kernels other than IPython's own.
""").tag(config=True)
kernel = Any()
poller = Any() # don't restrict this even though current pollers are all Threads
heartbeat = Instance(Heartbeat, allow_none=True)
ports = Dict()
subcommands = {
'install': (
'ipykernel.kernelspec.InstallIPythonKernelSpecApp',
'Install the IPython kernel'
),
}
# connection info:
connection_dir = Unicode()
@default('connection_dir')
def _default_connection_dir(self):
return jupyter_runtime_dir()
@property
def abs_connection_file(self):
if os.path.basename(self.connection_file) == self.connection_file:
return os.path.join(self.connection_dir, self.connection_file)
else:
return self.connection_file
# streams, etc.
no_stdout = Bool(False, help="redirect stdout to the null device").tag(config=True)
no_stderr = Bool(False, help="redirect stderr to the null device").tag(config=True)
outstream_class = DottedObjectName('ipykernel.iostream.OutStream',
help="The importstring for the OutStream factory").tag(config=True)
displayhook_class = DottedObjectName('ipykernel.displayhook.ZMQDisplayHook',
help="The importstring for the DisplayHook factory").tag(config=True)
# polling
parent_handle = Integer(int(os.environ.get('JPY_PARENT_PID') or 0),
help="""kill this process if its parent dies. On Windows, the argument
specifies the HANDLE of the parent process, otherwise it is simply boolean.
""").tag(config=True)
interrupt = Integer(int(os.environ.get('JPY_INTERRUPT_EVENT') or 0),
help="""ONLY USED ON WINDOWS
Interrupt this process when the parent is signaled.
""").tag(config=True)
def init_crash_handler(self):
sys.excepthook = self.excepthook
def excepthook(self, etype, evalue, tb):
# write uncaught traceback to 'real' stderr, not zmq-forwarder
traceback.print_exception(etype, evalue, tb, file=sys.__stderr__)
def init_poller(self):
if sys.platform == 'win32':
if self.interrupt or self.parent_handle:
self.poller = ParentPollerWindows(self.interrupt, self.parent_handle)
elif self.parent_handle:
self.poller = ParentPollerUnix()
def _bind_socket(self, s, port):
iface = '%s://%s' % (self.transport, self.ip)
if self.transport == 'tcp':
if port <= 0:
port = s.bind_to_random_port(iface)
else:
s.bind("tcp://%s:%i" % (self.ip, port))
elif self.transport == 'ipc':
if port <= 0:
port = 1
path = "%s-%i" % (self.ip, port)
while os.path.exists(path):
port = port + 1
path = "%s-%i" % (self.ip, port)
else:
path = "%s-%i" % (self.ip, port)
s.bind("ipc://%s" % path)
return port
def write_connection_file(self):
"""write connection info to JSON file"""
cf = self.abs_connection_file
self.log.debug("Writing connection file: %s", cf)
write_connection_file(cf, ip=self.ip, key=self.session.key, transport=self.transport,
shell_port=self.shell_port, stdin_port=self.stdin_port, hb_port=self.hb_port,
iopub_port=self.iopub_port, control_port=self.control_port)
def cleanup_connection_file(self):
cf = self.abs_connection_file
self.log.debug("Cleaning up connection file: %s", cf)
try:
os.remove(cf)
except (IOError, OSError):
pass
self.cleanup_ipc_files()
def init_connection_file(self):
if not self.connection_file:
self.connection_file = "kernel-%s.json"%os.getpid()
try:
self.connection_file = filefind(self.connection_file, ['.', self.connection_dir])
except IOError:
self.log.debug("Connection file not found: %s", self.connection_file)
# This means I own it, and I'll create it in this directory:
ensure_dir_exists(os.path.dirname(self.abs_connection_file), 0o700)
# Also, I will clean it up:
atexit.register(self.cleanup_connection_file)
return
try:
self.load_connection_file()
except Exception:
self.log.error("Failed to load connection file: %r", self.connection_file, exc_info=True)
self.exit(1)
def init_sockets(self):
# Create a context, a session, and the kernel sockets.
self.log.info("Starting the kernel at pid: %i", os.getpid())
context = zmq.Context.instance()
# Uncomment this to try closing the context.
# atexit.register(context.term)
self.shell_socket = context.socket(zmq.ROUTER)
self.shell_socket.linger = 1000
self.shell_port = self._bind_socket(self.shell_socket, self.shell_port)
self.log.debug("shell ROUTER Channel on port: %i" % self.shell_port)
self.stdin_socket = context.socket(zmq.ROUTER)
self.stdin_socket.linger = 1000
self.stdin_port = self._bind_socket(self.stdin_socket, self.stdin_port)
self.log.debug("stdin ROUTER Channel on port: %i" % self.stdin_port)
self.control_socket = context.socket(zmq.ROUTER)
self.control_socket.linger = 1000
self.control_port = self._bind_socket(self.control_socket, self.control_port)
self.log.debug("control ROUTER Channel on port: %i" % self.control_port)
self.init_iopub(context)
def init_iopub(self, context):
self.iopub_socket = context.socket(zmq.PUB)
self.iopub_socket.linger = 1000
self.iopub_port = self._bind_socket(self.iopub_socket, self.iopub_port)
self.log.debug("iopub PUB Channel on port: %i" % self.iopub_port)
self.configure_tornado_logger()
self.iopub_thread = IOPubThread(self.iopub_socket, pipe=True)
self.iopub_thread.start()
# backward-compat: wrap iopub socket API in background thread
self.iopub_socket = self.iopub_thread.background_socket
def init_heartbeat(self):
"""start the heart beating"""
# heartbeat doesn't share context, because it mustn't be blocked
# by the GIL, which is accessed by libzmq when freeing zero-copy messages
hb_ctx = zmq.Context()
self.heartbeat = Heartbeat(hb_ctx, (self.transport, self.ip, self.hb_port))
self.hb_port = self.heartbeat.port
self.log.debug("Heartbeat REP Channel on port: %i" % self.hb_port)
self.heartbeat.start()
def log_connection_info(self):
"""display connection info, and store ports"""
basename = os.path.basename(self.connection_file)
if basename == self.connection_file or \
os.path.dirname(self.connection_file) == self.connection_dir:
# use shortname
tail = basename
else:
tail = self.connection_file
lines = [
"To connect another client to this kernel, use:",
" --existing %s" % tail,
]
# log connection info
# info-level, so often not shown.
# frontends should use the %connect_info magic
# to see the connection info
for line in lines:
self.log.info(line)
# also raw print to the terminal if no parent_handle (`ipython kernel`)
# unless log-level is CRITICAL (--quiet)
if not self.parent_handle and self.log_level < logging.CRITICAL:
io.rprint(_ctrl_c_message)
for line in lines:
io.rprint(line)
self.ports = dict(shell=self.shell_port, iopub=self.iopub_port,
stdin=self.stdin_port, hb=self.hb_port,
control=self.control_port)
def init_blackhole(self):
"""redirects stdout/stderr to devnull if necessary"""
if self.no_stdout or self.no_stderr:
blackhole = open(os.devnull, 'w')
if self.no_stdout:
sys.stdout = sys.__stdout__ = blackhole
if self.no_stderr:
sys.stderr = sys.__stderr__ = blackhole
def init_io(self):
"""Redirect input streams and set a display hook."""
if self.outstream_class:
outstream_factory = import_item(str(self.outstream_class))
sys.stdout = outstream_factory(self.session, self.iopub_thread, u'stdout')
sys.stderr = outstream_factory(self.session, self.iopub_thread, u'stderr')
if self.displayhook_class:
displayhook_factory = import_item(str(self.displayhook_class))
self.displayhook = displayhook_factory(self.session, self.iopub_socket)
sys.displayhook = self.displayhook
self.patch_io()
def patch_io(self):
"""Patch important libraries that can't handle sys.stdout forwarding"""
try:
import faulthandler
except ImportError:
pass
else:
# Warning: this is a monkeypatch of `faulthandler.enable`, watch for possible
# updates to the upstream API and update accordingly (up-to-date as of Python 3.5):
# https://docs.python.org/3/library/faulthandler.html#faulthandler.enable
# change default file to __stderr__ from forwarded stderr
faulthandler_enable = faulthandler.enable
def enable(file=sys.__stderr__, all_threads=True, **kwargs):
return faulthandler_enable(file=file, all_threads=all_threads, **kwargs)
faulthandler.enable = enable
if hasattr(faulthandler, 'register'):
faulthandler_register = faulthandler.register
def register(signum, file=sys.__stderr__, all_threads=True, chain=False, **kwargs):
return faulthandler_register(signum, file=file, all_threads=all_threads,
chain=chain, **kwargs)
faulthandler.register = register
def init_signal(self):
signal.signal(signal.SIGINT, signal.SIG_IGN)
def init_kernel(self):
"""Create the Kernel object itself"""
shell_stream = ZMQStream(self.shell_socket)
control_stream = ZMQStream(self.control_socket)
kernel_factory = self.kernel_class.instance
kernel = kernel_factory(parent=self, session=self.session,
shell_streams=[shell_stream, control_stream],
iopub_thread=self.iopub_thread,
iopub_socket=self.iopub_socket,
stdin_socket=self.stdin_socket,
log=self.log,
profile_dir=self.profile_dir,
user_ns=self.user_ns,
)
kernel.record_ports({
name + '_port': port for name, port in self.ports.items()
})
self.kernel = kernel
# Allow the displayhook to get the execution count
self.displayhook.get_execution_count = lambda: kernel.execution_count
def init_gui_pylab(self):
"""Enable GUI event loop integration, taking pylab into account."""
# Register inline backend as default
# this is higher priority than matplotlibrc,
# but lower priority than anything else (mpl.use() for instance).
# This only affects matplotlib >= 1.5
if not os.environ.get('MPLBACKEND'):
os.environ['MPLBACKEND'] = 'module://ipykernel.pylab.backend_inline'
# Provide a wrapper for :meth:`InteractiveShellApp.init_gui_pylab`
# to ensure that any exception is printed straight to stderr.
# Normally _showtraceback associates the reply with an execution,
# which means frontends will never draw it, as this exception
# is not associated with any execute request.
shell = self.shell
_showtraceback = shell._showtraceback
try:
# replace error-sending traceback with stderr
def print_tb(etype, evalue, stb):
print ("GUI event loop or pylab initialization failed",
file=sys.stderr)
print (shell.InteractiveTB.stb2text(stb), file=sys.stderr)
shell._showtraceback = print_tb
InteractiveShellApp.init_gui_pylab(self)
finally:
shell._showtraceback = _showtraceback
def init_shell(self):
self.shell = getattr(self.kernel, 'shell', None)
if self.shell:
self.shell.configurables.append(self)
def init_extensions(self):
super(IPKernelApp, self).init_extensions()
# BEGIN HARDCODED WIDGETS HACK
# Ensure ipywidgets extension is loaded if available
extension_man = self.shell.extension_manager
if 'ipywidgets' not in extension_man.loaded:
try:
extension_man.load_extension('ipywidgets')
except ImportError as e:
self.log.debug('ipywidgets package not installed. Widgets will not be available.')
# END HARDCODED WIDGETS HACK
def configure_tornado_logger(self):
""" Configure the tornado logging.Logger.
Must set up the tornado logger or else tornado will call
basicConfig for the root logger which makes the root logger
go to the real sys.stderr instead of the capture streams.
This function mimics the setup of logging.basicConfig.
"""
logger = logging.getLogger('tornado')
handler = logging.StreamHandler()
formatter = logging.Formatter(logging.BASIC_FORMAT)
handler.setFormatter(formatter)
logger.addHandler(handler)
@catch_config_error
def initialize(self, argv=None):
super(IPKernelApp, self).initialize(argv)
if self.subapp is not None:
return
# register zmq IOLoop with tornado
zmq_ioloop.install()
self.init_blackhole()
self.init_connection_file()
self.init_poller()
self.init_sockets()
self.init_heartbeat()
# writing/displaying connection info must be *after* init_sockets/heartbeat
self.write_connection_file()
# Log connection info after writing connection file, so that the connection
# file is definitely available at the time someone reads the log.
self.log_connection_info()
self.init_io()
self.init_signal()
self.init_kernel()
# shell init steps
self.init_path()
self.init_shell()
if self.shell:
self.init_gui_pylab()
self.init_extensions()
self.init_code()
# flush stdout/stderr, so that anything written to these streams during
# initialization do not get associated with the first execution request
sys.stdout.flush()
sys.stderr.flush()
def start(self):
if self.subapp is not None:
return self.subapp.start()
if self.poller is not None:
self.poller.start()
self.kernel.start()
try:
ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
pass
launch_new_instance = IPKernelApp.launch_instance
def main():
"""Run an IPKernel as an application"""
app = IPKernelApp.instance()
app.initialize()
app.start()
if __name__ == '__main__':
main()
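# A hedged sketch of reusing this entry point for a non-IPython kernel, which
# is what the ``kernel_class`` trait above is for.  ``EchoKernel`` and the
# module ``echokernel`` are hypothetical placeholders for your own
# ``ipykernel.kernelbase.Kernel`` subclass:
#
#     from ipykernel.kernelapp import IPKernelApp
#     from echokernel import EchoKernel   # hypothetical
#     IPKernelApp.launch_instance(kernel_class=EchoKernel)
#
# The same override is available from the command line via the trait system,
# e.g. ``--IPKernelApp.kernel_class=echokernel.EchoKernel`` (the exact flag
# form may vary between ipykernel versions).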
| gpl-3.0 |
saiguruprasad/Kinematic-Synthesis | Python 2.x/ksynpy.py | 1 | 3887 | """
Copyright (C) 2015 Sai Guruprasad Jakkala, G V Balakrishna
This program is free software: you can redistribute it
and/or modify it under the terms of the GNU General
Public License as published by the Free Software Foundation,
either version 3 of the License, or (at your option) any
later version. This program is distributed in the hope that
it will be useful, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see http://www.gnu.org/licenses/.
"""
from __future__ import division
import cmath as cm
import numpy.linalg as lm
import numpy as np
import sympy as sp
import matplotlib.pyplot as plt
def lpcs(w2,w3,w4,a2,a3,a4):
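    # Synthesises a four-bar linkage from the prescribed angular velocities
    # (w2-w4) and accelerations (a2-a4) of the moving links; returns the
    # complex link vectors r1-r4 and plots the resulting linkage.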
r2 = w4*(complex(a3,w3**2)) - w3*(complex(a4,w4**2));
r3 = w2*(complex(a4,w4**2)) - w4*(complex(a2,w2**2));
r4 = w3*(complex(a2,w2**2)) - w2*(complex(a3,w3**2));
r1 = -r2-r3-r4;
r1_l=abs(r1); r1_a=cm.phase(r1)*180/cm.pi;
r2_l=abs(r2); r2_a=cm.phase(r2)*180/cm.pi;
r3_l=abs(r3); r3_a=cm.phase(r3)*180/cm.pi;
r4_l=abs(r4); r4_a=cm.phase(r4)*180/cm.pi;
print 'Link 1 Length :',r1_l,'Angle :',r1_a
print 'Link 2 Length :',r2_l,'Angle :',r2_a
print 'Link 3 Length :',r3_l,'Angle :',r3_a
print 'Link 4 Length :',r4_l,'Angle :',r4_a
a=180-r1_a;
plt.hold('on')
plt.plot([0,r2_l*np.cos((a+r2_a)*np.pi/180),r3_l*np.cos((a+r3_a)*np.pi/180),r1_l*np.cos((a+r4_a)*np.pi/180)],[0,r2_l*np.sin((a+r2_a)*np.pi/180),r3_l*np.sin((a+r3_a)*np.pi/180),0],color='k');
#ax.annotate('A',(r2_l*np.cos((a+r2_a)*np.pi/180),r2_l*np.sin((a+r2_a)*np.pi/180)))
return r1,r2,r3,r4;
def thpos(d2,d3,gamma2,gamma3,psi2,psi3,phi2,phi3):
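    # Three-position motion-generation synthesis: solves the standard dyad
    # equations for the left (psi) and right (phi) dyads given the coupler
    # displacements d2, d3 and the rotations gamma/psi/phi, and returns the
    # dyad, coupler and ground vectors.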
Al=[[cm.exp(psi2*cm.pi*1j/180)-1,cm.exp(gamma2*cm.pi*1j/180)-1],[cm.exp(psi3*cm.pi*1j/180)-1,cm.exp(gamma3*cm.pi*1j/180)-1]];
Bl=[[d2],[d3]];
Cl=lm.solve(Al,Bl);
l1=Cl[0]; l2=Cl[1];
Ar=[[cm.exp(phi2*cm.pi*1j/180)-1,cm.exp(gamma2*cm.pi*1j/180)-1],[cm.exp(phi3*cm.pi*1j/180)-1,cm.exp(gamma3*cm.pi*1j/180)-1]];
Br=[[d2],[d3]];
Cr=lm.solve(Ar,Br);
l3=Cr[0]; l4=Cr[1];
l5=l2-l4;
l6=l1+l5-l3;
return l1,l2,l3,l4,l5,l6;
def frst(f1,x0,x_n,n,psi1,psi2,phi1,phi2):
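    # Freudenstein function generation with three Chebyshev-spaced precision
    # points: maps y = f1(x) on [x0, x_n] to the crank range psi1-psi2 and the
    # follower range phi1-phi2, solves for k1, k2, k3 and the link lengths,
    # then plots the linkage at the five positions.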
f2=sp.simplify(f1);
xf=np.zeros(n+2);
yf=np.zeros(n+2);
xf[0]=x0; yf[0]=f2.subs({'x':x0}).evalf();
xf[n+1]=x_n; yf[n+1]=f2.subs({'x':x_n}).evalf();
for k in range(3):
xf[k+1]=(x0+x_n)/2.0 - (((x_n-x0)/2.0)*np.cos((2*(k+1)-1)*np.pi/(2*n)));
yf[k+1]=f2.subs({'x':xf[k+1]}).evalf();
a2,b2=sp.symbols('a2,b2');
eqs=(a2*x0 + b2 - psi1, a2*x_n + b2 - psi2);
loo=sp.solve(eqs,[b2,a2]);
a1=loo[a2]; b1=loo[b2];
eqs=(a2*yf[0] + b2 - phi1, a2*yf[n+1] + b2 - phi2);
loo=sp.solve(eqs,[b2,a2]);
c1=loo[a2]; d1=loo[b2];
psi=np.zeros(n+2); phi=np.zeros(n+2);
psi[n-n]=psi1; psi[n+1]=psi2; phi[n-n]=phi1; phi[n+1]=phi2;
for k in range(3):
psi[k+1]=a1*xf[k+1]+b1;
phi[k+1]=c1*yf[k+1]+d1;
k11,k22,k33=sp.symbols('k11,k22,k33');
eqn1=k11*np.cos(psi[1]*np.pi/180)+k22*np.cos(phi[1]*np.pi/180)+k33-np.cos((psi[1]-phi[1])*np.pi/180);
eqn2=k11*np.cos(psi[2]*np.pi/180)+k22*np.cos(phi[2]*np.pi/180)+k33-np.cos((psi[2]-phi[2])*np.pi/180);
eqn3=k11*np.cos(psi[3]*np.pi/180)+k22*np.cos(phi[3]*np.pi/180)+k33-np.cos((psi[3]-phi[3])*np.pi/180);
las=sp.solve([eqn1,eqn2,eqn3],[k33,k22,k11]);
k1=las[k11];
k2=las[k22];
k3=las[k33];
r1=1;
r2=r1/k2;
r4=r1/k1;
r3=((2*r2*r4*k3)+r1**2+r2**2+r4**2)**0.5;
col=['r','b','g','k','r'];
for k in range(5):
plt.hold('on')
        plt.plot([0,r2*np.cos(psi[k]*np.pi/180),r4*np.cos((phi[k]-180)*np.pi/180),1],[0,r2*np.sin(psi[k]*np.pi/180),r4*np.sin((phi[k]-180)*np.pi/180),0],color=col[k]);
return r1,r2,r3,r4,psi,phi;
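if __name__ == '__main__':
    # Minimal usage sketch; the numeric inputs are illustrative assumptions
    # only (a textbook-style y = log10(x) function-generation case), not
    # values supplied by the module's authors.
    lpcs(10.0, 4.0, 6.0, 0.0, -50.0, 20.0)
    x = sp.symbols('x')
    frst(sp.log(x, 10), 1.0, 10.0, 3, 45.0, 105.0, 135.0, 225.0)
    plt.show()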
| gpl-3.0 |
bashkirtsevich/autocode | text_preprocessing/spell_checker.py | 1 | 1664 | import pandas as pd
from enchant import Dict
class SpellChecker(object):
# exclusion_words_path -- path to nn_model x_dict.csv
def __init__(self, exclusion_words_path, **kwargs):
self._dicts = {key: Dict(value) for key, value in kwargs.items()}
self._exclusion_dict = pd.read_csv(
exclusion_words_path,
sep=",",
engine="python",
names=("id", "word", "count", "weight")
)
def _check_words(self, words, dict_key):
dict = self._dicts[dict_key]
for word in words:
if not self._exclusion_dict[self._exclusion_dict["word"] == word].empty:
yield True, word, []
else:
valid = dict.check(word)
suggest = dict.suggest(word) if not valid else None
yield valid, word, suggest
def check_text(self, text, dict_key):
if dict_key in self._dicts:
suggests = {}
# Split input string to list of words
words = text.split()
# Check each word
for check_result in self._check_words(words=words, dict_key=dict_key):
if check_result:
valid, word, suggest = check_result
if not valid and not word in suggests:
suggests[word] = suggest
if suggests:
return {"valid": False, "suggests": suggests}
else:
return {"valid": True}
else:
return {"error": "No such dictionary \"{0}\"".format(dict_key)}
@property
def dicts(self):
return self._dicts.keys()
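if __name__ == "__main__":
    # Minimal usage sketch, assuming pyenchant is installed with an "en_US"
    # dictionary available.  The temporary two-row file below stands in for
    # the neural-net vocabulary file this class expects (x_dict.csv with
    # id,word,count,weight rows).
    import os
    import tempfile

    tmp = tempfile.NamedTemporaryFile(mode="w", suffix=".csv", delete=False)
    tmp.write("0,tensorflow,10,0.5\n1,numpy,20,0.9\n")
    tmp.close()
    try:
        checker = SpellChecker(tmp.name, en="en_US")
        # "tensorflow" passes via the exclusion list, "awesme" gets suggestions
        print(checker.check_text("tensorflow is awesme", "en"))
        print(checker.check_text("hello world", "fr"))  # unknown dictionary
    finally:
        os.remove(tmp.name)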
| gpl-3.0 |
JackKelly/neuralnilm_prototype | scripts/e375.py | 2 | 7069 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
"""
e370
longer seq
"""
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 500
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
# 'dish washer',
# ['washer dryer', 'washing machine']
],
on_power_thresholds=[5] * 5,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=256,
# random_window=64,
output_one_appliance=True,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.9,
one_target_per_seq=False,
n_seq_per_batch=64,
subsample_target=4,
include_diff=False,
include_power=True,
# clip_appliance_power=True,
target_is_prediction=False,
# independently_center_inputs = True,
standardise_input=True,
unit_variance_targets=True,
input_padding=2,
lag=0
# classification=True
# reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
updates_func=momentum,
learning_rate=1e-3,
learning_rate_changes_by_iteration={
500: 1e-4,
1500: 1e-5
# 800: 1e-4
# 500: 1e-3
# 4000: 1e-03,
# 6000: 5e-06,
# 7000: 1e-06
# 2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
do_save_activations=True,
# auto_reshape=False,
# plotter=CentralOutputPlotter
# plotter=MDNPlotter
layers_config=[
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 20,
'filter_length': 4,
'stride': 4,
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 80,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': tanh
}
]
)
def exp_a(name):
global source
# source_dict_copy = deepcopy(source_dict)
# source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'].extend([
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': T.nnet.softplus
}
])
net = Net(**net_dict_copy)
return net
def exp_b(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source,
loss_function=partial(scaled_cost3, ignore_inactive=False),
))
net_dict_copy['layers_config'].extend([
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': T.nnet.softplus
}
])
net = Net(**net_dict_copy)
return net
def exp_c(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source,
loss_function=partial(scaled_cost3, ignore_inactive=True),
))
net_dict_copy['layers_config'].extend([
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': T.nnet.softplus
}
])
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
EXPERIMENTS = list('abc')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=2000)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
# raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
| mit |
bundgus/python-playground | matplotlib-playground/examples/api/custom_projection_example.py | 1 | 18187 | from __future__ import unicode_literals
import matplotlib
from matplotlib.axes import Axes
from matplotlib.patches import Circle
from matplotlib.path import Path
from matplotlib.ticker import NullLocator, Formatter, FixedLocator
from matplotlib.transforms import Affine2D, BboxTransformTo, Transform
from matplotlib.projections import register_projection
import matplotlib.spines as mspines
import matplotlib.axis as maxis
import numpy as np
# This example projection class is rather long, but it is designed to
# illustrate many features, not all of which will be used every time.
# It is also common to factor out a lot of these methods into common
# code used by a number of projections with similar characteristics
# (see geo.py).
class HammerAxes(Axes):
"""
A custom class for the Aitoff-Hammer projection, an equal-area map
projection.
http://en.wikipedia.org/wiki/Hammer_projection
"""
# The projection must specify a name. This will be used be the
# user to select the projection, i.e. ``subplot(111,
# projection='custom_hammer')``.
name = 'custom_hammer'
def __init__(self, *args, **kwargs):
Axes.__init__(self, *args, **kwargs)
self.set_aspect(0.5, adjustable='box', anchor='C')
self.cla()
def _init_axis(self):
self.xaxis = maxis.XAxis(self)
self.yaxis = maxis.YAxis(self)
# Do not register xaxis or yaxis with spines -- as done in
# Axes._init_axis() -- until HammerAxes.xaxis.cla() works.
#self.spines['hammer'].register_axis(self.yaxis)
self._update_transScale()
def cla(self):
"""
Override to set up some reasonable defaults.
"""
# Don't forget to call the base class
Axes.cla(self)
# Set up a default grid spacing
self.set_longitude_grid(30)
self.set_latitude_grid(15)
self.set_longitude_grid_ends(75)
# Turn off minor ticking altogether
self.xaxis.set_minor_locator(NullLocator())
self.yaxis.set_minor_locator(NullLocator())
# Do not display ticks -- we only want gridlines and text
self.xaxis.set_ticks_position('none')
self.yaxis.set_ticks_position('none')
# The limits on this projection are fixed -- they are not to
# be changed by the user. This makes the math in the
# transformation itself easier, and since this is a toy
# example, the easier, the better.
Axes.set_xlim(self, -np.pi, np.pi)
Axes.set_ylim(self, -np.pi / 2.0, np.pi / 2.0)
def _set_lim_and_transforms(self):
"""
This is called once when the plot is created to set up all the
transforms for the data, text and grids.
"""
# There are three important coordinate spaces going on here:
#
# 1. Data space: The space of the data itself
#
# 2. Axes space: The unit rectangle (0, 0) to (1, 1)
# covering the entire plot area.
#
# 3. Display space: The coordinates of the resulting image,
# often in pixels or dpi/inch.
# This function makes heavy use of the Transform classes in
# ``lib/matplotlib/transforms.py.`` For more information, see
# the inline documentation there.
# The goal of the first two transformations is to get from the
# data space (in this case longitude and latitude) to axes
# space. It is separated into a non-affine and affine part so
# that the non-affine part does not have to be recomputed when
# a simple affine change to the figure has been made (such as
# resizing the window or changing the dpi).
# 1) The core transformation from data space into
# rectilinear space defined in the HammerTransform class.
self.transProjection = self.HammerTransform()
# 2) The above has an output range that is not in the unit
# rectangle, so scale and translate it so it fits correctly
# within the axes. The peculiar calculations of xscale and
# yscale are specific to a Aitoff-Hammer projection, so don't
# worry about them too much.
xscale = 2.0 * np.sqrt(2.0) * np.sin(0.5 * np.pi)
yscale = np.sqrt(2.0) * np.sin(0.5 * np.pi)
self.transAffine = Affine2D() \
.scale(0.5 / xscale, 0.5 / yscale) \
.translate(0.5, 0.5)
# 3) This is the transformation from axes space to display
# space.
self.transAxes = BboxTransformTo(self.bbox)
# Now put these 3 transforms together -- from data all the way
# to display coordinates. Using the '+' operator, these
# transforms will be applied "in order". The transforms are
# automatically simplified, if possible, by the underlying
# transformation framework.
self.transData = \
self.transProjection + \
self.transAffine + \
self.transAxes
# The main data transformation is set up. Now deal with
# gridlines and tick labels.
# Longitude gridlines and ticklabels. The input to these
# transforms are in display space in x and axes space in y.
# Therefore, the input values will be in range (-xmin, 0),
# (xmax, 1). The goal of these transforms is to go from that
# space to display space. The tick labels will be offset 4
# pixels from the equator.
self._xaxis_pretransform = \
Affine2D() \
.scale(1.0, np.pi) \
.translate(0.0, -np.pi)
self._xaxis_transform = \
self._xaxis_pretransform + \
self.transData
self._xaxis_text1_transform = \
Affine2D().scale(1.0, 0.0) + \
self.transData + \
Affine2D().translate(0.0, 4.0)
self._xaxis_text2_transform = \
Affine2D().scale(1.0, 0.0) + \
self.transData + \
Affine2D().translate(0.0, -4.0)
# Now set up the transforms for the latitude ticks. The input to
# these transforms are in axes space in x and display space in
# y. Therefore, the input values will be in range (0, -ymin),
# (1, ymax). The goal of these transforms is to go from that
# space to display space. The tick labels will be offset 4
# pixels from the edge of the axes ellipse.
yaxis_stretch = Affine2D().scale(2*np.pi, 1.0).translate(-np.pi, 0.0)
yaxis_space = Affine2D().scale(1.0, 1.1)
self._yaxis_transform = \
yaxis_stretch + \
self.transData
yaxis_text_base = \
yaxis_stretch + \
self.transProjection + \
(yaxis_space +
self.transAffine +
self.transAxes)
self._yaxis_text1_transform = \
yaxis_text_base + \
Affine2D().translate(-8.0, 0.0)
self._yaxis_text2_transform = \
yaxis_text_base + \
Affine2D().translate(8.0, 0.0)
def get_xaxis_transform(self, which='grid'):
"""
Override this method to provide a transformation for the
x-axis grid and ticks.
"""
assert which in ['tick1', 'tick2', 'grid']
return self._xaxis_transform
def get_xaxis_text1_transform(self, pixelPad):
"""
Override this method to provide a transformation for the
x-axis tick labels.
Returns a tuple of the form (transform, valign, halign)
"""
return self._xaxis_text1_transform, 'bottom', 'center'
def get_xaxis_text2_transform(self, pixelPad):
"""
Override this method to provide a transformation for the
secondary x-axis tick labels.
Returns a tuple of the form (transform, valign, halign)
"""
return self._xaxis_text2_transform, 'top', 'center'
def get_yaxis_transform(self, which='grid'):
"""
Override this method to provide a transformation for the
y-axis grid and ticks.
"""
assert which in ['tick1', 'tick2', 'grid']
return self._yaxis_transform
def get_yaxis_text1_transform(self, pixelPad):
"""
Override this method to provide a transformation for the
y-axis tick labels.
Returns a tuple of the form (transform, valign, halign)
"""
return self._yaxis_text1_transform, 'center', 'right'
def get_yaxis_text2_transform(self, pixelPad):
"""
Override this method to provide a transformation for the
secondary y-axis tick labels.
Returns a tuple of the form (transform, valign, halign)
"""
return self._yaxis_text2_transform, 'center', 'left'
def _gen_axes_patch(self):
"""
Override this method to define the shape that is used for the
background of the plot. It should be a subclass of Patch.
In this case, it is a Circle (that may be warped by the axes
transform into an ellipse). Any data and gridlines will be
clipped to this shape.
"""
return Circle((0.5, 0.5), 0.5)
def _gen_axes_spines(self):
return {'custom_hammer': mspines.Spine.circular_spine(self,
(0.5, 0.5), 0.5)}
# Prevent the user from applying scales to one or both of the
# axes. In this particular case, scaling the axes wouldn't make
# sense, so we don't allow it.
def set_xscale(self, *args, **kwargs):
if args[0] != 'linear':
raise NotImplementedError
Axes.set_xscale(self, *args, **kwargs)
def set_yscale(self, *args, **kwargs):
if args[0] != 'linear':
raise NotImplementedError
Axes.set_yscale(self, *args, **kwargs)
# Prevent the user from changing the axes limits. In our case, we
# want to display the whole sphere all the time, so we override
# set_xlim and set_ylim to ignore any input. This also applies to
# interactive panning and zooming in the GUI interfaces.
def set_xlim(self, *args, **kwargs):
Axes.set_xlim(self, -np.pi, np.pi)
Axes.set_ylim(self, -np.pi / 2.0, np.pi / 2.0)
set_ylim = set_xlim
def format_coord(self, lon, lat):
"""
Override this method to change how the values are displayed in
the status bar.
In this case, we want them to be displayed in degrees N/S/E/W.
"""
lon = np.degrees(lon)
lat = np.degrees(lat)
if lat >= 0.0:
ns = 'N'
else:
ns = 'S'
if lon >= 0.0:
ew = 'E'
else:
ew = 'W'
# \u00b0 : degree symbol
return '%f\u00b0%s, %f\u00b0%s' % (abs(lat), ns, abs(lon), ew)
class DegreeFormatter(Formatter):
"""
This is a custom formatter that converts the native unit of
radians into (truncated) degrees and adds a degree symbol.
"""
def __init__(self, round_to=1.0):
self._round_to = round_to
def __call__(self, x, pos=None):
degrees = round(np.degrees(x) / self._round_to) * self._round_to
# \u00b0 : degree symbol
return "%d\u00b0" % degrees
def set_longitude_grid(self, degrees):
"""
Set the number of degrees between each longitude grid.
This is an example method that is specific to this projection
class -- it provides a more convenient interface to set the
ticking than set_xticks would.
"""
# Set up a FixedLocator at each of the points, evenly spaced
# by degrees.
number = (360.0 / degrees) + 1
self.xaxis.set_major_locator(
plt.FixedLocator(
np.linspace(-np.pi, np.pi, number, True)[1:-1]))
# Set the formatter to display the tick labels in degrees,
# rather than radians.
self.xaxis.set_major_formatter(self.DegreeFormatter(degrees))
def set_latitude_grid(self, degrees):
"""
Set the number of degrees between each longitude grid.
This is an example method that is specific to this projection
class -- it provides a more convenient interface than
set_yticks would.
"""
# Set up a FixedLocator at each of the points, evenly spaced
# by degrees.
number = (180.0 / degrees) + 1
self.yaxis.set_major_locator(
FixedLocator(
np.linspace(-np.pi / 2.0, np.pi / 2.0, number, True)[1:-1]))
# Set the formatter to display the tick labels in degrees,
# rather than radians.
self.yaxis.set_major_formatter(self.DegreeFormatter(degrees))
def set_longitude_grid_ends(self, degrees):
"""
Set the latitude(s) at which to stop drawing the longitude grids.
Often, in geographic projections, you wouldn't want to draw
longitude gridlines near the poles. This allows the user to
specify the degree at which to stop drawing longitude grids.
This is an example method that is specific to this projection
class -- it provides an interface to something that has no
analogy in the base Axes class.
"""
longitude_cap = np.radians(degrees)
# Change the xaxis gridlines transform so that it draws from
# -degrees to degrees, rather than -pi to pi.
self._xaxis_pretransform \
.clear() \
.scale(1.0, longitude_cap * 2.0) \
.translate(0.0, -longitude_cap)
def get_data_ratio(self):
"""
Return the aspect ratio of the data itself.
This method should be overridden by any Axes that have a
fixed data ratio.
"""
return 1.0
# Interactive panning and zooming is not supported with this projection,
# so we override all of the following methods to disable it.
def can_zoom(self):
"""
Return True if this axes support the zoom box
"""
return False
def start_pan(self, x, y, button):
pass
def end_pan(self):
pass
def drag_pan(self, button, key, x, y):
pass
# Now, the transforms themselves.
class HammerTransform(Transform):
"""
The base Hammer transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def transform_non_affine(self, ll):
"""
Override the transform_non_affine method to implement the custom
transform.
The input and output are Nx2 numpy arrays.
"""
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
# Pre-compute some values
half_long = longitude / 2.0
cos_latitude = np.cos(latitude)
sqrt2 = np.sqrt(2.0)
alpha = 1.0 + cos_latitude * np.cos(half_long)
x = (2.0 * sqrt2) * (cos_latitude * np.sin(half_long)) / alpha
y = (sqrt2 * np.sin(latitude)) / alpha
return np.concatenate((x, y), 1)
# This is where things get interesting. With this projection,
# straight lines in data space become curves in display space.
# This is done by interpolating new values between the input
# values of the data. Since ``transform`` must not return a
# differently-sized array, any transform that requires
# changing the length of the data array must happen within
# ``transform_path``.
def transform_path_non_affine(self, path):
ipath = path.interpolated(path._interpolation_steps)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path_non_affine.__doc__ = \
Transform.transform_path_non_affine.__doc__
if matplotlib.__version__ < '1.2':
# Note: For compatibility with matplotlib v1.1 and older, you'll
# need to explicitly implement a ``transform`` method as well.
# Otherwise a ``NotImplementedError`` will be raised. This isn't
# necessary for v1.2 and newer, however.
transform = transform_non_affine
# Similarly, we need to explicitly override ``transform_path`` if
# compatibility with older matplotlib versions is needed. With v1.2
# and newer, only overriding the ``transform_path_non_affine``
# method is sufficient.
transform_path = transform_path_non_affine
transform_path.__doc__ = Transform.transform_path.__doc__
def inverted(self):
return HammerAxes.InvertedHammerTransform()
inverted.__doc__ = Transform.inverted.__doc__
class InvertedHammerTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def transform_non_affine(self, xy):
x = xy[:, 0:1]
y = xy[:, 1:2]
quarter_x = 0.25 * x
half_y = 0.5 * y
z = np.sqrt(1.0 - quarter_x*quarter_x - half_y*half_y)
longitude = 2*np.arctan((z*x)/(2.0*(2.0*z*z - 1.0)))
latitude = np.arcsin(y*z)
return np.concatenate((longitude, latitude), 1)
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
# As before, we need to implement the "transform" method for
# compatibility with matplotlib v1.1 and older.
if matplotlib.__version__ < '1.2':
transform = transform_non_affine
def inverted(self):
# The inverse of the inverse is the original transform... ;)
return HammerAxes.HammerTransform()
inverted.__doc__ = Transform.inverted.__doc__
# Now register the projection with matplotlib so the user can select
# it.
register_projection(HammerAxes)
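# A quick round-trip sanity check (editorial sketch, not part of the original
# example): forward-transforming a point and feeding the result to the inverse
# should recover the input, e.g.
#   ht = HammerAxes.HammerTransform()
#   pt = np.array([[0.5, 0.3]])  # (longitude, latitude) in radians
#   back = ht.inverted().transform_non_affine(ht.transform_non_affine(pt))
#   # np.allclose(back, pt) is expected to hold up to floating-point error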
if __name__ == '__main__':
import matplotlib.pyplot as plt
# Now make a simple example using the custom projection.
plt.subplot(111, projection="custom_hammer")
p = plt.plot([-1, 1, 1], [-1, -1, 1], "o-")
plt.grid(True)
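# The projection-specific helpers defined above could also be exercised here
# (editorial sketch, not part of the original example):
#   ax = plt.gca()
#   ax.set_longitude_grid(30)       # longitude gridline every 30 degrees
#   ax.set_latitude_grid(15)        # latitude gridline every 15 degrees
#   ax.set_longitude_grid_ends(75)  # stop longitude gridlines beyond +/-75 deg latitude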
plt.show()
| mit |
sumspr/scikit-learn | examples/calibration/plot_calibration.py | 225 | 4795 | """
======================================
Probability calibration of classifiers
======================================
When performing classification you often want to predict not only
the class label, but also the associated probability. This probability
gives you some kind of confidence on the prediction. However, not all
classifiers provide well-calibrated probabilities, some being over-confident
while others are under-confident. Thus, a separate calibration of predicted
probabilities is often desirable as a postprocessing step. This example illustrates
two different methods for this calibration and evaluates the quality of the
returned probabilities using Brier's score
(see http://en.wikipedia.org/wiki/Brier_score).
Compared are the estimated probabilities using a Gaussian naive Bayes classifier
without calibration, with a sigmoid calibration, and with a non-parametric
isotonic calibration. One can observe that only the non-parametric model is able
to provide a probability calibration that returns probabilities close to the
expected 0.5 for most of the samples belonging to the middle cluster with
heterogeneous labels. This results in a significantly improved Brier score.
"""
print(__doc__)
# Author: Mathieu Blondel <[email protected]>
# Alexandre Gramfort <[email protected]>
# Balazs Kegl <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from sklearn.datasets import make_blobs
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import brier_score_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.cross_validation import train_test_split
n_samples = 50000
n_bins = 3 # use 3 bins for calibration_curve as we have 3 clusters here
# Generate 3 blobs with 2 classes where the second blob contains
# half positive samples and half negative samples. Probability in this
# blob is therefore 0.5.
centers = [(-5, -5), (0, 0), (5, 5)]
X, y = make_blobs(n_samples=n_samples, n_features=2, cluster_std=1.0,
centers=centers, shuffle=False, random_state=42)
y[:n_samples // 2] = 0
y[n_samples // 2:] = 1
sample_weight = np.random.RandomState(42).rand(y.shape[0])
# split train, test for calibration
X_train, X_test, y_train, y_test, sw_train, sw_test = \
train_test_split(X, y, sample_weight, test_size=0.9, random_state=42)
# Gaussian Naive-Bayes with no calibration
clf = GaussianNB()
clf.fit(X_train, y_train) # GaussianNB itself does not support sample-weights
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with isotonic calibration
clf_isotonic = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_isotonic.fit(X_train, y_train, sw_train)
prob_pos_isotonic = clf_isotonic.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with sigmoid calibration
clf_sigmoid = CalibratedClassifierCV(clf, cv=2, method='sigmoid')
clf_sigmoid.fit(X_train, y_train, sw_train)
prob_pos_sigmoid = clf_sigmoid.predict_proba(X_test)[:, 1]
print("Brier scores: (the smaller the better)")
clf_score = brier_score_loss(y_test, prob_pos_clf, sw_test)
print("No calibration: %1.3f" % clf_score)
clf_isotonic_score = brier_score_loss(y_test, prob_pos_isotonic, sw_test)
print("With isotonic calibration: %1.3f" % clf_isotonic_score)
clf_sigmoid_score = brier_score_loss(y_test, prob_pos_sigmoid, sw_test)
print("With sigmoid calibration: %1.3f" % clf_sigmoid_score)
###############################################################################
# Plot the data and the predicted probabilities
plt.figure()
y_unique = np.unique(y)
colors = cm.rainbow(np.linspace(0.0, 1.0, y_unique.size))
for this_y, color in zip(y_unique, colors):
this_X = X_train[y_train == this_y]
this_sw = sw_train[y_train == this_y]
plt.scatter(this_X[:, 0], this_X[:, 1], s=this_sw * 50, c=color, alpha=0.5,
label="Class %s" % this_y)
plt.legend(loc="best")
plt.title("Data")
plt.figure()
order = np.lexsort((prob_pos_clf, ))
plt.plot(prob_pos_clf[order], 'r', label='No calibration (%1.3f)' % clf_score)
plt.plot(prob_pos_isotonic[order], 'g', linewidth=3,
label='Isotonic calibration (%1.3f)' % clf_isotonic_score)
plt.plot(prob_pos_sigmoid[order], 'b', linewidth=3,
label='Sigmoid calibration (%1.3f)' % clf_sigmoid_score)
plt.plot(np.linspace(0, y_test.size, 51)[1::2],
y_test[order].reshape(25, -1).mean(1),
'k', linewidth=3, label=r'Empirical')
plt.ylim([-0.05, 1.05])
plt.xlabel("Instances sorted according to predicted probability "
"(uncalibrated GNB)")
plt.ylabel("P(y=1)")
plt.legend(loc="upper left")
plt.title("Gaussian naive Bayes probabilities")
plt.show()
| bsd-3-clause |
thekerrlab/netpyne | netpyne/support/scalebar.py | 1 | 3571 | # -*- coding: utf-8 -*-
# -*- mode: python -*-
# Adapted from mpl_toolkits.axes_grid1
# LICENSE: Python Software Foundation (http://docs.python.org/license.html)
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from matplotlib.offsetbox import AnchoredOffsetbox
class AnchoredScaleBar(AnchoredOffsetbox):
def __init__(self, transform, sizex=0, sizey=0, labelx=None, labely=None, loc=4,
pad=0.1, borderpad=0.1, sep=2, prop=None, barcolor="black", barwidth=None,
**kwargs):
"""
Draw a horizontal and/or vertical bar with the size in data coordinates
of the given axes. A label will be drawn underneath (center-aligned).
- transform : the coordinate frame (typically axes.transData)
- sizex,sizey : width of x,y bar, in data units. 0 to omit
- labelx,labely : labels for x,y bars; None to omit
- loc : position in containing axes
- pad, borderpad : padding, in fraction of the legend font size (or prop)
- sep : separation between labels and bars in points.
- **kwargs : additional arguments passed to base class constructor
"""
from matplotlib.patches import Rectangle
from matplotlib.offsetbox import AuxTransformBox, VPacker, HPacker, TextArea, DrawingArea
bars = AuxTransformBox(transform)
if sizex:
bars.add_artist(Rectangle((0,0), sizex, 0, ec=barcolor, lw=barwidth, fc="none"))
if sizey:
bars.add_artist(Rectangle((0,0), 0, sizey, ec=barcolor, lw=barwidth, fc="none"))
if sizex and labelx:
self.xlabel = TextArea(labelx, minimumdescent=False)
bars = VPacker(children=[bars, self.xlabel], align="center", pad=0, sep=sep)
if sizey and labely:
self.ylabel = TextArea(labely)
bars = HPacker(children=[self.ylabel, bars], align="center", pad=0, sep=sep)
AnchoredOffsetbox.__init__(self, loc, pad=pad, borderpad=borderpad,
child=bars, prop=prop, frameon=False, **kwargs)
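# Example usage of add_scalebar, defined below (editorial sketch, not part of the
# original module):
#   sb = add_scalebar(ax, unitsx='ms', unitsy='mV')
# sizes both bars to the tick spacing of ``ax``, labels them in the given units
# and hides the axes; pass matchx/matchy=False together with explicit sizex/sizey
# (and labelx/labely) to set the bar lengths manually.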
def add_scalebar(ax, matchx=True, matchy=True, hidex=True, hidey=True, unitsx='', unitsy='', scalex=1, scaley=1, **kwargs):
""" Add scalebars to axes
Adds a set of scale bars to *ax*, matching the size to the ticks of the plot
and optionally hiding the x and y axes
- ax : the axis to attach ticks to
- matchx,matchy : if True, set size of scale bars to spacing between ticks
if False, size should be set using sizex and sizey params
- hidex,hidey : if True, hide x-axis and y-axis of parent
- **kwargs : additional arguments passed to AnchoredScaleBars
Returns created scalebar object
"""
def f(axis):
l = axis.get_majorticklocs()
return len(l)>1 and (l[1] - l[0])
if matchx: kwargs['sizex'] = f(ax.xaxis)
if matchy: kwargs['sizey'] = f(ax.yaxis)
if 'labelx' not in kwargs or kwargs['labelx'] is None:
kwargs['labelx'] = '%.3g %s'%(kwargs['sizex']*scalex,unitsx)
if 'labely' not in kwargs or kwargs['labely'] is None:
kwargs['labely'] = '%.3g %s'%(kwargs['sizey']*scaley,unitsy)
sb = AnchoredScaleBar(ax.transData, **kwargs)
ax.add_artist(sb)
if hidex : ax.xaxis.set_visible(False)
if hidey : ax.yaxis.set_visible(False)
if hidex and hidey: ax.set_frame_on(False)
return sb | mit |
NifTK/NiftyNet | tests/resampler_grid_warper_test.py | 1 | 13970 | from __future__ import absolute_import, print_function, division
import base64
import numpy as np
import tensorflow as tf
from niftynet.layer.grid_warper import AffineGridWarperLayer
from niftynet.layer.resampler import ResamplerLayer
from tests.niftynet_testcase import NiftyNetTestCase
test_case_2d_1 = {
'data': "+/b9/+3/377dpX+Mxp+Y/9nT/d/X6vfMuf+hX/hSY/1pvf/P9/z//+///+7z"
"//ve19noiHuXVjlVSCUpwpyH/9i/9+LDwuufS84yGOYKGOgYQspG2v7Q/uXg"
"07aonZBtS1NqWVRycl9zZEY86sSf/+u/7uezlNlvIdYPA/8AAP8AK+MfgMRd"
"f3JGVzYTdV0xW2d9Y2N7c2NuZEgz58CV/+S66OS1jdt2KOclAP8AAP8AFtkB"
"V6Ema1wjkmZDkmdFXGd5XltwdWFqdldF8c2r/+/V//7szP/JOs9AC+gNGvkS"
"P9YlrNp4fl41kVdDj1ZDYWN8ZFdzblFjfVpU/+/a//Hp/e718P/2v/+8bOdb"
"auVOtv6Q9fW/om9eiEg/oGFYXFR9e2GOdEttbkZO7tPI//v2//P/+/f47PjQ"
"3Pmn3fmi3eGm/+rRyZCHhEg9l19Oal2TbU6HeUp2lm17x7Wn5eXZ7e7w9evp"
"+OXH/+yz+uWs3b+b/9/N3a6ebj8lg1Y1ZFyNcFWIelB0fFde2Mu48fjm+f/7"
"+PPt9uLH/+m6/+W24cSk/+TNz62SUS0LeVYuYGGAa1x9dFRpdldS9OXO/P3r"
"8vb1//78//bg8OG28d6z/OjH/+nLwqWHbksrh2JFWmB6ZWB2aVVedl9R893F"
"//Hl//r/++/z//Xh/PDG9Oa38Nqx/uC+ontcek04kWFVYWWKX1x5bWBqZE0/"
"8dO7/+re89HS//Xx/uvK7+Cp/++1/+u74rWMhE8vilJBk1lYWVmNX1iCbF1y"
"VToz58Gs/9rH/tLF/+DG/+y2/uej/+Ki/92pq3hLjVcxlFtHkVZSbGmYTkNt"
"gmqCWzg22K2a/+TL/93C++C1++eq+OOi/+q489GsfVk3dlArkGRJkGFR3dnw"
"lIadXT5NSiEdvpOA/93C8+DA8+rB+PLA/PDI//fn//v47eHVpph9cVo7ZkYt"
"/f37//f678zQxpeRrYJx993G8OvO7vTQ8PbU/fvs/Pj/9/n/9///+P/t8OnM"
"4s2u".encode('ascii'),
'shape': (16, 16, 3)
}
test_case_2d_target = {
# [[0.96592583, -0.25881905, 2.34314575],
# [0.25881905, 0.96592583, -1.79795897]]
'data': "////19jdbXKIZFl3TC5GVzM1yaKR/9vN/ODU7vnR2v/M0v7N9f/2///9////"
"////////pau3Vlx2aF90aFFXkW5a8c+s/uTD6+a8sOiPauRTR/M9a/102P7n"
"/v7///v////9dYGPXmB1cWVzX0c7v5dz/t+z++q8wN+RWdQ9E98ECO8DINkj"
"keSW//76/+z49vf5YmR7X1duc11pdFRF6cGe/+fD9OvEoNyENuIuAv0AAP8B"
"Gu4Qd9thx8mi07Gly8nWZFmBc1l8bUtck3Jp//Te//Ll/f7wxP7DKdIvAPUA"
"BP8CE9wAVKspbWguWjgToZq8bVaOeE5+b0NcqoSD/vTo//T4/fP74f7of+19"
"KugkLPMeTNUvjrhWclgnlmhHc2yYb1SLdkt5jGF1u6OZ5uDU9/L4+/T88fni"
"zPirletwmfF21P6ox7mKhlI8klVDYmGAblp/eVJve1db1ci36+/e8PTz9Ozq"
"+OjO+fC18Pas3eKg+vDM06WVj1BHllZNXWF6aVxwbFFYkXps/fPY+v7v+P35"
"+fLq9+LF/+m3/eOw3L6a/+DO2qmbg0k7lVxLX2B/aF1tZVBLuaOM//Db/fr1"
"+Pn7//309+zM9+K19dyz5suu/N3IwpmDYjcXkGdJYFmFa19zWEE52ryk/+zd"
"/OPm/O/2/fPp/PLP8uS39uK9/+7Q6tC1lHNUXTkVr5aAUUVtemR5XT8368Ww"
"/9zM987I//Ho/+zM8OKx+ey3896z/+fDwJ9+f1o/gFtA2tDGbVlyVTQ/dlBH"
"6sau/uTL/9fB/uK9/+yx+eai/+qr/ee247OLilk7gk88kWFX+Pf13MPJj2Zk"
"kmZZ68as/eLE+eG9+uWw+uWk/OSk/uWs4rqJn2w/iE8xkVVKpXNy/////vj4"
"7NDPuJKF79G58ebK8e3I9fPD+++9/vHO/+vQr45vcEgkiVg4lV1QxaOi////"
"//////////////78+fnv9Pni8PfY/frn/Pj5/f3/9+7lp5Z8eFo4gVdB5drW"
"////".encode("ASCII"),
'shape': (16, 16, 3)
}
def get_2d_images(test_case):
try:
out = base64.decodebytes(test_case['data'])
except AttributeError:
out = base64.decodestring(test_case['data'])
out = np.frombuffer(out, dtype=np.uint8)
out = out.reshape(test_case['shape'])
return out, out.shape
def get_multiple_2d_images():
image_1, shape = get_2d_images(test_case_2d_1)
image_2 = image_1[::-1, ::-1]
image_3 = image_1[::-1, ]
image_4 = image_1[:, ::-1, ]
return np.stack([image_1, image_2, image_3, image_4]), [4] + list(shape)
def get_multiple_2d_rotated_targets():
image_1, shape = get_2d_images(test_case_2d_target)
image_2 = image_1[::-1, ::-1]
image_3 = image_1[::-1, ]
image_4 = image_1[:, ::-1, ]
return np.stack([image_1, image_2, image_3, image_4]), [4] + list(shape)
def get_multiple_2d_targets():
test_image, input_shape = get_multiple_2d_images()
test_target = np.array(test_image)
test_target[0] = test_target[0, ::-1]
test_target[1] = test_target[1, :, ::-1]
test_target[2] = test_target[2, ::-1, ::-1]
factor = 1.5
shape = input_shape[:]
shape[1] = np.floor(input_shape[1] * factor).astype(np.int)
shape[2] = np.floor(input_shape[2] * factor).astype(np.int)
from scipy.ndimage import zoom
zoomed_target = []
for img in test_target:
zoomed_target.append(zoom(img, [factor, factor, 1]))
test_target = np.stack(zoomed_target, axis=0).astype(np.uint8)
return test_target, shape
def get_multiple_3d_images():
image_1, shape = get_2d_images(test_case_2d_1)
image_2 = image_1[::-1, ::-1]
image_3 = image_1[::-1, ]
image_4 = image_1[:, ::-1, ]
image_2d = np.stack([image_1, image_2, image_3, image_4])
image_3d = np.expand_dims(image_2d, axis=1)
image_3d = np.concatenate([image_3d, image_3d], axis=1)
return image_3d, image_3d.shape
def get_multiple_3d_targets():
test_image, input_shape = get_multiple_2d_images()
test_target = np.array(test_image)
test_target[0] = test_target[0, ::-1]
test_target[1] = test_target[1, :, ::-1]
test_target[2] = test_target[2, ::-1, ::-1]
factor = 1.5
shape = input_shape[:]
shape[1] = np.floor(input_shape[1] * factor).astype(np.int)
shape[2] = np.floor(input_shape[2] * factor).astype(np.int)
from scipy.ndimage import zoom
zoomed_target = []
for img in test_target:
zoomed_target.append(zoom(img, [factor, factor, 1]))
test_target = np.stack(zoomed_target, axis=0).astype(np.uint8)
test_target = np.expand_dims(test_target, axis=1)
test_target = np.concatenate([test_target, test_target], axis=1)
return test_target, test_target.shape
def get_3d_input1():
test_case = tf.constant(
[[[[1, 2, -1], [3, 4, -2]], [[5, 6, -3], [7, 8, -4]]],
[[[9, 10, -5], [11, 12, -6]], [[13, 14, -7], [15, 16, -8]]]],
dtype=tf.float32)
return tf.expand_dims(test_case, 4)
class ResamplerGridWarperTest(NiftyNetTestCase):
def _test_correctness(
self, inputs, grid, interpolation, boundary, expected_value):
resampler = ResamplerLayer(
interpolation=interpolation, boundary=boundary)
out = resampler(inputs, grid)
with self.cached_session() as sess:
out_value = sess.run(out)
self.assertAllClose(expected_value, out_value)
def test_combined(self):
expected = [[[[[1], [-1]], [[3], [-2]]],
[[[5], [-3]], [[7], [-4]]]],
[[[[9.5], [-5]], [[11.5], [-6]]],
[[[13.5], [-7]], [[15.5], [-8]]]]]
affine_grid = AffineGridWarperLayer(source_shape=(2, 2, 3),
output_shape=(2, 2, 2))
test_grid = affine_grid(
tf.constant([[1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0],
[1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, .5]],
dtype=tf.float32))
self._test_correctness(inputs=get_3d_input1(),
grid=test_grid,
interpolation='idw',
boundary='replicate',
expected_value=expected)
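# Editorial note on the test above: the 12 affine parameters appear to be the
# row-major entries of a 3x4 matrix [A | t]. The first sample is the identity, so
# the (2, 2, 2) output simply resamples the end points of the (2, 2, 3) input;
# the second sample adds a 0.5-voxel translation along the last axis, which gives
# the interpolated midpoints (9.5, 11.5, ...) and, with 'replicate' boundary
# handling, the edge values (-5, -6, ...) where the shifted grid leaves the input.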
class image_test(NiftyNetTestCase):
def _test_grads_images(self,
interpolation='linear',
boundary='replicate',
ndim=2):
if ndim == 2:
test_image, input_shape = get_multiple_2d_images()
test_target, target_shape = get_multiple_2d_targets()
identity_affine = [[1., 0., 0., 0., 1., 0.]] * 4
else:
test_image, input_shape = get_multiple_3d_images()
test_target, target_shape = get_multiple_3d_targets()
identity_affine = [[1., 0., 0., 0., 1., 0.,
1., 0., 0., 0., 1., 0.]] * 4
affine_var = tf.get_variable('affine', initializer=identity_affine)
grid = AffineGridWarperLayer(source_shape=input_shape[1:-1],
output_shape=target_shape[1:-1],
constraints=None)
warp_coords = grid(affine_var)
resampler = ResamplerLayer(interpolation, boundary=boundary)
new_image = resampler(tf.constant(test_image, dtype=tf.float32),
warp_coords)
diff = tf.reduce_mean(tf.squared_difference(
new_image, tf.constant(test_target, dtype=tf.float32)))
optimiser = tf.train.AdagradOptimizer(0.01)
grads = optimiser.compute_gradients(diff)
opt = optimiser.apply_gradients(grads)
with self.cached_session() as sess:
sess.run(tf.global_variables_initializer())
init_val, affine_val = sess.run([diff, affine_var])
for _ in range(5):
_, diff_val, affine_val = sess.run([opt, diff, affine_var])
print('{}, {}'.format(diff_val, affine_val[0]))
self.assertGreater(init_val, diff_val)
def test_2d_linear_replicate(self):
self._test_grads_images('linear', 'replicate')
def test_2d_idw_replicate(self):
self._test_grads_images('idw', 'replicate')
def test_2d_linear_circular(self):
self._test_grads_images('linear', 'circular')
def test_2d_idw_circular(self):
self._test_grads_images('idw', 'circular')
def test_2d_linear_symmetric(self):
self._test_grads_images('linear', 'symmetric')
def test_2d_idw_symmetric(self):
self._test_grads_images('idw', 'symmetric')
def test_3d_linear_replicate(self):
self._test_grads_images('linear', 'replicate', ndim=3)
def test_3d_idw_replicate(self):
self._test_grads_images('idw', 'replicate', ndim=3)
def test_3d_linear_circular(self):
self._test_grads_images('linear', 'circular', ndim=3)
def test_3d_idw_circular(self):
self._test_grads_images('idw', 'circular', ndim=3)
def test_3d_linear_symmetric(self):
self._test_grads_images('linear', 'symmetric', ndim=3)
def test_3d_idw_symmetric(self):
self._test_grads_images('idw', 'symmetric', ndim=3)
class image_2D_test_converge(NiftyNetTestCase):
def _test_simple_2d_images(self,
interpolation='linear',
boundary='replicate'):
# rotating around the center (8, 8) by 15 degrees
expected = [[0.96592583, -0.25881905, 2.34314575],
[0.25881905, 0.96592583, -1.79795897]]
expected = np.asarray(expected).flatten()
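# Editorial note: these numbers are cos(15 deg) = 0.96593, sin(15 deg) = 0.25882
# and the translation t = c - R.c for the centre c = (8, 8), roughly
# (2.343, -1.798), i.e. the affine that the optimisation below is expected to approach.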
test_image, input_shape = get_multiple_2d_images()
test_target, target_shape = get_multiple_2d_rotated_targets()
identity_affine = [[1., 0., 0., 0., 1., 0.],
[1., 0., 0., 0., 1., 0.],
[1., 0., 0., 0., 1., 0.],
[1., 0., 0., 0., 1., 0.]]
affine_var = tf.get_variable('affine', initializer=identity_affine)
grid = AffineGridWarperLayer(source_shape=input_shape[1:-1],
output_shape=target_shape[1:-1],
constraints=None)
warp_coords = grid(affine_var)
resampler = ResamplerLayer(interpolation, boundary=boundary)
new_image = resampler(tf.constant(test_image, dtype=tf.float32),
warp_coords)
diff = tf.reduce_mean(tf.squared_difference(
new_image, tf.constant(test_target, dtype=tf.float32)))
learning_rate = 0.05
if (interpolation == 'linear') and (boundary == 'zero'):
learning_rate = 0.0003
optimiser = tf.train.AdagradOptimizer(learning_rate)
grads = optimiser.compute_gradients(diff)
opt = optimiser.apply_gradients(grads)
with self.cached_session() as sess:
sess.run(tf.global_variables_initializer())
init_val, affine_val = sess.run([diff, affine_var])
# compute the MAE between the initial estimated parameters and the expected parameters
init_var_diff = np.sum(np.abs(affine_val[0] - expected))
for it in range(500):
_, diff_val, affine_val = sess.run([opt, diff, affine_var])
# print('{} diff: {}, {}'.format(it, diff_val, affine_val[0]))
# import matplotlib.pyplot as plt
# plt.figure()
# plt.imshow(test_target[0])
# plt.draw()
# plt.figure()
# plt.imshow(sess.run(new_image).astype(np.uint8)[0])
# plt.draw()
# plt.show()
self.assertGreater(init_val, diff_val)
# compute the MAE between the final estimated parameters and the expected parameters
var_diff = np.sum(np.abs(affine_val[0] - expected))
self.assertGreater(init_var_diff, var_diff)
print('{} {} -- diff {}'.format(
interpolation, boundary, var_diff))
print('{}'.format(affine_val[0]))
def test_2d_linear_zero_converge(self):
self._test_simple_2d_images('linear', 'zero')
def test_2d_linear_replicate_converge(self):
self._test_simple_2d_images('linear', 'replicate')
def test_2d_idw_replicate_converge(self):
self._test_simple_2d_images('idw', 'replicate')
def test_2d_linear_circular_converge(self):
self._test_simple_2d_images('linear', 'circular')
def test_2d_idw_circular_converge(self):
self._test_simple_2d_images('idw', 'circular')
def test_2d_linear_symmetric_converge(self):
self._test_simple_2d_images('linear', 'symmetric')
def test_2d_idw_symmetric_converge(self):
self._test_simple_2d_images('idw', 'symmetric')
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
wright-group/WrightTools | tests/data/trim.py | 1 | 2731 | #! /usr/bin/env python3
"""Test channel.trim."""
# --- import --------------------------------------------------------------------------------------
import numpy as np
import WrightTools as wt
import matplotlib.pyplot as plt
# --- test ----------------------------------------------------------------------------------------
def test_trim_2Dgauss():
# create original arrays
x = np.linspace(-3, 3, 31)[:, None]
y = np.linspace(-3, 3, 31)[None, :]
arr = np.exp(-1 * (x ** 2 + y ** 2))
# create damaged array
arr2 = arr.copy()
np.random.seed(11) # set seed for reproducibility
arr2[np.random.random(arr2.shape) < 0.05] = 2
# create data object
d = wt.data.Data()
d.create_variable("x", values=x)
d.create_variable("y", values=y)
d.create_channel("original", arr)
d.create_channel("damaged1", arr2)
d.create_channel("damaged2", arr2)
d.create_channel("damaged3", arr2)
d.create_channel("damaged4", arr2)
d.transform("x", "y")
# trim
d.original.trim([2, 2], factor=2)
d.damaged1.trim([2, 2], factor=2)
d.damaged2.trim([2, 2], factor=2, replace="mean")
d.damaged3.trim([2, 2], factor=2, replace=0.5)
d.damaged4.trim([2, 2], factor=2, replace="exclusive_mean")
# now heal
d.create_channel("healed_linear", d.damaged1[:])
d.heal(channel="healed_linear", fill_value=0, method="linear")
# check
np.testing.assert_allclose(d.original[:], d.original[:], rtol=1e-1, atol=1e-1)
np.testing.assert_allclose(d.original[:], d.healed_linear[:], rtol=1e-1, atol=1e-1)
np.testing.assert_allclose(d.original[:], d.damaged2[:], rtol=1e-1, atol=9e-1)
np.testing.assert_allclose(d.original[:], d.damaged3[:], rtol=1e-1, atol=5e-1)
np.testing.assert_allclose(d.original[:], d.damaged4[:], rtol=1e-1, atol=3e-1)
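# Editorial note: the calls above exercise trim's replace modes -- flagged
# outliers (points that differ from their neighbourhood by more than ``factor``
# standard deviations) are, as used here, either dropped, replaced by a
# neighbourhood mean, a constant, or an exclusive mean; heal() then appears to
# reconstruct the dropped points by linear interpolation, which is why
# healed_linear is compared against the clean Gaussian with a loose tolerance.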
def test_trim_3Dgauss():
# create original arrays
x = np.linspace(-3, 3, 31)[:, None, None]
y = np.linspace(-3, 3, 31)[None, :, None]
z = np.linspace(-3, 3, 31)[None, None, :]
arr = np.exp(-1 * (x ** 2 + y ** 2 + z ** 2))
# create damaged array
arr2 = arr.copy()
np.random.seed(11) # set seed for reproducibility
arr2[np.random.random(arr2.shape) < 0.05] = 1
# create data object
d = wt.data.Data()
d.create_variable("x", values=x)
d.create_variable("y", values=y)
d.create_variable("z", values=z)
d.create_channel("original", arr)
d.create_channel("damaged", arr2)
d.transform("x", "y", "z")
# trim
d.damaged.trim([2, 2, 2], factor=2, replace="mean")
# check
np.testing.assert_allclose(d.original[:], d.damaged[:], rtol=1e-1, atol=9e-1)
if __name__ == "__main__":
test_trim_2Dgauss()
test_trim_3Dgauss()
| mit |
bobbymckinney/seebeck_measurement | program_hightemp/old versions/SeebeckGUIv5.py | 2 | 98130 | #! /usr/bin/python
# -*- coding: utf-8 -*-
"""
Created: 2015-08-19
@author: Bobby McKinney ([email protected])
__Title__ : voltagepanel
Description:
Comments:
"""
import os
import sys
import wx
from wx.lib.pubsub import pub # For communicating b/w the thread and the GUI
import matplotlib
matplotlib.interactive(False)
matplotlib.use('WXAgg') # The recommended way to use wx with mpl is with WXAgg backend.
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg
from matplotlib.figure import Figure
from matplotlib.pyplot import gcf, setp
import matplotlib.animation as animation # For plotting
import pylab
import numpy as np
import matplotlib.pyplot as plt
import minimalmodbus as modbus # For communicating with the cn7500s
import omegacn7500 # Driver for cn7500s under minimalmodbus, adds a few easy commands
import visa # pyvisa, essential for communicating with the Keithley
from threading import Thread # For threading the processes going on behind the GUI
import time
from datetime import datetime # for getting the current date and time
# Modules for saving logs of exceptions
import exceptions
import sys
from logging_utils import setup_logging_to_file, log_exception
# for a fancy status bar:
import EnhancedStatusBar as ESB
#==============================================================================
# Keeps Windows from complaining that the port is already open:
modbus.CLOSE_PORT_AFTER_EACH_CALL = True
version = '5.0 (2015-08-31)'
'''
Global Variables:
'''
# Naming a data file:
dataFile = 'Data_Backup.csv'
finaldataFile = 'Data.csv'
statusFile = 'Status.csv'
seebeckFile = 'Seebeck.csv'
programFile = 'ProgramLog.txt'
APP_EXIT = 1 # id for File\Quit
stability_threshold = 0.25/60
oscillation = 8 # Degree range that the PID will oscillate in
tolerance = (oscillation/8) # This must be set to less than oscillation
measureList = []
dTlist = [0,-2,-4,-6,-8,-6,-4,-2,0,2,4,6,8,6,4,2,0]
maxLimit = 700 # Restricts the user to a max temperature
abort_ID = 0 # Abort method
# Global placers for instruments
k2700 = ''
heaterA = ''
heaterB = ''
tc_type = "k-type" # Set the thermocouple type in order to use the correct voltage correction
# Channels corresponding to switch card:
tempAChannel = '109'
tempBChannel = '110'
highVChannel = '107'
lowVChannel = '108'
# placer for directory
filePath = 'global file path'
# placer for files to be created
myfile = 'global file'
rawfile = 'global file'
processfile = 'global file'
# Placers for the GUI plots:
highV_list = []
thighV_list = []
lowV_list=[]
tlowV_list = []
tempA_list = []
ttempA_list = []
tempB_list = []
ttempB_list = []
tpid_list = []
pidA_list = []
pidB_list = []
timecalclist = []
Vhighcalclist = []
Vlowcalclist = []
dTcalclist = []
avgTcalclist = []
#ResourceManager for visa instrument control
ResourceManager = visa.ResourceManager()
###############################################################################
class Keithley_2700:
''' Used for the matrix card operations. '''
#--------------------------------------------------------------------------
def __init__(self, instr):
self.ctrl = ResourceManager.open_resource(instr)
#end init
#--------------------------------------------------------------------------
def fetch(self, channel):
"""
Scan the channel and take a reading
"""
self.ctrl.write(":ROUTe:SCAN:INTernal (@ %s)" % (channel)) # Specify Channel
#keithley.write(":SENSe1:FUNCtion 'TEMPerature'") # Specify Data type
self.ctrl.write(":ROUTe:SCAN:LSELect INTernal") # Scan Selected Channel
self.ctrl.write(":ROUTe:SCAN:LSELect NONE") # Stop Scan
data = self.ctrl.query(":FETCh?")
return str(data)[0:15] # Fetches Reading
#end def
#--------------------------------------------------------------------------
def openAllChannels(self):
self.ctrl.write("ROUTe:OPEN:ALL")
#end def
#end class
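# Editorial sketch (not part of the original script) of how the wrapper above is
# used elsewhere in this file, assuming the multimeter answers at GPIB address 1:
# k = Keithley_2700('GPIB0::1::INSTR')
# reading = k.fetch('109')  # scan channel 109 and return the reading as a string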
###############################################################################
###############################################################################
class PID(omegacn7500.OmegaCN7500):
#--------------------------------------------------------------------------
def __init__(self, portname, slaveaddress):
omegacn7500.OmegaCN7500.__init__(self, portname, slaveaddress)
#end init
#--------------------------------------------------------------------------
# Commands for easy reference:
# Use .write_register(command, value) and .read_register(command)
# All register values can be found in the Manual or Instruction Sheet.
# You must convert each address from Hex to Decimal.
control = 4101 # Register for control method
pIDcontrol = 0 # Value for PID control method
pIDparam = 4124 # Register for PID parameter selection
pIDparam_Auto = 4 # Value for Auto PID
tCouple = 4100 # Register for setting the temperature sensor type
tCouple_K = 0 # K type thermocouple
heatingCoolingControl = 4102 # Register for Heating/Cooling control selection
heating = 0 # Value for Heating setting
#end class
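# Editorial sketch (not part of the original script): the registers above are
# written and read through minimalmodbus, mirroring the Setup class below, e.g.
# pid = PID('/dev/cu.usbserial', 1)
# pid.write_register(PID.control, PID.pIDcontrol)  # switch controller to PID mode
# current_temp = pid.get_pv()  # process value (temperature) from the controller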
###############################################################################
###############################################################################
class Setup:
"""
Call this class to run the setup for the Keithley and the PID.
"""
def __init__(self):
"""
Prepare the Keithley to take data on the specified channels:
"""
global k2700
global heaterA
global heaterB
# Define Keithley instrument port:
self.k2700 = k2700 = Keithley_2700('GPIB0::1::INSTR')
# Define the ports for the PID
self.heaterB = heaterB = PID('/dev/cu.usbserial', 1) # TOP heater
self.heaterA = heaterA = PID('/dev/cu.usbserial', 2) # BOTTOM heater
"""
Prepare the Keithley for operation:
"""
self.k2700.openAllChannels()
# Define the type of measurement for the channels we are looking at:
self.k2700.ctrl.write(":SENSe1:TEMPerature:TCouple:TYPE K") # Set ThermoCouple type
self.k2700.ctrl.write(":SENSe1:FUNCtion 'TEMPerature', (@ 109,110)")
self.k2700.ctrl.write(":SENSe1:FUNCtion 'VOLTage:DC', (@ 107,108)")
self.k2700.ctrl.write(":TRIGger:SEQuence1:DELay 0")
self.k2700.ctrl.write(":TRIGger:SEQuence1:COUNt 1") # Set the count rate
# Sets the acquisition rate of the measurements
self.k2700.ctrl.write(":SENSe1:VOLTage:DC:NPLCycles 4, (@ 107,108)") # Sets integration period based on frequency
self.k2700.ctrl.write(":SENSe1:TEMPerature:NPLCycles 4, (@ 109,110)")
"""
Prepare the PID for operation:
"""
# Set the control method to PID
self.heaterA.write_register(PID.control, PID.pIDcontrol)
self.heaterB.write_register(PID.control, PID.pIDcontrol)
# Set the PID to auto parameter
self.heaterA.write_register(PID.pIDparam, PID.pIDparam_Auto)
self.heaterB.write_register(PID.pIDparam, PID.pIDparam_Auto)
# Set the thermocouple type
self.heaterA.write_register(PID.tCouple, PID.tCouple_K)
self.heaterB.write_register(PID.tCouple, PID.tCouple_K)
# Set the control to heating only
self.heaterA.write_register(PID.heatingCoolingControl, PID.heating)
self.heaterB.write_register(PID.heatingCoolingControl, PID.heating)
# Run the controllers
self.heaterA.run()
self.heaterB.run()
#end class
###############################################################################
###############################################################################
class ProcessThreadRun(Thread):
"""
Thread that runs the operations behind the GUI. This includes measuring
and plotting.
"""
#--------------------------------------------------------------------------
def __init__(self):
""" Init Worker Thread Class """
Thread.__init__(self)
self.start()
#end init
#--------------------------------------------------------------------------
def run(self):
""" Run Worker Thread """
#Setup()
td=TakeData()
#td = TakeDataTest()
#end def
#end class
###############################################################################
###############################################################################
class InitialCheck:
"""
Intial Check of temperatures and voltages.
"""
#--------------------------------------------------------------------------
def __init__(self):
self.k2700 = k2700
self.heaterA = heaterA
self.heaterB = heaterB
self.take_PID_Data()
self.take_Keithley_Data()
#end init
#--------------------------------------------------------------------------
def take_PID_Data(self):
""" Takes data from the PID
"""
# Take Data and time stamps:
self.pA = self.heaterA.get_pv()
self.pB = self.heaterB.get_pv()
self.pAset = self.heaterA.get_setpoint()
self.pBset = self.heaterB.get_setpoint()
self.updateGUI(stamp="PID A Status", data=self.pA)
self.updateGUI(stamp="PID B Status", data=self.pB)
self.updateGUI(stamp="PID A SP Status", data=self.pAset)
self.updateGUI(stamp="PID B SP Status", data=self.pBset)
print "PID A: %.2f C\nPID B: %.2f C" % (self.pA, self.pB)
#end def
#--------------------------------------------------------------------------
def take_Keithley_Data(self):
""" Takes data from the PID
"""
# Take Data and time stamps:
self.tA = self.k2700.fetch(tempAChannel)
self.tB = self.k2700.fetch(tempBChannel)
self.highV = float(self.k2700.fetch(highVChannel))*10**6
self.lowV = float(self.k2700.fetch(lowVChannel))*10**6
self.updateGUI(stamp="High Voltage Status", data=self.highV)
self.updateGUI(stamp="Low Voltage Status", data=self.lowV)
self.updateGUI(stamp="Temp A Status", data=self.tA)
self.updateGUI(stamp="Temp B Status", data=self.tB)
print "Temp A: %.2f C\nTemp B: %.2f C" % (float(self.tA), float(self.tB))
print "High Voltage: %.1f uV\nLow Voltage: %.1f uV" % (self.highV, self.lowV)
#end def
#--------------------------------------------------------------------------
def updateGUI(self, stamp, data):
"""
Sends data to the GUI (main thread), for live updating while the process is running
in another thread.
"""
time.sleep(0.1)
wx.CallAfter(pub.sendMessage, stamp, msg=data)
#end def
#end class
###############################################################################
###############################################################################
class TakeData:
''' Takes measurements and saves them to file. '''
#--------------------------------------------------------------------------
def __init__(self):
global abort_ID
global k2700
global heaterA
global heaterB
global tolerance
global stability_threshold
global oscillation
global measureList
global dTlist
global timecalclist, Vhighcalclist, Vlowcalclist, dTcalclist, avgTcalclist
self.k2700 = k2700
self.heaterA = heaterA
self.heaterB = heaterB
self.tolerance = tolerance
self.stability_threshold = stability_threshold
self.delay = 1
self.tempdelay = 5
self.tol = 'NO'
self.stable = 'NO'
self.measurement = 'OFF'
self.measurement_indicator = 'none'
self.updateGUI(stamp='Measurement', data=self.measurement)
self.delay = 1
self.tempdelay = 2
self.dTnum = 0
#time initializations
self.tpid = 0
self.ttempA = 0
self.ttempA2 = 0
self.ttempB = 0
self.ttempB2 = 0
self.tVhigh = 0
self.tVhigh2 = 0
self.tVlow = 0
self.tVlow2 = 0
self.exception_ID = 0
self.updateGUI(stamp='Status Bar', data='Running')
self.start = time.time()
print "start take data"
try:
while abort_ID == 0:
for avgtemp in measureList:
print "Set avg temp tp %f" %(avgtemp)
self.heaterA.set_setpoint(avgtemp)
self.heaterB.set_setpoint(avgtemp)
self.dTnum +=1
timecalclist = []
Vhighcalclist = []
Vlowcalclist = []
dTcalclist = []
avgTcalclist = []
currenttempA = avgtemp
currenttempB = avgtemp
self.recentpidA = []
self.recentpidAtime=[]
self.recentpidB = []
self.recentpidBtime=[]
self.stabilityA = '-'
self.stabilityB = '-'
self.updateGUI(stamp="Stability A", data=self.stabilityA)
self.updateGUI(stamp="Stability B", data=self.stabilityB)
self.take_PID_Data()
self.updateStats()
if abort_ID == 1: break
while (self.stable != 'OK'):
self.take_PID_Data()
self.updateStats()
if abort_ID == 1: break
# correct for difference between sample and heaters - more apparent for high temps
if (self.stabilityA != '-' or self.stabilityB != '-'):
if (np.abs(self.stabilityA) < self.stability_threshold or np.abs(self.stabilityB) < self.stability_threshold):
if ((avgtemp - self.tempA > self.tolerance)):
self.heaterA.set_setpoint(currenttempA+2)
currenttempA = currenttempA + 2
self.recentpidA = []
self.recentpidAtime=[]
self.stabilityA = '-'
self.recentpidB = []
self.recentpidBtime=[]
self.stabilityB = '-'
self.stable = 'NO'
self.tol = 'NO'
self.updateGUI(stamp="Stability A", data=self.stabilityA)
self.updateGUI(stamp="Stability B", data=self.stabilityB)
#end if
if ((avgtemp - self.tempB > self.tolerance)):
self.heaterB.set_setpoint(currenttempB+2)
currenttempB = currenttempB + 2
self.recentpidA = []
self.recentpidAtime=[]
self.stabilityA = '-'
self.recentpidB = []
self.recentpidBtime=[]
self.stabilityB = '-'
self.stable = 'NO'
self.tol = 'NO'
self.updateGUI(stamp="Stability A", data=self.stabilityA)
self.updateGUI(stamp="Stability B", data=self.stabilityB)
#end if
#end if
#end while
if abort_ID == 1: break
# vary dT
self.measurement_indicator = 'start'
for point in range(len(dTlist)):
dT = dTlist[point]
print "Set dT to %f" %(dT)
# ramp to correct dT
self.heaterA.set_setpoint(currenttempA+dT/2.0)
self.heaterB.set_setpoint(currenttempB-dT/2.0)
self.recentpidA = []
self.recentpidAtime=[]
self.recentpidB = []
self.recentpidBtime=[]
self.stabilityA = '-'
self.stabilityB = '-'
self.updateGUI(stamp="Stability A", data=self.stabilityA)
self.updateGUI(stamp="Stability B", data=self.stabilityB)
self.pidAset = float(self.heaterA.get_setpoint())
self.pidBset = float(self.heaterB.get_setpoint())
self.take_PID_Data()
self.updateStats()
if abort_ID == 1: break
while (self.stable != 'OK'):
self.take_PID_Data()
self.updateStats()
if abort_ID == 1: break
# end while
if abort_ID == 1: break
# start measurement
if (self.stable == 'OK'):
self.measurement = 'ON'
self.updateGUI(stamp='Measurement', data=self.measurement)
if abort_ID == 1: break
for i in range(4):
self.data_measurement()
if (point==len(dTlist)-1 and i == 3):
self.measurement_indicator = 'stop'
self.write_data_to_file()
if abort_ID == 1: break
#end for
if abort_ID == 1: break
self.measurement = 'OFF'
self.tol = 'NO'
self.stable = 'NO'
self.updateGUI(stamp='Measurement', data=self.measurement)
#end if
if abort_ID == 1: break
#end for
self.process_data()
if abort_ID == 1: break
#end for
abort_ID = 1
#end while
#end try
except exceptions.Exception as e:
log_exception(e)
abort_ID = 1
self.exception_ID = 1
print "Error Occurred, check error_log.log"
#end except
if self.exception_ID == 1:
self.updateGUI(stamp='Status Bar', data='Exception Occurred')
#end if
else:
self.updateGUI(stamp='Status Bar', data='Finished, Ready')
#end else
self.heaterA.set_setpoint(25)
self.heaterB.set_setpoint(25)
self.save_files()
wx.CallAfter(pub.sendMessage, 'Post Process')
wx.CallAfter(pub.sendMessage, 'Enable Buttons')
#end init
#--------------------------------------------------------------------------
def take_PID_Data(self):
""" Takes data from the PID and proceeds to a
function that checks the PID setpoints.
"""
try:
# Take Data and time stamps:
self.pidA = float(self.heaterA.get_pv())
self.pidB = float(self.heaterB.get_pv())
# Get the current setpoints on the PID:
self.pidAset = float(self.heaterA.get_setpoint())
self.pidBset = float(self.heaterB.get_setpoint())
except exceptions.ValueError as VE:
print(VE)
# Take Data and time stamps:
self.pidA = float(self.heaterA.get_pv())
self.pidB = float(self.heaterB.get_pv())
# Get the current setpoints on the PID:
self.pidAset = float(self.heaterA.get_setpoint())
self.pidBset = float(self.heaterB.get_setpoint())
self.tpid = time.time() - self.start
print "tpid: %.2f s\tpidA: %s C\tpidB: %s C" % (self.tpid, self.pidA, self.pidB)
#check stability of PID
if (len(self.recentpidA)<4):
self.recentpidA.append(self.pidA)
self.recentpidAtime.append(self.tpid)
self.recentpidB.append(self.pidB)
self.recentpidBtime.append(self.tpid)
else:
self.recentpidA.pop(0)
self.recentpidAtime.pop(0)
self.recentpidA.append(self.pidA)
self.recentpidAtime.append(self.tpid)
self.recentpidB.pop(0)
self.recentpidBtime.pop(0)
self.recentpidB.append(self.pidB)
self.recentpidBtime.append(self.tpid)
self.stabilityA = self.getStability(self.recentpidA,self.recentpidAtime)
print "stability A: %.4f C/min" % (self.stabilityA*60)
self.stabilityB = self.getStability(self.recentpidB,self.recentpidBtime)
print "stability B: %.4f C/min" % (self.stabilityB*60)
self.updateGUI(stamp="Stability A", data=self.stabilityA*60)
self.updateGUI(stamp="Stability B", data=self.stabilityB*60)
#end else
self.updateGUI(stamp="PID A", data=self.pidA)
self.updateGUI(stamp="PID B", data=self.pidB)
self.updateGUI(stamp="Time PID", data=self.tpid)
self.updateGUI(stamp="PID A SP", data=self.pidAset)
self.updateGUI(stamp="PID B SP", data=self.pidBset)
self.safety_check()
self.check_status()
#end def
#--------------------------------------------------------------------------
def safety_check(self):
global maxLimit
global abort_ID
if float(self.pidA) > maxLimit or float(self.pidB) > maxLimit:
abort_ID = 1
#end def
#--------------------------------------------------------------------------
def updateStats(self):
print('update all stats\n')
self.tempA = float(self.k2700.fetch(tempAChannel))
self.ttempA = time.time() - self.start
self.tempB = float(self.k2700.fetch(tempBChannel))
self.ttempB = time.time() - self.start
self.updateGUI(stamp="Time Temp A", data=float(self.ttempA))
self.updateGUI(stamp="Temp A", data=float(self.tempA))
self.updateGUI(stamp="Time Temp B", data=float(self.ttempB))
self.updateGUI(stamp="Temp B", data=float(self.tempB))
print "tempA: %s C\ntempB: %s C" % (self.tempA, self.tempB)
print "time %f" % (time.time()-self.start)
self.Vhigh = float(self.k2700.fetch(highVChannel))*10**6
self.Vhighcalc = self.voltage_Correction(float(self.Vhigh), 'high')
self.tVhigh = time.time() - self.start
self.Vlow = float(self.k2700.fetch(lowVChannel))*10**6
self.Vlowcalc = self.voltage_Correction(float(self.Vlow), 'low')
self.tVlow = time.time() - self.start
self.updateGUI(stamp="Time High Voltage", data=float(self.tVhigh))
self.updateGUI(stamp="High Voltage", data=float(self.Vhighcalc))
self.updateGUI(stamp="Time Low Voltage", data=float(self.tVlow))
self.updateGUI(stamp="Low Voltage", data=float(self.Vlowcalc))
print "high voltage: %.2f\nlow voltage: %.2f" % (self.Vhighcalc, self.Vlowcalc)
print "time %f" % (time.time()-self.start)
global rawfile
print('\nWrite status to file\n')
rawfile.write('%.1f,'%(self.tVlow))
rawfile.write('%.2f,%.2f,' %(self.pidA,self.pidB))
rawfile.write('%.2f,%.2f,'%(self.tempA,self.tempB))
rawfile.write('%.3f,'%(self.Vhighcalc))
rawfile.write('%.3f\n'%(self.Vlowcalc))
#end def
#--------------------------------------------------------------------------
def getStability(self, temps, times):
coeffs = np.polyfit(times, temps, 1)
# Polynomial Coefficients
results = coeffs.tolist()
return results[0]
#end def
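# Editorial note: getStability returns the slope of a least-squares line through
# the most recent PID readings, i.e. a drift rate in C per second; it is compared
# against stability_threshold (0.25/60 C/s, i.e. 0.25 C/min) and multiplied by 60
# when reported to the GUI.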
#--------------------------------------------------------------------------
def check_status(self):
if (np.abs(self.pidA-self.pidAset) < self.tolerance and np.abs(self.pidB-self.pidBset) < self.tolerance):
self.tol = 'OK'
#end if
else:
self.tol = 'NO'
#end else
if (self.stabilityA != '-' and self.stabilityB != '-'):
if (np.abs(self.stabilityA) < self.stability_threshold and np.abs(self.stabilityB) < self.stability_threshold):
self.stable = 'OK'
#end if
else:
self.stable = 'NO'
#end if
else:
self.stable = 'NO'
#end else
print "tolerance: %s\nstable: %s\n" % (self.tol, self.stable)
#end else
#end elif
self.updateGUI(stamp="Status Bar", data=[self.tol, self.stable])
#end def
#--------------------------------------------------------------------------
def data_measurement(self):
# Takes and writes to file the data on the Keithley
# The only change between blocks like this one is the specific
# channel on the Keithley that is being measured.
self.tempA = float(self.k2700.fetch(tempAChannel))
self.ttempA = time.time() - self.start
self.updateGUI(stamp="Time Temp A", data=float(self.ttempA))
self.updateGUI(stamp="Temp A", data=float(self.tempA))
print "ttempA: %.2f s\ttempA: %.2f C" % (self.ttempA, self.tempA)
time.sleep(0.2)
# The rest is a repeat of the above code, for different
# channels.
self.tempB = float(self.k2700.fetch(tempBChannel))
self.ttempB = time.time() - self.start
self.updateGUI(stamp="Time Temp B", data=float(self.ttempB))
self.updateGUI(stamp="Temp B", data=float(self.tempB))
print "ttempB: %.2f s\ttempB: %.2f C" % (self.ttempB, self.tempB)
time.sleep(0.2)
self.Vhigh = float(self.k2700.fetch(highVChannel))*10**6
self.Vhighcalc = self.voltage_Correction(float(self.Vhigh), 'high')
self.tVhigh = time.time() - self.start
self.updateGUI(stamp="Time High Voltage", data=float(self.tVhigh))
self.updateGUI(stamp="High Voltage", data=float(self.Vhighcalc))
print "thighV: %.2f s\thighV_raw: %f uV\thighV_corrected: %f uV" % (self.tVhigh, self.Vhigh, self.Vhighcalc)
time.sleep(0.2)
self.Vlow = float(self.k2700.fetch(lowVChannel))*10**6
self.Vlowcalc = self.voltage_Correction(float(self.Vlow), 'low')
self.tVlow = time.time() - self.start
self.updateGUI(stamp="Time Low Voltage", data=float(self.tVlow))
self.updateGUI(stamp="Low Voltage", data=float(self.Vlowcalc))
print "tlowV: %.2f s\tlowV_raw: %f uV\tlowV_corrected: %f uV" % (self.tVlow, self.Vlow, self.Vlowcalc)
time.sleep(0.2)
# Symmetrize the measurement and repeat in reverse
self.Vlow2 = float(self.k2700.fetch(lowVChannel))*10**6
self.Vlowcalc2 = self.voltage_Correction(float(self.Vlow2), 'low')
self.tVlow2 = time.time() - self.start
self.updateGUI(stamp="Time Low Voltage", data=float(self.tVlow2))
self.updateGUI(stamp="Low Voltage", data=float(self.Vlowcalc2))
print "tlowV: %.2f s\tlowV_raw: %f uV\tlowV_corrected: %f uV" % (self.tVlow2, self.Vlow2, self.Vlowcalc2)
time.sleep(0.2)
self.Vhigh2 = float(self.k2700.fetch(highVChannel))*10**6
self.Vhighcalc2 = self.voltage_Correction(float(self.Vhigh2), 'high')
self.tVhigh2 = time.time() - self.start
self.updateGUI(stamp="Time High Voltage", data=float(self.tVhigh2))
self.updateGUI(stamp="High Voltage", data=float(self.Vhighcalc2))
print "thighV: %.2f s\thighV_raw: %f uV\thighV_corrected: %f uV" % (self.tVhigh2, self.Vhigh2, self.Vhighcalc2)
time.sleep(0.2)
self.tempB2 = float(self.k2700.fetch(tempBChannel))
self.ttempB2 = time.time() - self.start
self.updateGUI(stamp="Time Temp B", data=float(self.ttempB2))
self.updateGUI(stamp="Temp B", data=float(self.tempB2))
print "ttempB: %.2f s\ttempB: %.2f C" % (self.ttempB2, self.tempB2)
time.sleep(0.2)
self.tempA2 = float(self.k2700.fetch(tempAChannel))
self.ttempA2 = time.time() - self.start
self.updateGUI(stamp="Time Temp A", data=float(self.ttempA2))
self.updateGUI(stamp="Temp A", data=float(self.tempA2))
print "ttempA: %.2f s\ttempA: %.2f C" % (self.ttempA2, self.tempA2)
#end def
#--------------------------------------------------------------------------
def voltage_Correction(self, raw_data, side):
''' raw_data must be in uV '''
# Kelvin conversion for polynomial correction.
if self.ttempA > self.ttempA2:
tempA = float(self.tempA) + 273.15
else:
tempA = float(self.tempA2) + 273.15
if self.ttempB > self.ttempB2:
tempB = float(self.tempB) + 273.15
else:
tempB = float(self.tempB2) + 273.15
self.dT = tempA - tempB
avgT = (tempA + tempB)/2
# Correction for effect from Thermocouple Seebeck
out = self.alpha(avgT, side)*self.dT - raw_data
return out
#end def
#--------------------------------------------------------------------------
def alpha(self, x, side):
''' x = avgT
alpha in uV/K
'''
if tc_type == "k-type":
### If Chromel, taken from Chromel_Seebeck.txt
if side == 'high':
if ( x >= 270 and x < 700):
alpha = -2467.61114613*x**0 + 55.6028987953*x**1 + \
-0.552110359087*x**2 + 0.00320554346691*x**3 + \
-1.20477254034e-05*x**4 + 3.06344710205e-08*x**5 + \
-5.33914758601e-11*x**6 + 6.30044607727e-14*x**7 + \
-4.8197269477e-17*x**8 + 2.15928374212e-20*x**9 + \
-4.30421084091e-24*x**10
#end if
elif ( x >= 700 and x < 1599):
alpha = 1165.13254764*x**0 + -9.49622421414*x**1 + \
0.0346344390853*x**2 + -7.27785048931e-05*x**3 + \
9.73981855547e-08*x**4 + -8.64369652227e-11*x**5 + \
5.10080771762e-14*x**6 + -1.93318725171e-17*x**7 + \
4.27299905603e-21*x**8 + -4.19761748937e-25*x**9
#end if
else:
print "Error in voltage correction, out of range."
#end if (Chromel)
### If Alumel, taken from Alumel_Seebeck.txt
elif side == 'low':
if ( x >= 270 and x < 570):
alpha = -3465.28789643*x**0 + 97.4007289124*x**1 + \
-1.17546754681*x**2 + 0.00801252041119*x**3 + \
-3.41263237031e-05*x**4 + 9.4391002358e-08*x**5 + \
-1.69831949233e-10*x**6 + 1.91977765586e-13*x**7 + \
-1.2391854625e-16*x**8 + 3.48576207577e-20*x**9
#end if
elif ( x >= 570 and x < 1599):
alpha = 254.644633774*x**0 + -2.17639940109*x**1 + \
0.00747127856327*x**2 + -1.41920634198e-05*x**3 + \
1.61971537881e-08*x**4 + -1.14428153299e-11*x**5 + \
4.969263632e-15*x**6 + -1.27526741699e-18*x**7 + \
1.80403838088e-22*x**8 + -1.23699936952e-26*x**9
#end if
else:
print "Error in voltage correction, out of range."
#end if (Alumel)
else:
print "Error in voltage correction."
#end if (K-type)
return alpha
#end def
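# Editorial note: voltage_Correction returns alpha(avgT)*dT - raw_data (in uV),
# where alpha is the chromel ('high') or alumel ('low') thermocouple-leg Seebeck
# coefficient evaluated at the mean temperature, so the alpha*dT term is the
# voltage contributed by the leads across the measured temperature difference.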
#--------------------------------------------------------------------------
def write_data_to_file(self):
global timecalclist, Vhighcalclist, Vlowcalclist, dTcalclist, avgTcalclist
global myfile
print('\nWrite data to file\n')
time = (self.ttempA + self.ttempB + self.tVlow + self.tVhigh + self.ttempA2 + self.ttempB2 + self.tVlow2 + self.tVhigh2)/8
ta = (self.tempA + self.tempA2)/2
tb = (self.tempB + self.tempB2)/2
avgt = (ta + tb)/2
dt = ta-tb
vhigh = (self.Vhighcalc + self.Vhighcalc2)/2
vlow = (self.Vlowcalc + self.Vlowcalc2)/2
myfile.write('%f,' %(time))
myfile.write('%f,%f,' % (avgt, dt) )
myfile.write('%.3f,%.3f' % (vhigh,vlow))
timecalclist.append(time)
Vhighcalclist.append(vhigh)
Vlowcalclist.append(vlow)
dTcalclist.append(dt)
avgTcalclist.append(avgt)
# indicates whether an oscillation has started or stopped
if self.measurement_indicator == 'start':
myfile.write(',Start Oscillation')
self.measurement_indicator = 'none'
elif self.measurement_indicator == 'stop':
myfile.write(',Stop Oscillation')
self.measurement_indicator = 'none'
elif self.measurement_indicator == 'none':
myfile.write(', ')
else:
myfile.write(', ')
myfile.write('\n')
#end def
#--------------------------------------------------------------------------
def updateGUI(self, stamp, data):
"""
Sends data to the GUI (main thread), for live updating while the process is running
in another thread.
"""
time.sleep(0.1)
wx.CallAfter(pub.sendMessage, stamp, msg=data)
#end def
#--------------------------------------------------------------------------
def process_data(self):
global timecalclist, Vhighcalclist, Vlowcalclist, dTcalclist, avgTcalclist
global processfile
time = np.average(timecalclist)
avgT = np.average(avgTcalclist)
results_high = {}
results_low = {}
coeffs_high = np.polyfit(dTcalclist, Vhighcalclist, 1)
coeffs_low = np.polyfit(dTcalclist,Vlowcalclist,1)
# Polynomial Coefficients
polynomial_high = coeffs_high.tolist()
polynomial_low = coeffs_low.tolist()
seebeck_high = polynomial_high[0]
offset_high = polynomial_high[1]
seebeck_low = polynomial_low[0]
offset_low = polynomial_low[1]
# Calculate coefficient of determination (r-squared):
p_high = np.poly1d(coeffs_high)
p_low = np.poly1d(coeffs_low)
# fitted values:
yhat_high = p_high(dTcalclist)
yhat_low = p_low(dTcalclist)
# mean of values:
ybar_high = np.sum(Vhighcalclist)/len(Vhighcalclist)
ybar_low = np.sum(Vlowcalclist)/len(Vlowcalclist)
# regression sum of squares:
ssreg_high = np.sum((yhat_high-ybar_high)**2) # or sum([ (yihat - ybar)**2 for yihat in yhat])
ssreg_low = np.sum((yhat_low-ybar_low)**2)
# total sum of squares:
sstot_high = np.sum((Vhighcalclist - ybar_high)**2)
sstot_low = np.sum((Vlowcalclist - ybar_low)**2) # or sum([ (yi - ybar)**2 for yi in y])
rsquared_high = ssreg_high / sstot_high
rsquared_low = ssreg_low / sstot_low
processfile.write('%.1f,%.3f,%.3f,%.3f,%.2f,%.2f,%.5f,%.5f\n'%(time,avgT,seebeck_high,offset_high,rsquared_high,seebeck_low,offset_low,rsquared_low))
fithigh = {}
fitlow = {}
fithigh['polynomial'] = polynomial_high
fitlow['polynomial'] = polynomial_low
fithigh['r-squared'] = rsquared_high
fitlow['r-squared'] = rsquared_low
celsius = u"\u2103"
self.create_plot(dTcalclist,Vlowcalclist,Vhighcalclist,fitlow,fithigh,str(avgT)+celsius)
self.updateGUI(stamp="Seebeck High", data=seebeck_high)
self.updateGUI(stamp="Seebeck Low", data=seebeck_low)
#end def
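# Editorial note: the Seebeck coefficient reported above is the slope of the
# linear fit dV = S*dT + offset over one oscillation, computed separately for the
# high- and low-side voltage leads, with the r-squared of each fit written to the
# process file as a goodness-of-fit check.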
#--------------------------------------------------------------------------
def create_plot(self, x, ylow, yhigh, fitLow, fitHigh, title):
global filePath
dpi = 400
plt.ioff()
# Create Plot:
fig = plt.figure(self.dTnum, dpi=dpi)
ax = fig.add_subplot(111)
ax.grid()
ax.set_title(title)
ax.set_xlabel("dT (K)")
ax.set_ylabel("dV (uV)")
# Plot data points:
ax.scatter(x, ylow, color='r', marker='.', label="Low Voltage")
ax.scatter(x, yhigh, color='b', marker='.', label="High Voltage")
# Overlay linear fits:
coeffsLow = fitLow['polynomial']
coeffsHigh = fitHigh['polynomial']
p_low = np.poly1d(coeffsLow)
p_high = np.poly1d(coeffsHigh)
xp = np.linspace(min(x), max(x), 5000)
low_eq = 'dV = %.2f*(dT) + %.2f' % (coeffsLow[0], coeffsLow[1])
high_eq = 'dV = %.2f*(dT) + %.2f' % (coeffsHigh[0], coeffsHigh[1])
ax.plot(xp, p_low(xp), '-', c='#FF9900', label="Low Voltage Fit\n %s" % low_eq)
ax.plot(xp, p_high(xp), '-', c='g', label="High Voltage Fit\n %s" % high_eq)
ax.legend(loc='upper left', fontsize='10')
# Save:
plot_folder = filePath + '/Seebeck Plots/'
if not os.path.exists(plot_folder):
os.makedirs(plot_folder)
fig.savefig('%s.png' % (plot_folder + title) , dpi=dpi)
plt.close()
#end def
#--------------------------------------------------------------------------
def save_files(self):
''' Function saving the files after the data acquisition loop has been
exited.
'''
print('Save Files')
global dataFile
global finaldataFile
global myfile
global rawfile
global processfile
stop = time.time()
end = datetime.now() # End time
totalTime = stop - self.start # Elapsed Measurement Time (seconds)
endStr = 'end time: %s \nelapsed measurement time: %s seconds \n \n' % (str(end), str(totalTime))
myfile.close() # Close the file
rawfile.close()
processfile.close()
myfile = open(dataFile, 'r') # Opens the file for Reading
contents = myfile.readlines() # Reads the lines of the file into python set
myfile.close()
# Adds elapsed measurement time to the read file list
contents.insert(1, endStr) # Specify which line and what value to insert
# NOTE: First line is line 0
# Writes the elapsed measurement time to the final file
myfinalfile = open(finaldataFile,'w')
contents = "".join(contents)
myfinalfile.write(contents)
myfinalfile.close()
# Save the GUI plots
global save_plots_ID
save_plots_ID = 1
self.updateGUI(stamp='Save_All', data='Save')
#end def
#end class
###############################################################################
###############################################################################
class BoundControlBox(wx.Panel):
""" A static box with a couple of radio buttons and a text
box. Allows switching between an automatic mode and a
manual mode with an associated value.
"""
#--------------------------------------------------------------------------
def __init__(self, parent, ID, label, initval):
wx.Panel.__init__(self, parent, ID)
self.value = initval
box = wx.StaticBox(self, -1, label)
sizer = wx.StaticBoxSizer(box, wx.VERTICAL)
self.radio_auto = wx.RadioButton(self, -1, label="Auto", style=wx.RB_GROUP)
self.radio_manual = wx.RadioButton(self, -1, label="Manual")
self.manual_text = wx.TextCtrl(self, -1,
size=(30,-1),
value=str(initval),
style=wx.TE_PROCESS_ENTER)
self.Bind(wx.EVT_UPDATE_UI, self.on_update_manual_text, self.manual_text)
self.Bind(wx.EVT_TEXT_ENTER, self.on_text_enter, self.manual_text)
manual_box = wx.BoxSizer(wx.HORIZONTAL)
manual_box.Add(self.radio_manual, flag=wx.ALIGN_CENTER_VERTICAL)
manual_box.Add(self.manual_text, flag=wx.ALIGN_CENTER_VERTICAL)
sizer.Add(self.radio_auto, 0, wx.ALL, 10)
sizer.Add(manual_box, 0, wx.ALL, 10)
self.SetSizer(sizer)
sizer.Fit(self)
#end init
#--------------------------------------------------------------------------
def on_update_manual_text(self, event):
self.manual_text.Enable(self.radio_manual.GetValue())
#end def
#--------------------------------------------------------------------------
def on_text_enter(self, event):
self.value = self.manual_text.GetValue()
#end def
#--------------------------------------------------------------------------
def is_auto(self):
return self.radio_auto.GetValue()
#end def
#--------------------------------------------------------------------------
def manual_value(self):
return self.value
#end def
#end class
###############################################################################
###############################################################################
class UserPanel(wx.Panel):
''' User Input Panel '''
#--------------------------------------------------------------------------
def __init__(self, *args, **kwargs):
wx.Panel.__init__(self, *args, **kwargs)
global tolerance
global oscillation
global stability_threshold
self.oscillation = oscillation
self.tolerance = tolerance
self.stability_threshold = stability_threshold*60
self.create_title("User Panel") # Title
self.celsius = u"\u2103"
self.font2 = wx.Font(11, wx.DEFAULT, wx.NORMAL, wx.NORMAL)
self.oscillation_control() # Oscillation range control
self.tolerance_control() # PID tolerance level Control
self.stability_control() # PID stability threshold control
self.measurementListBox()
self.maxLimit_label()
self.linebreak1 = wx.StaticLine(self, pos=(-1,-1), size=(300,1))
self.linebreak2 = wx.StaticLine(self, pos=(-1,-1), size=(300,1))
self.linebreak3 = wx.StaticLine(self, pos=(-1,-1), size=(300,1))
self.linebreak4 = wx.StaticLine(self, pos=(-1,-1), size=(600,1), style=wx.LI_HORIZONTAL)
self.run_stop() # Run and Stop buttons
self.create_sizer() # Set Sizer for panel
pub.subscribe(self.enable_buttons, "Enable Buttons")
#end init
#--------------------------------------------------------------------------
def create_title(self, name):
self.titlePanel = wx.Panel(self, -1)
title = wx.StaticText(self.titlePanel, label=name)
font_title = wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.BOLD)
title.SetFont(font_title)
hbox = wx.BoxSizer(wx.HORIZONTAL)
hbox.Add((0,-1))
hbox.Add(title, 0, wx.LEFT, 5)
self.titlePanel.SetSizer(hbox)
#end def
#--------------------------------------------------------------------------
def run_stop(self):
self.run_stopPanel = wx.Panel(self, -1)
rs_sizer = wx.GridBagSizer(3, 3)
self.btn_check = btn_check = wx.Button(self.run_stopPanel, label='check', style=0, size=(60,30)) # Initial Status Button
btn_check.SetBackgroundColour((0,0,255))
        caption_check = wx.StaticText(self.run_stopPanel, label='*check initial status')
self.btn_run = btn_run = wx.Button(self.run_stopPanel, label='run', style=0, size=(60,30)) # Run Button
btn_run.SetBackgroundColour((0,255,0))
caption_run = wx.StaticText(self.run_stopPanel, label='*run measurement')
self.btn_stop = btn_stop = wx.Button(self.run_stopPanel, label='stop', style=0, size=(60,30)) # Stop Button
btn_stop.SetBackgroundColour((255,0,0))
caption_stop = wx.StaticText(self.run_stopPanel, label = '*quit operation')
btn_check.Bind(wx.EVT_BUTTON, self.check)
btn_run.Bind(wx.EVT_BUTTON, self.run)
btn_stop.Bind(wx.EVT_BUTTON, self.stop)
controlPanel = wx.StaticText(self.run_stopPanel, label='Control Panel')
controlPanel.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.BOLD))
rs_sizer.Add(controlPanel,(0,0), span=(1,3),flag=wx.ALIGN_CENTER_HORIZONTAL)
rs_sizer.Add(btn_check,(1,0),flag=wx.ALIGN_CENTER_HORIZONTAL)
rs_sizer.Add(caption_check,(2,0),flag=wx.ALIGN_CENTER_HORIZONTAL)
rs_sizer.Add(btn_run,(1,1),flag=wx.ALIGN_CENTER_HORIZONTAL)
rs_sizer.Add(caption_run,(2,1),flag=wx.ALIGN_CENTER_HORIZONTAL)
rs_sizer.Add(btn_stop,(1,2),flag=wx.ALIGN_CENTER_HORIZONTAL)
rs_sizer.Add(caption_stop,(2,2),flag=wx.ALIGN_CENTER_HORIZONTAL)
self.run_stopPanel.SetSizer(rs_sizer)
btn_stop.Disable()
# end def
#--------------------------------------------------------------------------
def check(self, event):
InitialCheck()
#end def
#--------------------------------------------------------------------------
def run(self, event):
global k2700
global dataFile
global statusFile
global finaldataFile
global myfile
global rawfile
global processfile
global measureList
global abort_ID
measureList = [None]*self.listbox.GetCount()
for k in xrange(self.listbox.GetCount()):
measureList[k] = int(self.listbox.GetString(k))
#end for
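        # a run needs at least one setpoint temperature and a dT oscillation schedule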
if (len(measureList) > 0 and len(dTlist) > 0 ):
try:
self.name_folder()
if self.run_check == wx.ID_OK:
myfile = open(dataFile, 'w') # opens file for writing/overwriting
rawfile = open(statusFile,'w')
processfile = open(seebeckFile,'w')
begin = datetime.now() # Current date and time
myfile.write('Start Time: ' + str(begin) + '\n')
rawfile.write('Start Time: ' + str(begin) + '\n')
processfile.write('Start Time: ' + str(begin) + '\n')
dataheaders = 'time, avgtemp, deltatemp, Vhigh, Vlow, indicator\n'
myfile.write(dataheaders)
rawheaders = 'time, pidA, pidB, tempA, tempB, Vhigh, Vlow\n'
rawfile.write(rawheaders)
processheaders = 'time(s),temperature (C),seebeck_high (uV/K),offset_high (uV),R^2_high,seebeck_low (uV/K),offset_low (uV),R^2_low\n'
processfile.write(processheaders)
abort_ID = 0
self.btn_osc.Disable()
self.btn_tol.Disable()
self.btn_stability_threshold.Disable()
self.btn_new.Disable()
self.btn_ren.Disable()
self.btn_dlt.Disable()
self.btn_clr.Disable()
self.btn_check.Disable()
self.btn_run.Disable()
self.btn_stop.Enable()
#start the threading process
thread = ProcessThreadRun()
#end if
#end try
except visa.VisaIOError:
wx.MessageBox("Not all instruments are connected!", "Error")
#end except
#end if
#end def
#--------------------------------------------------------------------------
def name_folder(self):
question = wx.MessageDialog(None, 'The data files are saved into a folder upon ' + \
'completion. \nBy default, the folder will be named with a time stamp.\n\n' + \
'Would you like to name your own folder?', 'Question',
wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION)
answer = question.ShowModal()
if answer == wx.ID_YES:
self.folder_name = wx.GetTextFromUser('Enter the name of your folder.\n' + \
'Only type in a name, NOT a file path.')
if self.folder_name == "":
wx.MessageBox("Canceled")
else:
self.choose_dir()
#end if
else:
date = str(datetime.now())
self.folder_name = 'Seebeck Data %s.%s.%s' % (date[0:13], date[14:16], date[17:19])
self.choose_dir()
#end else
#end def
#--------------------------------------------------------------------------
def choose_dir(self):
found = False
dlg = wx.DirDialog (None, "Choose the directory to save your files.", "",
wx.DD_DEFAULT_STYLE)
self.run_check = dlg.ShowModal()
if self.run_check == wx.ID_OK:
global filePath
filePath = dlg.GetPath()
filePath = filePath + '/' + self.folder_name
if not os.path.exists(filePath):
os.makedirs(filePath)
os.chdir(filePath)
else:
n = 1
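                # folder already exists: append ' - n' using the first unused n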
while found == False:
path = filePath + ' - ' + str(n)
if os.path.exists(path):
n = n + 1
else:
os.makedirs(path)
os.chdir(path)
n = 1
found = True
#end while
#end else
#end if
# Set the global path to the newly created path, if applicable.
if found == True:
filePath = path
#end if
#end def
#--------------------------------------------------------------------------
def stop(self, event):
global abort_ID
abort_ID = 1
        self.enable_buttons()
#end def
#--------------------------------------------------------------------------
def oscillation_control(self):
self.oscPanel = wx.Panel(self, -1)
hbox = wx.BoxSizer(wx.HORIZONTAL)
        self.label_osc = wx.StaticText(self, label="PID Oscillation (%s):" % self.celsius)
self.text_osc = text_osc = wx.StaticText(self.oscPanel, label=str(self.oscillation) + ' '+self.celsius)
text_osc.SetFont(self.font2)
self.edit_osc = edit_osc = wx.TextCtrl(self.oscPanel, size=(40, -1))
self.btn_osc = btn_osc = wx.Button(self.oscPanel, label="save", size=(40, -1))
        text_guide_osc = wx.StaticText(self.oscPanel, label="The PID will oscillate over this \ndegree range while taking \na measurement.")
btn_osc.Bind(wx.EVT_BUTTON, self.save_oscillation)
hbox.Add((0, -1))
hbox.Add(text_osc, 0, wx.LEFT, 5)
hbox.Add(edit_osc, 0, wx.LEFT, 40)
hbox.Add(btn_osc, 0, wx.LEFT, 5)
hbox.Add(text_guide_osc, 0, wx.LEFT, 5)
self.oscPanel.SetSizer(hbox)
#end def
#--------------------------------------------------------------------------
def save_oscillation(self, e):
global oscillation
global dTlist
try:
self.oscillation = self.edit_osc.GetValue()
if float(self.oscillation) > maxLimit:
self.oscillation = str(maxLimit)
self.text_osc.SetLabel(self.oscillation)
oscillation = float(self.oscillation)
dTlist = [oscillation*i/4 for i in range(0,-5,-1)+range(-3,5)+range(3,-1,-1)]
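            # dT setpoint schedule: ramps from 0 down to -oscillation, back up
            # through 0 to +oscillation, and back to 0 in quarter-oscillation steps,
            # e.g. oscillation = 4 gives [0, -1, -2, -3, -4, -3, -2, -1, 0, 1, 2, 3, 4, 3, 2, 1, 0]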
except ValueError:
wx.MessageBox("Invalid input. Must be a number.", "Error")
#end def
#--------------------------------------------------------------------------
def tolerance_control(self):
self.tolPanel = wx.Panel(self, -1)
hbox = wx.BoxSizer(wx.HORIZONTAL)
self.label_tol = wx.StaticText(self, label="Tolerance ("+self.celsius+")")
self.text_tol = text_tol = wx.StaticText(self.tolPanel, label=str(self.tolerance) + ' '+self.celsius)
text_tol.SetFont(self.font2)
self.edit_tol = edit_tol = wx.TextCtrl(self.tolPanel, size=(40, -1))
self.btn_tol = btn_tol = wx.Button(self.tolPanel, label="save", size=(40, -1))
        text_guide_tol = wx.StaticText(self.tolPanel, label="The allowed deviation from the\nPID set points required\nto start a measurement")
btn_tol.Bind(wx.EVT_BUTTON, self.save_tolerance)
hbox.Add((0, -1))
hbox.Add(text_tol, 0, wx.LEFT, 5)
hbox.Add(edit_tol, 0, wx.LEFT, 40)
hbox.Add(btn_tol, 0, wx.LEFT, 5)
hbox.Add(text_guide_tol, 0, wx.LEFT, 5)
self.tolPanel.SetSizer(hbox)
#end def
#--------------------------------------------------------------------------
def save_tolerance(self, e):
global tolerance
global oscillation
try:
self.tolerance = self.edit_tol.GetValue()
if float(self.tolerance) > oscillation:
self.tolerance = str(oscillation-1)
self.text_tol.SetLabel(self.tolerance)
tolerance = float(self.tolerance)
except ValueError:
wx.MessageBox("Invalid input. Must be a number.", "Error")
#end def
#--------------------------------------------------------------------------
def stability_control(self):
self.stability_threshold_Panel = wx.Panel(self, -1)
hbox = wx.BoxSizer(wx.HORIZONTAL)
self.label_stability_threshold = wx.StaticText(self, label="Stability Threshold ("+self.celsius+"/min)")
self.text_stability_threshold = text_stability_threshold = wx.StaticText(self.stability_threshold_Panel, label=str(self.stability_threshold) + ' '+self.celsius+'/min')
text_stability_threshold.SetFont(self.font2)
self.edit_stability_threshold = edit_stability_threshold = wx.TextCtrl(self.stability_threshold_Panel, size=(40, -1))
self.btn_stability_threshold = btn_stability_threshold = wx.Button(self.stability_threshold_Panel, label="save", size=(40, -1))
text_guide_stability_threshold = wx.StaticText(self.stability_threshold_Panel, label='The change in the PID must\nbe below this threshold before\na measurement will begin.')
btn_stability_threshold.Bind(wx.EVT_BUTTON, self.save_stability_threshold)
hbox.Add((0, -1))
hbox.Add(text_stability_threshold, 0, wx.LEFT, 5)
hbox.Add(edit_stability_threshold, 0, wx.LEFT, 40)
hbox.Add(btn_stability_threshold, 0, wx.LEFT, 5)
hbox.Add(text_guide_stability_threshold, 0, wx.LEFT, 5)
self.stability_threshold_Panel.SetSizer(hbox)
#end def
#--------------------------------------------------------------------------
def save_stability_threshold(self, e):
global stability_threshold
try:
self.stability_threshold = self.edit_stability_threshold.GetValue()
self.text_stability_threshold.SetLabel(self.stability_threshold)
stability_threshold = float(self.stability_threshold)/60
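            # the GUI takes the threshold in degC/min; the global is stored in
            # degC/s, hence the /60 here and the *60 in __init__ when displaying it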
except ValueError:
wx.MessageBox("Invalid input. Must be a number.", "Error")
#end def
#--------------------------------------------------------------------------
def measurementListBox(self):
# ids for measurement List Box
ID_NEW = 1
ID_CHANGE = 2
ID_CLEAR = 3
ID_DELETE = 4
self.measurementPanel = wx.Panel(self, -1)
hbox = wx.BoxSizer(wx.HORIZONTAL)
self.label_measurements = wx.StaticText(self,
label="Measurements (%s):"
% self.celsius
)
self.label_measurements.SetFont(self.font2)
self.listbox = wx.ListBox(self.measurementPanel, size=(75,150))
btnPanel = wx.Panel(self.measurementPanel, -1)
vbox = wx.BoxSizer(wx.VERTICAL)
self.btn_new = new = wx.Button(btnPanel, ID_NEW, 'New', size=(50, 20))
self.btn_ren = ren = wx.Button(btnPanel, ID_CHANGE, 'Change', size=(50, 20))
self.btn_dlt = dlt = wx.Button(btnPanel, ID_DELETE, 'Delete', size=(50, 20))
self.btn_clr = clr = wx.Button(btnPanel, ID_CLEAR, 'Clear', size=(50, 20))
self.Bind(wx.EVT_BUTTON, self.NewItem, id=ID_NEW)
self.Bind(wx.EVT_BUTTON, self.OnRename, id=ID_CHANGE)
self.Bind(wx.EVT_BUTTON, self.OnDelete, id=ID_DELETE)
self.Bind(wx.EVT_BUTTON, self.OnClear, id=ID_CLEAR)
self.Bind(wx.EVT_LISTBOX_DCLICK, self.OnRename)
vbox.Add((-1, 5))
vbox.Add(new)
vbox.Add(ren, 0, wx.TOP, 5)
vbox.Add(dlt, 0, wx.TOP, 5)
vbox.Add(clr, 0, wx.TOP, 5)
btnPanel.SetSizer(vbox)
#hbox.Add(self.label_measurements, 0, wx.LEFT, 5)
hbox.Add(self.listbox, 1, wx.ALL, 5)
hbox.Add(btnPanel, 0, wx.RIGHT, 5)
self.measurementPanel.SetSizer(hbox)
#end def
#--------------------------------------------------------------------------
def NewItem(self, event):
text = wx.GetTextFromUser('Enter a new measurement', 'Insert dialog')
if text != '':
self.listbox.Append(text)
time.sleep(0.2)
self.listbox_max_limit(maxLimit)
#end def
#--------------------------------------------------------------------------
def OnRename(self, event):
sel = self.listbox.GetSelection()
text = self.listbox.GetString(sel)
renamed = wx.GetTextFromUser('Rename item', 'Rename dialog', text)
if renamed != '':
self.listbox.Delete(sel)
self.listbox.Insert(renamed, sel)
self.listbox_max_limit(maxLimit)
#end def
#--------------------------------------------------------------------------
def OnDelete(self, event):
sel = self.listbox.GetSelection()
if sel != -1:
self.listbox.Delete(sel)
self.listbox_max_limit(maxLimit)
#end def
#--------------------------------------------------------------------------
def OnClear(self, event):
self.listbox.Clear()
self.listbox_max_limit(maxLimit)
#end def
#--------------------------------------------------------------------------
def listbox_max_limit(self, limit):
""" Sets user input to only allow a maximum temperature. """
mlist = [None]*self.listbox.GetCount()
for i in xrange(self.listbox.GetCount()):
mlist[i] = int(self.listbox.GetString(i))
if mlist[i] > limit:
self.listbox.Delete(i)
self.listbox.Insert(str(limit), i)
#end def
#--------------------------------------------------------------------------
def maxLimit_label(self):
self.maxLimit_Panel = wx.Panel(self, -1)
maxLimit_label = wx.StaticText(self.maxLimit_Panel, label='Max Limit Temp:')
maxLimit_text = wx.StaticText(self.maxLimit_Panel, label='%s %s' % (str(maxLimit), self.celsius))
hbox = wx.BoxSizer(wx.HORIZONTAL)
hbox.Add((0,-1))
hbox.Add(maxLimit_label, 0, wx.LEFT, 5)
hbox.Add(maxLimit_text, 0, wx.LEFT, 5)
self.maxLimit_Panel.SetSizer(hbox)
    #end def
#--------------------------------------------------------------------------
def create_sizer(self):
sizer = wx.GridBagSizer(8,2)
sizer.Add(self.titlePanel, (0, 1), span=(1,2), flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Add(self.label_osc, (1, 1))
sizer.Add(self.oscPanel, (1, 2))
sizer.Add(self.label_tol, (2,1))
sizer.Add(self.tolPanel, (2, 2))
sizer.Add(self.label_stability_threshold, (3,1))
sizer.Add(self.stability_threshold_Panel, (3, 2))
sizer.Add(self.label_measurements, (4,1))
sizer.Add(self.measurementPanel, (4, 2))
sizer.Add(self.maxLimit_Panel, (5, 1), span=(1,2))
sizer.Add(self.linebreak4, (6,1),span = (1,2))
sizer.Add(self.run_stopPanel, (7,1),span = (1,2), flag=wx.ALIGN_CENTER_HORIZONTAL)
self.SetSizer(sizer)
#end def
#--------------------------------------------------------------------------
def post_process_data(self):
global filePath, finaldataFile, tc_type
#try:
# Post processing:
#Seebeck_Processing_v5.create_processed_files(filePath, finaldataFile, tc_type)
#except IndexError:
#wx.MessageBox('Not enough data for post processing to occur. \n\nIt is likely that we did not even complete any oscillations.', 'Error', wx.OK | wx.ICON_INFORMATION)
#end def
#--------------------------------------------------------------------------
def enable_buttons(self):
self.btn_check.Enable()
self.btn_run.Enable()
self.btn_osc.Enable()
self.btn_tol.Enable()
self.btn_stability_threshold.Enable()
self.btn_ren.Enable()
self.btn_dlt.Enable()
self.btn_clr.Enable()
self.btn_stop.Disable()
#end def
#end class
###############################################################################
###############################################################################
class StatusPanel(wx.Panel):
"""
Current Status of Measurements
"""
#--------------------------------------------------------------------------
def __init__(self, *args, **kwargs):
wx.Panel.__init__(self, *args, **kwargs)
self.celsius = u"\u2103"
self.delta = u"\u0394"
self.mu = u"\u00b5"
self.ctime = str(datetime.now())[11:19]
self.t='0:00:00'
self.highV=str(0)
self.lowV = str(0)
self.tA=str(30)
self.tB=str(30)
self.pA=str(30)
self.pB=str(30)
self.pAset=str(30)
self.pBset=str(30)
self.stabilityA = '-'
self.stabilityB = '-'
self.dT = str(float(self.tA)-float(self.tB))
self.avgT = str((float(self.tA)+float(self.tB))/2)
self.seebeckhigh = '-'
self.seebecklow = '-'
self.mea = '-'
self.create_title("Status Panel")
self.linebreak1 = wx.StaticLine(self, pos=(-1,-1), size=(300,1))
self.create_status()
self.linebreak2 = wx.StaticLine(self, pos=(-1,-1), size=(300,1))
self.linebreak3 = wx.StaticLine(self, pos=(-1,-1), size=(1,300), style=wx.LI_VERTICAL)
# Updates from running program
pub.subscribe(self.OnTime, "Time High Voltage")
pub.subscribe(self.OnTime, "Time Low Voltage")
pub.subscribe(self.OnTime, "Time Temp A")
pub.subscribe(self.OnTime, "Time Temp B")
pub.subscribe(self.OnHighVoltage, "High Voltage")
pub.subscribe(self.OnLowVoltage, "Low Voltage")
pub.subscribe(self.OnTempA, "Temp A")
pub.subscribe(self.OnTempB, "Temp B")
pub.subscribe(self.OnPIDA, "PID A")
pub.subscribe(self.OnPIDB, "PID B")
pub.subscribe(self.OnPIDAset, "PID A SP")
pub.subscribe(self.OnPIDBset, "PID B SP")
pub.subscribe(self.OnStabilityA, "Stability A")
pub.subscribe(self.OnStabilityB, "Stability B")
pub.subscribe(self.OnMeasurement, 'Measurement')
pub.subscribe(self.OnSeebeckHigh, "Seebeck High")
pub.subscribe(self.OnSeebeckLow, "Seebeck Low")
# Updates from inital check
pub.subscribe(self.OnHighVoltage, "High Voltage Status")
pub.subscribe(self.OnLowVoltage, "Low Voltage Status")
pub.subscribe(self.OnTempA, "Temp A Status")
pub.subscribe(self.OnTempB, "Temp B Status")
pub.subscribe(self.OnPIDA, "PID A Status")
pub.subscribe(self.OnPIDB, "PID B Status")
pub.subscribe(self.OnPIDAset, "PID A SP Status")
pub.subscribe(self.OnPIDBset, "PID B SP Status")
#self.update_values()
self.create_sizer()
#end init
#--------------------------------------------------------------------------
def OnHighVoltage(self, msg):
self.highV = '%.1f'%(float(msg))
self.update_values()
#end def
#--------------------------------------------------------------------------
def OnLowVoltage(self, msg):
self.lowV = '%.1f'%(float(msg))
self.update_values()
#end def
#--------------------------------------------------------------------------
def OnTempA(self, msg):
self.tA = '%.1f'%(float(msg))
self.dT = str(float(self.tA)-float(self.tB))
self.avgT = str((float(self.tA)+float(self.tB))/2)
self.update_values()
#end def
#--------------------------------------------------------------------------
def OnTempB(self, msg):
self.tB = '%.1f'%(float(msg))
self.dT = str(float(self.tA)-float(self.tB))
self.avgT = str((float(self.tA)+float(self.tB))/2)
self.update_values()
#end def
#--------------------------------------------------------------------------
def OnPIDA(self, msg):
self.pA = '%.1f'%(float(msg))
self.update_values()
#end def
#--------------------------------------------------------------------------
def OnPIDB(self, msg):
self.pB = '%.1f'%(float(msg))
self.update_values()
#end def
#--------------------------------------------------------------------------
def OnPIDAset(self, msg):
self.pAset = '%.1f'%(float(msg))
self.update_values()
#end def
#--------------------------------------------------------------------------
def OnPIDBset(self, msg):
self.pBset = '%.1f'%(float(msg))
self.update_values()
#end def
#--------------------------------------------------------------------------
def OnStabilityA(self, msg):
if msg != '-':
self.stabilityA = '%.2f'%(float(msg))
else:
self.stabilityA = msg
self.update_values()
#end def
#--------------------------------------------------------------------------
def OnStabilityB(self, msg):
if msg != '-':
self.stabilityB = '%.2f'%(float(msg))
else:
self.stabilityB = msg
self.update_values()
#end def
#--------------------------------------------------------------------------
def OnSeebeckHigh(self, msg):
self.seebeckhigh = '%.2f'%(float(msg))
self.update_values()
#end def
#--------------------------------------------------------------------------
def OnSeebeckLow(self, msg):
self.seebecklow = '%.2f'%(float(msg))
self.update_values()
#end def
#--------------------------------------------------------------------------
def OnMeasurement(self, msg):
self.mea = msg
self.update_values()
#end def
#--------------------------------------------------------------------------
def OnTime(self, msg):
time = int(float(msg))
hours = str(time/3600)
minutes = int(time%3600/60)
if (minutes < 10):
minutes = '0%i'%(minutes)
else:
minutes = '%i'%(minutes)
seconds = int(time%60)
if (seconds < 10):
seconds = '0%i'%(seconds)
else:
seconds = '%i'%(seconds)
self.t = '%s:%s:%s'%(hours,minutes,seconds)
self.ctime = str(datetime.now())[11:19]
self.update_values()
#end def
#--------------------------------------------------------------------------
def create_title(self, name):
self.titlePanel = wx.Panel(self, -1)
title = wx.StaticText(self.titlePanel, label=name)
font_title = wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.BOLD)
title.SetFont(font_title)
hbox = wx.BoxSizer(wx.HORIZONTAL)
hbox.Add((0,-1))
hbox.Add(title, 0, wx.LEFT, 5)
self.titlePanel.SetSizer(hbox)
#end def
#--------------------------------------------------------------------------
def create_status(self):
self.label_ctime = wx.StaticText(self, label="current time:")
self.label_ctime.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.label_t = wx.StaticText(self, label="run time (s):")
self.label_t.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.label_highV = wx.StaticText(self, label="voltage high ("+self.mu+"V):")
self.label_highV.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.label_lowV = wx.StaticText(self, label="voltage low ("+self.mu+"V):")
self.label_lowV.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.label_tA = wx.StaticText(self, label="temp A ("+self.celsius+"):")
self.label_tA.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.label_tB = wx.StaticText(self, label="temp B ("+self.celsius+"):")
self.label_tB.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.label_pA = wx.StaticText(self, label="pid A ("+self.celsius+"):")
self.label_pA.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.label_pB = wx.StaticText(self, label="pid B ("+self.celsius+"):")
self.label_pB.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.label_pAset = wx.StaticText(self, label="pid A setpoint ("+self.celsius+"):")
self.label_pAset.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.label_pBset = wx.StaticText(self, label="pid B setpoint ("+self.celsius+"):")
self.label_pBset.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.label_stabilityA = wx.StaticText(self, label="stability A ("+self.celsius+ "/min):")
self.label_stabilityA.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.label_stabilityB = wx.StaticText(self, label="stability B ("+self.celsius+ "/min):")
self.label_stabilityB.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.label_avgT = wx.StaticText(self, label="avg T ("+self.celsius+"):")
self.label_avgT.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.label_dT = wx.StaticText(self, label=self.delta+"T ("+self.celsius+"):")
self.label_dT.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.label_seebeckhigh = wx.StaticText(self, label="seebeck high ("+self.mu+"V/"+self.celsius+"):")
self.label_seebeckhigh.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.label_seebecklow = wx.StaticText(self, label="seebeck low ("+self.mu+"V/"+self.celsius+"):")
self.label_seebecklow.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.label_mea = wx.StaticText(self, label="seebeck measurement")
self.label_mea.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.ctimecurrent = wx.StaticText(self, label=self.ctime)
self.ctimecurrent.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.tcurrent = wx.StaticText(self, label=self.t)
self.tcurrent.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.highVcurrent = wx.StaticText(self, label=self.highV)
self.highVcurrent.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.lowVcurrent = wx.StaticText(self, label=self.lowV)
self.lowVcurrent.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.tAcurrent = wx.StaticText(self, label=self.tA)
self.tAcurrent.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.tBcurrent = wx.StaticText(self, label=self.tB)
self.tBcurrent.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.pAcurrent = wx.StaticText(self, label=self.pA)
self.pAcurrent.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.pBcurrent = wx.StaticText(self, label=self.pB)
self.pBcurrent.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.pAsetcurrent = wx.StaticText(self, label=self.pAset)
self.pAsetcurrent.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.pBsetcurrent = wx.StaticText(self, label=self.pBset)
self.pBsetcurrent.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.stabilityAcurrent = wx.StaticText(self, label=self.stabilityA)
self.stabilityAcurrent.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.stabilityBcurrent = wx.StaticText(self, label=self.stabilityB)
self.stabilityBcurrent.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.avgTcurrent = wx.StaticText(self, label=self.avgT)
self.avgTcurrent.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.dTcurrent = wx.StaticText(self, label=self.dT)
self.dTcurrent.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.seebeckhighcurrent = wx.StaticText(self, label=self.seebeckhigh)
self.seebeckhighcurrent.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.seebecklowcurrent = wx.StaticText(self, label=self.seebecklow)
self.seebecklowcurrent.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.meacurrent = wx.StaticText(self, label=self.mea)
self.meacurrent.SetFont(wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
#end def
#--------------------------------------------------------------------------
def update_values(self):
self.ctimecurrent.SetLabel(self.ctime)
self.tcurrent.SetLabel(self.t)
self.highVcurrent.SetLabel(self.highV)
self.lowVcurrent.SetLabel(self.lowV)
self.tAcurrent.SetLabel(self.tA)
self.tBcurrent.SetLabel(self.tB)
self.pAcurrent.SetLabel(self.pA)
self.pBcurrent.SetLabel(self.pB)
self.pAsetcurrent.SetLabel(self.pAset)
self.pBsetcurrent.SetLabel(self.pBset)
self.stabilityAcurrent.SetLabel(self.stabilityA)
self.stabilityBcurrent.SetLabel(self.stabilityB)
self.avgTcurrent.SetLabel(self.avgT)
self.dTcurrent.SetLabel(self.dT)
self.seebeckhighcurrent.SetLabel(self.seebeckhigh)
self.seebecklowcurrent.SetLabel(self.seebecklow)
self.meacurrent.SetLabel(self.mea)
#end def
#--------------------------------------------------------------------------
def create_sizer(self):
sizer = wx.GridBagSizer(20,2)
sizer.Add(self.titlePanel, (0, 0), span = (1,2), border=5, flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Add(self.linebreak1,(1,0), span = (1,2))
sizer.Add(self.label_ctime, (2,0))
sizer.Add(self.ctimecurrent, (2, 1),flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Add(self.label_t, (3,0))
sizer.Add(self.tcurrent, (3, 1),flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Add(self.label_highV, (4, 0))
sizer.Add(self.highVcurrent, (4, 1),flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Add(self.label_lowV, (5,0))
sizer.Add(self.lowVcurrent, (5,1),flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Add(self.label_tA, (6,0))
sizer.Add(self.tAcurrent, (6,1),flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Add(self.label_stabilityA, (7,0))
sizer.Add(self.stabilityAcurrent, (7, 1),flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Add(self.label_pA, (8,0))
sizer.Add(self.pAcurrent, (8,1),flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Add(self.label_pAset, (9,0))
sizer.Add(self.pAsetcurrent, (9,1),flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Add(self.label_tB, (10,0))
sizer.Add(self.tBcurrent, (10,1),flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Add(self.label_stabilityB, (11,0))
sizer.Add(self.stabilityBcurrent, (11, 1),flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Add(self.label_pB, (12,0))
sizer.Add(self.pBcurrent, (12,1),flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Add(self.label_pBset, (13,0))
sizer.Add(self.pBsetcurrent, (13,1),flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Add(self.label_avgT, (14,0))
sizer.Add(self.avgTcurrent, (14,1),flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Add(self.label_dT, (15,0))
sizer.Add(self.dTcurrent, (15,1),flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Add(self.label_seebeckhigh, (16,0))
sizer.Add(self.seebeckhighcurrent, (16,1),flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Add(self.label_seebecklow, (17,0))
sizer.Add(self.seebecklowcurrent, (17,1),flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Add(self.label_mea, (18,0))
sizer.Add(self.meacurrent, (18,1),flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Add(self.linebreak2, (19,0), span = (1,2))
self.SetSizer(sizer)
#end def
#end class
###############################################################################
###############################################################################
class VoltagePanel(wx.Panel):
"""
GUI Window for plotting voltage data.
"""
#--------------------------------------------------------------------------
def __init__(self, *args, **kwargs):
wx.Panel.__init__(self, *args, **kwargs)
global filePath
global thighV_list
global highV_list
global tlowV_list
global lowV_list
self.create_title("Voltage Panel")
self.init_plot()
self.canvas = FigureCanvasWxAgg(self, -1, self.figure)
self.create_control_panel()
self.create_sizer()
pub.subscribe(self.OnHighVoltage, "High Voltage")
pub.subscribe(self.OnHighVTime, "Time High Voltage")
pub.subscribe(self.OnLowVoltage, "Low Voltage")
pub.subscribe(self.OnLowVTime, "Time Low Voltage")
# For saving the plots at the end of data acquisition:
pub.subscribe(self.save_plot, "Save_All")
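        # redraw the live voltage plot every 500 ms from the shared global lists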
self.animator = animation.FuncAnimation(self.figure, self.draw_plot, interval=500, blit=False)
#end init
#--------------------------------------------------------------------------
def create_title(self, name):
self.titlePanel = wx.Panel(self, -1)
title = wx.StaticText(self.titlePanel, label=name)
font_title = wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.BOLD)
title.SetFont(font_title)
hbox = wx.BoxSizer(wx.HORIZONTAL)
hbox.Add((0,-1))
hbox.Add(title, 0, wx.LEFT, 5)
self.titlePanel.SetSizer(hbox)
#end def
#--------------------------------------------------------------------------
def create_control_panel(self):
self.xmin_control = BoundControlBox(self, -1, "t min", 0)
self.xmax_control = BoundControlBox(self, -1, "t max", 100)
self.ymin_control = BoundControlBox(self, -1, "V min", -1000)
self.ymax_control = BoundControlBox(self, -1, "V max", 1000)
self.hbox1 = wx.BoxSizer(wx.HORIZONTAL)
self.hbox1.AddSpacer(10)
self.hbox1.Add(self.xmin_control, border=5, flag=wx.ALL)
self.hbox1.Add(self.xmax_control, border=5, flag=wx.ALL)
self.hbox1.AddSpacer(10)
self.hbox1.Add(self.ymin_control, border=5, flag=wx.ALL)
self.hbox1.Add(self.ymax_control, border=5, flag=wx.ALL)
#end def
#--------------------------------------------------------------------------
def OnHighVoltage(self, msg):
self.highV = float(msg)
highV_list.append(self.highV)
thighV_list.append(self.thighV)
#end def
#--------------------------------------------------------------------------
def OnHighVTime(self, msg):
self.thighV = float(msg)
#end def
#--------------------------------------------------------------------------
def OnLowVoltage(self, msg):
self.lowV = float(msg)
lowV_list.append(self.lowV)
tlowV_list.append(self.tlowV)
#end def
#--------------------------------------------------------------------------
def OnLowVTime(self, msg):
self.tlowV = float(msg)
#end def
#--------------------------------------------------------------------------
def init_plot(self):
self.dpi = 100
self.colorH = 'g'
self.colorL = 'y'
self.figure = Figure((6,2), dpi=self.dpi)
self.subplot = self.figure.add_subplot(111)
self.lineH, = self.subplot.plot(thighV_list,highV_list, color=self.colorH, linewidth=1)
self.lineL, = self.subplot.plot(tlowV_list,lowV_list, color=self.colorL, linewidth=1)
self.legend = self.figure.legend( (self.lineH, self.lineL), (r"$V_{high}$",r"$V_{low}$"), (0.15,0.7),fontsize=8)
#self.subplot.text(0.05, .95, r'$X(f) = \mathcal{F}\{x(t)\}$', \
#verticalalignment='top', transform = self.subplot.transAxes)
#end def
#--------------------------------------------------------------------------
def draw_plot(self,i):
self.subplot.clear()
#self.subplot.set_title("voltage vs. time", fontsize=12)
self.subplot.set_ylabel(r"voltage ($\mu V$)", fontsize = 8)
self.subplot.set_xlabel("time (s)", fontsize = 8)
# Adjustable scale:
if self.xmax_control.is_auto():
xmax = max(thighV_list+tlowV_list)
else:
xmax = float(self.xmax_control.manual_value())
if self.xmin_control.is_auto():
xmin = 0
else:
xmin = float(self.xmin_control.manual_value())
if self.ymin_control.is_auto():
minV = min(highV_list+lowV_list)
ymin = minV - abs(minV)*0.3
else:
ymin = float(self.ymin_control.manual_value())
if self.ymax_control.is_auto():
maxV = max(highV_list+lowV_list)
ymax = maxV + abs(maxV)*0.3
else:
ymax = float(self.ymax_control.manual_value())
self.subplot.set_xlim([xmin, xmax])
self.subplot.set_ylim([ymin, ymax])
pylab.setp(self.subplot.get_xticklabels(), fontsize=8)
pylab.setp(self.subplot.get_yticklabels(), fontsize=8)
self.lineH, = self.subplot.plot(thighV_list,highV_list, color=self.colorH, linewidth=1)
self.lineL, = self.subplot.plot(tlowV_list,lowV_list, color=self.colorL, linewidth=1)
return (self.lineH, self.lineL)
#return (self.subplot.plot( thighV_list, highV_list, color=self.colorH, linewidth=1),
#self.subplot.plot( tlowV_list, lowV_list, color=self.colorL, linewidth=1))
#end def
#--------------------------------------------------------------------------
def save_plot(self, msg):
path = filePath + "/Voltage_Plot.png"
self.canvas.print_figure(path)
#end def
#--------------------------------------------------------------------------
def create_sizer(self):
sizer = wx.GridBagSizer(3,1)
sizer.Add(self.titlePanel, (0, 0), flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Add(self.canvas, ( 1,0), flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Add(self.hbox1, (2,0), flag=wx.ALIGN_CENTER_HORIZONTAL)
self.SetSizer(sizer)
#end def
#end class
###############################################################################
###############################################################################
class TemperaturePanel(wx.Panel):
"""
GUI Window for plotting temperature data.
"""
#--------------------------------------------------------------------------
def __init__(self, *args, **kwargs):
wx.Panel.__init__(self, *args, **kwargs)
global filePath
global ttempA_list
global tempA_list
global ttempB_list
global tempB_list
global tpid_list
global pidA_list
global pidB_list
self.create_title("Temperature Panel")
self.init_plot()
self.canvas = FigureCanvasWxAgg(self, -1, self.figure)
self.create_control_panel()
self.create_sizer()
pub.subscribe(self.OnTimeTempA, "Time Temp A")
pub.subscribe(self.OnTempA, "Temp A")
pub.subscribe(self.OnTimeTempB, "Time Temp B")
pub.subscribe(self.OnTempB, "Temp B")
pub.subscribe(self.OnTimePID, "Time PID")
pub.subscribe(self.OnPIDA, "PID A")
pub.subscribe(self.OnPIDB, "PID B")
# For saving the plots at the end of data acquisition:
pub.subscribe(self.save_plot, "Save_All")
self.animator = animation.FuncAnimation(self.figure, self.draw_plot, interval=500, blit=False)
#end init
#--------------------------------------------------------------------------
def create_title(self, name):
self.titlePanel = wx.Panel(self, -1)
title = wx.StaticText(self.titlePanel, label=name)
font_title = wx.Font(16, wx.DEFAULT, wx.NORMAL, wx.BOLD)
title.SetFont(font_title)
hbox = wx.BoxSizer(wx.HORIZONTAL)
hbox.Add((0,-1))
hbox.Add(title, 0, wx.LEFT, 5)
self.titlePanel.SetSizer(hbox)
#end def
#--------------------------------------------------------------------------
def create_control_panel(self):
self.xmin_control = BoundControlBox(self, -1, "t min", 0)
self.xmax_control = BoundControlBox(self, -1, "t max", 100)
self.ymin_control = BoundControlBox(self, -1, "T min", 0)
self.ymax_control = BoundControlBox(self, -1, "T max", 500)
self.hbox1 = wx.BoxSizer(wx.HORIZONTAL)
self.hbox1.AddSpacer(10)
self.hbox1.Add(self.xmin_control, border=5, flag=wx.ALL)
self.hbox1.Add(self.xmax_control, border=5, flag=wx.ALL)
self.hbox1.AddSpacer(10)
self.hbox1.Add(self.ymin_control, border=5, flag=wx.ALL)
self.hbox1.Add(self.ymax_control, border=5, flag=wx.ALL)
#end def
#--------------------------------------------------------------------------
def OnTimeTempA(self, msg):
self.ttA = float(msg)
#end def
#--------------------------------------------------------------------------
def OnTempA(self, msg):
self.tA = float(msg)
tempA_list.append(self.tA)
ttempA_list.append(self.ttA)
#end def
#--------------------------------------------------------------------------
def OnTimeTempB(self, msg):
self.ttB = float(msg)
#end def
#--------------------------------------------------------------------------
def OnTempB(self, msg):
self.tB = float(msg)
tempB_list.append(self.tB)
ttempB_list.append(self.ttB)
#end def
#--------------------------------------------------------------------------
def OnTimePID(self, msg):
self.tpid = float(msg)
tpid_list.append(self.tpid)
pidA_list.append(self.pA)
pidB_list.append(self.pB)
#end def
#--------------------------------------------------------------------------
def OnPIDA(self, msg):
self.pA = float(msg)
#end def
#--------------------------------------------------------------------------
def OnPIDB(self, msg):
self.pB = float(msg)
#end def
#--------------------------------------------------------------------------
def init_plot(self):
self.dpi = 100
self.colorTA = 'r'
self.colorTB = 'b'
self.colorPA = 'm'
self.colorPB = 'c'
self.figure = Figure((6,2), dpi=self.dpi)
self.subplot = self.figure.add_subplot(111)
self.lineTA, = self.subplot.plot(ttempA_list,tempA_list, color=self.colorTA, linewidth=1)
self.lineTB, = self.subplot.plot(ttempB_list,tempB_list, color=self.colorTB, linewidth=1)
self.linePA, = self.subplot.plot(tpid_list,pidA_list, color=self.colorPA, linewidth=1)
self.linePB, = self.subplot.plot(tpid_list,pidB_list, color=self.colorPB, linewidth=1)
self.legend = self.figure.legend( (self.lineTA, self.linePA, self.lineTB, self.linePB), (r"$T_A$ (sample)",r"$T_A$ (PID)",r"$T_B$ (sample)",r"$T_B$ (PID)"), (0.15,0.50),fontsize=8)
#self.subplot.text(0.05, .95, r'$X(f) = \mathcal{F}\{x(t)\}$', \
#verticalalignment='top', transform = self.subplot.transAxes)
#end def
#--------------------------------------------------------------------------
def draw_plot(self,i):
self.subplot.clear()
#self.subplot.set_title("temperature vs. time", fontsize=12)
self.subplot.set_ylabel(r"temperature ($\degree C$)", fontsize = 8)
self.subplot.set_xlabel("time (s)", fontsize = 8)
# Adjustable scale:
if self.xmax_control.is_auto():
xmax = max(ttempA_list+ttempB_list+tpid_list)
else:
xmax = float(self.xmax_control.manual_value())
if self.xmin_control.is_auto():
xmin = 0
else:
xmin = float(self.xmin_control.manual_value())
if self.ymin_control.is_auto():
minT = min(tempA_list+tempB_list+pidA_list+pidB_list)
ymin = minT - abs(minT)*0.3
else:
ymin = float(self.ymin_control.manual_value())
if self.ymax_control.is_auto():
maxT = max(tempA_list+tempB_list+pidA_list+pidB_list)
ymax = maxT + abs(maxT)*0.3
else:
ymax = float(self.ymax_control.manual_value())
self.subplot.set_xlim([xmin, xmax])
self.subplot.set_ylim([ymin, ymax])
pylab.setp(self.subplot.get_xticklabels(), fontsize=8)
pylab.setp(self.subplot.get_yticklabels(), fontsize=8)
self.lineTA, = self.subplot.plot(ttempA_list,tempA_list, color=self.colorTA, linewidth=1)
self.lineTB, = self.subplot.plot(ttempB_list,tempB_list, color=self.colorTB, linewidth=1)
self.linePA, = self.subplot.plot(tpid_list,pidA_list, color=self.colorPA, linewidth=1)
self.linePB, = self.subplot.plot(tpid_list,pidB_list, color=self.colorPB, linewidth=1)
return (self.lineTA, self.lineTB, self.linePA, self.linePB)
#end def
#--------------------------------------------------------------------------
def save_plot(self, msg):
path = filePath + "/Temperature_Plot.png"
self.canvas.print_figure(path)
#end def
#--------------------------------------------------------------------------
def create_sizer(self):
sizer = wx.GridBagSizer(3,1)
sizer.Add(self.titlePanel, (0, 0),flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Add(self.canvas, ( 1,0),flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Add(self.hbox1, (2,0),flag=wx.ALIGN_CENTER_HORIZONTAL)
self.SetSizer(sizer)
#end def
#end class
###############################################################################
###############################################################################
class Frame(wx.Frame):
"""
Main frame window in which GUI resides
"""
#--------------------------------------------------------------------------
def __init__(self, *args, **kwargs):
wx.Frame.__init__(self, *args, **kwargs)
self.init_UI()
self.create_statusbar()
self.create_menu()
pub.subscribe(self.update_statusbar, "Status Bar")
#end init
#--------------------------------------------------------------------------
def init_UI(self):
self.SetBackgroundColour('#E0EBEB')
self.userpanel = UserPanel(self, size=wx.DefaultSize)
self.statuspanel = StatusPanel(self,size=wx.DefaultSize)
self.voltagepanel = VoltagePanel(self, size=wx.DefaultSize)
self.temperaturepanel = TemperaturePanel(self, size=wx.DefaultSize)
self.statuspanel.SetBackgroundColour('#ededed')
sizer = wx.GridBagSizer(2, 3)
sizer.Add(self.userpanel, (0,0),flag=wx.ALIGN_CENTER_HORIZONTAL, span = (2,1))
sizer.Add(self.statuspanel, (0,2),flag=wx.ALIGN_CENTER_HORIZONTAL, span = (2,1))
sizer.Add(self.voltagepanel, (0,1),flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Add(self.temperaturepanel, (1,1),flag=wx.ALIGN_CENTER_HORIZONTAL)
sizer.Fit(self)
self.SetSizer(sizer)
self.SetTitle('High Temp Seebeck GUI')
self.Centre()
#end def
#--------------------------------------------------------------------------
def create_menu(self):
# Menu Bar with File, Quit
menubar = wx.MenuBar()
fileMenu = wx.Menu()
qmi = wx.MenuItem(fileMenu, APP_EXIT, '&Quit\tCtrl+Q')
#qmi.SetBitmap(wx.Bitmap('exit.png'))
fileMenu.AppendItem(qmi)
self.Bind(wx.EVT_MENU, self.onQuit, id=APP_EXIT)
menubar.Append(fileMenu, 'File')
self.SetMenuBar(menubar)
#end def
#--------------------------------------------------------------------------
def onQuit(self, e):
global abort_ID
abort_ID=1
self.Destroy()
self.Close()
sys.stdout.close()
sys.stderr.close()
#end def
#--------------------------------------------------------------------------
def create_statusbar(self):
self.statusbar = ESB.EnhancedStatusBar(self, -1)
self.statusbar.SetSize((-1, 23))
self.statusbar.SetFieldsCount(8)
self.SetStatusBar(self.statusbar)
self.space_between = 10
### Create Widgets for the statusbar:
# Status:
self.status_text = wx.StaticText(self.statusbar, -1, "Ready")
self.width0 = 105
# Placer 1:
placer1 = wx.StaticText(self.statusbar, -1, " ")
# Title:
#measurement_text = wx.StaticText(self.statusbar, -1, "Measurement Indicators:")
#boldFont = wx.Font(9, wx.DEFAULT, wx.NORMAL, wx.BOLD)
#measurement_text.SetFont(boldFont)
#self.width1 = measurement_text.GetRect().width + self.space_between
# PID Tolerance:
pidTol_text = wx.StaticText(self.statusbar, -1, "Within PID Tolerance:")
self.width2 = pidTol_text.GetRect().width + self.space_between
self.indicator_tol = wx.StaticText(self.statusbar, -1, "-")
self.width3 = 25
# Stability Threshold:
stableThresh_text = wx.StaticText(self.statusbar, -1, "Within Stability Threshold:")
self.width4 = stableThresh_text.GetRect().width + 5
self.indicator_stable = wx.StaticText(self.statusbar, -1, "-")
self.width5 = self.width3
# Placer 2:
placer2 = wx.StaticText(self.statusbar, -1, " ")
# Version:
version_label = wx.StaticText(self.statusbar, -1, "Version: %s" % version)
self.width8 = version_label.GetRect().width + self.space_between
# Set widths of each piece of the status bar:
self.statusbar.SetStatusWidths([self.width0, 50, self.width2, self.width3, self.width4, self.width5, -1, self.width8])
### Add the widgets to the status bar:
# Status:
self.statusbar.AddWidget(self.status_text, ESB.ESB_ALIGN_CENTER_HORIZONTAL, ESB.ESB_ALIGN_CENTER_VERTICAL)
# Placer 1:
self.statusbar.AddWidget(placer1)
# Title:
#self.statusbar.AddWidget(measurement_text, ESB.ESB_ALIGN_CENTER_HORIZONTAL, ESB.ESB_ALIGN_CENTER_VERTICAL)
# PID Tolerance:
self.statusbar.AddWidget(pidTol_text, ESB.ESB_ALIGN_CENTER_HORIZONTAL, ESB.ESB_ALIGN_CENTER_VERTICAL)
self.statusbar.AddWidget(self.indicator_tol, ESB.ESB_ALIGN_CENTER_HORIZONTAL, ESB.ESB_ALIGN_CENTER_VERTICAL)
# Stability Threshold:
self.statusbar.AddWidget(stableThresh_text, ESB.ESB_ALIGN_CENTER_HORIZONTAL, ESB.ESB_ALIGN_CENTER_VERTICAL)
self.statusbar.AddWidget(self.indicator_stable, ESB.ESB_ALIGN_CENTER_HORIZONTAL, ESB.ESB_ALIGN_CENTER_VERTICAL)
# Placer 2
self.statusbar.AddWidget(placer2)
# Version:
self.statusbar.AddWidget(version_label, ESB.ESB_ALIGN_CENTER_HORIZONTAL, ESB.ESB_ALIGN_CENTER_VERTICAL)
#end def
#--------------------------------------------------------------------------
def update_statusbar(self, msg):
string = msg
# Status:
if string == 'Running' or string == 'Finished, Ready' or string == 'Exception Occurred' or string=='Checking':
self.status_text.SetLabel(string)
self.status_text.SetBackgroundColour(wx.NullColour)
if string == 'Exception Occurred':
self.status_text.SetBackgroundColour("RED")
#end if
#end if
else:
tol = string[0]
stable = string[1]
# PID Tolerance indicator:
self.indicator_tol.SetLabel(tol)
if tol == 'OK':
self.indicator_tol.SetBackgroundColour("GREEN")
#end if
else:
self.indicator_tol.SetBackgroundColour("RED")
#end else
# Stability Threshold indicator:
self.indicator_stable.SetLabel(stable)
if stable == 'OK':
self.indicator_stable.SetBackgroundColour("GREEN")
#end if
else:
self.indicator_stable.SetBackgroundColour("RED")
#end else
#end else
#end def
#end class
###############################################################################
###############################################################################
class App(wx.App):
"""
App for initializing program
"""
#--------------------------------------------------------------------------
def OnInit(self):
self.frame = Frame(parent=None, title="High Temp Seebeck GUI", size=(1280,1280))
self.frame.Show()
setup = Setup()
return True
#end init
#end class
###############################################################################
#==============================================================================
if __name__=='__main__':
app = App()
app.MainLoop()
#end if
| gpl-3.0 |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/sklearn/linear_model/tests/test_randomized_l1.py | 30 | 8448 | # Authors: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from tempfile import mkdtemp
import shutil
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.linear_model.randomized_l1 import (lasso_stability_path,
RandomizedLasso,
RandomizedLogisticRegression)
from sklearn.datasets import load_diabetes, load_iris
from sklearn.feature_selection import f_regression, f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model.base import _preprocess_data
diabetes = load_diabetes()
X = diabetes.data
y = diabetes.target
X = StandardScaler().fit_transform(X)
X = X[:, [2, 3, 6, 7, 8]]
# univariate F-scores serve as the reference ranking of the best features below
F, _ = f_regression(X, y)
@ignore_warnings(category=DeprecationWarning)
def test_lasso_stability_path():
# Check lasso stability path
# Load diabetes data and add noisy features
scaling = 0.3
coef_grid, scores_path = lasso_stability_path(X, y, scaling=scaling,
random_state=42,
n_resampling=30)
assert_array_equal(np.argsort(F)[-3:],
np.argsort(np.sum(scores_path, axis=1))[-3:])
@ignore_warnings(category=DeprecationWarning)
def test_randomized_lasso_error_memory():
scaling = 0.3
selection_threshold = 0.5
tempdir = 5
clf = RandomizedLasso(verbose=False, alpha=[1, 0.8], random_state=42,
scaling=scaling,
selection_threshold=selection_threshold,
memory=tempdir)
assert_raises_regex(ValueError, "'memory' should either be a string or"
" a sklearn.externals.joblib.Memory instance",
clf.fit, X, y)
@ignore_warnings(category=DeprecationWarning)
def test_randomized_lasso():
# Check randomized lasso
scaling = 0.3
selection_threshold = 0.5
n_resampling = 20
# or with 1 alpha
clf = RandomizedLasso(verbose=False, alpha=1, random_state=42,
scaling=scaling, n_resampling=n_resampling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
# or with many alphas
clf = RandomizedLasso(verbose=False, alpha=[1, 0.8], random_state=42,
scaling=scaling, n_resampling=n_resampling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_equal(clf.all_scores_.shape, (X.shape[1], 2))
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
# test caching
try:
tempdir = mkdtemp()
clf = RandomizedLasso(verbose=False, alpha=[1, 0.8], random_state=42,
scaling=scaling,
selection_threshold=selection_threshold,
memory=tempdir)
feature_scores = clf.fit(X, y).scores_
assert_equal(clf.all_scores_.shape, (X.shape[1], 2))
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
finally:
shutil.rmtree(tempdir)
X_r = clf.transform(X)
X_full = clf.inverse_transform(X_r)
assert_equal(X_r.shape[1], np.sum(feature_scores > selection_threshold))
assert_equal(X_full.shape, X.shape)
clf = RandomizedLasso(verbose=False, alpha='aic', random_state=42,
scaling=scaling, n_resampling=100)
feature_scores = clf.fit(X, y).scores_
assert_allclose(feature_scores, [1., 1., 1., 0.225, 1.], rtol=0.2)
clf = RandomizedLasso(verbose=False, scaling=-0.1)
assert_raises(ValueError, clf.fit, X, y)
clf = RandomizedLasso(verbose=False, scaling=1.1)
assert_raises(ValueError, clf.fit, X, y)
def test_randomized_lasso_precompute():
# Check randomized lasso for different values of precompute
n_resampling = 20
alpha = 1
random_state = 42
G = np.dot(X.T, X)
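    # precomputed Gram matrix X.T dot X, passed explicitly via precompute=G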
clf = RandomizedLasso(alpha=alpha, random_state=random_state,
precompute=G, n_resampling=n_resampling)
feature_scores_1 = clf.fit(X, y).scores_
for precompute in [True, False, None, 'auto']:
clf = RandomizedLasso(alpha=alpha, random_state=random_state,
precompute=precompute, n_resampling=n_resampling)
feature_scores_2 = clf.fit(X, y).scores_
assert_array_equal(feature_scores_1, feature_scores_2)
@ignore_warnings(category=DeprecationWarning)
def test_randomized_logistic():
# Check randomized sparse logistic regression
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
X_orig = X.copy()
feature_scores = clf.fit(X, y).scores_
assert_array_equal(X, X_orig) # fit does not modify X
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
clf = RandomizedLogisticRegression(verbose=False, C=[1., 0.5],
random_state=42, scaling=scaling,
n_resampling=50, tol=1e-3)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
clf = RandomizedLogisticRegression(verbose=False, C=[[1., 0.5]])
assert_raises(ValueError, clf.fit, X, y)
@ignore_warnings(category=DeprecationWarning)
def test_randomized_logistic_sparse():
# Check randomized sparse logistic regression on sparse data
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
# center here because sparse matrices are usually not centered
# labels should not be centered
X, _, _, _, _ = _preprocess_data(X, y, True, True)
X_sp = sparse.csr_matrix(X)
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores = clf.fit(X, y).scores_
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores_sp = clf.fit(X_sp, y).scores_
assert_array_equal(feature_scores, feature_scores_sp)
def test_warning_raised():
scaling = 0.3
selection_threshold = 0.5
tempdir = 5
assert_warns_message(DeprecationWarning, "The function"
" lasso_stability_path is "
"deprecated in 0.19 and will be removed in 0.21.",
lasso_stability_path, X, y, scaling=scaling,
random_state=42, n_resampling=30)
assert_warns_message(DeprecationWarning, "Class RandomizedLasso is"
" deprecated; The class RandomizedLasso is "
"deprecated in 0.19 and will be removed in 0.21.",
RandomizedLasso, verbose=False, alpha=[1, 0.8],
random_state=42, scaling=scaling,
selection_threshold=selection_threshold,
memory=tempdir)
assert_warns_message(DeprecationWarning, "The class"
" RandomizedLogisticRegression is "
"deprecated in 0.19 and will be removed in 0.21.",
RandomizedLogisticRegression,
verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
| mit |
kejbaly2/comdev | comdev/firebase_api.py | 1 | 2104 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: Chris Ward <[email protected]>
'''
'''
import logging
import pandas as pd
import pyrebase
import ipdb # NOQA
from comdev.lib import load_config, expand_path
class Firebaser(object):
_firebase_api = None
def __init__(self, app_name):
config = self.config = load_config(app_name)['firebase'].get()
self.auth_conf = {
"apiKey": config['api_key'],
"authDomain": config['auth_domain'],
"databaseURL": config['db_url'],
"storageBucket": config['storage_bucket'],
"serviceAccount": expand_path(config['service_account']),
}
@property
def firebase(self):
if not self._firebase_api:
self._firebase_api = pyrebase.initialize_app(self.auth_conf)
return self._firebase_api
@property
def db(self):
# Get a reference to the database service
return self.firebase.database()
def auth_user(self, login, password=None):
auth = self.firebase.auth()
login = login or self.config.get('user_email')
password = password or self.config.get('user_password')
if login and password:
user = auth.sign_in_with_email_and_password(login, password)
elif login:
# Get a reference to the auth service
token = auth.create_custom_token(login)
user = auth.sign_in_with_custom_token(token)
else:
raise ValueError(
'Invalid user credentials. Check login and password.')
id_token = user['idToken']
return id_token
def get(self, path):
db = self.db
child = [db.child(child_path) for child_path in path.split('.')][-1]
        # the last list element now holds the reference built from the full dot-separated path
# grab the data
data = child.get()
data = {row.key(): row.val() for row in data.each()}
df = pd.DataFrame.from_dict(data, 'index')
return df
log = logging.getLogger(__name__)
if __name__ == '__main__':
ipdb.set_trace()
| gpl-3.0 |
loli/sklearn-ensembletrees | examples/covariance/plot_outlier_detection.py | 14 | 3892 | """
==========================================
Outlier detection with several methods.
==========================================
When the amount of contamination is known, this example illustrates two
different ways of performing :ref:`outlier_detection`:
 - based on a robust estimator of covariance, which assumes that the
data are Gaussian distributed and performs better than the One-Class SVM
in that case.
- using the One-Class SVM and its ability to capture the shape of the
data set, hence performing better when the data is strongly
non-Gaussian, i.e. with two well-separated clusters;
The ground truth about inliers and outliers is given by the points colors
while the orange-filled area indicates which points are reported as outliers
by each method.
Here, we assume that we know the fraction of outliers in the datasets.
Thus rather than using the 'predict' method of the objects, we set the
threshold on the decision_function to separate out the corresponding
fraction.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from scipy import stats
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]
# define two outlier detection tools to be compared
classifiers = {
"One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
kernel="rbf", gamma=0.1),
"robust covariance estimator": EllipticEnvelope(contamination=.1)}
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 500), np.linspace(-7, 7, 500))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = 0
# Fit the problem with varying cluster separation
for i, offset in enumerate(clusters_separation):
np.random.seed(42)
# Data generation
    X1 = 0.3 * np.random.randn(n_inliers // 2, 2) - offset
    X2 = 0.3 * np.random.randn(n_inliers // 2, 2) + offset
X = np.r_[X1, X2]
# Add outliers
X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]
# Fit the model with the One-Class SVM
plt.figure(figsize=(10, 5))
for i, (clf_name, clf) in enumerate(classifiers.items()):
# fit the data and tag outliers
clf.fit(X)
y_pred = clf.decision_function(X).ravel()
threshold = stats.scoreatpercentile(y_pred,
100 * outliers_fraction)
y_pred = y_pred > threshold
n_errors = (y_pred != ground_truth).sum()
# plot the levels lines and the points
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
subplot = plt.subplot(1, 2, i + 1)
subplot.set_title("Outlier detection")
subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
cmap=plt.cm.Blues_r)
a = subplot.contour(xx, yy, Z, levels=[threshold],
linewidths=2, colors='red')
subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
colors='orange')
b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white')
c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black')
subplot.axis('tight')
subplot.legend(
[a.collections[0], b, c],
['learned decision function', 'true inliers', 'true outliers'],
prop=matplotlib.font_manager.FontProperties(size=11))
subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors))
subplot.set_xlim((-7, 7))
subplot.set_ylim((-7, 7))
plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26)
plt.show()
| bsd-3-clause |
rom1mouret/assortment | kcross_validate_mdl_selection.py | 1 | 5781 | #!/usr/bin/env python3
import argparse
import sklearn.utils
import numpy as np
from sklearn.preprocessing import StandardScaler, Imputer
from sklearn.metrics import classification_report, accuracy_score
from gating_models import *
def load_dataset(filename, number_of_detectors, to_ignore):
n = number_of_detectors
X_rows = []
performances_rows = []
row_to_dataset = []
with open(filename, "r") as f:
for line in f:
line = line.strip()
parts = line.split(",")
dataset = parts[0]
row_to_dataset.append(dataset)
names = parts[1:1+n]
# performances
performances = np.array(list(map(float, parts[1+n:1+2*n])))
performances = performances.reshape((1, performances.shape[0]))
performances_rows.append(performances)
# features
features = np.array(list(map(float, parts[1+2*n:])))
features = features.reshape((1, features.shape[0]))
X_rows.append(features)
# features
X = np.concatenate(X_rows, axis=0)
imputer = Imputer(missing_values=np.nan)
X = imputer.fit_transform(X)
scaler = StandardScaler()
X = scaler.fit_transform(X)
# performances
performances = np.concatenate(performances_rows, axis=0)
# remove the detectors to ignore
to_ignore = set([name.lower() for name in to_ignore])
to_keep = [i for i, name in enumerate(names) if name.lower() not in to_ignore]
agreement_indexing = agreement_feature_indexing(number_of_detectors)
complexity_indexing = complexity_feature_indexing(number_of_detectors, X.shape[1])
new_order = []
for i in range(len(to_keep)):
for j in range(i+1, len(to_keep)):
new_order.append(agreement_indexing[(to_keep[i], to_keep[j])])
for col in to_keep:
new_order += complexity_indexing[col]
X = X[:, new_order]
performances = performances[:, to_keep]
names = [names[i] for i in to_keep]
# shuffling is necessary for some training algorithms and the kcross
X, row_to_dataset, performances = \
sklearn.utils.shuffle(X, row_to_dataset, performances)
return X, row_to_dataset, performances, names
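# Expected layout of one input line, as parsed above (detector and feature
# names are hypothetical; n = number of detectors):
#   dataset_name,det_1,...,det_n,perf_1,...,perf_n,feat_1,...,feat_m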
if __name__ == "__main__":
# options
parser = argparse.ArgumentParser(description='Outlier Detection Benchmarks')
parser.add_argument('dataset', metavar='dataset', type=str, nargs=1, help="ensemble learning dataset")
parser.add_argument('-n', metavar='num detectors', type=int, nargs='?', default=4, help="number of outlier detectors")
parser.add_argument('--exclude', type=str, nargs='*', default=[], help="names of the detectors to exclude from the model selection")
parser.add_argument('--features', metavar='feature index', type=int, nargs='*', default=[0, 1], help="indices of the active features")
args = vars(parser.parse_args())
dataset = args['dataset'][0]
n = args['n']
to_ignore = args['exclude']
active_features = args['features']
X, row_to_dataset, performances, names = load_dataset(dataset, n, to_ignore)
n = performances.shape[1] # because 'n' can be changed by the exclude option
# labels
labels = performances.argmax(axis=1)
# shuffling is necessary for some models
X, row_to_dataset, performances, labels = \
sklearn.utils.shuffle(X, row_to_dataset, performances, labels)
# kcross
y_true = []
y_pred = []
avg_performance = 0
all_datasets = set(row_to_dataset)
for dataset in all_datasets:
training_indices = [i for i, d in enumerate(row_to_dataset)
if d != dataset and np.max(performances[i]) > 0.1]
testing_indices = [i for i, d in enumerate(row_to_dataset)
if d == dataset]
for _ in range(50):
per_detector_pred = []
sub_indices = np.random.choice(training_indices, 3*len(training_indices)//4, replace=False)
for m in range(n):
#model = MLfreeModel()
#model = RelativeperformanceModel()
#model = IndirectperformanceModel()
model = AbsolutePrecisionModel(active_features)
#model = DeltaPrecisionNarrowModel()
#model = DeltaPrecisionModel()
#model = DifferentialModel(active_features)
model.set_detector_index(m, n)
model.fit(X[sub_indices], performances[sub_indices])
predictions = model.predict(X[testing_indices])
per_detector_pred.append(predictions.reshape((predictions.shape[0], 1)))
estimations = np.concatenate(per_detector_pred, axis=1)
predicted_labels = estimations.argmax(axis=1)
y_pred += predicted_labels.tolist()
y_true += labels[testing_indices].tolist()
local_performance = performances[testing_indices, predicted_labels]
avg_performance += np.sum(local_performance)
avg_performance /= len(y_pred)
# classification reporting
report = classification_report(y_true, y_pred)
accuracy = accuracy_score(y_true, y_pred)
print("accuracy %f%%" % (100*accuracy))
print(report)
# performance reporting
print("AVG performance(ensemble) = %0.2f%%" % (100*avg_performance))
for i, name in enumerate(names):
p = np.mean(performances[:, i])
print("AVG performance(%s) = %0.2f%%" % (name, 100*p))
print("AVG MAX performance = %0.2f%%" % (100*np.mean(np.max(performances, axis=1))))
# plots
for m in range(n):
model = AbsolutePrecisionModel(active_features)
model.set_detector_index(m, n)
model.fit(X, performances)
model.plot(X, performances, names[m])
| apache-2.0 |
cloud-fan/spark | python/pyspark/sql/pandas/conversion.py | 19 | 23509 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import warnings
from collections import Counter
from pyspark.rdd import _load_from_socket
from pyspark.sql.pandas.serializers import ArrowCollectSerializer
from pyspark.sql.types import IntegralType
from pyspark.sql.types import ByteType, ShortType, IntegerType, LongType, FloatType, \
DoubleType, BooleanType, MapType, TimestampType, StructType, DataType
from pyspark.traceback_utils import SCCallSiteSync
class PandasConversionMixin(object):
"""
    Mix-in for the conversion from Spark to pandas. Currently, only :class:`DataFrame`
can use this class.
"""
def toPandas(self):
"""
        Returns the contents of this :class:`DataFrame` as a Pandas ``pandas.DataFrame``.
This is only available if Pandas is installed and available.
.. versionadded:: 1.3.0
Notes
-----
        This method should only be used if the resulting pandas :class:`DataFrame` is
expected to be small, as all the data is loaded into the driver's memory.
Usage with spark.sql.execution.arrow.pyspark.enabled=True is experimental.
Examples
--------
>>> df.toPandas() # doctest: +SKIP
age name
0 2 Alice
1 5 Bob
"""
from pyspark.sql.dataframe import DataFrame
assert isinstance(self, DataFrame)
from pyspark.sql.pandas.utils import require_minimum_pandas_version
require_minimum_pandas_version()
import numpy as np
import pandas as pd
timezone = self.sql_ctx._conf.sessionLocalTimeZone()
if self.sql_ctx._conf.arrowPySparkEnabled():
use_arrow = True
try:
from pyspark.sql.pandas.types import to_arrow_schema
from pyspark.sql.pandas.utils import require_minimum_pyarrow_version
require_minimum_pyarrow_version()
to_arrow_schema(self.schema)
except Exception as e:
if self.sql_ctx._conf.arrowPySparkFallbackEnabled():
msg = (
"toPandas attempted Arrow optimization because "
"'spark.sql.execution.arrow.pyspark.enabled' is set to true; however, "
"failed by the reason below:\n %s\n"
"Attempting non-optimization as "
"'spark.sql.execution.arrow.pyspark.fallback.enabled' is set to "
"true." % str(e))
warnings.warn(msg)
use_arrow = False
else:
msg = (
"toPandas attempted Arrow optimization because "
"'spark.sql.execution.arrow.pyspark.enabled' is set to true, but has "
"reached the error below and will not continue because automatic fallback "
"with 'spark.sql.execution.arrow.pyspark.fallback.enabled' has been set to "
"false.\n %s" % str(e))
warnings.warn(msg)
raise
# Try to use Arrow optimization when the schema is supported and the required version
# of PyArrow is found, if 'spark.sql.execution.arrow.pyspark.enabled' is enabled.
if use_arrow:
try:
from pyspark.sql.pandas.types import _check_series_localize_timestamps, \
_convert_map_items_to_dict
import pyarrow
# Rename columns to avoid duplicated column names.
tmp_column_names = ['col_{}'.format(i) for i in range(len(self.columns))]
self_destruct = self.sql_ctx._conf.arrowPySparkSelfDestructEnabled()
batches = self.toDF(*tmp_column_names)._collect_as_arrow(
split_batches=self_destruct)
if len(batches) > 0:
table = pyarrow.Table.from_batches(batches)
# Ensure only the table has a reference to the batches, so that
# self_destruct (if enabled) is effective
del batches
# Pandas DataFrame created from PyArrow uses datetime64[ns] for date type
# values, but we should use datetime.date to match the behavior with when
# Arrow optimization is disabled.
pandas_options = {'date_as_object': True}
if self_destruct:
# Configure PyArrow to use as little memory as possible:
# self_destruct - free columns as they are converted
# split_blocks - create a separate Pandas block for each column
# use_threads - convert one column at a time
pandas_options.update({
'self_destruct': True,
'split_blocks': True,
'use_threads': False,
})
pdf = table.to_pandas(**pandas_options)
# Rename back to the original column names.
pdf.columns = self.columns
for field in self.schema:
if isinstance(field.dataType, TimestampType):
pdf[field.name] = \
_check_series_localize_timestamps(pdf[field.name], timezone)
elif isinstance(field.dataType, MapType):
pdf[field.name] = \
_convert_map_items_to_dict(pdf[field.name])
return pdf
else:
return pd.DataFrame.from_records([], columns=self.columns)
except Exception as e:
# We might have to allow fallback here as well but multiple Spark jobs can
# be executed. So, simply fail in this case for now.
msg = (
"toPandas attempted Arrow optimization because "
"'spark.sql.execution.arrow.pyspark.enabled' is set to true, but has "
"reached the error below and can not continue. Note that "
"'spark.sql.execution.arrow.pyspark.fallback.enabled' does not have an "
"effect on failures in the middle of "
"computation.\n %s" % str(e))
warnings.warn(msg)
raise
# Below is toPandas without Arrow optimization.
pdf = pd.DataFrame.from_records(self.collect(), columns=self.columns)
column_counter = Counter(self.columns)
dtype = [None] * len(self.schema)
for fieldIdx, field in enumerate(self.schema):
# For duplicate column name, we use `iloc` to access it.
if column_counter[field.name] > 1:
pandas_col = pdf.iloc[:, fieldIdx]
else:
pandas_col = pdf[field.name]
pandas_type = PandasConversionMixin._to_corrected_pandas_type(field.dataType)
# SPARK-21766: if an integer field is nullable and has null values, it can be
# inferred by pandas as float column. Once we convert the column with NaN back
# to integer type e.g., np.int16, we will hit exception. So we use the inferred
# float type, not the corrected type from the schema in this case.
if pandas_type is not None and \
not(isinstance(field.dataType, IntegralType) and field.nullable and
pandas_col.isnull().any()):
dtype[fieldIdx] = pandas_type
# Ensure we fall back to nullable numpy types, even when whole column is null:
if isinstance(field.dataType, IntegralType) and pandas_col.isnull().any():
dtype[fieldIdx] = np.float64
if isinstance(field.dataType, BooleanType) and pandas_col.isnull().any():
dtype[fieldIdx] = np.object
df = pd.DataFrame()
for index, t in enumerate(dtype):
column_name = self.schema[index].name
# For duplicate column name, we use `iloc` to access it.
if column_counter[column_name] > 1:
series = pdf.iloc[:, index]
else:
series = pdf[column_name]
if t is not None:
series = series.astype(t, copy=False)
# `insert` API makes copy of data, we only do it for Series of duplicate column names.
# `pdf.iloc[:, index] = pdf.iloc[:, index]...` doesn't always work because `iloc` could
# return a view or a copy depending by context.
if column_counter[column_name] > 1:
df.insert(index, column_name, series, allow_duplicates=True)
else:
df[column_name] = series
pdf = df
if timezone is None:
return pdf
else:
from pyspark.sql.pandas.types import _check_series_convert_timestamps_local_tz
for field in self.schema:
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if isinstance(field.dataType, TimestampType):
pdf[field.name] = \
_check_series_convert_timestamps_local_tz(pdf[field.name], timezone)
return pdf
@staticmethod
def _to_corrected_pandas_type(dt):
"""
When converting Spark SQL records to Pandas :class:`DataFrame`, the inferred data type
may be wrong. This method gets the corrected data type for Pandas if that type may be
inferred incorrectly.
"""
import numpy as np
if type(dt) == ByteType:
return np.int8
elif type(dt) == ShortType:
return np.int16
elif type(dt) == IntegerType:
return np.int32
elif type(dt) == LongType:
return np.int64
elif type(dt) == FloatType:
return np.float32
elif type(dt) == DoubleType:
return np.float64
elif type(dt) == BooleanType:
return np.bool
elif type(dt) == TimestampType:
return np.datetime64
else:
return None
def _collect_as_arrow(self, split_batches=False):
"""
Returns all records as a list of ArrowRecordBatches, pyarrow must be installed
and available on driver and worker Python environments.
This is an experimental feature.
:param split_batches: split batches such that each column is in its own allocation, so
that the selfDestruct optimization is effective; default False.
.. note:: Experimental.
"""
from pyspark.sql.dataframe import DataFrame
assert isinstance(self, DataFrame)
with SCCallSiteSync(self._sc):
port, auth_secret, jsocket_auth_server = self._jdf.collectAsArrowToPython()
# Collect list of un-ordered batches where last element is a list of correct order indices
try:
batch_stream = _load_from_socket((port, auth_secret), ArrowCollectSerializer())
if split_batches:
# When spark.sql.execution.arrow.pyspark.selfDestruct.enabled, ensure
# each column in each record batch is contained in its own allocation.
# Otherwise, selfDestruct does nothing; it frees each column as its
# converted, but each column will actually be a list of slices of record
# batches, and so no memory is actually freed until all columns are
# converted.
import pyarrow as pa
results = []
for batch_or_indices in batch_stream:
if isinstance(batch_or_indices, pa.RecordBatch):
batch_or_indices = pa.RecordBatch.from_arrays([
# This call actually reallocates the array
pa.concat_arrays([array])
for array in batch_or_indices
], schema=batch_or_indices.schema)
results.append(batch_or_indices)
else:
results = list(batch_stream)
finally:
# Join serving thread and raise any exceptions from collectAsArrowToPython
jsocket_auth_server.getResult()
# Separate RecordBatches from batch order indices in results
batches = results[:-1]
batch_order = results[-1]
# Re-order the batch list using the correct order
return [batches[i] for i in batch_order]
class SparkConversionMixin(object):
"""
    Mix-in for the conversion from pandas to Spark. Currently, only :class:`SparkSession`
can use this class.
"""
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
from pyspark.sql import SparkSession
assert isinstance(self, SparkSession)
from pyspark.sql.pandas.utils import require_minimum_pandas_version
require_minimum_pandas_version()
timezone = self._wrapped._conf.sessionLocalTimeZone()
# If no schema supplied by user then get the names of columns only
if schema is None:
schema = [str(x) if not isinstance(x, str) else
(x.encode('utf-8') if not isinstance(x, str) else x)
for x in data.columns]
if self._wrapped._conf.arrowPySparkEnabled() and len(data) > 0:
try:
return self._create_from_pandas_with_arrow(data, schema, timezone)
except Exception as e:
if self._wrapped._conf.arrowPySparkFallbackEnabled():
msg = (
"createDataFrame attempted Arrow optimization because "
"'spark.sql.execution.arrow.pyspark.enabled' is set to true; however, "
"failed by the reason below:\n %s\n"
"Attempting non-optimization as "
"'spark.sql.execution.arrow.pyspark.fallback.enabled' is set to "
"true." % str(e))
warnings.warn(msg)
else:
msg = (
"createDataFrame attempted Arrow optimization because "
"'spark.sql.execution.arrow.pyspark.enabled' is set to true, but has "
"reached the error below and will not continue because automatic "
"fallback with 'spark.sql.execution.arrow.pyspark.fallback.enabled' "
"has been set to false.\n %s" % str(e))
warnings.warn(msg)
raise
data = self._convert_from_pandas(data, schema, timezone)
return self._create_dataframe(data, schema, samplingRatio, verifySchema)
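    # Hedged illustration of the pandas path handled above (column names and
    # values are made up; `spark` is assumed to be a SparkSession, and Arrow is
    # used only when the relevant conf is enabled):
    #   pdf = pd.DataFrame({'age': [2, 5], 'name': ['Alice', 'Bob']})
    #   df = spark.createDataFrame(pdf)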
def _convert_from_pandas(self, pdf, schema, timezone):
"""
Convert a pandas.DataFrame to list of records that can be used to make a DataFrame
Returns
-------
list
list of records
"""
from pyspark.sql import SparkSession
assert isinstance(self, SparkSession)
if timezone is not None:
from pyspark.sql.pandas.types import _check_series_convert_timestamps_tz_local
copied = False
if isinstance(schema, StructType):
for field in schema:
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if isinstance(field.dataType, TimestampType):
s = _check_series_convert_timestamps_tz_local(pdf[field.name], timezone)
if s is not pdf[field.name]:
if not copied:
# Copy once if the series is modified to prevent the original
# Pandas DataFrame from being updated
pdf = pdf.copy()
copied = True
pdf[field.name] = s
else:
for column, series in pdf.iteritems():
s = _check_series_convert_timestamps_tz_local(series, timezone)
if s is not series:
if not copied:
# Copy once if the series is modified to prevent the original
# Pandas DataFrame from being updated
pdf = pdf.copy()
copied = True
pdf[column] = s
# Convert pandas.DataFrame to list of numpy records
np_records = pdf.to_records(index=False)
# Check if any columns need to be fixed for Spark to infer properly
if len(np_records) > 0:
record_dtype = self._get_numpy_record_dtype(np_records[0])
if record_dtype is not None:
return [r.astype(record_dtype).tolist() for r in np_records]
# Convert list of numpy records to python lists
return [r.tolist() for r in np_records]
def _get_numpy_record_dtype(self, rec):
"""
Used when converting a pandas.DataFrame to Spark using to_records(), this will correct
the dtypes of fields in a record so they can be properly loaded into Spark.
Parameters
----------
rec : numpy.record
a numpy record to check field dtypes
Returns
-------
numpy.dtype
corrected dtype for a numpy.record or None if no correction needed
"""
import numpy as np
cur_dtypes = rec.dtype
col_names = cur_dtypes.names
record_type_list = []
has_rec_fix = False
for i in range(len(cur_dtypes)):
curr_type = cur_dtypes[i]
# If type is a datetime64 timestamp, convert to microseconds
# NOTE: if dtype is datetime[ns] then np.record.tolist() will output values as longs,
# conversion from [us] or lower will lead to py datetime objects, see SPARK-22417
if curr_type == np.dtype('datetime64[ns]'):
curr_type = 'datetime64[us]'
has_rec_fix = True
record_type_list.append((str(col_names[i]), curr_type))
return np.dtype(record_type_list) if has_rec_fix else None
def _create_from_pandas_with_arrow(self, pdf, schema, timezone):
"""
Create a DataFrame from a given pandas.DataFrame by slicing it into partitions, converting
to Arrow data, then sending to the JVM to parallelize. If a schema is passed in, the
data types will be used to coerce the data in Pandas to Arrow conversion.
"""
from pyspark.sql import SparkSession
from pyspark.sql.dataframe import DataFrame
assert isinstance(self, SparkSession)
from pyspark.sql.pandas.serializers import ArrowStreamPandasSerializer
from pyspark.sql.types import TimestampType
from pyspark.sql.pandas.types import from_arrow_type, to_arrow_type
from pyspark.sql.pandas.utils import require_minimum_pandas_version, \
require_minimum_pyarrow_version
require_minimum_pandas_version()
require_minimum_pyarrow_version()
from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
import pyarrow as pa
# Create the Spark schema from list of names passed in with Arrow types
if isinstance(schema, (list, tuple)):
arrow_schema = pa.Schema.from_pandas(pdf, preserve_index=False)
struct = StructType()
for name, field in zip(schema, arrow_schema):
struct.add(name, from_arrow_type(field.type), nullable=field.nullable)
schema = struct
# Determine arrow types to coerce data when creating batches
if isinstance(schema, StructType):
arrow_types = [to_arrow_type(f.dataType) for f in schema.fields]
elif isinstance(schema, DataType):
raise ValueError("Single data type %s is not supported with Arrow" % str(schema))
else:
# Any timestamps must be coerced to be compatible with Spark
arrow_types = [to_arrow_type(TimestampType())
if is_datetime64_dtype(t) or is_datetime64tz_dtype(t) else None
for t in pdf.dtypes]
# Slice the DataFrame to be batched
step = -(-len(pdf) // self.sparkContext.defaultParallelism) # round int up
pdf_slices = (pdf.iloc[start:start + step] for start in range(0, len(pdf), step))
# Create list of Arrow (columns, type) for serializer dump_stream
arrow_data = [[(c, t) for (_, c), t in zip(pdf_slice.iteritems(), arrow_types)]
for pdf_slice in pdf_slices]
jsqlContext = self._wrapped._jsqlContext
safecheck = self._wrapped._conf.arrowSafeTypeConversion()
col_by_name = True # col by name only applies to StructType columns, can't happen here
ser = ArrowStreamPandasSerializer(timezone, safecheck, col_by_name)
def reader_func(temp_filename):
return self._jvm.PythonSQLUtils.readArrowStreamFromFile(jsqlContext, temp_filename)
def create_RDD_server():
return self._jvm.ArrowRDDServer(jsqlContext)
# Create Spark DataFrame from Arrow stream file, using one batch per partition
jrdd = self._sc._serialize_to_jvm(arrow_data, ser, reader_func, create_RDD_server)
jdf = self._jvm.PythonSQLUtils.toDataFrame(jrdd, schema.json(), jsqlContext)
df = DataFrame(jdf, self._wrapped)
df._schema = schema
return df
def _test():
import doctest
from pyspark.sql import SparkSession
import pyspark.sql.pandas.conversion
globs = pyspark.sql.pandas.conversion.__dict__.copy()
spark = SparkSession.builder\
.master("local[4]")\
.appName("sql.pandas.conversion tests")\
.getOrCreate()
globs['spark'] = spark
(failure_count, test_count) = doctest.testmod(
pyspark.sql.pandas.conversion, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
srinathv/bokeh | bokeh/charts/_properties.py | 2 | 6513 | """ Properties for modeling Chart inputs, constraints, and dependencies.
selection spec:
[['x'], ['x', 'y']]
[{'x': categorical, 'y': numerical}]
"""
from __future__ import absolute_import
import numpy as np
import pandas as pd
from bokeh.properties import (HasProps, Either, String, Int, List, Bool,
PrimitiveProperty, bokeh_integer_types, Array)
from .utils import special_columns, title_from_columns
class Column(Array):
def _is_seq(self, value):
is_array = super(Column, self)._is_seq(value)
return isinstance(value, pd.Series) or isinstance(value, list) or is_array
def _new_instance(self, value):
return pd.Series(value)
def transform(self, value):
if value is None:
return None
if isinstance(value, pd.Series):
arr = value.values
else:
arr = value
trans_array = super(Column, self).transform(arr)
try:
return pd.Series(trans_array)
except ValueError:
raise ValueError("Could not transform %r" % value)
class Logical(Bool):
"""A boolean like data type."""
def validate(self, value):
try:
super(Logical, self).validate(value)
except ValueError:
if isinstance(value, list):
value = np.array(value)
            # If not a Bool, then look for pseudo-logical types
if isinstance(value, np.ndarray):
values = np.unique(value)
if len(values) == 2:
return
raise ValueError('expected a Bool or array with 2 unique values, got %s' % value)
class ColumnLabel(Either):
"""Specify a column by name or index."""
def __init__(self, columns=None, default=None, help=None):
# ToDo: make sure we can select by integer
types = (String,
Int)
self.columns = columns
super(ColumnLabel, self).__init__(*types, default=default, help=help)
def validate(self, value):
"""If we are given a column list, make sure that the column provided is valid."""
super(ColumnLabel, self).validate(value)
if self.columns:
if type(value) in bokeh_integer_types:
if len(self.columns) > value:
return
else:
raise ValueError("Not a valid column selection.")
else:
if value not in self.columns:
raise ValueError("Column provided is not in the list of valid columns: %s" % self.columns)
def __str__(self):
return "Column Name or Column String"
class Dimension(HasProps):
"""Configures valid Chart column selections.
    A dimension is a Chart property that is assigned one or more column names or indices. Each
column can match one or more column types, which are important to charts,
because the type of column selection can greatly affect the behavior of generalized
Charts.
The Dimension also provides convenient utilities for accessing information
    about the currently provided configuration at the global, non-grouped level.
"""
name = String()
alt_names = Either(String, List(String), default=None)
columns = Either(ColumnLabel, List(ColumnLabel), default=None)
valid = Either(PrimitiveProperty, List(PrimitiveProperty), default=None)
invalid = Either(PrimitiveProperty, List(PrimitiveProperty), default=None)
selection = Either(ColumnLabel, List(ColumnLabel), default=None)
def __init__(self, name, **properties):
properties['name'] = name
super(Dimension, self).__init__(**properties)
self._data = pd.DataFrame()
self._chart_source = None
def get_valid_types(self, col_data):
"""Returns all property types that are matched."""
valid_types = list(self.valid)
matches = []
# validate each type on the provided column
for valid_type in valid_types:
prop = valid_type()
# if valid, append to the output
try:
prop.validate(col_data)
matches.append(valid_type)
except ValueError:
pass
return matches
@property
def data(self):
"""The data selected for the Dimension.
Returns pd.Series(1) if data is empty or no selection.
"""
if self._data.empty or self.selection is None:
return pd.Series(1)
else:
# return special column type if available
if self.selection in list(special_columns.keys()):
return special_columns[self.selection](self._data)
return self._data[self.selection]
def set_data(self, data):
"""Builder must provide data so that builder has access to configuration metadata."""
self.selection = data[self.name]
self._chart_source = data
self._data = data.df
self.columns = list(self._data.columns.values)
@property
def min(self):
"""The minimum of one to many column selections."""
if isinstance(self.data, pd.Series):
return self.data.min()
else:
return self.data.min(axis=1).min()
@property
def max(self):
"""The maximum of one to many column selections."""
if isinstance(self.data, pd.Series):
return self.data.max()
else:
return self.data.max(axis=1).max()
@property
def dtype(self):
if isinstance(self.data, pd.DataFrame):
return self.data.dtypes[self.selection[0]]
else:
return self.data.dtype
@property
def computed(self):
if self._chart_source is None:
return False
else:
return self._chart_source.is_computed(self.selection)
@property
def selected_title(self):
"""A title formatted representation of selected columns."""
return title_from_columns(self.selection)
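# Hedged usage sketch for Dimension (the column names below are assumptions;
# in practice Builders call set_data() with a chart data source):
#   dim = Dimension('x', columns=['mpg', 'hp'])
#   dim.selection = 'mpg'
#   dim.data  # falls back to pd.Series(1) until set_data() supplies a frame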
class EitherColumn(Either):
"""Allow providing option of column types."""
# ToDo: incorporate fix into Either
def matches(self, new, old):
comparison = super(EitherColumn, self).matches(new, old)
if isinstance(comparison, bool):
return comparison
elif isinstance(comparison, pd.Series):
return comparison.all()
else:
raise ValueError('Failed when comparing Columns') | bsd-3-clause |
stnava/iTensorFlow | src/keras/veryBasicKerasImageConvnet.py | 1 | 5514 | # very trivial example that shows the basics of data creation
# and building a simple neural network in keras - with train/test
## standard imports
import numpy as np
import matplotlib.pyplot as plt
import plotly.plotly as py
import pandas as pd
import os
import nibabel as nib
from PIL import Image
from scipy.misc import toimage
from keras.preprocessing import image
from keras.models import Model, Sequential
from keras.layers import merge, Dense, Input, Flatten, Dropout, Activation
from keras.layers import Input, merge, Convolution2D, MaxPooling2D, UpSampling2D
from keras.optimizers import SGD, RMSprop
from keras.preprocessing.image import ImageDataGenerator
from keras import backend as K
base_dir = os.environ.get('HOME')+'/code/iTensorFlow/'
img_fn = os.path.join( base_dir,'data/dim2D/regression/spheresRad/train/singlechannel/Image_all.npz')
com_path= os.path.join( base_dir,'data/dim2D/regression/spheresRad/train/singlechannel/spheres2Radius.csv')
teimg_fn = os.path.join( base_dir,'data/dim2D/regression/spheresRad/test/singlechannel/Image_all.npz')
tecom_path= os.path.join( base_dir,'data/dim2D/regression/spheresRad/test/singlechannel/spheres2Radius.csv')
# read numpy data
X_train = np.load( img_fn )['arr_0']
Y_train = np.array( pd.read_csv(com_path) )
X_test = np.load( teimg_fn )['arr_0']
Y_test = np.array( pd.read_csv(tecom_path) )
nx = X_test.shape[1]
if K.image_dim_ordering() == 'th':
X_train = X_train.reshape(X_train.shape[0], 1, nx, nx)
X_test = X_test.reshape(X_test.shape[0], 1, nx, nx)
input_shape = (1, nx, nx)
else:
X_train = X_train.reshape(X_train.shape[0], nx, nx, 1)
X_test = X_test.reshape(X_test.shape[0], nx, nx, 1)
input_shape = (nx, nx, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 2.5
X_test /= 2.5
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
def unet_conv():
img_rows = img_cols = nx
inputs = Input((1, img_rows, img_cols))
conv1 = Convolution2D(2, 3, 3, activation='relu', border_mode='same')(inputs)
conv1 = Convolution2D(4, 3, 3, activation='relu', border_mode='same')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Convolution2D(8, 3, 3, activation='relu', border_mode='same')(pool1)
conv2 = Convolution2D(8, 3, 3, activation='relu', border_mode='same')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(pool2)
conv3 = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(pool3)
conv4 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
conv5 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool4)
conv5 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv5)
up6 = merge([UpSampling2D(size=(2, 2))(conv5), conv4], mode='concat', concat_axis=1)
conv6 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up6)
conv6 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv6)
up7 = merge([UpSampling2D(size=(2, 2))(conv6), conv3], mode='concat', concat_axis=1)
conv7 = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(up7)
conv7 = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(conv7)
up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2], mode='concat', concat_axis=1)
conv8 = Convolution2D(8, 3, 3, activation='relu', border_mode='same')(up8)
conv8 = Convolution2D(8, 3, 3, activation='relu', border_mode='same')(conv8)
up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1], mode='concat', concat_axis=1)
conv9 = Convolution2D(4, 3, 3, activation='relu', border_mode='same')(up9)
flat1 = Flatten()(conv9)
conv10 = Dense( Y_test.shape[1] )(flat1)
model = Model(input=inputs, output=conv10)
return model
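# Note: unet_conv() above hard-codes a channels-first Input((1, nx, nx)), i.e.
# Theano-style dim ordering; only mnist_conv() below is actually used for
# training in this script.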
def mnist_conv():
nb_filters = 32
kernel_size = (3, 3)
pool_size = (2, 2)
model = Sequential()
model.add(Convolution2D( nb_filters, kernel_size[0], kernel_size[1],
border_mode='valid',
input_shape=input_shape))
model.add(Activation('relu'))
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=pool_size))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.05))
model.add(Dense( Y_test.shape[1] ))
return model
model = mnist_conv()
rms = RMSprop()
model.compile( loss='mse', optimizer=rms, metrics=['mse'] )
batch_size = 32
nb_epoch = 50
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
verbose=2, validation_data=(X_test, Y_test))
trscore = model.evaluate(X_train, Y_train, verbose=0)
print('Train score:', trscore[0])
print('Train accuracy:', trscore[1])
tescore = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', tescore[0])
print('Test accuracy:', tescore[1])
Y_pred = model.predict( X_train )
for i in range(Y_test.shape[1]):
print( np.corrcoef(Y_train[:,i],Y_pred[:,i])[0,1] )
Y_pred = model.predict( X_test )
for i in range(Y_test.shape[1]):
print( np.corrcoef(Y_test[:,i],Y_pred[:,i])[0,1] )
i = 2
x, y = Y_test[:,i],Y_pred[:,i]
# plt.scatter(x, y, alpha=.1, s=400)
# plt.show()
| apache-2.0 |
Vimos/scikit-learn | sklearn/utils/setup.py | 77 | 2993 | import os
from os.path import join
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration('utils', parent_package, top_path)
config.add_subpackage('sparsetools')
cblas_libs, blas_info = get_blas_info()
cblas_compile_args = blas_info.pop('extra_compile_args', [])
cblas_includes = [join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])]
libraries = []
if os.name == 'posix':
libraries.append('m')
cblas_libs.append('m')
config.add_extension('sparsefuncs_fast', sources=['sparsefuncs_fast.pyx'],
libraries=libraries)
config.add_extension('arrayfuncs',
sources=['arrayfuncs.pyx'],
depends=[join('src', 'cholesky_delete.h')],
libraries=cblas_libs,
include_dirs=cblas_includes,
extra_compile_args=cblas_compile_args,
**blas_info
)
config.add_extension('murmurhash',
sources=['murmurhash.pyx', join(
'src', 'MurmurHash3.cpp')],
include_dirs=['src'])
config.add_extension('lgamma',
sources=['lgamma.pyx', join('src', 'gamma.c')],
include_dirs=['src'],
libraries=libraries)
config.add_extension('graph_shortest_path',
sources=['graph_shortest_path.pyx'],
include_dirs=[numpy.get_include()])
config.add_extension('fast_dict',
sources=['fast_dict.pyx'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension('seq_dataset',
sources=['seq_dataset.pyx'],
include_dirs=[numpy.get_include()])
config.add_extension('weight_vector',
sources=['weight_vector.pyx'],
include_dirs=cblas_includes,
libraries=cblas_libs,
**blas_info)
config.add_extension("_random",
sources=["_random.pyx"],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension("_logistic_sigmoid",
sources=["_logistic_sigmoid.pyx"],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
valexandersaulys/prudential_insurance_kaggle | venv/lib/python2.7/site-packages/sklearn/datasets/__init__.py | 15 | 3741 | """
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_breast_cancer
from .base import load_linnerud
from .base import load_boston
from .base import get_data_home
from .base import clear_data_home
from .base import load_sample_images
from .base import load_sample_image
from .covtype import fetch_covtype
from .mlcomp import load_mlcomp
from .lfw import load_lfw_pairs
from .lfw import load_lfw_people
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
__all__ = ['clear_data_home',
'dump_svmlight_file',
'fetch_20newsgroups',
'fetch_20newsgroups_vectorized',
'fetch_lfw_pairs',
'fetch_lfw_people',
'fetch_mldata',
'fetch_olivetti_faces',
'fetch_species_distributions',
'fetch_california_housing',
'fetch_covtype',
'fetch_rcv1',
'get_data_home',
'load_boston',
'load_diabetes',
'load_digits',
'load_files',
'load_iris',
'load_breast_cancer',
'load_lfw_pairs',
'load_lfw_people',
'load_linnerud',
'load_mlcomp',
'load_sample_image',
'load_sample_images',
'load_svmlight_file',
'load_svmlight_files',
'make_biclusters',
'make_blobs',
'make_circles',
'make_classification',
'make_checkerboard',
'make_friedman1',
'make_friedman2',
'make_friedman3',
'make_gaussian_quantiles',
'make_hastie_10_2',
'make_low_rank_matrix',
'make_moons',
'make_multilabel_classification',
'make_regression',
'make_s_curve',
'make_sparse_coded_signal',
'make_sparse_spd_matrix',
'make_sparse_uncorrelated',
'make_spd_matrix',
'make_swiss_roll',
'mldata_filename']
| gpl-2.0 |
RajivThamburaj/PyTracer | PyTracer/display.py | 1 | 5881 | """
Classes for storing and rendering geometric objects
Author: Rajiv Thamburaj
"""
import constants as const
import solids
import numpy as np
import matplotlib.pyplot as plt
import sample
import math
import random
class World(object):
"""
Models and renders a 'world' of objects
"""
def __init__(self):
"""
Initializer
"""
self.build()
self.pixels = np.zeros((self.sr.screen_width, self.sr.screen_height, 3))
self.render()
self.display_image()
def build(self):
"""
Places objects in the World
"""
# Set up the screen rectangle (stores information to render the scene)
self.sr = ScreenRect()
self.sr.screen_width = 200
self.sr.screen_height = 200
self.sr.pixel_width = 1.0
self.sr.num_samples = 1
self.sr.sampler = sample.UniformSampler(self.sr.num_samples)
# Set up instance variables
self.background_color = const.BLACK
self.tracer = MultiplePrimitivesTracer(self)
self.view_distance = 200
self.plane_distance = 100
# Sphere 1
center = np.array([0, -25, 0], float)
radius = 80
color = const.RED
sphere_1 = solids.Sphere(center, radius, color)
# Sphere 2
center = np.array([0, 30, 0], float)
radius = 60
color = const.YELLOW
sphere_2 = solids.Sphere(center, radius, color)
# Plane
point = np.array([0, 0, 0], float)
normal = np.array([0, 1, 1], float)
color = np.array([0, 0.3, 0], float)
plane = solids.Plane(point, normal, color)
self.objects = [sphere_1, sphere_2, plane]
def render(self):
"""
Renders the image, pixel by pixel
"""
# Origin for the rays on the z-axis
z_w = 100.0
ray_direction = np.array([0, 0, -1], float)
# Find the side length of the sample rectangle
n = int(math.sqrt(self.sr.num_samples))
for i in xrange(0, self.sr.screen_height):
self.print_progress(i)
for j in xrange(0, self.sr.screen_width):
pixel_color = const.BLACK
n = self.sr.num_samples
# Loop through all samples
for p in xrange(n):
# Get the next sampling point in the set [0,1] x [0,1]
sample_point = self.sr.sampler.get_square_sample()
# Find the point within the current pixel to sample
x = self.sr.pixel_width * (j - 0.5*self.sr.screen_width + sample_point[0])
y = self.sr.pixel_width * (i - 0.5*self.sr.screen_height + sample_point[1])
ray_origin = np.array([x, y, z_w], float)
# Add the color to the current value
pixel_color = pixel_color + self.tracer.trace_ray(ray_origin, ray_direction)
# Take the average of the colors
pixel_color = pixel_color*(1.0/self.sr.num_samples)
# Store the pixel color
self.add_pixel(i, j, pixel_color)
def render_perspective(self):
"""
Renders the image, pixel by pixel, with perspective tracing
"""
# Origin for the rays on the z-axis (common)
ray_origin = np.array([0, 0, self.view_distance], float)
for i in xrange(0, self.sr.screen_height):
self.print_progress(i)
for j in xrange(0, self.sr.screen_width):
# Find the direction of the ray
d_x = self.sr.pixel_width * (j - 0.5*(self.sr.screen_width - 1.0))
d_y = self.sr.pixel_width * (i - 0.5*(self.sr.screen_height - 1.0))
d_z = -self.plane_distance
ray_direction = np.array([d_x, d_y, d_z], float)
ray_direction = ray_direction * (1.0/np.linalg.norm(ray_direction))
# Find the color of the pixel and store it
pixel_color = self.tracer.trace_ray(ray_origin, ray_direction)
self.add_pixel(i, j, pixel_color)
def print_progress(self, outer_loop_index):
"""
Print how far the render has progressed
"""
percent_progress = 100.0*outer_loop_index/self.sr.screen_height
print str(percent_progress) + "%"
def add_pixel(self, row, column, color):
"""
Adds the pixel color to the numpy array of pixels
"""
if self.sr.gamma != 1.0:
color = color**(1.0/self.sr.gamma)
x = column
y = self.sr.screen_height - row - 1
# The internal coordinates must be mapped to the representation in the array
self.pixels[y,x] = color
def display_image(self):
"""
Displays the image on the screen
"""
plt.imshow(self.pixels)
plt.axis("off")
plt.show()
def hit_primitives(self, ray_origin, ray_direction):
"""
Find the closest hit point for the given ray
"""
t_min = float("inf")
shade_rectangle = ShadeRectangle(self)
# Loop through all objects
for object in self.objects:
if object.did_hit(ray_origin, ray_direction, shade_rectangle):
# Find the closest point of intersection
if object.t_min < t_min:
shade_rectangle.did_hit = True
t_min = object.t_min
shade_rectangle.color = object.color
return shade_rectangle
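# Hedged driver sketch: instantiating World() is enough to build the scene,
# render it, and display the image (see __init__ above), e.g.
#   if __name__ == "__main__":
#       World()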
class ScreenRect(object):
"""
Keeps track of information needed to render the current scene
"""
def __init__(self):
"""
Initializer
"""
# Set default values
self.screen_width = 100
self.screen_height = 100
self.pixel_width = 1.0
self.gamma = 1.0
class ShadeRectangle(object):
"""
Keeps track of information needed to shade a ray's hit point
"""
def __init__(self, world):
"""
Initializer
"""
self.world = world
self.did_hit = False
self.normal = None
self.local_hit_point = None
class Tracer(object):
"""
Specifies a set of rules for the way an object should be rendered
"""
def __init__(self, world):
"""
Initializer
"""
self.world = world
def trace_ray(self, ray_origin, ray_direction):
"""
Determines the color of the pixel for the given ray
"""
return const.BLACK
class MultiplePrimitivesTracer(Tracer):
"""
Tracer for drawing multiple primitives
"""
def trace_ray(self, ray_origin, ray_direction):
"""
Determines the color of the pixel for the given ray
"""
shade_rectangle = self.world.hit_primitives(ray_origin, ray_direction)
if shade_rectangle.did_hit:
return shade_rectangle.color
else:
return self.world.background_color | mit |
xancandal/hdf5-heat | heat.h5.py | 1 | 1182 | import h5py
import matplotlib
matplotlib.use("GTKAgg") # Change this as desired.
import gobject
from pylab import *
# if len(sys.argv) != 3:
# print( "Error: invalid arguments" )
# print( "usage: heat <h5filename> <Value of z-plane slab>" )
# exit()
# # Obtain filename from command-line parameters
# filename = sys.argv[1]
# section = sys.argv[2]
#### Value of z-plane slab
section = 50
# Open file
#### file = h5py.File( filename, "r" )
file = h5py.File( "data.h5", "r" )
# Extract temperature data
temperature = file["temperature"]
# Function called for updating the figure
def updatefig(*args):
global temperature, frame
im.set_array(temperature[frame,:,:,section])
manager.canvas.draw()
frame+=1
print "Rendering timestep t=",frame
if(frame>=len(temperature)):
return False
return True
# Create figure and plot initial data
fig = plt.figure(1)
img = subplot(111)
im = img.imshow( temperature[0,:,:,section], cmap=cm.hot, interpolation="nearest", origin="lower", vmax=1.01 )
manager = get_current_fig_manager()
frame = 1
fig.colorbar(im)
# Whenever idle, update the figure while updatefig returns True
gobject.idle_add( updatefig )
show()
| gpl-3.0 |
jungla/ICOM-fluidity-toolbox | 2D/U/plot_PV.py | 1 | 4101 | from memory_profiler import memory_usage
from matplotlib.colors import LinearSegmentedColormap
import os, sys
import gc
import fio, myfun
import vtktools
import numpy as np
import matplotlib as mpl
mpl.use('ps')
import matplotlib.pyplot as plt
gc.enable()
## READ archive (too many points... somehow)
# args: name, dayi, dayf, days
label = 'r_3k_B_1F0'
basename = 'ring'
dayi = 100
dayf = 101
days = 1
label = sys.argv[1]
basename = sys.argv[2]
dayi = int(sys.argv[3])
dayf = int(sys.argv[4])
days = int(sys.argv[5])
path = '/tamay2/mensa/fluidity/'+label+'/'
try: os.stat('./plot/'+label)
except OSError: os.mkdir('./plot/'+label)
file0 = basename+'_' + str(1) + '.pvtu'
filepath = path+file0
print 'reading',filepath
#
data = vtktools.vtu(filepath)
coords = data.GetLocations()
del data
gc.collect()
print memory_usage(-1, interval=.2, timeout=.2)
depths = sorted(list(set(coords[:,2])))
f = 0.00073
g = 9.81
r0 = 1027.0
xn = 150
tn = 24
Xr = np.linspace(0.0,np.max(coords[:,0]),xn)
del coords
gc.collect()
Tr = np.linspace(0,2.0*np.pi,tn)
pts = []
for t in Tr:
for r in Xr:
for z in depths:
pts.append([r*np.cos(t),r*np.sin(t),z])
pts = np.asarray(pts)
print 'looping'
print memory_usage(-1, interval=.2, timeout=.2)
d = 4
for time in range(dayi,dayf,days):
PVt = np.zeros((d,len(depths),xn))
Rhot = np.zeros((d,len(depths),xn))
tlabel = str(time)
file1 = label+'_' + tlabel
print 'day',tlabel
#
for stime in range(d):
stlabel = str(time+stime)
while len(stlabel) < 3: stlabel = '0'+stlabel
file0 = basename+'_' + str(time+stime) + '.pvtu'
filepath = path+file0
#
print 'opening: ', filepath
#
#
data = vtktools.vtu(filepath)
print 'fields: ', data.GetFieldNames()
print 'extract V'
ddc = data.GetDerivative('Velocity_CG')
Zc = ddc[:,1]-ddc[:,3]
del ddc
gc.collect()
print memory_usage(-1, interval=.2, timeout=.2)
ddr = data.GetDerivative('Density_CG')
gR = ddr[:,2]
del ddr
gc.collect()
print memory_usage(-1, interval=.2, timeout=.2)
pZ = (Zc+f)*(gR)*-g/r0
del Zc, gR
gc.collect()
print memory_usage(-1, interval=.2, timeout=.2)
data.AddField('PVorticity', pZ)
del pZ
gc.collect()
print memory_usage(-1, interval=.2, timeout=.2)
# data.AddField('Vorticity', Zc)
# data.AddField('gradRho', gR)
data.CellDataToPointData()
# Z = data.ProbeData(pts,'Vorticity')
PV = data.ProbeData(pts,'PVorticity')
Rho = data.ProbeData(pts,'Density_CG')
del data
gc.collect()
print memory_usage(-1, interval=.2, timeout=.2)
# gRho = data.ProbeData(pts,'gradRho')
#
# Azimuthal average
PVr = np.reshape(PV,(tn,xn,len(depths)))
# Zr = np.reshape(Z,(tn,xn,len(depths)))
Rhor = np.reshape(Rho,(tn,xn,len(depths)))
# gRhor = np.reshape(gRho,(tn,xn,len(depths)))
# gRhot = np.zeros((len(depths),xn))
# Zt = np.zeros((len(depths),xn))
for r in range(len(Xr)):
for z in range(len(depths)):
PVt[stime,z,r] = np.mean(PVr[:,r,z])
# Zt[z,r] = np.mean(Zr[:,r,z])
Rhot[stime,z,r] = np.mean(Rhor[:,r,z])
# gRhot[z,r] = np.mean(gRhor[:,r,z])
#
gc.collect()
cdict1 = {'red': ((0.0, 0.0, 0.0),
(0.05, 1.0, 1.0),
(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.0),
(0.05, 1.0, 1.0),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 0.0, 1.0),
(0.05, 1.0, 1.0),
(1.0, 0.0, 0.0))
}
blue_red1 = LinearSegmentedColormap('BlueRed1', cdict1)
v = np.linspace(-0.1e-8, 2e-8, 50, endpoint=True)
vl = np.linspace(-0.1e-8, 2e-8, 5, endpoint=True)
fig = plt.figure()
#plt.contour(Xr,depths,np.mean(PVt,0),colors='k',levels=v)
plt.contourf(Xr/1000,depths,np.mean(PVt,0),v,extend='both',cmap=blue_red1) # plt.cm.PiYG)
plt.colorbar(ticks=vl)
# plt.contour(Xr,depths,np.mean(Rhot,0),colors='k')
plt.xlabel('radius [Km]')
plt.ylabel('depth [m]')
plt.savefig('./plot/'+label+'/PV_'+file1+'.eps',bbox_inches='tight')
plt.close()
del fig
gc.collect()
print memory_usage(-1, interval=.2, timeout=.2)
print 'saving', './plot/'+label+'/PV_'+file1+'.eps'
| gpl-2.0 |
RTHMaK/RPGOne | doc/examples/features_detection/plot_gabor.py | 21 | 4450 | """
=============================================
Gabor filter banks for texture classification
=============================================
In this example, we will see how to classify textures based on Gabor filter
banks. Frequency and orientation representations of the Gabor filter are
similar to those of the human visual system.
The images are filtered using the real parts of various different Gabor filter
kernels. The mean and variance of the filtered images are then used as features
for classification, which is based on the least squared error for simplicity.
"""
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
from scipy import ndimage as ndi
from skimage import data
from skimage.util import img_as_float
from skimage.filters import gabor_kernel
def compute_feats(image, kernels):
feats = np.zeros((len(kernels), 2), dtype=np.double)
for k, kernel in enumerate(kernels):
filtered = ndi.convolve(image, kernel, mode='wrap')
feats[k, 0] = filtered.mean()
feats[k, 1] = filtered.var()
return feats
def match(feats, ref_feats):
min_error = np.inf
min_i = None
for i in range(ref_feats.shape[0]):
error = np.sum((feats - ref_feats[i, :])**2)
if error < min_error:
min_error = error
min_i = i
return min_i
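# Hedged sketch of how the two helpers combine (assumes `kernels`, `ref_feats`
# and `image_names` built further below; `patch` is any grayscale float image):
#   feats = compute_feats(patch, kernels)            # (n_kernels, 2) mean/var
#   best_label = image_names[match(feats, ref_feats)]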
# prepare filter bank kernels
kernels = []
for theta in range(4):
theta = theta / 4. * np.pi
for sigma in (1, 3):
for frequency in (0.05, 0.25):
kernel = np.real(gabor_kernel(frequency, theta=theta,
sigma_x=sigma, sigma_y=sigma))
kernels.append(kernel)
shrink = (slice(0, None, 3), slice(0, None, 3))
brick = img_as_float(data.load('brick.png'))[shrink]
grass = img_as_float(data.load('grass.png'))[shrink]
wall = img_as_float(data.load('rough-wall.png'))[shrink]
image_names = ('brick', 'grass', 'wall')
images = (brick, grass, wall)
# prepare reference features
ref_feats = np.zeros((3, len(kernels), 2), dtype=np.double)
ref_feats[0, :, :] = compute_feats(brick, kernels)
ref_feats[1, :, :] = compute_feats(grass, kernels)
ref_feats[2, :, :] = compute_feats(wall, kernels)
print('Rotated images matched against references using Gabor filter banks:')
print('original: brick, rotated: 30deg, match result: ', end='')
feats = compute_feats(ndi.rotate(brick, angle=190, reshape=False), kernels)
print(image_names[match(feats, ref_feats)])
print('original: brick, rotated: 70deg, match result: ', end='')
feats = compute_feats(ndi.rotate(brick, angle=70, reshape=False), kernels)
print(image_names[match(feats, ref_feats)])
print('original: grass, rotated: 145deg, match result: ', end='')
feats = compute_feats(ndi.rotate(grass, angle=145, reshape=False), kernels)
print(image_names[match(feats, ref_feats)])
def power(image, kernel):
# Normalize images for better comparison.
image = (image - image.mean()) / image.std()
return np.sqrt(ndi.convolve(image, np.real(kernel), mode='wrap')**2 +
ndi.convolve(image, np.imag(kernel), mode='wrap')**2)
# Plot a selection of the filter bank kernels and their responses.
results = []
kernel_params = []
for theta in (0, 1):
theta = theta / 4. * np.pi
for frequency in (0.1, 0.4):
kernel = gabor_kernel(frequency, theta=theta)
params = 'theta=%d,\nfrequency=%.2f' % (theta * 180 / np.pi, frequency)
kernel_params.append(params)
# Save kernel and the power image for each image
results.append((kernel, [power(img, kernel) for img in images]))
fig, axes = plt.subplots(nrows=5, ncols=4, figsize=(5, 6))
plt.gray()
fig.suptitle('Image responses for Gabor filter kernels', fontsize=12)
axes[0][0].axis('off')
# Plot original images
for label, img, ax in zip(image_names, images, axes[0][1:]):
ax.imshow(img)
ax.set_title(label, fontsize=9)
ax.axis('off')
for label, (kernel, powers), ax_row in zip(kernel_params, results, axes[1:]):
# Plot Gabor kernel
ax = ax_row[0]
ax.imshow(np.real(kernel), interpolation='nearest')
ax.set_ylabel(label, fontsize=7)
ax.set_xticks([])
ax.set_yticks([])
# Plot Gabor responses with the contrast normalized for each filter
vmin = np.min(powers)
vmax = np.max(powers)
for patch, ax in zip(powers, ax_row[1:]):
ax.imshow(patch, vmin=vmin, vmax=vmax)
ax.axis('off')
plt.show()
| apache-2.0 |
johnyf/gr1experiments | tugs/solver.py | 1 | 27671 | import argparse
import copy
import datetime
import logging
import math
import os
import time
from dd import cudd as _bdd
import humanize
import natsort
from omega.logic import syntax
from omega.symbolic import symbolic
import psutil
import __builtin__
try:
__builtin__.profile
except AttributeError:
def profile(func):
return func
log = logging.getLogger(__name__)
# algorithms
BINARY_CONJ = False # overrides `DEFER_Z`
DEFER_Z = False
TWO_MANAGERS = True
MEMOIZE_ITERATES = True
FEEDBACK = False
TIGHT = True
# constants
REORDERING_LOG = 'reorder'
COUNTER = '_jx_b'
SELECTOR = 'strat_type'
WINNING_SET_FILE = 'winning_set'
STRATEGY_FILE = 'tugs_strategy.dddmp'
GB = 2**30
MAX_MEMORY_GB = 10
MAX_MEMORY = MAX_MEMORY_GB * GB
INIT_CACHE = 2**18
# TODO:
#
# group primed and unprimed vars
# use efficient rename for neighbors
# use a CUDD map for repeated renaming
#
# init of counter and strategy_type
# allow passing a desired level for the first bit
# of an integer
@profile
def solve_game(s, load_win_set=False,
win_set_fname=None, strategy_fname=None,
max_memory=None, only_win=False):
"""Construct transducer for game in file `fname`.
@param s: `str` in `slugs` syntax
"""
    log.info('++ solve_game')
if max_memory is None:
max_memory = MAX_MEMORY
d = parse_slugsin(s)
bdd = _bdd.BDD(
memory_estimate=max_memory,
initial_cache_size=INIT_CACHE)
bdd.configure(
max_memory=max_memory,
max_growth=1.2)
log.info(bdd.configure())
aut = make_automaton(d, bdd)
log_bdd(bdd)
if load_win_set:
z = _bdd.load(win_set_fname, bdd)
else:
# z = slugs_fixpoint(aut)
# z = compute_winning_set_nolog(aut)
z = compute_winning_set(aut)
# z = debug_compute_winning_set(aut)
dump_winning_set(z, bdd, fname=win_set_fname)
log_bdd(bdd)
if z == bdd.false:
print('empty winning set')
return
if only_win:
print('only winning states computed')
return
t = construct_streett_transducer(z, aut, max_memory=max_memory)
dump_strategy(t, fname=strategy_fname)
del z
def dump_winning_set(z, bdd, fname=None):
"""Dump winning set BDD as DDDMP file."""
log.debug('++ dump_winning_set')
if fname is None:
fname = WINNING_SET_FILE
t0 = time.time()
log_event(dump_winning_set_start=True)
memory = 3 * GB
b = _bdd.BDD(memory_estimate=memory)
b.configure(max_memory=memory, reordering=False)
_bdd.copy_vars(bdd, b)
order = var_order(bdd)
_bdd.reorder(b, order)
u = _bdd.copy_bdd(z, bdd, b)
_bdd.dump(u, fname, b)
del u
t1 = time.time()
dt = t1 - t0
log.info(
'Winning set dumped in {dt:1.2} sec'.format(
dt=dt))
log_event(dump_winning_set_end=True)
log.debug('-- done dump_winning_set')
def dump_strategy(t, fname=None):
"""Dump strategy relation BDD as DDDMP file."""
if fname is None:
fname = STRATEGY_FILE
log_event(dump_strategy_start=True)
t0 = time.time()
action = t.action['sys'][0]
t.bdd.dump(action, fname)
t1 = time.time()
dt = t1 - t0
log.info(
'Strategy dumped in {dt:1.2} sec.'.format(dt=dt))
log_event(dump_strategy_end=True)
def log_reordering(fname):
reordering_fname = 'reordering_{f}'.format(f=fname)
log = logging.getLogger(REORDERING_LOG)
h = logging.FileHandler(reordering_fname, 'w')
log.addHandler(h)
log.setLevel('ERROR')
def parse_slugsin(s):
"""Return `dict` keyed by `slugsin` file section."""
log_event(parse_slugsin=True)
sections = dict(
INPUT='input',
OUTPUT='output',
ENV_INIT='env_init',
SYS_INIT='sys_init',
ENV_TRANS='env_action',
SYS_TRANS='sys_action',
ENV_LIVENESS='env_liveness',
SYS_LIVENESS='sys_liveness')
sections = {
'[{k}]'.format(k=k): v
for k, v in sections.iteritems()}
d = dict()
store = None
for line in s.splitlines():
if not line or line.startswith('#'):
continue
if line in sections:
store = list()
key = sections[line]
d[key] = store
continue
assert store is not None
store.append(line)
log.info('-- done parse_slugsin')
return d
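# Added sketch (not part of the solver): the section-to-key mapping that
# `parse_slugsin` produces, shown on a made-up two-bit spec in prefix syntax.
def _demo_parse_slugsin():
    s = '\n'.join([
        '[INPUT]', 'x',
        '[OUTPUT]', 'y',
        '[SYS_TRANS]', '| ! x y'])
    d = parse_slugsin(s)
    assert d == dict(input=['x'], output=['y'], sys_action=['| ! x y']), d
    return d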
def make_automaton(d, bdd):
"""Return `symbolic.Automaton` from slugsin spec.
@type d: dict(str: list)
"""
log_event(make_automaton=True)
# bits -- shouldn't produce safety or init formulae
a = symbolic.Automaton()
a.vars = _init_vars(d)
a = symbolic._bitblast(a)
# formulae
a.init['env'].extend(d['env_init'])
a.action['env'].extend(d['env_action'])
a.init['sys'].extend(d['sys_init'])
a.action['sys'].extend(d['sys_action'])
a.win['[]<>'].extend(d['sys_liveness'])
# negate, to obtain persistence
persistence = ('!' + u for u in d['env_liveness'])
a.win['<>[]'].extend(persistence)
a.conjoin('prefix')
# compile
a.bdd = bdd # use `cudd.BDD`, but fill vars
a = symbolic._bitvector_to_bdd(a)
symbolic.fill_blanks(a, as_bdd=True)
return a
def _init_vars(d):
dvars = dict()
players = dict(input='env', output='sys')
for section in ('input', 'output'):
if section not in d:
continue
owner = players[section]
for bit in d[section]:
dvars[bit] = dict(type='bool', owner=owner)
return dvars
def slugs_fixpoint(aut):
"""Compute winning region, w/o memoizing iterates."""
log_event(winning_set_start=True)
bdd = aut.bdd
env_action = aut.action['env'][0]
sys_action = aut.action['sys'][0]
z = bdd.true
zold = None
while z != zold:
zold = z
znew = bdd.true
zp = _bdd.rename(z, bdd, aut.prime)
for goal in aut.win['[]<>']:
live_trans = goal & zp
y = bdd.false
yold = None
while y != yold:
yold = y
live_trans = live_trans | _bdd.rename(y, bdd, aut.prime)
good = y
for excuse in aut.win['<>[]']:
x = bdd.true
xold = None
while x != xold:
xold = x
x = (
live_trans |
(_bdd.rename(x, bdd, aut.prime) & excuse))
x = (x & sys_action) | ~ env_action
x = bdd.quantify(x, aut.epvars, forall=False)
x = bdd.quantify(x, aut.upvars, forall=True)
good = good | x
y = good
z = znew & y
log.info('Reached Z fixpoint')
log_bdd(bdd, level=20)
log_event(winning_set_end=True)
return z
def debug_compute_winning_set(aut):
"""Compute winning region, w/o memoizing iterates."""
bdd = aut.bdd
env_action = aut.action['env'][0]
sys_action = aut.action['sys'][0]
bdd.dump(env_action, 'env_action_gr1x.txt')
bdd.dump(sys_action, 'sys_action_gr1x.txt')
for i, goal in enumerate(aut.win['[]<>']):
fname = 'goal_{i}_gr1x.txt'.format(i=i)
bdd.dump(goal, fname)
for j, excuse in enumerate(aut.win['<>[]']):
fname = 'assumption_{j}_gr1x.txt'.format(j=j)
assumption = ~ excuse
bdd.dump(assumption, fname)
z = bdd.true
zold = None
while z != zold:
zold = z
znew = bdd.true
for goal in aut.win['[]<>']:
zp = _bdd.rename(z, bdd, aut.prime)
live_trans = goal & zp
y = bdd.false
yold = None
while y != yold:
yold = y
yp = _bdd.rename(y, bdd, aut.prime)
live_trans = live_trans | yp
good = y
for excuse in aut.win['<>[]']:
x = bdd.true
xold = None
while x != xold:
xold = x
xp = _bdd.rename(x, bdd, aut.prime)
paths = xp & excuse
paths = paths | live_trans
x = _bdd.and_exists(paths, sys_action,
aut.epvars, bdd)
x = _bdd.or_forall(x, ~ env_action,
aut.upvars, bdd)
good = good | x
y = good
znew = znew & y
z = znew
return z
def compute_winning_set_nolog(aut, z=None):
"""Compute winning region, w/o memoizing iterates."""
log_event(winning_set_start=True)
bdd = aut.bdd
env_action = aut.action['env'][0]
sys_action = aut.action['sys'][0]
if z is None:
z = bdd.true
zold = None
while z != zold:
zold = z
# moved this
zp = _bdd.rename(z, bdd, aut.prime)
yj = list()
for goal in aut.win['[]<>']:
live_trans = goal & zp
y = bdd.false
yold = None
while y != yold:
yold = y
yp = _bdd.rename(y, bdd, aut.prime)
live_trans = live_trans | yp
good = y
for excuse in aut.win['<>[]']:
x = bdd.true
xold = None
while x != xold:
xold = x
xp = _bdd.rename(x, bdd, aut.prime)
x = (xp & excuse) | live_trans
x = _bdd.or_forall(
_bdd.and_exists(x, sys_action,
aut.epvars, bdd),
~ env_action, aut.upvars, bdd)
del xold
good = good | x
del x
y = good
del good
del yold, live_trans
if BINARY_CONJ or DEFER_Z:
yj.append(y)
else:
z = z & y
del y, goal
del zp
if BINARY_CONJ:
z = syntax.recurse_binary(conj, yj)
elif DEFER_Z:
z = syntax._linear_operator_simple(conj, yj)
log.info('Reached Z fixpoint')
log_bdd(bdd, level=20)
log_event(winning_set_end=True)
return z
@profile
def compute_winning_set(aut, z=None):
"""Compute winning region, w/o memoizing iterates."""
log_event(winning_set_start=True)
# reordering_log = logging.getLogger(REORDERING_LOG)
bdd = aut.bdd
env_action = aut.action['env'][0]
sys_action = aut.action['sys'][0]
if z is None:
z = bdd.true
zold = None
log.debug('Before z fixpoint')
while z != zold:
log.debug('Start Z iteration')
paths_memoized = list()
zold = z
if not FEEDBACK:
zp = _bdd.rename(zold, bdd, aut.prime)
yj = list()
for j, goal in enumerate(aut.win['[]<>']):
log.debug('Goal: {j}'.format(j=j))
# log.info(bdd)
if FEEDBACK:
zp = _bdd.rename(z, bdd, aut.prime)
live_trans = goal & zp
y = bdd.false
yold = None
while y != yold:
log.debug('Start Y iteration')
yold = y
yp = _bdd.rename(y, bdd, aut.prime)
live_trans = live_trans | yp
or_x = y
for i, excuse in enumerate(aut.win['<>[]']):
if FEEDBACK:
x = z
else:
x = zold
xold = None
while x != xold:
log.debug('Start X iteration')
xold = x
xp = _bdd.rename(x, bdd, aut.prime)
# desired transitions
x = xp & excuse
x = x | live_trans
x = _bdd.and_exists(x, sys_action,
aut.epvars, bdd)
x = _bdd.or_forall(x, ~ env_action,
aut.upvars, bdd)
if log.getEffectiveLevel() <= logging.DEBUG:
log_loop(i, j, None, x, y, z)
log_bdd(bdd)
log.debug('Reached X fixpoint')
del xold
or_x = or_x | x
del x
if MEMOIZE_ITERATES:
paths_memoized.append(
((xp & excuse) | live_trans) &
sys_action)
y = or_x
del or_x
log.debug('Reached Y fixpoint')
del yold, live_trans
if BINARY_CONJ or DEFER_Z:
yj.append(y)
elif TIGHT:
z = z & y
else:
z = y
del y, goal
del zp
if BINARY_CONJ:
z = syntax.recurse_binary(conj, yj)
elif DEFER_Z:
z = syntax._linear_operator_simple(conj, yj)
# bdd.assert_consistent()
log.info('Reached Z fixpoint')
log_bdd(bdd, level=20)
log_event(winning_set_end=True)
return z
@profile
def construct_streett_transducer(z, aut, max_memory=None):
"""Return Street(1) I/O transducer."""
log_event(make_transducer_start=True)
if max_memory is None:
max_memory = MAX_MEMORY
# reordering_log = logging.getLogger(REORDERING_LOG)
bdd = aut.bdd
# one more manager
if TWO_MANAGERS:
b3 = _bdd.BDD(memory_estimate=max_memory)
b3.configure(max_memory=max_memory)
_bdd.copy_vars(bdd, b3)
# copy var order
# order = var_order(bdd)
# _bdd.reorder(b3, order)
# copy actions with reordering off
else:
b3 = bdd
env_action = aut.action['env'][0]
sys_action = aut.action['sys'][0]
sys_action_2 = copy_bdd(sys_action, bdd, b3)
env_action_2 = copy_bdd(env_action, bdd, b3)
# Compute iterates, now that we know the outer fixpoint
log_bdd(b3, name='b3_', level=20)
log.info('done copying actions')
zp = _bdd.rename(z, bdd, aut.prime)
# transducer automaton
t = symbolic.Automaton()
t.vars = copy.deepcopy(aut.vars)
t.vars[SELECTOR] = dict(type='bool', owner='sys', level=0)
n_goals = len(aut.win['[]<>'])
t.vars[COUNTER] = dict(
type='saturating', dom=(0, n_goals - 1),
owner='sys', level=0)
t = t.build(b3, add=True)
transducers = list()
selector = t.add_expr(SELECTOR)
# max_vars = 20
# b3.configure(max_vars=max_vars)
log.info(b3.configure())
for j, goal in enumerate(aut.win['[]<>']):
log.debug('Goal: {j}'.format(j=j))
log_bdd(bdd)
# for fixpoint
live_trans = goal & zp
y = bdd.false
yold = None
# for strategy construction
covered = b3.false
transducer = b3.false
while y != yold:
log.debug('Start Y iteration')
yold = y
yp = _bdd.rename(y, bdd, aut.prime)
live_trans = live_trans | yp
or_x = y
for i, excuse in enumerate(aut.win['<>[]']):
x = z
xold = None
paths = None
new = None
while x != xold:
del paths, new
log.debug('Start X iteration')
xold = x
xp = _bdd.rename(x, bdd, aut.prime)
x = xp & excuse
del xp
paths = x | live_trans
new = _bdd.and_exists(paths, sys_action,
aut.epvars, bdd)
x = _bdd.or_forall(new, ~ env_action,
aut.upvars, bdd)
if log.getEffectiveLevel() <= logging.DEBUG:
log_loop(i, j, None, x, y, z)
log_bdd(bdd)
log.debug('Reached X fixpoint')
del xold, excuse
or_x = or_x | x
del x
# strategy construction
# in `b3`
log.debug('transfer `paths` to `b3`')
paths = copy_bdd(paths, bdd, b3)
new = copy_bdd(new, bdd, b3)
log.debug('done transferring')
rim = new & ~ covered
covered = covered | new
del new
rim = rim & paths
del paths
transducer = transducer | rim
del rim
y = or_x
del or_x
log.debug('Reached Y fixpoint (Y = Z)')
assert y == z, (y, z)
del y, yold, covered
log_bdd(b3, name='b3_')
# make transducer
goal = copy_bdd(goal, bdd, b3)
e = '{c} = {j}'.format(c=COUNTER, j=j)
counter = t.add_expr(e)
u = goal | ~ selector
del goal
u = counter & u
del counter
transducer = transducer & u
del u
transducer = transducer & sys_action_2
# check_winning_region(transducer, aut, t,
# bdd, other_bdd, z, j)
transducers.append(transducer)
# s = var_order(other_bdd)
# reordering_log.debug(repr(s))
del transducer
del sys_action_2, zp
log_bdd(b3, name='b3_', level=20)
log.info('disjoin transducers')
if BINARY_CONJ:
transducer = syntax.recurse_binary(disj, transducers)
n_remain = len(transducers)
assert n_remain == 0, n_remain
else:
transducer = syntax._linear_operator_simple(
disj, transducers)
log.info('done with disjunction')
# add counter limits
# transducer = transducer & t.action['sys'][0]
# env lost ?
# transducer = transducer | ~ env_action_2
t.action['sys'] = [transducer]
n_nodes = len(transducer)
print('Transducer BDD: {n} nodes'.format(n=n_nodes))
log_event(transducer_nodes=n_nodes)
log_bdd(bdd, level=20)
log_bdd(b3, name='b3_', level=20)
log_event(make_transducer_end=True)
# self-check
# check_winning_region(transducer, aut, t, bdd,
# other_bdd, z, 0)
# \A x: \E y: realizability
env_init = aut.init['env'][0]
sys_init = aut.init['sys'][0]
win_set = z
env_init = copy_bdd(env_init, bdd, b3)
sys_init = copy_bdd(sys_init, bdd, b3)
win_set = copy_bdd(win_set, bdd, b3)
r = (sys_init & win_set) | ~ env_init
r = b3.quantify(r, aut.evars, forall=False)
r = b3.quantify(r, aut.uvars, forall=True)
if r == b3.true:
print('realizable for \A x: \E y')
elif r == b3.false:
print('unrealizable for \A x: \E y')
else:
raise Exception('`r` should have been constant')
del r
del selector, env_action_2, transducer
return t
def copy_bdd(u, a, b):
if a is b:
assert not TWO_MANAGERS
return u
else:
return _bdd.copy_bdd(u, a, b)
def log_event(**d):
"""Log `dict` `d` with timestamp."""
t = time.time()
dlog = dict(d)
dlog['time'] = t
log.info('')
log.info(dlog)
date = datetime.datetime.fromtimestamp(t)
s = date.strftime('%Y-%m-%d %H:%M:%S')
log.info(s) # for direct reading by humans
def log_loop(i, j, transducer, x, y, z):
if log.getEffectiveLevel() > logging.DEBUG:
return
if transducer is not None:
transducer_nodes = len(transducer)
else:
transducer_nodes = None
t = time.time()
dlog = dict(
time=t,
goal=j,
excuse=i,
transducer_nodes=transducer_nodes,
x_nodes=len(x),
y_nodes=len(y),
z_nodes=len(z))
log.debug(dlog)
def log_bdd(bdd, name='', level=10):
"""Log statistics for given BDD manager.
    Logging is skipped if the logger's effective
    level is greater than `level`.
"""
if log.getEffectiveLevel() > level:
return
# `psutil` used as in `openpromela.slugsin`
pid = os.getpid()
proc = psutil.Process(pid)
rss, vms = proc.memory_info()
try:
stats = bdd.statistics()
reordering_time = float(stats['reordering_time'])
n_reorderings = int(stats['n_reorderings'])
peak_nodes = int(stats['peak_nodes'])
    except AttributeError:
        # using `autoref`
        reordering_time = None
        n_reorderings = None
        peak_nodes = None
t = time.time()
dlog = {
'time': t,
'rss': humanize.naturalsize(rss),
'vms': humanize.naturalsize(vms),
name + 'reordering_time': reordering_time,
name + 'n_reorderings': n_reorderings,
name + 'total_nodes': len(bdd),
name + 'peak_nodes': peak_nodes}
log.info(dlog)
def check_winning_region(transducer, aut, t, bdd,
other_bdd, z, j):
u = transducer
u = symbolic.cofactor(transducer, COUNTER, j,
other_bdd, t.vars)
u = other_bdd.quantify(u, [SELECTOR], forall=False)
u = other_bdd.quantify(u, t.epvars, forall=False)
u = other_bdd.quantify(u, t.upvars, forall=True)
z_ = _bdd.copy_bdd(z, bdd, other_bdd)
print('u == z', u == z_)
def old_cox(x, env_action, sys_action, aut):
bdd = aut.bdd
x = x & sys_action
x = bdd.quantify(x, aut.epvars, forall=False)
x = x | ~ env_action
x = bdd.quantify(x, aut.upvars, forall=True)
return x
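# Added sketch (not part of the solver): the controllable-predecessor
# quantification pattern of `old_cox` on a toy 1-bit game, using the
# pure-Python `dd.autoref` backend, assumed here to expose the same
# `add_var`/`var`/`quantify` interface as `dd.cudd` used above.
# All variables and actions below are made up for illustration only.
def _demo_cox():
    from dd import autoref
    b = autoref.BDD()
    for name in ('x', 'y', 'xp', 'yp'):
        b.add_var(name)
    # primed target states and a toy system action that flips the output bit
    target = b.var('xp') & b.var('yp')
    sys_action = (b.var('yp') & ~ b.var('y')) | (~ b.var('yp') & b.var('y'))
    env_action = b.true  # unconstrained environment
    u = target & sys_action
    u = b.quantify(u, ['yp'], forall=False)  # exists a system move
    u = u | ~ env_action
    u = b.quantify(u, ['xp'], forall=True)   # for all environment moves
    return u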
def recurse_binary(f, x, bdds):
"""Recursively traverse binary tree of computation."""
log.debug('++ recurse binary')
n = len(x)
log.debug('{n} items left to recurse'.format(n=n))
assert n > 0
if n == 1:
assert len(x) == 1, x
assert len(bdds) == 1, bdds
return x.pop(), bdds.pop()
k = int(math.floor(math.log(n, 2)))
m = 2**k
if m == n:
m = int(n / 2.0)
left = x[:m]
right = x[m:]
del x[:]
a, bdd_a = recurse_binary(f, left, bdds[:m])
b, bdd_b = recurse_binary(f, right, bdds[m:])
new_bdd = _bdd.BDD()
_bdd.copy_vars(bdds[0], new_bdd)
cpa = _bdd.copy_bdd(a, bdd_a, new_bdd)
cpb = _bdd.copy_bdd(b, bdd_b, new_bdd)
# logger.info(bdds)
log.debug(
'-- done recurse binary ({n} items)'.format(n=n))
return f(cpa, cpb), new_bdd
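# Added sketch (not part of the solver): the same halving recursion as
# `recurse_binary`, shown on plain integers so the divide-and-conquer
# shape is visible without BDD managers.
def _demo_recurse_binary():
    def combine(f, items):
        n = len(items)
        if n == 1:
            return items[0]
        m = n // 2
        return f(combine(f, items[:m]), combine(f, items[m:]))
    assert combine(lambda a, b: a + b, [1, 2, 3, 4, 5]) == 15
    return combine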
def make_strategy(store, all_new, j, goal, aut):
log.info('++ Make strategy for goal: {j}'.format(j=j))
bdd = aut.bdd
log.info(bdd)
covered = bdd.false
transducer = bdd.false
while store:
log.info('covering...')
assert all_new
paths = store.pop(0)
new = all_new.pop(0)
rim = new & ~ covered
covered = covered | new
del new
rim = rim & paths
del paths
transducer = transducer | rim
del rim
assert not store, store
assert not all_new, all_new
counter = aut.add_expr('{c} = {j}'.format(c=COUNTER, j=j))
selector = aut.add_expr(SELECTOR)
transducer = transducer & counter & (goal | ~ selector)
log.info(
'-- done making strategy for goal: {j}'.format(j=j))
return transducer
def _and_exists(u, v, qvars, bdd):
try:
return _bdd.and_exists(u, v, qvars, bdd)
except:
r = u & v
return bdd.quantify(r, qvars, forall=False)
def _or_forall(u, v, qvars, bdd):
try:
return _bdd.or_forall(u, v, qvars, bdd)
except:
r = u | v
return bdd.quantify(r, qvars, forall=True)
def disj(x, y):
return x | y
def conj(x, y):
return x & y
def memoize_iterates(z, aut):
"""Compute winning set, while storing iterates."""
pass
def load_order_history(fname):
with open(fname, 'r') as f:
s = f.read()
t = dict()
for line in s.splitlines():
d = eval(line)
for k, v in d.iteritems():
if k not in t:
t[k] = list()
t[k].append(v)
return t
def log_var_order(bdd):
reordering_log = logging.getLogger(REORDERING_LOG)
s = var_order(bdd)
reordering_log.debug(repr(s))
def var_order(bdd):
"""Return `dict` that maps each variable to a level.
@rtype: `dict(str: int)`
"""
return {var: bdd.level_of_var(var) for var in bdd.vars}
def main():
fname = 'reordering_slugs_31.txt'
other_fname = 'reordering_slugs_31_old.txt'
p = argparse.ArgumentParser()
p.add_argument('--file', type=str,
help='slugsin input file')
p.add_argument('--plot-order', action='store_true',
                   help='plot reordering of variables from log')
args = p.parse_args()
if args.plot_order:
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
t = load_order_history(fname)
other_t = load_order_history(other_fname)
plt.hold('on')
for i, k in enumerate(natsort.natsorted(t)):
v = t[k]
w = other_t.get(k)
if w is None:
print('Missing var "{var}"'.format(var=k))
continue
m = min(len(v), len(w))
# ax.plot(range(len(v)), v, i)
ax.plot(v[:m], w[:m], i)
plt.savefig('reordering.pdf')
plt.show()
return
input_fname = args.file
command_line_wrapper(input_fname)
def command_line_wrapper(args=None):
"""Solve game defined in `slugsin` file `fname`."""
p = argparse.ArgumentParser()
p.add_argument('file', type=str,
help='`slugsin` input file')
p.add_argument('--load_win_set', action='store_true',
help='load winning set BDD from given file')
p.add_argument('--win_set', type=str,
help='dump winning set BDD to this file')
p.add_argument('--strategy', type=str,
help='dump strategy BDD to this file')
p.add_argument('--debug', default=30, type=int,
help='logging level')
p.add_argument('--max_memory', default=MAX_MEMORY_GB, type=int,
help='(hard) upper bound on memory, in GB')
p.add_argument('--cpu', default=0, type=int,
help='attach self to this logical CPU id')
p.add_argument('--only_win', action='store_true',
help='only compute winning states')
args = p.parse_args(args=args)
# pin to CPU
affinity = [args.cpu]
proc = psutil.Process()
proc.cpu_affinity(affinity)
# logging
level = args.debug
log.setLevel(level=level)
h = logging.StreamHandler()
log.addHandler(h)
# input
win_set_fname = args.win_set
strategy_fname = args.strategy
fname = args.file
max_memory = args.max_memory * GB
only_win = args.only_win
with open(fname, 'r') as f:
slugsin = f.read()
solve_game(
slugsin,
win_set_fname=win_set_fname,
strategy_fname=strategy_fname,
max_memory=max_memory,
only_win=only_win)
def test_indices_and_levels():
bdd = _bdd.BDD()
ja = bdd.add_var('a', index=3)
jb = bdd.add_var('b', index=10)
jc = bdd.add_var('c', index=0)
print(ja, jb, jc)
print('a level', bdd.level_of_var('a'))
print('b level', bdd.level_of_var('b'))
print('c level', bdd.level_of_var('c'))
u = bdd.var('a') & bdd.var('b')
print str(u)
print bdd.var_at_level(10)
| bsd-3-clause |
eatbyte/Swift | swift/common/middleware/xprofile.py | 36 | 9905 | # Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Profiling middleware for Swift Servers.
The current implementation is based on an eventlet-aware profiler. (For the
future, more profilers could be added to collect more data for analysis.)
It profiles all incoming requests and accumulates cpu timing statistics
information for performance tuning and optimization. A mini web UI is also
provided for profiling data analysis. It can be accessed from the URL as
below.
Index page for browse profile data::
http://SERVER_IP:PORT/__profile__
List all profiles to return profile ids in json format::
http://SERVER_IP:PORT/__profile__/
http://SERVER_IP:PORT/__profile__/all
Retrieve specific profile data in different formats::
http://SERVER_IP:PORT/__profile__/PROFILE_ID?format=[default|json|csv|ods]
http://SERVER_IP:PORT/__profile__/current?format=[default|json|csv|ods]
http://SERVER_IP:PORT/__profile__/all?format=[default|json|csv|ods]
Retrieve metrics from specific function in json format::
http://SERVER_IP:PORT/__profile__/PROFILE_ID/NFL?format=json
http://SERVER_IP:PORT/__profile__/current/NFL?format=json
http://SERVER_IP:PORT/__profile__/all/NFL?format=json
NFL is defined by concatenation of file name, function name and the first
line number.
e.g.::
account.py:50(GETorHEAD)
or with full path:
opt/stack/swift/swift/proxy/controllers/account.py:50(GETorHEAD)
A list of URL examples:
http://localhost:8080/__profile__ (proxy server)
http://localhost:6000/__profile__/all (object server)
http://localhost:6001/__profile__/current (container server)
http://localhost:6002/__profile__/12345?format=json (account server)
The profiling middleware can be configured in the paste file for WSGI servers
such as proxy, account, container and object servers. Please refer to the
sample configuration files in the etc directory.
The profiling data is provided in four formats: binary (by default),
json, csv and ods spreadsheet; the last requires installing the odfpy library.
sudo pip install odfpy
There's also a simple visualization capability, which is enabled by using the
matplotlib toolkit. It also needs to be installed if you want to use it to
visualize the statistics data.
sudo apt-get install python-matplotlib
"""
import os
import sys
import time
from eventlet import greenthread, GreenPool, patcher
import eventlet.green.profile as eprofile
from swift import gettext_ as _
from swift.common.utils import get_logger, config_true_value
from swift.common.swob import Request
from x_profile.exceptions import NotFoundException, MethodNotAllowed,\
ProfileException
from x_profile.html_viewer import HTMLViewer
from x_profile.profile_model import ProfileLog
# True if we are running on Python 3.
PY3 = sys.version_info[0] == 3
if PY3: # pragma: no cover
text_type = str
else:
text_type = unicode
def bytes_(s, encoding='utf-8', errors='strict'):
if isinstance(s, text_type): # pragma: no cover
return s.encode(encoding, errors)
return s
try:
from urllib.parse import parse_qs
except ImportError:
try:
from urlparse import parse_qs
except ImportError: # pragma: no cover
from cgi import parse_qs
DEFAULT_PROFILE_PREFIX = '/tmp/log/swift/profile/default.profile'
# unwind the iterator; it may call start_response, do lots of work, etc
PROFILE_EXEC_EAGER = """
app_iter = self.app(environ, start_response)
app_iter_ = list(app_iter)
if hasattr(app_iter, 'close'):
app_iter.close()
"""
# don't unwind the iterator (don't consume resources)
PROFILE_EXEC_LAZY = """
app_iter_ = self.app(environ, start_response)
"""
thread = patcher.original('thread') # non-monkeypatched module needed
# This monkey patch code fixes the problem that the eventlet profile tool
# cannot accumulate profiling results across multiple calls
# of runcall and runctx.
def new_setup(self):
self._has_setup = True
self.cur = None
self.timings = {}
self.current_tasklet = greenthread.getcurrent()
self.thread_id = thread.get_ident()
self.simulate_call("profiler")
def new_runctx(self, cmd, globals, locals):
if not getattr(self, '_has_setup', False):
self._setup()
try:
return self.base.runctx(self, cmd, globals, locals)
finally:
self.TallyTimings()
def new_runcall(self, func, *args, **kw):
if not getattr(self, '_has_setup', False):
self._setup()
try:
return self.base.runcall(self, func, *args, **kw)
finally:
self.TallyTimings()
class ProfileMiddleware(object):
def __init__(self, app, conf):
self.app = app
self.logger = get_logger(conf, log_route='profile')
self.log_filename_prefix = conf.get('log_filename_prefix',
DEFAULT_PROFILE_PREFIX)
dirname = os.path.dirname(self.log_filename_prefix)
        # Note: creating this directory may fail with a permission error;
        # it is better to create it and grant access to the current
        # user in advance.
if not os.path.exists(dirname):
os.makedirs(dirname)
self.dump_interval = float(conf.get('dump_interval', 5.0))
self.dump_timestamp = config_true_value(conf.get(
'dump_timestamp', 'no'))
self.flush_at_shutdown = config_true_value(conf.get(
'flush_at_shutdown', 'no'))
self.path = conf.get('path', '__profile__').replace('/', '')
self.unwind = config_true_value(conf.get('unwind', 'no'))
self.profile_module = conf.get('profile_module',
'eventlet.green.profile')
self.profiler = get_profiler(self.profile_module)
self.profile_log = ProfileLog(self.log_filename_prefix,
self.dump_timestamp)
self.viewer = HTMLViewer(self.path, self.profile_module,
self.profile_log)
self.dump_pool = GreenPool(1000)
self.last_dump_at = None
def __del__(self):
if self.flush_at_shutdown:
self.profile_log.clear(str(os.getpid()))
def _combine_body_qs(self, request):
wsgi_input = request.environ['wsgi.input']
query_dict = request.params
qs_in_body = wsgi_input.read()
query_dict.update(parse_qs(qs_in_body, keep_blank_values=True,
strict_parsing=False))
return query_dict
def dump_checkpoint(self):
current_time = time.time()
if self.last_dump_at is None or self.last_dump_at +\
self.dump_interval < current_time:
self.dump_pool.spawn_n(self.profile_log.dump_profile,
self.profiler, os.getpid())
self.last_dump_at = current_time
def __call__(self, environ, start_response):
request = Request(environ)
path_entry = request.path_info.split('/')
# hijack favicon request sent by browser so that it doesn't
# invoke profiling hook and contaminate the data.
if path_entry[1] == 'favicon.ico':
start_response('200 OK', [])
return ''
elif path_entry[1] == self.path:
try:
self.dump_checkpoint()
query_dict = self._combine_body_qs(request)
content, headers = self.viewer.render(request.url,
request.method,
path_entry,
query_dict,
self.renew_profile)
start_response('200 OK', headers)
return [bytes_(content)]
except MethodNotAllowed as mx:
start_response('405 Method Not Allowed', [])
return '%s' % mx
except NotFoundException as nx:
start_response('404 Not Found', [])
return '%s' % nx
except ProfileException as pf:
start_response('500 Internal Server Error', [])
return '%s' % pf
except Exception as ex:
start_response('500 Internal Server Error', [])
return _('Error on render profiling results: %s') % ex
else:
_locals = locals()
code = self.unwind and PROFILE_EXEC_EAGER or\
PROFILE_EXEC_LAZY
self.profiler.runctx(code, globals(), _locals)
app_iter = _locals['app_iter_']
self.dump_checkpoint()
return app_iter
def renew_profile(self):
self.profiler = get_profiler(self.profile_module)
def get_profiler(profile_module):
if profile_module == 'eventlet.green.profile':
eprofile.Profile._setup = new_setup
eprofile.Profile.runctx = new_runctx
eprofile.Profile.runcall = new_runcall
# hacked method to import profile module supported in python 2.6
__import__(profile_module)
return sys.modules[profile_module].Profile()
def filter_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
def profile_filter(app):
return ProfileMiddleware(app, conf)
return profile_filter
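# Added usage sketch (not part of Swift): wiring the middleware around a
# trivial WSGI app by hand. The config values below are arbitrary examples;
# in a real deployment the filter is configured through the paste pipeline.
if __name__ == '__main__':
    def _demo_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'hello']

    _demo_conf = {'log_filename_prefix': '/tmp/log/swift/profile/demo.profile',
                  'dump_interval': '2.0',
                  'path': '__profile__',
                  'unwind': 'no'}
    # equivalent to what the paste pipeline does with the filter section
    _profiled_app = filter_factory({}, **_demo_conf)(_demo_app)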
| apache-2.0 |
roxyboy/scikit-learn | examples/ensemble/plot_forest_importances.py | 241 | 1761 | """
=========================================
Feature importances with forests of trees
=========================================
This example shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-trees variability.
As expected, the plot suggests that 3 features are informative, while the
remaining are not.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
n_classes=2,
random_state=0,
shuffle=False)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(10):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(10), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(10), indices)
plt.xlim([-1, 10])
plt.show()
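# Added illustration (not in the original example): the ranking above can be
# used to keep only the most informative columns; "3" simply matches the
# number of informative features used when generating the data.
X_top = X[:, indices[:3]]
print("Reduced data shape: %s" % (X_top.shape,))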
| bsd-3-clause |
cactusbin/nyt | matplotlib/lib/matplotlib/type1font.py | 4 | 12042 | """
This module contains a class representing a Type 1 font.
This version reads pfa and pfb files and splits them for embedding in
pdf files. It also supports SlantFont and ExtendFont transformations,
similarly to pdfTeX and friends. There is no support yet for
subsetting.
Usage::
>>> font = Type1Font(filename)
>>> clear_part, encrypted_part, finale = font.parts
>>> slanted_font = font.transform({'slant': 0.167})
>>> extended_font = font.transform({'extend': 1.2})
Sources:
* Adobe Technical Note #5040, Supporting Downloadable PostScript
Language Fonts.
* Adobe Type 1 Font Format, Adobe Systems Incorporated, third printing,
v1.1, 1993. ISBN 0-201-57044-0.
"""
from __future__ import print_function
import matplotlib.cbook as cbook
import io
import itertools
import numpy as np
import re
import struct
import sys
if sys.version_info[0] >= 3:
def ord(x):
return x
class Type1Font(object):
"""
A class representing a Type-1 font, for use by backends.
.. attribute:: parts
A 3-tuple of the cleartext part, the encrypted part, and the
finale of zeros.
.. attribute:: prop
A dictionary of font properties.
"""
__slots__ = ('parts', 'prop')
def __init__(self, input):
"""
Initialize a Type-1 font. *input* can be either the file name of
a pfb file or a 3-tuple of already-decoded Type-1 font parts.
"""
if isinstance(input, tuple) and len(input) == 3:
self.parts = input
else:
with open(input, 'rb') as file:
data = self._read(file)
self.parts = self._split(data)
self._parse()
def _read(self, file):
"""
Read the font from a file, decoding into usable parts.
"""
rawdata = file.read()
if not rawdata.startswith(b'\x80'):
return rawdata
data = b''
while len(rawdata) > 0:
if not rawdata.startswith(b'\x80'):
raise RuntimeError('Broken pfb file (expected byte 128, '
'got %d)' % ord(rawdata[0]))
type = ord(rawdata[1])
if type in (1, 2):
length, = struct.unpack('<i', rawdata[2:6])
segment = rawdata[6:6 + length]
rawdata = rawdata[6 + length:]
if type == 1: # ASCII text: include verbatim
data += segment
elif type == 2: # binary data: encode in hexadecimal
data += b''.join([('%02x' % ord(char)).encode('ascii')
for char in segment])
elif type == 3: # end of file
break
else:
raise RuntimeError('Unknown segment type %d in pfb file' %
type)
return data
def _split(self, data):
"""
Split the Type 1 font into its three main parts.
        The three parts are: (1) the cleartext part, which ends in an
eexec operator; (2) the encrypted part; (3) the fixed part,
which contains 512 ASCII zeros possibly divided on various
lines, a cleartomark operator, and possibly something else.
"""
# Cleartext part: just find the eexec and skip whitespace
idx = data.index(b'eexec')
idx += len(b'eexec')
while data[idx] in b' \t\r\n':
idx += 1
len1 = idx
# Encrypted part: find the cleartomark operator and count
# zeros backward
idx = data.rindex(b'cleartomark') - 1
zeros = 512
while zeros and ord(data[idx]) in (
ord(b'0'[0]), ord(b'\n'[0]), ord(b'\r'[0])):
if ord(data[idx]) == ord(b'0'[0]):
zeros -= 1
idx -= 1
if zeros:
raise RuntimeError('Insufficiently many zeros in Type 1 font')
# Convert encrypted part to binary (if we read a pfb file, we
# may end up converting binary to hexadecimal to binary again;
# but if we read a pfa file, this part is already in hex, and
# I am not quite sure if even the pfb format guarantees that
# it will be in binary).
binary = b''.join([unichr(int(data[i:i + 2], 16)).encode('latin-1')
for i in range(len1, idx, 2)])
return data[:len1], binary, data[idx:]
_whitespace = re.compile(br'[\0\t\r\014\n ]+')
_token = re.compile(br'/{0,2}[^]\0\t\r\v\n ()<>{}/%[]+')
_comment = re.compile(br'%[^\r\n\v]*')
_instring = re.compile(br'[()\\]')
@classmethod
def _tokens(cls, text):
"""
A PostScript tokenizer. Yield (token, value) pairs such as
('whitespace', ' ') or ('name', '/Foobar').
"""
pos = 0
while pos < len(text):
match = (cls._comment.match(text[pos:]) or
cls._whitespace.match(text[pos:]))
if match:
yield ('whitespace', match.group())
pos += match.end()
elif text[pos] == '(':
start = pos
pos += 1
depth = 1
while depth:
match = cls._instring.search(text[pos:])
if match is None:
return
pos += match.end()
if match.group() == '(':
depth += 1
elif match.group() == ')':
depth -= 1
else: # a backslash - skip the next character
pos += 1
yield ('string', text[start:pos])
elif text[pos:pos + 2] in ('<<', '>>'):
yield ('delimiter', text[pos:pos + 2])
pos += 2
elif text[pos] == '<':
start = pos
pos += text[pos:].index('>')
yield ('string', text[start:pos])
else:
match = cls._token.match(text[pos:])
if match:
try:
float(match.group())
yield ('number', match.group())
except ValueError:
yield ('name', match.group())
pos += match.end()
else:
yield ('delimiter', text[pos])
pos += 1
def _parse(self):
"""
Find the values of various font properties. This limited kind
of parsing is described in Chapter 10 "Adobe Type Manager
Compatibility" of the Type-1 spec.
"""
# Start with reasonable defaults
prop = {'weight': 'Regular', 'ItalicAngle': 0.0, 'isFixedPitch': False,
'UnderlinePosition': -100, 'UnderlineThickness': 50}
tokenizer = self._tokens(self.parts[0])
filtered = itertools.ifilter(lambda x: x[0] != 'whitespace', tokenizer)
for token, value in filtered:
if token == b'name' and value.startswith(b'/'):
key = value[1:]
token, value = next(filtered)
if token == b'name':
if value in (b'true', b'false'):
value = value == b'true'
else:
value = value.lstrip(b'/')
elif token == b'string':
value = value.lstrip(b'(').rstrip(b')')
elif token == b'number':
if b'.' in value:
value = float(value)
else:
value = int(value)
else: # more complicated value such as an array
value = None
if key != b'FontInfo' and value is not None:
prop[key] = value
# Fill in the various *Name properties
if 'FontName' not in prop:
prop['FontName'] = (prop.get('FullName') or
prop.get('FamilyName') or
'Unknown')
if 'FullName' not in prop:
prop['FullName'] = prop['FontName']
if 'FamilyName' not in prop:
extras = r'(?i)([ -](regular|plain|italic|oblique|(semi)?bold|(ultra)?light|extra|condensed))+$'
prop['FamilyName'] = re.sub(extras, '', prop['FullName'])
self.prop = prop
@classmethod
def _transformer(cls, tokens, slant, extend):
def fontname(name):
result = name
if slant:
result += '_Slant_' + str(int(1000 * slant))
if extend != 1.0:
result += '_Extend_' + str(int(1000 * extend))
return result
def italicangle(angle):
return str(float(angle) - np.arctan(slant) / np.pi * 180)
def fontmatrix(array):
array = array.lstrip('[').rstrip(']').strip().split()
array = [float(x) for x in array]
oldmatrix = np.eye(3, 3)
oldmatrix[0:3, 0] = array[::2]
oldmatrix[0:3, 1] = array[1::2]
modifier = np.array([[extend, 0, 0],
[slant, 1, 0],
[0, 0, 1]])
newmatrix = np.dot(modifier, oldmatrix)
array[::2] = newmatrix[0:3, 0]
array[1::2] = newmatrix[0:3, 1]
return '[' + ' '.join(str(x) for x in array) + ']'
def replace(fun):
def replacer(tokens):
token, value = next(tokens) # name, e.g., /FontMatrix
yield value
token, value = next(tokens) # possible whitespace
while token == 'whitespace':
yield value
token, value = next(tokens)
if value != '[': # name/number/etc.
yield fun(value)
else: # array, e.g., [1 2 3]
array = []
while value != ']':
array += value
token, value = next(tokens)
array += value
yield fun(''.join(array))
return replacer
def suppress(tokens):
for x in itertools.takewhile(lambda x: x[1] != 'def', tokens):
pass
yield ''
table = {'/FontName': replace(fontname),
'/ItalicAngle': replace(italicangle),
'/FontMatrix': replace(fontmatrix),
'/UniqueID': suppress}
while True:
token, value = next(tokens)
if token == 'name' and value in table:
for value in table[value](itertools.chain([(token, value)],
tokens)):
yield value
else:
yield value
def transform(self, effects):
"""
Transform the font by slanting or extending. *effects* should
be a dict where ``effects['slant']`` is the tangent of the
angle that the font is to be slanted to the right (so negative
values slant to the left) and ``effects['extend']`` is the
multiplier by which the font is to be extended (so values less
than 1.0 condense). Returns a new :class:`Type1Font` object.
"""
buffer = io.BytesIO()
try:
tokenizer = self._tokens(self.parts[0])
for value in self._transformer(tokenizer,
slant=effects.get('slant', 0.0),
extend=effects.get('extend', 1.0)):
if sys.version_info[0] >= 3 and isinstance(value, int):
value = chr(value).encode('latin-1')
buffer.write(value)
result = buffer.getvalue()
finally:
buffer.close()
return Type1Font((result, self.parts[1], self.parts[2]))
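# Added usage sketch (not part of matplotlib): exercising the public API on a
# font file supplied on the command line, mirroring the module docstring
# above. The transform values are arbitrary.
if __name__ == '__main__':
    if len(sys.argv) > 1:
        font = Type1Font(sys.argv[1])  # path of a pfa or pfb file
        print(font.prop['FontName'], font.prop['ItalicAngle'])
        slanted_font = font.transform({'slant': 0.167})
        extended_font = font.transform({'extend': 1.2})
        print(slanted_font.prop['FontName'], extended_font.prop['FontName'])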
| unlicense |
bthirion/nipy | examples/labs/example_glm.py | 4 | 3262 | #!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from __future__ import print_function # Python 2/3 compatibility
__doc__ = """
This is an example where:
1. A sequence of fMRI volumes is simulated
2. A design matrix describing all the effects related to the data is computed
3. A GLM is applied to all voxels
4. A contrast image is created
Requires matplotlib
Author : Bertrand Thirion, 2010
"""
print(__doc__)
import os
import os.path as op
import numpy as np
try:
import matplotlib.pyplot as plt
except ImportError:
raise RuntimeError("This script needs the matplotlib library")
from nibabel import save, Nifti1Image
import nipy.modalities.fmri.design_matrix as dm
from nipy.labs.utils.simul_multisubject_fmri_dataset import \
surrogate_4d_dataset
from nipy.modalities.fmri.glm import GeneralLinearModel
from nipy.modalities.fmri.experimental_paradigm import EventRelatedParadigm
#######################################
# Simulation parameters
#######################################
# volume mask
shape = (20, 20, 20)
affine = np.eye(4)
# Acquisition parameters: number of scans (n_scans) and volume repetition time
# value in seconds
n_scans = 128
tr = 2.4
# input paradigm information
frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
# conditions are 0 1 0 1 0 1 ...
conditions = np.arange(20) % 2
# 20 onsets (in sec), first event 10 sec after the start of the first scan
onsets = np.linspace(5, (n_scans - 1) * tr - 10, 20)
# model with canonical HRF (could also be :
# 'canonical with derivative' or 'fir'
hrf_model = 'canonical'
# fake motion parameters to be included in the model
motion = np.cumsum(np.random.randn(n_scans, 6), 0)
add_reg_names = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz']
########################################
# Design matrix
########################################
paradigm = EventRelatedParadigm(conditions, onsets)
X, names = dm.dmtx_light(frametimes, paradigm, drift_model='cosine',
hfcut=128, hrf_model=hrf_model, add_regs=motion,
add_reg_names=add_reg_names)
#######################################
# Get the FMRI data
#######################################
fmri_data = surrogate_4d_dataset(shape=shape, n_scans=n_scans)[0]
# if you want to save it as an image
data_file = 'fmri_data.nii'
save(fmri_data, data_file)
########################################
# Perform a GLM analysis
########################################
# GLM fit
Y = fmri_data.get_data().reshape(np.prod(shape), n_scans)
glm = GeneralLinearModel(X)
glm.fit(Y.T)
# specify the contrast [1 -1 0 ..]
contrast = np.zeros(X.shape[1])
contrast[0] = 1
contrast[1] = - 1
# compute the contrast image related to it
zvals = glm.contrast(contrast).z_score()
contrast_image = Nifti1Image(np.reshape(zvals, shape), affine)
# if you want to save the contrast as an image
contrast_path = 'zmap.nii'
save(contrast_image, contrast_path)
print('Wrote some of the results as images in directory %s' %
op.abspath(os.getcwd()))
h, c = np.histogram(zvals, 100)
# Show the histogram
plt.figure()
plt.bar(c[: - 1], h, width=.1)
plt.title(' Histogram of the z-values')
plt.show()
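# Added illustration (not in the original example): count voxels above an
# uncorrected threshold; z = 3.09 roughly corresponds to p < 0.001, one-sided.
z_threshold = 3.09
n_above = np.sum(zvals > z_threshold)
print('%d voxels have z > %.2f' % (n_above, z_threshold))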
| bsd-3-clause |
fbagirov/scikit-learn | sklearn/covariance/robust_covariance.py | 198 | 29735 | """
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_random_state, check_array
# Minimum Covariance Determinant
# Implementation of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
verbose=False, cov_computation_method=empirical_covariance,
random_state=None):
"""C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data set in which we look for the n_support observations whose
scatter matrix has minimum determinant.
n_support : int, > n_samples / 2
Number of observations to compute the robust estimates of location
and covariance from.
remaining_iterations : int, optional
Number of iterations to perform.
According to [Rouseeuw1999]_, two iterations are sufficient to get
close to the minimum, and we never need more than 30 to reach
convergence.
initial_estimates : 2-tuple, optional
Initial estimates of location and shape from which to run the c_step
procedure:
- initial_estimates[0]: an initial location estimate
- initial_estimates[1]: an initial covariance estimate
verbose : boolean, optional
Verbose mode.
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Returns
-------
location : array-like, shape (n_features,)
Robust location estimates.
covariance : array-like, shape (n_features, n_features)
Robust covariance estimates.
    det : float
        The log of the determinant of the robust covariance estimate
        (as returned by `fast_logdet`).
    support : array-like, shape (n_samples,)
        A mask for the `n_support` observations whose scatter matrix has
        minimum determinant.
    dist : array-like, shape (n_samples,)
        The squared Mahalanobis distances of all observations from
        `location`, computed with the robust precision matrix.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
X = np.asarray(X)
random_state = check_random_state(random_state)
return _c_step(X, n_support, remaining_iterations=remaining_iterations,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state)
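# Added usage sketch (not part of scikit-learn): a single `c_step` run on a
# toy Gaussian sample. The sample size and `n_support` below are arbitrary
# choices made only for illustration.
def _demo_c_step():
    rng = np.random.RandomState(42)
    X_demo = rng.randn(100, 2)
    # c_step returns (location, covariance, log-determinant, support, dist)
    location, covariance, det, support, dist = c_step(
        X_demo, n_support=75, random_state=rng)
    assert support.sum() == 75
    return location, covariance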
def _c_step(X, n_support, random_state, remaining_iterations=30,
initial_estimates=None, verbose=False,
cov_computation_method=empirical_covariance):
n_samples, n_features = X.shape
# Initialisation
support = np.zeros(n_samples, dtype=bool)
if initial_estimates is None:
# compute initial robust estimates from a random subset
support[random_state.permutation(n_samples)[:n_support]] = True
else:
# get initial robust estimates from the function parameters
location = initial_estimates[0]
covariance = initial_estimates[1]
# run a special iteration for that case (to get an initial support)
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(1)
# compute new estimates
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(0)
covariance = cov_computation_method(X_support)
# Iterative procedure for Minimum Covariance Determinant computation
det = fast_logdet(covariance)
previous_det = np.inf
while (det < previous_det) and (remaining_iterations > 0):
# save old estimates values
previous_location = location
previous_covariance = covariance
previous_det = det
previous_support = support
# compute a new support from the full data set mahalanobis distances
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
# compute new estimates
support = np.zeros(n_samples, dtype=bool)
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(axis=0)
covariance = cov_computation_method(X_support)
det = fast_logdet(covariance)
# update remaining iterations for early stopping
remaining_iterations -= 1
previous_dist = dist
dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
# Catch computation errors
if np.isinf(det):
raise ValueError(
"Singular covariance matrix. "
"Please check that the covariance matrix corresponding "
"to the dataset is full rank and that MinCovDet is used with "
"Gaussian-distributed data (or at least data drawn from a "
"unimodal, symmetric distribution.")
# Check convergence
if np.allclose(det, previous_det):
# c_step procedure converged
if verbose:
print("Optimal couple (location, covariance) found before"
" ending iterations (%d left)" % (remaining_iterations))
results = location, covariance, det, support, dist
elif det > previous_det:
# determinant has increased (should not happen)
warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
% (det, previous_det), RuntimeWarning)
results = previous_location, previous_covariance, \
previous_det, previous_support, previous_dist
# Check early stopping
if remaining_iterations == 0:
if verbose:
print('Maximum number of iterations reached')
results = location, covariance, det, support, dist
return results
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
verbose=False,
cov_computation_method=empirical_covariance,
random_state=None):
"""Finds the best pure subset of observations to compute MCD from it.
The purpose of this function is to find the best sets of n_support
observations with respect to a minimization of their covariance
matrix determinant. Equivalently, it removes n_samples-n_support
observations to construct what we call a pure data set (i.e. not
containing outliers). The list of the observations of the pure
data set is referred to as the `support`.
Starting from a random support, the pure data set is found by the
c_step procedure introduced by Rousseeuw and Van Driessen in
[Rouseeuw1999]_.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data (sub)set in which we look for the n_support purest observations.
n_support : int, [(n + p + 1)/2] < n_support < n
The number of samples the pure data set must contain.
select : int, int > 0
        Number of best candidate results to return.
n_trials : int, nb_trials > 0 or 2-tuple
Number of different initial sets of observations from which to
run the algorithm.
Instead of giving a number of trials to perform, one can provide a
list of initial estimates that will be used to iteratively run
c_step procedures. In this case:
- n_trials[0]: array-like, shape (n_trials, n_features)
is the list of `n_trials` initial location estimates
- n_trials[1]: array-like, shape (n_trials, n_features, n_features)
is the list of `n_trials` initial covariances estimates
n_iter : int, nb_iter > 0
Maximum number of iterations for the c_step procedure.
(2 is enough to be close to the final solution. "Never" exceeds 20).
random_state : integer or numpy.RandomState, default None
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
verbose : boolean, default False
Control the output verbosity.
See Also
---------
c_step
Returns
-------
best_locations : array-like, shape (select, n_features)
The `select` location estimates computed from the `select` best
supports found in the data set (`X`).
best_covariances : array-like, shape (select, n_features, n_features)
The `select` covariance estimates computed from the `select`
best supports found in the data set (`X`).
best_supports : array-like, shape (select, n_samples)
The `select` best supports found in the data set (`X`).
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
random_state = check_random_state(random_state)
n_samples, n_features = X.shape
if isinstance(n_trials, numbers.Integral):
run_from_estimates = False
elif isinstance(n_trials, tuple):
run_from_estimates = True
estimates_list = n_trials
n_trials = estimates_list[0].shape[0]
else:
raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
" integer, got %s (%s)" % (n_trials, type(n_trials)))
# compute `n_trials` location and shape estimates candidates in the subset
all_estimates = []
if not run_from_estimates:
# perform `n_trials` computations from random initial supports
for j in range(n_trials):
all_estimates.append(
_c_step(
X, n_support, remaining_iterations=n_iter, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
else:
# perform computations from every given initial estimates
for j in range(n_trials):
initial_estimates = (estimates_list[0][j], estimates_list[1][j])
all_estimates.append(_c_step(
X, n_support, remaining_iterations=n_iter,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \
zip(*all_estimates)
# find the `n_best` best results among the `n_trials` ones
index_best = np.argsort(all_dets_sub)[:select]
best_locations = np.asarray(all_locs_sub)[index_best]
best_covariances = np.asarray(all_covs_sub)[index_best]
best_supports = np.asarray(all_supports_sub)[index_best]
best_ds = np.asarray(all_ds_sub)[index_best]
return best_locations, best_covariances, best_supports, best_ds
def fast_mcd(X, support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None):
"""Estimates the Minimum Covariance Determinant matrix.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
`[n_sample + n_features + 1] / 2`.
random_state : integer or numpy.RandomState, optional
The generator used to randomly subsample. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Notes
-----
The FastMCD algorithm has been introduced by Rousseuw and Van Driessen
in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
1999, American Statistical Association and the American Society
for Quality, TECHNOMETRICS".
    The principle is to compute robust estimates on random subsets before
    pooling them into larger subsets, and finally into the full data set.
Depending on the size of the initial sample, we have one, two or three
such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [Rouseeuw1999]_,
see the MinCovDet object.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
Returns
-------
location : array-like, shape (n_features,)
Robust location of the data.
covariance : array-like, shape (n_features, n_features)
Robust covariance of the features.
    support : array-like, type boolean, shape (n_samples,)
        A mask of the observations that have been used to compute
        the robust location and covariance estimates of the data set.
    dist : array-like, shape (n_samples,)
        The squared Mahalanobis distances of all observations from
        `location`, computed with `covariance` as the metric.
"""
random_state = check_random_state(random_state)
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
n_samples, n_features = X.shape
# minimum breakdown value
if support_fraction is None:
n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
else:
n_support = int(support_fraction * n_samples)
# 1-dimensional case quick computation
# (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
# Regression and Outlier Detection, John Wiley & Sons, chapter 4)
if n_features == 1:
if n_support < n_samples:
# find the sample shortest halves
X_sorted = np.sort(np.ravel(X))
diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
halves_start = np.where(diff == np.min(diff))[0]
# take the middle points' mean to get the robust location estimate
location = 0.5 * (X_sorted[n_support + halves_start]
+ X_sorted[halves_start]).mean()
support = np.zeros(n_samples, dtype=bool)
X_centered = X - location
support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
covariance = np.asarray([[np.var(X[support])]])
location = np.array([location])
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
else:
support = np.ones(n_samples, dtype=bool)
covariance = np.asarray([[np.var(X)]])
location = np.asarray([np.mean(X)])
X_centered = X - location
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
# Starting FastMCD algorithm for p-dimensional case
if (n_samples > 500) and (n_features > 1):
# 1. Find candidate supports on subsets
# a. split the set in subsets of size ~ 300
n_subsets = n_samples // 300
n_samples_subsets = n_samples // n_subsets
samples_shuffle = random_state.permutation(n_samples)
h_subset = int(np.ceil(n_samples_subsets *
(n_support / float(n_samples))))
# b. perform a total of 500 trials
n_trials_tot = 500
# c. select 10 best (location, covariance) for each subset
n_best_sub = 10
n_trials = max(10, n_trials_tot // n_subsets)
n_best_tot = n_subsets * n_best_sub
all_best_locations = np.zeros((n_best_tot, n_features))
try:
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
except MemoryError:
            # The above is too big. Let's try with something much smaller
# (and less optimal)
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
n_best_tot = 10
n_best_sub = 2
for i in range(n_subsets):
low_bound = i * n_samples_subsets
high_bound = low_bound + n_samples_subsets
current_subset = X[samples_shuffle[low_bound:high_bound]]
best_locations_sub, best_covariances_sub, _, _ = select_candidates(
current_subset, h_subset, n_trials,
select=n_best_sub, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
all_best_locations[subset_slice] = best_locations_sub
all_best_covariances[subset_slice] = best_covariances_sub
# 2. Pool the candidate supports into a merged set
# (possibly the full dataset)
n_samples_merged = min(1500, n_samples)
h_merged = int(np.ceil(n_samples_merged *
(n_support / float(n_samples))))
if n_samples > 1500:
n_best_merged = 10
else:
n_best_merged = 1
# find the best couples (location, covariance) on the merged set
selection = random_state.permutation(n_samples)[:n_samples_merged]
locations_merged, covariances_merged, supports_merged, d = \
select_candidates(
X[selection], h_merged,
n_trials=(all_best_locations, all_best_covariances),
select=n_best_merged,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 3. Finally get the overall best (locations, covariance) couple
if n_samples < 1500:
# directly get the best couple (location, covariance)
location = locations_merged[0]
covariance = covariances_merged[0]
support = np.zeros(n_samples, dtype=bool)
dist = np.zeros(n_samples)
support[selection] = supports_merged[0]
dist[selection] = d[0]
else:
# select the best couple on the full dataset
locations_full, covariances_full, supports_full, d = \
select_candidates(
X, n_support,
n_trials=(locations_merged, covariances_merged),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
elif n_features > 1:
# 1. Find the 10 best couples (location, covariance)
# considering two iterations
n_trials = 30
n_best = 10
locations_best, covariances_best, _, _ = select_candidates(
X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 2. Select the best couple on the full dataset amongst the 10
locations_full, covariances_full, supports_full, d = select_candidates(
X, n_support, n_trials=(locations_best, covariances_best),
select=1, cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
return location, covariance, support, dist
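# Illustrative sketch (not part of the original module): a minimal call to
# fast_mcd, using only the keyword arguments that MinCovDet.fit below relies
# on. The toy data, contamination and support_fraction value are made up.
def _demo_fast_mcd():
    rng = np.random.RandomState(0)
    X_demo = rng.randn(100, 3)
    X_demo[:5] += 10.  # plant a handful of obvious outliers
    location, covariance, support, dist = fast_mcd(
        X_demo, support_fraction=0.75, random_state=0)
    return location, support.sum()  # robust location, size of the support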
class MinCovDet(EmpiricalCovariance):
"""Minimum Covariance Determinant (MCD): robust estimator of covariance.
The Minimum Covariance Determinant covariance estimator is to be applied
on Gaussian-distributed data, but could still be relevant on data
drawn from a unimodal, symmetric distribution. It is not meant to be used
with multi-modal data (the algorithm used to fit a MinCovDet object is
likely to fail in such a case).
One should consider projection pursuit methods to deal with multi-modal
datasets.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored.
assume_centered : Boolean
If True, the support of the robust location and the covariance
estimates is computed, and a covariance estimate is recomputed from
it, without centering the data.
        Useful to work with data whose mean is almost, but not exactly,
        zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
        [n_samples + n_features + 1] / 2
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
raw_location_ : array-like, shape (n_features,)
The raw robust estimated location before correction and re-weighting.
raw_covariance_ : array-like, shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
raw_support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
location_ : array-like, shape (n_features,)
Estimated robust location
covariance_ : array-like, shape (n_features, n_features)
Estimated robust covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the robust estimates of location and shape.
dist_ : array-like, shape (n_samples,)
Mahalanobis distances of the training set (on which `fit` is called)
observations.
References
----------
.. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
J. Am Stat Ass, 79:871, 1984.`
.. [Rouseeuw1999] `A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS`
.. [Butler1993] `R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`
"""
_nonrobust_covariance = staticmethod(empirical_covariance)
def __init__(self, store_precision=True, assume_centered=False,
support_fraction=None, random_state=None):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.support_fraction = support_fraction
self.random_state = random_state
def fit(self, X, y=None):
"""Fits a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : not used, present for API consistence purpose.
Returns
-------
self : object
Returns self.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_samples, n_features = X.shape
# check that the empirical covariance is full rank
if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
warnings.warn("The covariance matrix associated to your dataset "
"is not full rank")
# compute and store raw estimates
raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
X, support_fraction=self.support_fraction,
cov_computation_method=self._nonrobust_covariance,
random_state=random_state)
if self.assume_centered:
raw_location = np.zeros(n_features)
raw_covariance = self._nonrobust_covariance(X[raw_support],
assume_centered=True)
# get precision matrix in an optimized way
precision = pinvh(raw_covariance)
raw_dist = np.sum(np.dot(X, precision) * X, 1)
self.raw_location_ = raw_location
self.raw_covariance_ = raw_covariance
self.raw_support_ = raw_support
self.location_ = raw_location
self.support_ = raw_support
self.dist_ = raw_dist
# obtain consistency at normal models
self.correct_covariance(X)
# re-weight estimator
self.reweight_covariance(X)
return self
def correct_covariance(self, data):
"""Apply a correction to raw Minimum Covariance Determinant estimates.
Correction using the empirical correction factor suggested
        by Rousseeuw and Van Driessen in [Rouseeuw1999]_.
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
covariance_corrected : array-like, shape (n_features, n_features)
Corrected robust covariance estimate.
"""
correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
covariance_corrected = self.raw_covariance_ * correction
self.dist_ /= correction
return covariance_corrected
def reweight_covariance(self, data):
"""Re-weight raw Minimum Covariance Determinant estimates.
Re-weight observations using Rousseeuw's method (equivalent to
deleting outlying observations from the data set before
computing location and covariance estimates). [Rouseeuw1984]_
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
location_reweighted : array-like, shape (n_features, )
Re-weighted robust location estimate.
covariance_reweighted : array-like, shape (n_features, n_features)
Re-weighted robust covariance estimate.
support_reweighted : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the re-weighted robust location and covariance estimates.
"""
n_samples, n_features = data.shape
mask = self.dist_ < chi2(n_features).isf(0.025)
if self.assume_centered:
location_reweighted = np.zeros(n_features)
else:
location_reweighted = data[mask].mean(0)
covariance_reweighted = self._nonrobust_covariance(
data[mask], assume_centered=self.assume_centered)
support_reweighted = np.zeros(n_samples, dtype=bool)
support_reweighted[mask] = True
self._set_covariance(covariance_reweighted)
self.location_ = location_reweighted
self.support_ = support_reweighted
X_centered = data - self.location_
self.dist_ = np.sum(
np.dot(X_centered, self.get_precision()) * X_centered, 1)
return location_reweighted, covariance_reweighted, support_reweighted
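# Illustrative sketch (not part of the original module): fit the estimator on
# a small contaminated sample and read back the public attributes documented
# in the class docstring. The toy data is made up.
def _demo_min_cov_det():
    rng = np.random.RandomState(42)
    X_demo = rng.randn(200, 2)
    X_demo[:10] += 8.  # contaminate a few observations
    mcd = MinCovDet(random_state=42).fit(X_demo)
    # robust location/covariance and the mask of observations kept as inliers
    return mcd.location_, mcd.covariance_, mcd.support_.sum()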
| bsd-3-clause |
kyleam/seaborn | examples/many_facets.py | 26 | 1062 | """
Plotting on a large number of facets
====================================
"""
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="ticks")
# Create a dataset with many short random walks
rs = np.random.RandomState(4)
pos = rs.randint(-1, 2, (20, 5)).cumsum(axis=1)
pos -= pos[:, 0, np.newaxis]
step = np.tile(range(5), 20)
walk = np.repeat(range(20), 5)
df = pd.DataFrame(np.c_[pos.flat, step, walk],
columns=["position", "step", "walk"])
# Initialize a grid of plots with an Axes for each walk
grid = sns.FacetGrid(df, col="walk", hue="walk", col_wrap=5, size=1.5)
# Draw a horizontal line to show the starting point
grid.map(plt.axhline, y=0, ls=":", c=".5")
# Draw a line plot to show the trajectory of each random walk
grid.map(plt.plot, "step", "position", marker="o", ms=4)
# Adjust the tick positions and labels
grid.set(xticks=np.arange(5), yticks=[-3, 3],
xlim=(-.5, 4.5), ylim=(-3.5, 3.5))
# Adjust the arrangement of the plots
grid.fig.tight_layout(w_pad=1)
| bsd-3-clause |
dsquareindia/scikit-learn | sklearn/neighbors/classification.py | 27 | 14358 | """Nearest Neighbor Classification"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from scipy import stats
from ..utils.extmath import weighted_mode
from .base import \
_check_weights, _get_weights, \
NeighborsBase, KNeighborsMixin,\
RadiusNeighborsMixin, SupervisedIntegerMixin
from ..base import ClassifierMixin
from ..utils import check_array
class KNeighborsClassifier(NeighborsBase, KNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing the k-nearest neighbors vote.
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : str or callable, optional (default = 'uniform')
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default = 'minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Doesn't affect :meth:`fit` method.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsClassifier
>>> neigh = KNeighborsClassifier(n_neighbors=3)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsClassifier(...)
>>> print(neigh.predict([[1.1]]))
[0]
>>> print(neigh.predict_proba([[0.9]]))
[[ 0.66666667 0.33333333]]
See also
--------
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
neighbors, neighbor `k+1` and `k`, have identical distances
but different labels, the results will depend on the ordering of the
training data.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=1,
**kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
if weights is None:
mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
else:
mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1)
mode = np.asarray(mode.ravel(), dtype=np.intp)
y_pred[:, k] = classes_k.take(mode)
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
def predict_proba(self, X):
"""Return probability estimates for the test data X.
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
of such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by lexicographic order.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
if weights is None:
weights = np.ones_like(neigh_ind)
all_rows = np.arange(X.shape[0])
probabilities = []
for k, classes_k in enumerate(classes_):
pred_labels = _y[:, k][neigh_ind]
proba_k = np.zeros((n_samples, classes_k.size))
# a simple ':' index doesn't work right
for i, idx in enumerate(pred_labels.T): # loop is O(n_neighbors)
proba_k[all_rows, idx] += weights[:, i]
# normalize 'votes' into real [0,1] probabilities
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
probabilities.append(proba_k)
if not self.outputs_2d_:
probabilities = probabilities[0]
return probabilities
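# Illustrative sketch (not part of the original module): same four-point toy
# problem as the docstring example above, but with distance-based weights so
# that closer neighbors contribute more to the predicted probabilities.
def _demo_knn_distance_weights():
    X = [[0], [1], [2], [3]]
    y = [0, 0, 1, 1]
    neigh = KNeighborsClassifier(n_neighbors=3, weights='distance')
    neigh.fit(X, y)
    return neigh.predict_proba([[0.9]])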
class RadiusNeighborsClassifier(NeighborsBase, RadiusNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing a vote among neighbors within a given radius
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
outlier_label : int, optional (default = None)
Label, which is given for outlier samples (samples with no
neighbors on given radius).
        If set to None, a ValueError is raised when an outlier is detected.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsClassifier
>>> neigh = RadiusNeighborsClassifier(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsClassifier(...)
>>> print(neigh.predict([[1.5]]))
[0]
See also
--------
KNeighborsClassifier
RadiusNeighborsRegressor
KNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30, p=2, metric='minkowski',
outlier_label=None, metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
self.outlier_label = outlier_label
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
neigh_dist, neigh_ind = self.radius_neighbors(X)
inliers = [i for i, nind in enumerate(neigh_ind) if len(nind) != 0]
outliers = [i for i, nind in enumerate(neigh_ind) if len(nind) == 0]
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
if self.outlier_label is not None:
neigh_dist[outliers] = 1e-6
elif outliers:
raise ValueError('No neighbors found for test samples %r, '
'you can try using larger radius, '
'give a label for outliers, '
'or consider removing them from your dataset.'
% outliers)
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
pred_labels = np.array([_y[ind, k] for ind in neigh_ind],
dtype=object)
if weights is None:
mode = np.array([stats.mode(pl)[0]
for pl in pred_labels[inliers]], dtype=np.int)
else:
mode = np.array([weighted_mode(pl, w)[0]
for (pl, w)
in zip(pred_labels[inliers], weights[inliers])],
dtype=np.int)
mode = mode.ravel()
y_pred[inliers, k] = classes_k.take(mode)
if outliers:
y_pred[outliers, :] = self.outlier_label
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
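# Illustrative sketch (not part of the original module): the effect of
# ``outlier_label`` for a query point with no neighbors inside the radius.
# The toy data and the chosen label are made up.
def _demo_radius_outlier_label():
    X = [[0], [1], [2], [3]]
    y = [0, 0, 1, 1]
    neigh = RadiusNeighborsClassifier(radius=1.0, outlier_label=-1)
    neigh.fit(X, y)
    # [10.] is far from every training point, so it receives the label -1
    return neigh.predict([[1.5], [10.]])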
| bsd-3-clause |
ljwolf/pysal | pysal/contrib/geotable/utils.py | 2 | 3271 | from ...cg import asShape as pShape
from ...common import requires as _requires
from warnings import warn
@_requires('geopandas')
def to_df(df, geom_col='geometry', **kw):
"""
Convert a Geopandas dataframe into a normal pandas dataframe with a column
containing PySAL shapes. Always returns a copy.
Arguments
---------
df : geopandas.GeoDataFrame
a geopandas dataframe (or pandas dataframe) with a column
containing geo-interfaced shapes
geom_col: str
string denoting which column in the df contains the geometry
**kw : keyword options
options passed directly to pandas.DataFrame(...,**kw)
See Also
--------
pandas.DataFrame
"""
import pandas as pd
from geopandas import GeoDataFrame, GeoSeries
out = df.copy(deep=True)
out[geom_col] = out[geom_col].apply(pShape)
return pd.DataFrame(out, **kw)
@_requires('geopandas')
def to_gdf(df, geom_col='geometry', **kw):
"""
    Convert a pandas dataframe with a geometry column into a GeoPandas
    dataframe. Always returns a copy.
Arguments
---------
df : pandas.DataFrame
a pandas dataframe with a column containing geo-interfaced
shapes
geom_col: str
string denoting which column in the df contains the geometry
**kw : keyword options
options passed directly to geopandas.GeoDataFrame(...,**kw)
See Also
--------
geopandas.GeoDataFrame
"""
from geopandas import GeoDataFrame
from shapely.geometry import asShape as sShape
out = df.copy(deep=True)
out[geom_col] = out[geom_col].apply(sShape)
out = GeoDataFrame(out, geometry=geom_col, **kw)
return out
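# Illustrative sketch (not part of the original module): round-trip a
# GeoDataFrame through to_df and back through to_gdf. The shapefile path is a
# placeholder; any source readable by geopandas would do.
def _demo_roundtrip(path_to_shapefile):
    import geopandas
    gdf = geopandas.read_file(path_to_shapefile)
    plain = to_df(gdf)  # plain pandas.DataFrame holding PySAL shapes
    return to_gdf(plain)  # back to a GeoDataFrame holding shapely shapes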
def insert_metadata(df, obj, name=None, inplace=False, overwrite=False):
"""
Insert an object into a dataframe's metadata with a given key.
Arguments
------------
df : pd.DataFrame
dataframe to insert into the metadata
obj : object
object desired to insert into the dataframe
name : string
key of the object to use. Will be available as
an attribute of the dataframe.
inplace : bool
flag to denote whether to operate on a copy
of the dataframe or not.
overwrite : bool
flag to denote whether to replace existing entry
in metadata or not.
Returns
--------
If inplace, changes dataframe implicitly.
Else, returns a new dataframe with added metadata.
"""
if not inplace:
new = df.copy(deep=True)
insert_metadata(new, obj, name=name,
inplace=True, overwrite=overwrite)
return new
if name is None:
name = type(obj).__name__
if hasattr(df, name):
if overwrite:
warn('Overwriting attribute {}! This may break the dataframe!'.format(name))
else:
raise Exception('Dataframe already has attribute {}. Cowardly refusing '
'to break dataframe. '.format(name))
df._metadata.append(name)
df.__setattr__(name, obj)
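# Illustrative sketch (not part of the original module): attach an arbitrary
# object to a dataframe under a chosen key and read it back as an attribute.
# The dataframe, object and key below are made up.
def _demo_insert_metadata():
    import pandas as pd
    df = pd.DataFrame({'a': [1, 2, 3]})
    tagged = insert_metadata(df, {'source': 'demo'}, name='provenance')
    return tagged.provenance  # the stored object, exposed as an attribute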
| bsd-3-clause |
loli/semisupervisedforests | examples/linear_model/plot_ols_3d.py | 350 | 2040 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes-dataset are fitted and
plotted below. It illustrates that although feature 2
has a strong coefficient on the full model, it does not
give us much regarding `y` when compared to just feature 1
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
diabetes = datasets.load_diabetes()
indices = (0, 1)
X_train = diabetes.data[:-20, indices]
X_test = diabetes.data[-20:, indices]
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
###############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, elev=elev, azim=azim)
ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c='k', marker='+')
ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]),
np.array([[-.1, .15], [-.1, .15]]),
clf.predict(np.array([[-.1, -.1, .15, .15],
[-.1, .15, -.1, .15]]).T
).reshape((2, 2)),
alpha=.5)
ax.set_xlabel('X_1')
ax.set_ylabel('X_2')
ax.set_zlabel('Y')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
#Generate the three different figures from different views
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)
elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)
elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)
plt.show()
| bsd-3-clause |
pv/scikit-learn | examples/calibration/plot_calibration_curve.py | 225 | 5903 | """
==============================
Probability Calibration curves
==============================
When performing classification one often wants to predict not only the class
label, but also the associated probability. This probability gives some
kind of confidence on the prediction. This example demonstrates how to display
how well calibrated the predicted probabilities are and how to calibrate an
uncalibrated classifier.
The experiment is performed on an artificial dataset for binary classification
with 100,000 samples (1,000 of them are used for model fitting) and 20
features. Of the 20 features, only 2 are informative and 10 are redundant. The
first figure shows the estimated probabilities obtained with logistic
regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic
calibration and sigmoid calibration. The calibration performance is evaluated
with Brier score, reported in the legend (the smaller the better). One can
observe here that logistic regression is well calibrated while raw Gaussian
naive Bayes performs very badly. This is because of the redundant features
which violate the assumption of feature-independence and result in an overly
confident classifier, which is indicated by the typical transposed-sigmoid
curve.
Calibration of the probabilities of Gaussian naive Bayes with isotonic
regression can fix this issue as can be seen from the nearly diagonal
calibration curve. Sigmoid calibration also improves the Brier score slightly,
albeit not as strongly as the non-parametric isotonic regression. This can be
attributed to the fact that we have plenty of calibration data such that the
greater flexibility of the non-parametric model can be exploited.
The second figure shows the calibration curve of a linear support-vector
classifier (LinearSVC). LinearSVC shows the opposite behavior to Gaussian
naive Bayes: the calibration curve has a sigmoid shape, which is typical for
an under-confident classifier. In the case of LinearSVC, this is caused by the
margin property of the hinge loss, which lets the model focus on hard samples
that are close to the decision boundary (the support vectors).
Both kinds of calibration can fix this issue and yield nearly identical
results. This shows that sigmoid calibration can deal with situations where
the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC)
but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes).
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (brier_score_loss, precision_score, recall_score,
f1_score)
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.cross_validation import train_test_split
# Create dataset of classification task with many redundant and few
# informative features
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=10,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99,
random_state=42)
def plot_calibration_curve(est, name, fig_index):
"""Plot calibration curve for est w/o and with calibration. """
# Calibrated with isotonic calibration
isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')
# Calibrated with sigmoid calibration
sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')
# Logistic regression with no calibration as baseline
lr = LogisticRegression(C=1., solver='lbfgs')
fig = plt.figure(fig_index, figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(est, name),
(isotonic, name + ' + Isotonic'),
(sigmoid, name + ' + Sigmoid')]:
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())
print("%s:" % name)
print("\tBrier: %1.3f" % (clf_score))
print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s (%1.3f)" % (name, clf_score))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
# Plot calibration curve for Gaussian Naive Bayes
plot_calibration_curve(GaussianNB(), "Naive Bayes", 1)
# Plot calibration curve for Linear SVC
plot_calibration_curve(LinearSVC(), "SVC", 2)
plt.show()
| bsd-3-clause |
jaidevd/scikit-learn | sklearn/tests/test_metaestimators.py | 52 | 4990 | """Common tests for metaestimators"""
import functools
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.externals.six import iterkeys
from sklearn.datasets import make_classification
from sklearn.utils.testing import assert_true, assert_false, assert_raises
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.feature_selection import RFE, RFECV
from sklearn.ensemble import BaggingClassifier
class DelegatorData(object):
def __init__(self, name, construct, skip_methods=(),
fit_args=make_classification()):
self.name = name
self.construct = construct
self.fit_args = fit_args
self.skip_methods = skip_methods
DELEGATING_METAESTIMATORS = [
DelegatorData('Pipeline', lambda est: Pipeline([('est', est)])),
DelegatorData('GridSearchCV',
lambda est: GridSearchCV(
est, param_grid={'param': [5]}, cv=2),
skip_methods=['score']),
DelegatorData('RandomizedSearchCV',
lambda est: RandomizedSearchCV(
est, param_distributions={'param': [5]}, cv=2, n_iter=1),
skip_methods=['score']),
DelegatorData('RFE', RFE,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('RFECV', RFECV,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('BaggingClassifier', BaggingClassifier,
skip_methods=['transform', 'inverse_transform', 'score',
'predict_proba', 'predict_log_proba',
'predict'])
]
def test_metaestimator_delegation():
# Ensures specified metaestimators have methods iff subestimator does
def hides(method):
@property
def wrapper(obj):
if obj.hidden_method == method.__name__:
raise AttributeError('%r is hidden' % obj.hidden_method)
return functools.partial(method, obj)
return wrapper
class SubEstimator(BaseEstimator):
def __init__(self, param=1, hidden_method=None):
self.param = param
self.hidden_method = hidden_method
def fit(self, X, y=None, *args, **kwargs):
self.coef_ = np.arange(X.shape[1])
return True
def _check_fit(self):
if not hasattr(self, 'coef_'):
raise RuntimeError('Estimator is not fit')
@hides
def inverse_transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def predict(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_log_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def decision_function(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def score(self, X, *args, **kwargs):
self._check_fit()
return 1.0
methods = [k for k in iterkeys(SubEstimator.__dict__)
if not k.startswith('_') and not k.startswith('fit')]
methods.sort()
for delegator_data in DELEGATING_METAESTIMATORS:
delegate = SubEstimator()
delegator = delegator_data.construct(delegate)
for method in methods:
if method in delegator_data.skip_methods:
continue
assert_true(hasattr(delegate, method))
assert_true(hasattr(delegator, method),
msg="%s does not have method %r when its delegate does"
% (delegator_data.name, method))
# delegation before fit raises an exception
assert_raises(Exception, getattr(delegator, method),
delegator_data.fit_args[0])
delegator.fit(*delegator_data.fit_args)
for method in methods:
if method in delegator_data.skip_methods:
continue
# smoke test delegation
getattr(delegator, method)(delegator_data.fit_args[0])
for method in methods:
if method in delegator_data.skip_methods:
continue
delegate = SubEstimator(hidden_method=method)
delegator = delegator_data.construct(delegate)
assert_false(hasattr(delegate, method))
assert_false(hasattr(delegator, method),
msg="%s has method %r when its delegate does not"
% (delegator_data.name, method))
| bsd-3-clause |
fbagirov/scikit-learn | sklearn/manifold/isomap.py | 229 | 7169 | """Isomap for manifold learning"""
# Author: Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) 2011
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils import check_array
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(BaseEstimator, TransformerMixin):
"""Isomap Embedding
Non-linear dimensionality reduction through Isometric Mapping
Read more in the :ref:`User Guide <isomap>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
eigen_solver : ['auto'|'arpack'|'dense']
'auto' : Attempt to choose the most efficient solver
for the given problem.
'arpack' : Use Arnoldi decomposition to find the eigenvalues
and eigenvectors.
'dense' : Use a direct solver (i.e. LAPACK)
for the eigenvalue decomposition.
tol : float
Convergence tolerance passed to arpack or lobpcg.
not used if eigen_solver == 'dense'.
max_iter : integer
Maximum number of iterations for the arpack solver.
not used if eigen_solver == 'dense'.
path_method : string ['auto'|'FW'|'D']
Method to use in finding shortest path.
'auto' : attempt to choose the best algorithm automatically.
'FW' : Floyd-Warshall algorithm.
'D' : Dijkstra's algorithm.
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
Algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kernel_pca_ : object
`KernelPCA` object used to implement the embedding.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
nbrs_ : sklearn.neighbors.NearestNeighbors instance
        Stores nearest neighbors instance, including BallTree or KDTree
if applicable.
dist_matrix_ : array-like, shape (n_samples, n_samples)
Stores the geodesic distance matrix of training data.
References
----------
.. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
framework for nonlinear dimensionality reduction. Science 290 (5500)
"""
def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
tol=0, max_iter=None, path_method='auto',
neighbors_algorithm='auto'):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.path_method = path_method
self.neighbors_algorithm = neighbors_algorithm
self.nbrs_ = NearestNeighbors(n_neighbors=n_neighbors,
algorithm=neighbors_algorithm)
def _fit_transform(self, X):
X = check_array(X)
self.nbrs_.fit(X)
self.training_data_ = self.nbrs_._fit_X
self.kernel_pca_ = KernelPCA(n_components=self.n_components,
kernel="precomputed",
eigen_solver=self.eigen_solver,
tol=self.tol, max_iter=self.max_iter)
kng = kneighbors_graph(self.nbrs_, self.n_neighbors,
mode='distance')
self.dist_matrix_ = graph_shortest_path(kng,
method=self.path_method,
directed=False)
G = self.dist_matrix_ ** 2
G *= -0.5
self.embedding_ = self.kernel_pca_.fit_transform(G)
def reconstruction_error(self):
"""Compute the reconstruction error for the embedding.
Returns
-------
reconstruction_error : float
Notes
-------
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
and K is the isomap kernel:
``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
"""
G = -0.5 * self.dist_matrix_ ** 2
G_center = KernelCenterer().fit_transform(G)
evals = self.kernel_pca_.lambdas_
return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, precomputed tree, or NearestNeighbors
object.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X: {array-like, sparse matrix, BallTree, KDTree}
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
X = check_array(X)
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
        # Create the graph of shortest distances from X to self.training_data_
        # via the nearest neighbors of X.
        # This can be done as a single array operation, but it potentially
        # takes a lot of memory. To avoid that, use a loop:
G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
for i in range(X.shape[0]):
G_X[i] = np.min((self.dist_matrix_[indices[i]]
+ distances[i][:, None]), 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
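# Illustrative sketch (not part of the original module): embed points sampled
# along a 3-D spiral into two dimensions. The data and parameter choices are
# made up; any connected neighborhood graph would work similarly.
def _demo_isomap():
    t = np.linspace(0, 3 * np.pi, 80)
    X_demo = np.c_[np.cos(t), np.sin(t), t]  # a 1-D manifold embedded in 3-D
    embedding = Isomap(n_neighbors=10, n_components=2).fit_transform(X_demo)
    return embedding.shape  # (80, 2)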
| bsd-3-clause |
trungnt13/scikit-learn | examples/linear_model/plot_theilsen.py | 232 | 3615 | """
====================
Theil-Sen Regression
====================
Computes a Theil-Sen Regression on a synthetic dataset.
See :ref:`theil_sen_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the Theil-Sen
estimator is robust against outliers. It has a breakdown point of about 29.3%
in the case of a simple linear regression, which means that it can tolerate
arbitrarily corrupted data (outliers) of up to 29.3% in the two-dimensional
case.
The estimation of the model is done by calculating the slopes and intercepts
of a subpopulation of all possible combinations of p subsample points. If an
intercept is fitted, p must be greater than or equal to n_features + 1. The
final slope and intercept is then defined as the spatial median of these
slopes and intercepts.
In certain cases Theil-Sen performs better than :ref:`RANSAC
<ransac_regression>` which is also a robust method. This is illustrated in the
second example below where outliers with respect to the x-axis perturb RANSAC.
Tuning the ``residual_threshold`` parameter of RANSAC remedies this but in
general a priori knowledge about the data and the nature of the outliers is
needed.
Due to the computational complexity of Theil-Sen it is recommended to use it
only for small problems in terms of number of samples and features. For larger
problems the ``max_subpopulation`` parameter restricts the magnitude of all
possible combinations of p subsample points to a randomly chosen subset and
therefore also limits the runtime. Therefore, Theil-Sen is applicable to larger
problems with the drawback of losing some of its mathematical properties since
it then works on a random subset.
"""
# Author: Florian Wilhelm -- <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model import RANSACRegressor
print(__doc__)
estimators = [('OLS', LinearRegression()),
('Theil-Sen', TheilSenRegressor(random_state=42)),
('RANSAC', RANSACRegressor(random_state=42)), ]
##############################################################################
# Outliers only in the y direction
np.random.seed(0)
n_samples = 200
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
w = 3.
c = 2.
noise = 0.1 * np.random.randn(n_samples)
y = w * x + c + noise
# 10% outliers
y[-20:] += -20 * x[-20:]
X = x[:, np.newaxis]
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 3])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
##############################################################################
# Outliers in the X direction
np.random.seed(0)
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
noise = 0.1 * np.random.randn(n_samples)
y = 3 * x + 2 + noise
# 10% outliers
x[-20:] = 9.9
y[-20:] += 22
X = x[:, np.newaxis]
plt.figure()
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 10])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/pandas/tseries/holiday.py | 9 | 16177 | import warnings
from pandas import DateOffset, DatetimeIndex, Series, Timestamp
from pandas.compat import add_metaclass
from datetime import datetime, timedelta
from dateutil.relativedelta import MO, TU, WE, TH, FR, SA, SU # noqa
from pandas.tseries.offsets import Easter, Day
import numpy as np
def next_monday(dt):
"""
If holiday falls on Saturday, use following Monday instead;
if holiday falls on Sunday, use Monday instead
"""
if dt.weekday() == 5:
return dt + timedelta(2)
elif dt.weekday() == 6:
return dt + timedelta(1)
return dt
def next_monday_or_tuesday(dt):
"""
    For the second holiday of two adjacent ones:
If holiday falls on Saturday, use following Monday instead;
if holiday falls on Sunday or Monday, use following Tuesday instead
(because Monday is already taken by adjacent holiday on the day before)
"""
dow = dt.weekday()
if dow == 5 or dow == 6:
return dt + timedelta(2)
elif dow == 0:
return dt + timedelta(1)
return dt
def previous_friday(dt):
"""
If holiday falls on Saturday or Sunday, use previous Friday instead.
"""
if dt.weekday() == 5:
return dt - timedelta(1)
elif dt.weekday() == 6:
return dt - timedelta(2)
return dt
def sunday_to_monday(dt):
"""
If holiday falls on Sunday, use day thereafter (Monday) instead.
"""
if dt.weekday() == 6:
return dt + timedelta(1)
return dt
def weekend_to_monday(dt):
"""
If holiday falls on Sunday or Saturday,
use day thereafter (Monday) instead.
Needed for holidays such as Christmas observation in Europe
"""
if dt.weekday() == 6:
return dt + timedelta(1)
elif dt.weekday() == 5:
return dt + timedelta(2)
return dt
def nearest_workday(dt):
"""
If holiday falls on Saturday, use day before (Friday) instead;
if holiday falls on Sunday, use day thereafter (Monday) instead.
"""
if dt.weekday() == 5:
return dt - timedelta(1)
elif dt.weekday() == 6:
return dt + timedelta(1)
return dt
def next_workday(dt):
"""
returns next weekday used for observances
"""
dt += timedelta(days=1)
while dt.weekday() > 4:
# Mon-Fri are 0-4
dt += timedelta(days=1)
return dt
def previous_workday(dt):
"""
returns previous weekday used for observances
"""
dt -= timedelta(days=1)
while dt.weekday() > 4:
# Mon-Fri are 0-4
dt -= timedelta(days=1)
return dt
def before_nearest_workday(dt):
"""
returns previous workday after nearest workday
"""
return previous_workday(nearest_workday(dt))
def after_nearest_workday(dt):
"""
returns next workday after nearest workday
needed for Boxing day or multiple holidays in a series
"""
return next_workday(nearest_workday(dt))
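# Illustrative sketch (not part of the original module): how two of the
# observance helpers above shift a holiday that falls on a weekend. The date
# (Christmas 2021, a Saturday) is chosen purely for illustration.
def _demo_observance():
    dt = datetime(2021, 12, 25)
    return nearest_workday(dt), weekend_to_monday(dt)  # Dec 24 and Dec 27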
class Holiday(object):
"""
Class that defines a holiday with start/end dates and rules
for observance.
"""
def __init__(self, name, year=None, month=None, day=None, offset=None,
observance=None, start_date=None, end_date=None,
days_of_week=None):
"""
Parameters
----------
name : str
            Name of the holiday, defaults to class name
offset : array of pandas.tseries.offsets or
class from pandas.tseries.offsets
computes offset from date
observance: function
computes when holiday is given a pandas Timestamp
days_of_week:
            provide a tuple of days, e.g. (0, 1, 2, 3) for Monday through Thursday
            Monday=0, ..., Sunday=6
Examples
--------
>>> from pandas.tseries.holiday import Holiday, nearest_workday
>>> from pandas import DateOffset
>>> from dateutil.relativedelta import MO
>>> USMemorialDay = Holiday('MemorialDay', month=5, day=24,
offset=DateOffset(weekday=MO(1)))
>>> USLaborDay = Holiday('Labor Day', month=9, day=1,
offset=DateOffset(weekday=MO(1)))
>>> July3rd = Holiday('July 3rd', month=7, day=3,)
>>> NewYears = Holiday('New Years Day', month=1, day=1,
observance=nearest_workday),
>>> July3rd = Holiday('July 3rd', month=7, day=3,
days_of_week=(0, 1, 2, 3))
"""
if offset is not None and observance is not None:
raise NotImplementedError("Cannot use both offset and observance.")
self.name = name
self.year = year
self.month = month
self.day = day
self.offset = offset
self.start_date = Timestamp(
start_date) if start_date is not None else start_date
self.end_date = Timestamp(
end_date) if end_date is not None else end_date
self.observance = observance
assert (days_of_week is None or type(days_of_week) == tuple)
self.days_of_week = days_of_week
def __repr__(self):
info = ''
if self.year is not None:
info += 'year=%s, ' % self.year
info += 'month=%s, day=%s, ' % (self.month, self.day)
if self.offset is not None:
info += 'offset=%s' % self.offset
if self.observance is not None:
info += 'observance=%s' % self.observance
repr = 'Holiday: %s (%s)' % (self.name, info)
return repr
def dates(self, start_date, end_date, return_name=False):
"""
Calculate holidays observed between start date and end date
Parameters
----------
start_date : starting date, datetime-like, optional
end_date : ending date, datetime-like, optional
return_name : bool, optional, default=False
If True, return a series that has dates and holiday names.
False will only return dates.
"""
start_date = Timestamp(start_date)
end_date = Timestamp(end_date)
filter_start_date = start_date
filter_end_date = end_date
if self.year is not None:
dt = Timestamp(datetime(self.year, self.month, self.day))
if return_name:
return Series(self.name, index=[dt])
else:
return [dt]
dates = self._reference_dates(start_date, end_date)
holiday_dates = self._apply_rule(dates)
if self.days_of_week is not None:
holiday_dates = holiday_dates[np.in1d(holiday_dates.dayofweek,
self.days_of_week)]
if self.start_date is not None:
filter_start_date = max(self.start_date.tz_localize(
filter_start_date.tz), filter_start_date)
if self.end_date is not None:
filter_end_date = min(self.end_date.tz_localize(
filter_end_date.tz), filter_end_date)
holiday_dates = holiday_dates[(holiday_dates >= filter_start_date) &
(holiday_dates <= filter_end_date)]
if return_name:
return Series(self.name, index=holiday_dates)
return holiday_dates
def _reference_dates(self, start_date, end_date):
"""
Get reference dates for the holiday.
Return reference dates for the holiday also returning the year
prior to the start_date and year following the end_date. This ensures
that any offsets to be applied will yield the holidays within
the passed in dates.
"""
if self.start_date is not None:
start_date = self.start_date.tz_localize(start_date.tz)
if self.end_date is not None:
end_date = self.end_date.tz_localize(start_date.tz)
year_offset = DateOffset(years=1)
reference_start_date = Timestamp(
datetime(start_date.year - 1, self.month, self.day))
reference_end_date = Timestamp(
datetime(end_date.year + 1, self.month, self.day))
# Don't process unnecessary holidays
dates = DatetimeIndex(start=reference_start_date,
end=reference_end_date,
freq=year_offset, tz=start_date.tz)
return dates
def _apply_rule(self, dates):
"""
Apply the given offset/observance to a DatetimeIndex of dates.
Parameters
----------
dates : DatetimeIndex
Dates to apply the given offset/observance rule
Returns
-------
Dates with rules applied
"""
if self.observance is not None:
return dates.map(lambda d: self.observance(d))
if self.offset is not None:
if not isinstance(self.offset, list):
offsets = [self.offset]
else:
offsets = self.offset
for offset in offsets:
# if we are adding a non-vectorized value
# ignore the PerformanceWarnings:
with warnings.catch_warnings(record=True):
dates += offset
return dates
holiday_calendars = {}
def register(cls):
try:
name = cls.name
except:
name = cls.__name__
holiday_calendars[name] = cls
def get_calendar(name):
"""
Return an instance of a calendar based on its name.
Parameters
----------
name : str
Calendar name to return an instance of
"""
return holiday_calendars[name]()
class HolidayCalendarMetaClass(type):
def __new__(cls, clsname, bases, attrs):
calendar_class = super(HolidayCalendarMetaClass, cls).__new__(
cls, clsname, bases, attrs)
register(calendar_class)
return calendar_class
@add_metaclass(HolidayCalendarMetaClass)
class AbstractHolidayCalendar(object):
"""
Abstract interface to create holidays following certain rules.
"""
__metaclass__ = HolidayCalendarMetaClass
rules = []
start_date = Timestamp(datetime(1970, 1, 1))
end_date = Timestamp(datetime(2030, 12, 31))
_cache = None
def __init__(self, name=None, rules=None):
"""
Initializes holiday object with a given set a rules. Normally
classes just have the rules defined within them.
Parameters
----------
name : str
Name of the holiday calendar, defaults to class name
rules : array of Holiday objects
A set of rules used to create the holidays.
"""
super(AbstractHolidayCalendar, self).__init__()
if name is None:
name = self.__class__.__name__
self.name = name
if rules is not None:
self.rules = rules
def rule_from_name(self, name):
for rule in self.rules:
if rule.name == name:
return rule
return None
def holidays(self, start=None, end=None, return_name=False):
"""
        Return the holidays observed between start_date and end_date
Parameters
----------
start : starting date, datetime-like, optional
end : ending date, datetime-like, optional
return_name : bool, optional
If True, return a series that has dates and holiday names.
False will only return a DatetimeIndex of dates.
Returns
-------
DatetimeIndex of holidays
"""
if self.rules is None:
raise Exception('Holiday Calendar %s does not have any '
'rules specified' % self.name)
if start is None:
start = AbstractHolidayCalendar.start_date
if end is None:
end = AbstractHolidayCalendar.end_date
start = Timestamp(start)
end = Timestamp(end)
holidays = None
# If we don't have a cache or the dates are outside the prior cache, we
# get them again
if (self._cache is None or start < self._cache[0] or
end > self._cache[1]):
for rule in self.rules:
rule_holidays = rule.dates(start, end, return_name=True)
if holidays is None:
holidays = rule_holidays
else:
holidays = holidays.append(rule_holidays)
self._cache = (start, end, holidays.sort_index())
holidays = self._cache[2]
holidays = holidays[start:end]
if return_name:
return holidays
else:
return holidays.index
@staticmethod
def merge_class(base, other):
"""
Merge holiday calendars together. The base calendar
will take precedence to other. The merge will be done
based on each holiday's name.
Parameters
----------
base : AbstractHolidayCalendar
instance/subclass or array of Holiday objects
other : AbstractHolidayCalendar
instance/subclass or array of Holiday objects
"""
try:
other = other.rules
except:
pass
if not isinstance(other, list):
other = [other]
other_holidays = dict((holiday.name, holiday) for holiday in other)
try:
base = base.rules
except:
pass
if not isinstance(base, list):
base = [base]
base_holidays = dict([(holiday.name, holiday) for holiday in base])
other_holidays.update(base_holidays)
return list(other_holidays.values())
def merge(self, other, inplace=False):
"""
Merge holiday calendars together. The caller's class
rules take precedence. The merge will be done
based on each holiday's name.
Parameters
----------
other : holiday calendar
inplace : bool (default=False)
            If True, set the calendar's rules to the merged holidays;
            else return the array of merged Holiday objects
"""
holidays = self.merge_class(self, other)
if inplace:
self.rules = holidays
else:
return holidays
USMemorialDay = Holiday('MemorialDay', month=5, day=31,
offset=DateOffset(weekday=MO(-1)))
USLaborDay = Holiday('Labor Day', month=9, day=1,
offset=DateOffset(weekday=MO(1)))
USColumbusDay = Holiday('Columbus Day', month=10, day=1,
offset=DateOffset(weekday=MO(2)))
USThanksgivingDay = Holiday('Thanksgiving', month=11, day=1,
offset=DateOffset(weekday=TH(4)))
USMartinLutherKingJr = Holiday('Dr. Martin Luther King Jr.',
start_date=datetime(1986, 1, 1), month=1, day=1,
offset=DateOffset(weekday=MO(3)))
USPresidentsDay = Holiday('Presidents Day', month=2, day=1,
offset=DateOffset(weekday=MO(3)))
GoodFriday = Holiday("Good Friday", month=1, day=1, offset=[Easter(), Day(-2)])
EasterMonday = Holiday("Easter Monday", month=1, day=1,
offset=[Easter(), Day(1)])
class USFederalHolidayCalendar(AbstractHolidayCalendar):
"""
US Federal Government Holiday Calendar based on rules specified by:
https://www.opm.gov/policy-data-oversight/
snow-dismissal-procedures/federal-holidays/
"""
rules = [
Holiday('New Years Day', month=1, day=1, observance=nearest_workday),
USMartinLutherKingJr,
USPresidentsDay,
USMemorialDay,
Holiday('July 4th', month=7, day=4, observance=nearest_workday),
USLaborDay,
USColumbusDay,
Holiday('Veterans Day', month=11, day=11, observance=nearest_workday),
USThanksgivingDay,
Holiday('Christmas', month=12, day=25, observance=nearest_workday)
]
def HolidayCalendarFactory(name, base, other,
base_class=AbstractHolidayCalendar):
rules = AbstractHolidayCalendar.merge_class(base, other)
calendar_class = type(name, (base_class,), {"rules": rules, "name": name})
return calendar_class
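# --- Usage sketch (not part of the original module) ---
# A minimal, hedged illustration of the calendar API defined above; the date
# range used here is an arbitrary choice for the example.
if __name__ == '__main__':
    cal = USFederalHolidayCalendar()
    # DatetimeIndex of observed US federal holidays in 2014
    print(cal.holidays(start='2014-01-01', end='2014-12-31'))
    # Combine the federal rules with GoodFriday; merge_class keeps the base
    # calendar's rule whenever two rules share a name.
    ExampleCalendar = HolidayCalendarFactory('ExampleCalendar',
                                             USFederalHolidayCalendar,
                                             GoodFriday)
    print(ExampleCalendar().holidays(start='2014-01-01', end='2014-12-31',
                                     return_name=True))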
| mit |
charanpald/sandbox | sandbox/util/test/MCEvaluatorCythonTest.py | 1 | 5459 | import logging
import unittest
import numpy
import scipy.sparse
import sklearn.metrics
import numpy.testing as nptst
from sandbox.util.MCEvaluatorCython import MCEvaluatorCython
from sandbox.util.MCEvaluator import MCEvaluator
from sandbox.util.SparseUtils import SparseUtils
from sandbox.util.SparseUtilsCython import SparseUtilsCython
from sandbox.util.Util import Util
class MCEvaluatorCythonTest(unittest.TestCase):
def setUp(self):
numpy.random.seed(21)
def testRecommendAtk(self):
m = 20
n = 50
r = 3
X, U, s, V, wv = SparseUtils.generateSparseBinaryMatrix((m,n), r, 0.5, verbose=True)
import sppy
X = sppy.csarray(X)
k = 10
X = numpy.zeros(X.shape)
omegaList = []
for i in range(m):
omegaList.append(numpy.random.permutation(n)[0:5])
X[i, omegaList[i]] = 1
X = sppy.csarray(X)
orderedItems = MCEvaluatorCython.recommendAtk(U, V, k, X)
orderedItems2 = MCEvaluator.recommendAtk(U, V, k, omegaList=omegaList)
nptst.assert_array_equal(orderedItems[orderedItems2!=-1], orderedItems2[orderedItems2!=-1])
for i in range(m):
items = numpy.intersect1d(omegaList[i], orderedItems[i, :])
self.assertEquals(items.shape[0], 0)
#items = numpy.union1d(omegaList[i], orderedItems[i, :])
#items = numpy.intersect1d(items, orderedItems2[i, :])
#nptst.assert_array_equal(items, numpy.sort(orderedItems2[i, :]))
#Now let's have an all zeros X
X = sppy.csarray(X.shape)
orderedItems = MCEvaluatorCython.recommendAtk(U, V, k, X)
orderedItems2 = MCEvaluator.recommendAtk(U, V, k)
nptst.assert_array_equal(orderedItems, orderedItems2)
def testReciprocalRankAtk(self):
m = 20
n = 50
r = 3
X, U, s, V, wv = SparseUtils.generateSparseBinaryMatrix((m,n), r, 0.5, verbose=True, csarray=True)
k = 5
orderedItems = numpy.random.randint(0, n, m*k)
orderedItems = numpy.reshape(orderedItems, (m, k))
orderedItems = numpy.array(orderedItems, numpy.int32)
(indPtr, colInds) = X.nonzeroRowsPtr()
indPtr = numpy.array(indPtr, numpy.uint32)
colInds = numpy.array(colInds, numpy.uint32)
rrs = MCEvaluatorCython.reciprocalRankAtk(indPtr, colInds, orderedItems)
rrs2 = numpy.zeros(m)
for i in range(m):
omegai = colInds[indPtr[i]:indPtr[i+1]]
for j in range(k):
if orderedItems[i, j] in omegai:
rrs2[i] = 1/float(1+j)
break
nptst.assert_array_equal(rrs, rrs2)
#Test case where no items are in ranking
orderedItems = numpy.ones((m, k), numpy.int32) * (n+1)
rrs = MCEvaluatorCython.reciprocalRankAtk(indPtr, colInds, orderedItems)
nptst.assert_array_equal(rrs, numpy.zeros(m))
#Now, make all items rank 2
for i in range(m):
omegai = colInds[indPtr[i]:indPtr[i+1]]
orderedItems[i, 1] = omegai[0]
rrs = MCEvaluatorCython.reciprocalRankAtk(indPtr, colInds, orderedItems)
nptst.assert_array_equal(rrs, numpy.ones(m)*0.5)
def testStratifiedRecallAtk(self):
m = 20
n = 50
r = 3
alpha = 1
X, U, V = SparseUtilsCython.generateSparseBinaryMatrixPL((m,n), r, density=0.2, alpha=alpha, csarray=True)
itemCounts = numpy.array(X.sum(0)+1, numpy.int32)
(indPtr, colInds) = X.nonzeroRowsPtr()
indPtr = numpy.array(indPtr, numpy.uint32)
colInds = numpy.array(colInds, numpy.uint32)
k = 5
orderedItems = numpy.random.randint(0, n, m*k)
orderedItems = numpy.reshape(orderedItems, (m, k))
orderedItems = numpy.array(orderedItems, numpy.int32)
beta = 0.5
recalls, denominators = MCEvaluatorCython.stratifiedRecallAtk(indPtr, colInds, orderedItems, itemCounts, beta)
recalls2 = numpy.zeros(m)
#Now compute recalls from scratch
for i in range(m):
omegai = colInds[indPtr[i]:indPtr[i+1]]
numerator = 0
for j in range(k):
if orderedItems[i, j] in omegai:
numerator += 1/itemCounts[orderedItems[i, j]]**beta
denominator = 0
for j in omegai:
denominator += 1/itemCounts[j]**beta
recalls2[i] = numerator/denominator
nptst.assert_array_equal(recalls, recalls2)
#Now try to match with normal recall
itemCounts = numpy.ones(n, numpy.int32)
recalls, denominators = MCEvaluatorCython.stratifiedRecallAtk(indPtr, colInds, orderedItems, itemCounts, beta)
recalls2 = MCEvaluatorCython.recallAtk(indPtr, colInds, orderedItems)
nptst.assert_array_equal(recalls, recalls2)
if __name__ == '__main__':
unittest.main() | gpl-3.0 |
cfjhallgren/shogun | examples/undocumented/python/graphical/group_lasso.py | 11 | 7789 | #!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
from numpy.random import rand, randn, permutation, multivariate_normal
from shogun import BinaryLabels, RealFeatures, IndexBlock, IndexBlockGroup, FeatureBlockLogisticRegression
def generate_synthetic_logistic_data(n, p, L, blk_nnz, gcov, nstd):
# Generates synthetic data for the logistic regression, using the example
# from [Friedman10]
# n : # of observations
# p : # of predictors
# L : # of blocks
# blk_nnz : # of non-zero coefs. in each block
# gcov : correlation within groups
# nstd : standard deviation of the added noise
# size of each block (assumed to be an integer)
pl = p / L
# generating the coefficients (betas)
coefs = np.zeros((p, 1))
for (i, nnz) in enumerate(blk_nnz):
blkcoefs = np.zeros((pl, 1))
blkcoefs[0:nnz] = np.sign(rand(nnz, 1) - 0.5)
coefs[pl * i:pl * (i + 1)] = permutation(blkcoefs)
# generating the predictors
mu = np.zeros(p)
gsigma = gcov * np.ones((pl, pl))
np.fill_diagonal(gsigma, 1.0)
Sigma = np.kron(np.eye(L), gsigma)
# the predictors come from a standard Gaussian multivariate distribution
X = multivariate_normal(mu, Sigma, n)
# linear function of the explanatory variables in X, plus noise
t = np.dot(X, coefs) + randn(n, 1) * nstd
# applying the logit
Pr = 1 / (1 + np.exp(-t))
# The response variable y[i] is a Bernoulli random variable taking
# value 1 with probability Pr[i]
y = rand(n, 1) <= Pr
# we want each _column_ in X to represent a feature vector
# y and coefs should be also 1D arrays
return X.T, y.flatten(), coefs.flatten()
def misclassified_groups(est_coefs, true_coefs, L):
# Compute the number of groups that are misclassified, i.e. the ones with
# at least one non-zero coefficient whose estimated coefficients are all
# set to zero, or viceversa, as explained in [Friedman10]
# est_coefs : coefficients estimated by the FBLR
# true_coefs : the original coefficients of our synthetic example
# L : number of blocks
p = est_coefs.shape[0] # number of predictors
pl = p / L
est_nz = est_coefs != 0
true_nz = true_coefs != 0
est_blk_nzcount = np.array([sum(est_nz[pl * i:pl * (i + 1)]) for i in xrange(L)])
true_blk_nzcount = np.array([sum(true_nz[pl * i:pl * (i + 1)]) for i in xrange(L)])
return np.sum(np.logical_xor(est_blk_nzcount == 0, true_blk_nzcount == 0))
def misclassified_features(est_coefs, true_coefs):
# Compute the number of individual coefficients that are misclassified,
# i.e. estimated to be zero when the true coefficient is nonzero or
# vice-versa, as explained in [Friedman10]
# est_coefs : coefficients estimated by the FBLR
# true_coefs : the original coefficients of our synthetic example
return np.sum(np.logical_xor(est_coefs == 0, true_coefs == 0))
def compute_misclassifications(cls, true_coefs, L, rel_z):
# Try the given classifier with different values of relative regularization
# parameters, store the coefficients and compute the number of groups
# and features misclassified.
# INPUTS:
# - cls : the classifier to try
# - true_coefs : the original coefficients of our synthetic example
# - L : number of blocks
# - rel_z : regularization values to try, they will be in [0,1]
# OUTPUTS:
# - est_coefs : array with the estimated coefficients, each row for a
# different value of regularization
# - misc_groups, misc_feats : see above
num_z = rel_z.shape[0]
est_coefs = np.zeros((num_z, true_coefs.shape[0]))
misc_groups = np.zeros(num_z)
misc_feats = np.zeros(num_z)
for (i, z) in enumerate(rel_z):
cls.set_z(z)
cls.train()
est_coefs[i, :] = cls.get_w()
misc_groups[i] = misclassified_groups(est_coefs[i, :], true_coefs, L)
misc_feats[i] = misclassified_features(est_coefs[i, :], true_coefs)
return est_coefs, misc_groups, misc_feats
if __name__ == '__main__':
print('FeatureBlockLogisticRegression example')
np.random.seed(956) # reproducible results
# default parameters from [Friedman10]
n = 200
p = 100
L = 10
blk_nnz = [10, 8, 6, 4, 2, 1]
gcov = 0.2
nstd = 0.4
# range of (relative) regularization values to try
min_z = 0
max_z = 1
num_z = 21
# get the data
X, y, true_coefs = generate_synthetic_logistic_data(n, p, L, blk_nnz, gcov, nstd)
# here each column represents a feature vector
features = RealFeatures(X)
# we have to convert the labels to +1/-1
labels = BinaryLabels(np.sign(y.astype(int) - 0.5))
# SETTING UP THE CLASSIFIERS
# CLASSIFIER 1: group LASSO
# build the feature blocks and add them to the block group
pl = p / L
block_group = IndexBlockGroup()
for i in xrange(L):
block_group.add_block(IndexBlock(pl * i, pl * (i + 1)))
cls_gl = FeatureBlockLogisticRegression(0.0, features, labels, block_group)
# with set_regularization(1), the parameter z will indicate the fraction of
# the maximum regularization to use, and so z is in [0,1]
# (reference: SLEP manual)
cls_gl.set_regularization(1)
cls_gl.set_q(2.0) # it is the default anyway...
# CLASSIFIER 2: LASSO (illustrating group lasso with all group sizes = 1)
block_group_ones = IndexBlockGroup()
for i in xrange(p):
block_group_ones.add_block(IndexBlock(i, i + 1))
cls_l = FeatureBlockLogisticRegression(0.0, features, labels, block_group_ones)
cls_l.set_regularization(1)
cls_l.set_q(2.0)
# trying with different values of (relative) regularization parameters
rel_z = np.linspace(min_z, max_z, num_z)
coefs_gl, miscgp_gl, miscft_gl = compute_misclassifications(cls_gl, true_coefs, L, rel_z)
coefs_l, miscgp_l, miscft_l = compute_misclassifications(cls_l, true_coefs, L, rel_z)
# Find the best regularization for each classifier
# for the group lasso: the one that gives the fewest groups misclassified
best_z_gl = np.argmin(miscgp_gl)
# for the lasso: the one that gives the fewest features misclassified
best_z_l = np.argmin(miscft_l)
# plot the true coefs. and the signs of the estimated coefs.
fig = plt.figure()
for (coefs, best_z, name, pos) in zip([coefs_gl, coefs_l], [best_z_gl, best_z_l], ['Group lasso', 'Lasso'], [0, 1]):
ax = plt.subplot2grid((4, 2), (pos, 0), colspan=2)
plt.hold(True)
plt.plot(xrange(p), np.sign(coefs[best_z, :]), 'o', markeredgecolor='none', markerfacecolor='g')
plt.plot(xrange(p), true_coefs, '^', markersize=7, markeredgecolor='r', markerfacecolor='none', markeredgewidth=1)
plt.xticks(xrange(0, p + pl, pl))
plt.yticks([-1, 0, 1])
plt.xlim((-1, p + 1))
plt.ylim((-2, 2))
plt.grid(True)
# plt.legend(('estimated', 'true'), loc='best')
plt.title(name)
plt.xlabel('Predictor [triangles=true coefs], best reg. value = %.2f' % rel_z[best_z])
plt.ylabel('Coefficient')
ax = plt.subplot2grid((4, 2), (2, 0), rowspan=2)
plt.plot(rel_z, miscgp_gl, 'ro-', rel_z, miscgp_l, 'bo-')
plt.legend(('Group lasso', 'Lasso'), loc='best')
plt.title('Groups misclassified')
plt.xlabel('Relative regularization parameter')
plt.ylabel('# of groups misclassified')
ax = plt.subplot2grid((4, 2), (2, 1), rowspan=2)
plt.plot(rel_z, miscft_gl, 'ro-', rel_z, miscft_l, 'bo-')
plt.legend(('Group lasso', 'Lasso'), loc='best')
plt.title('Features misclassified')
plt.xlabel('Relative regularization parameter')
plt.ylabel('# of features misclassified')
plt.tight_layout(1.2, 0, 0)
plt.show()
| gpl-3.0 |
junghans/espressopp | examples/lattice_boltzmann/lb_md.py | 2 | 6167 | # DEMONSTRATION OF THE LATTICE-BOLTZMANN SIMULATION
#
import espressopp
import cProfile, pstats
from espressopp import Int3D
from espressopp import Real3D
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
plt.ion()
# create default Lennard Jones (WCA) system with 0 particles and cubic box (L=40)
num_chains = 328
#num_chains = 1
monomers_per_chain = 10
L = 16
box = (L, L, L)
bondlen = 0.97
rc = 2 * pow(2, 1./6.)
skin = 0.3
dt = 0.000001
epsilon = 0.
sigma = 1.
temperature = 1.2
print "Initial values"
system = espressopp.System()
system.rng = espressopp.esutil.RNG()
system.bc = espressopp.bc.OrthorhombicBC(system.rng, box)
system.skin = skin
nodeGrid = espressopp.tools.decomp.nodeGrid(espressopp.MPI.COMM_WORLD.size)
cellGrid = espressopp.tools.decomp.cellGrid(box, nodeGrid, rc, skin)
system.storage = espressopp.storage.DomainDecomposition(system, nodeGrid, cellGrid)
interaction = espressopp.interaction.VerletListLennardJones(espressopp.VerletList(system, cutoff=rc))
potLJ = espressopp.interaction.LennardJones(epsilon, sigma, rc)
interaction.setPotential(type1=0, type2=0, potential=potLJ)
system.addInteraction(interaction)
integrator = espressopp.integrator.VelocityVerlet(system)
integrator.dt = dt
thermostat = espressopp.integrator.LangevinThermostat(system)
thermostat.gamma = 1.0
thermostat.temperature = temperature
integrator.addExtension(thermostat)
print integrator.dt
print thermostat.gamma
print thermostat.temperature
props = ['id', 'type', 'mass', 'pos', 'v']
vel_zero = espressopp.Real3D(0.0, 0.0, 0.0)
bondlist = espressopp.FixedPairList(system.storage)
pid = 1
type = 0
mass = 1.0
chain = []
for i in range(num_chains):
startpos = system.bc.getRandomPos()
positions, bonds = espressopp.tools.topology.polymerRW(pid, startpos, monomers_per_chain, bondlen)
for k in range(monomers_per_chain):
part = [pid + k, type, mass, positions[k], vel_zero]
chain.append(part)
pid += monomers_per_chain
type += 1
system.storage.addParticles(chain, *props)
system.storage.decompose()
chain = []
bondlist.addBonds(bonds)
system.storage.decompose()
potFENE = espressopp.interaction.FENE(K=30.0, r0=0.0, rMax=1.5)
interFENE = espressopp.interaction.FixedPairListFENE(system, bondlist, potFENE)
system.addInteraction(interFENE)
#force_capping = espressopp.integrator.CapForce(system, 500.0)
#integrator.addExtension(force_capping)
espressopp.tools.analyse.info(system, integrator)
print "First phase of the warm up. Epsilon will be increased from 0. to 0.25"
new_epsilon = 0.
for k in range(10):
integrator.run(1000)
espressopp.tools.analyse.info(system, integrator)
new_epsilon += 0.025
potLJ = espressopp.interaction.LennardJones(new_epsilon, sigma, rc)
interaction.setPotential(type1=0, type2=0, potential=potLJ)
print "Second phase of the warm up with timestep 0.0001. Epsilon will be increased from to 1."
print new_epsilon
integrator.dt = 0.00005
for k in range(10):
integrator.run(1000)
espressopp.tools.analyse.info(system, integrator)
new_epsilon += 0.075
potLJ = espressopp.interaction.LennardJones(new_epsilon, sigma, rc)
interaction.setPotential(type1=0, type2=0, potential=potLJ)
print "Third phase of the warm up with timestep 0.005. Epsilon is 1."
print new_epsilon
integrator.dt = 0.005
for k in range(10):
integrator.run(1000)
espressopp.tools.analyse.info(system, integrator)
thermostat.gamma = 0.0
thermostat.temperature = 0.0
# define a LB grid
lb = espressopp.integrator.LatticeBoltzmann(system, Ni=Int3D(16, 16, 16))
initPop = espressopp.integrator.LBInitPopUniform(system,lb)
#initPop = espressopp.integrator.LBInitPopWave(system,lb)
initPop.createDenVel(1.0, Real3D(0.,0.,0.0))
# declare gammas responsible for viscosities (if they differ from 0)
lb.gamma_b = 0.5
lb.gamma_s = 0.5
# specify desired temperature (set the fluctuations if any)
#lb.lbTemp = 0.0
lb.lbTemp = 0.000025
lb.fricCoeff = 20.
#lb.fricCoeff = 0.
# add extension to the integrator
integrator.addExtension(lb)
# output velocity profile vz (x)
#lboutputVzOfX = espressopp.analysis.LBOutputProfileVzOfX(system,lb)
#OUT1=espressopp.integrator.ExtAnalyze(lboutputVzOfX,100)
#integrator.addExtension(OUT1)
# output velocity vz at a certain lattice site as a function of time
#lboutputVzInTime = espressopp.analysis.LBOutputVzInTime(system,lb)
#OUT2=espressopp.integrator.ExtAnalyze(lboutputVzInTime,100)
#integrator.addExtension(OUT2)
# output onto the screen
#lboutputScreen = espressopp.analysis.LBOutputScreen(system,lb)
#OUT3=espressopp.integrator.ExtAnalyze(lboutputScreen,100)
#integrator.addExtension(OUT3)
print integrator.dt
print thermostat.gamma
print thermostat.temperature
print lb.fricCoeff
# set external constant (gravity-like) force
#lbforce = espressopp.integrator.LBInitConstForce(system,lb)
#lbforce.setForce(Real3D(0.,0.,0.0001))
# run 500 steps with it
#integrator.run(500)
#integrator.run(100000)
# add a periodic force with a specified amplitude to the existing body force
#lbforce2 = espressopp.integrator.LBInitPeriodicForce(system,lb)
#lbforce2.addForce(Real3D(0.,0.,0.0005))
#lb.lbTemp = 0.0000005
## run 500 steps with it
#integrator.run(500)
##
plt.figure()
T = espressopp.analysis.Temperature(system)
x = []
yT = []
yTmin = 0.2
#x.append(integrator.dt * integrator.step)
#yT.append(T.compute())
yTmax = 1.8
plt.subplot(211)
gT, = plt.plot(x, yT, 'ro')
# write output to a datafile
f = open('temp_lb1.0_c1.0_L16_N328_G20_2.dat', 'a')
#integrator.run(50)
#print "new"
#integrator.run(50)
for k in range(100):
integrator.run(100)
x.append(integrator.dt * integrator.step)
currT = T.compute()
yT.append(currT)
s = str(integrator.step)
f.write(s+'\t')
s = str(currT)
f.write(s+'\n')
# yTmax = max(yT)
plt.subplot(211)
plt.axis([x[0], x[-1], yTmin, yTmax ])
gT.set_ydata(yT)
gT.set_xdata(x)
plt.draw()
plt.savefig('lb1.0_c1.0_L16_N328_G20_2.pdf')
f.close()
## add some profiling statistics for the run
##cProfile.run("integrator.run(10000)",'profiler_stats')
##p = pstats.Stats('profiler_stats')
##p.strip_dirs().sort_stats("time").print_stats(10)
| gpl-3.0 |
acrsilva/animated-zZz-machine | scatterplots_app/v1/scatterplots.py | 1 | 9294 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
sys.path.insert(0, '../lib')
from PyQt4.uic import loadUiType
from pyqtgraph.Qt import QtCore, QtGui
#from matplotlib.figure import Figure
from matplotlib import pyplot as plt
import numpy as np
from matplotlib.backends.backend_qt4agg import (
FigureCanvasQTAgg as FigureCanvas,
NavigationToolbar2QT as NavigationToolbar)
import cachitos
import matplotlib.dates as md
from sklearn import preprocessing
import colores
import leeFichero as lf
DEBUG = 0
Ui_MainWindow, QMainWindow = loadUiType('scatterplots.ui')
class Main(QMainWindow, Ui_MainWindow):
def __init__(self, ):
super(Main, self).__init__()
self.setupUi(self)
self.epActual = 0
self.selep = self.loadData()
self.updateView()
self.configureComboBox()
self.cbSueno.clicked.connect(self.filtrarSueno)
self.cbSedentario.clicked.connect(self.filtrarSedentario)
self.cbLigera.clicked.connect(self.filtrarLigera)
self.cbModerada.clicked.connect(self.filtrarModerada)
self.btnPrev.clicked.connect(self.retroceder)
self.btnNext.clicked.connect(self.avanzar)
#self.btnSelFile.clicked.connect(self.openFile)
self.actionAbrir.triggered.connect(self.openFile)
self.cbx_izq.activated[str].connect(self.cbx_izqListener)
self.cbx_der.activated[str].connect(self.cbx_derListener)
self.filSueno = True
self.filSedentario = True
self.filLigero =True
self.filModerado = True
def configureComboBox(self):
print "Configurando combobox"
self.cbx_izq.clear()
self.cbx_der.clear()
for i in self.selep.epFiltro:
self.cbx_izq.addItem(i.nombre)
self.cbx_der.addItem(i.nombre)
if(len(self.selep.epFiltro) > 1):
self.cbx_der.setCurrentIndex(1)
self.cbx_izq.setCurrentIndex(self.epActual)
def openFile(self):
self.selep = self.loadData()
self.configureComboBox()
self.limpiarLayout()
self.updateView()
def loadData(self):
if(DEBUG): fname = '../data.csv'
else: fname = QtGui.QFileDialog.getOpenFileName(self, 'Open file')
print "Abriendo fichero ", fname
selep = lf.LectorFichero(fname).selep_completo
return selep
def getTime(self, a, b, ep):
for i in self.selep.epFiltro[self.epActual + ep].temp:
if(a == i):
ind = 0
for k in self.selep.epFiltro[self.epActual + ep].flujo:
if(b == k):
print "encontrado"
return self.selep.epFiltro[self.epActual + ep].tiempo[ind]
else:
ind += 1
def onpick(self, event, ep):
thisline = event.artist
xdata, ydata = thisline.get_data()
ind = event.ind
print xdata[ind[0]], ydata[ind[0]]
self.label.setText('Instante ' + str(self.getTime(xdata[ind[0]], ydata[ind[0]], ep)))
def creaFiguras(self, t, a, b):
        # Time series plot
fig0 = plt.figure(tight_layout=True)
        # Normalize
        a = preprocessing.scale(a, copy=True)
        b = preprocessing.scale(b, copy=True)
        # Temperature curve
ax1 = fig0.add_subplot(111)
ax1.plot(t, a, '-', color=colores.temperatura)
#ax1.set_ylim([-5,5])
#ax1.set_xlabel('Tiempo (m)')
ax1.set_ylabel('Temperatura (ºC)', color=colores.temperatura)
for tl in ax1.get_yticklabels():
tl.set_color(colores.temperatura)
fig0.autofmt_xdate()
xfmt = md.DateFormatter('%H:%M')
ax1.xaxis.set_major_formatter(xfmt)
start, end = ax1.get_xlim()
#ax1.xaxis.set_ticks(np.arange(start, end, 30))
ax1.grid(True)
        # Heat flux curve
ax2 = ax1.twinx()
ax2.plot(t, b, '-', color=colores.flujo)
#ax2.set_ylim([-5,5])
ax2.set_ylabel('Flujo térmico', color=colores.flujo)
for tl in ax2.get_yticklabels():
tl.set_color(colores.flujo)
#Scatterplot
fig1 = plt.figure(tight_layout=True)
ax1f1 = fig1.add_subplot(111)
line, = ax1f1.plot(a, b, 'o', picker=5)
#ax1f1.set_xlim([20,45])
#ax1f1.set_ylim([-20,220])
ax1f1.set_xlabel('Temperatura (ºC)', color=colores.temperatura)
ax1f1.set_ylabel('Flujo térmico', color=colores.flujo)
return fig0, fig1
def crearWidget(self, filtro, ep):
fig10, fig11 = self.creaFiguras(filtro.tiempo, filtro.temp, filtro.flujo)
canvas1 = FigureCanvas(fig10)
canvas2 = FigureCanvas(fig11)
vbox = QtGui.QGridLayout()
vbox.addWidget(QtGui.QLabel("<b>Episodio:</b> " + filtro.nombre))
vbox.addWidget(QtGui.QLabel("<b>Inicio:</b> " + str(filtro.tiempo[0])))
vbox.addWidget(QtGui.QLabel("<b>Final:</b> " + str(filtro.tiempo[-1])))
vbox.addWidget(QtGui.QLabel("<b>Duración:</b> %i min" % (len(filtro.tiempo))))
vbox.addWidget(QtGui.QLabel("<b>Coeficiente de correlación:</b> " + str(filtro.correlacion)[:5]))
vbox.addWidget(QtGui.QLabel("<b>Calorías consumidas:</b> " + str(filtro.numCalorias)[:6] + " (" + str(filtro.numCalorias/self.selep.totalCal*100)[:4] + "%)"))
vbox.addWidget(canvas1)
vbox.addWidget(canvas2)
canvas2.mpl_connect('pick_event', lambda event: self.onpick(event, ep))
return vbox
    # Insert elements into the layout for the new episodes
def updateView(self):
if(len(self.selep.epFiltro) > 0):
self.vbox = self.crearWidget(self.selep.epFiltro[self.epActual], 0)
self.layoutMatplot1.addLayout(self.vbox)
if(len(self.selep.epFiltro) > 1):
self.vbox2 = self.crearWidget(self.selep.epFiltro[self.epActual+1], 1)
self.layoutMatplot1.addLayout(self.vbox2)
    # Remove the contents of the current layout
def limpiarLayout(self):
        plt.close('all')  # Close all drawn figures to free memory
for cnt in reversed(range(self.vbox.count())):
widget = self.vbox.takeAt(cnt).widget()
if widget is not None:
widget.deleteLater()
for cnt in reversed(range(self.vbox2.count())):
widget = self.vbox2.takeAt(cnt).widget()
if widget is not None:
widget.deleteLater()
    # Check that the episode index stays within range
def setBounds(self):
if(self.epActual > len(self.selep.epFiltro)-2):
self.epActual = len(self.selep.epFiltro)-2
def filtrarSueno(self):
print "Filtrar sueño"
        self.filSueno = self.cbSueno.isChecked()  # toggle the filter
        self.selep.update(self.filSueno, self.filSedentario, self.filLigero, self.filModerado)  # update the array of filtered episodes
self.setBounds()
self.limpiarLayout()
self.updateView()
self.configureComboBox()
def filtrarSedentario(self):
print "Filtrar sedentario"
self.filSedentario = self.cbSedentario.isChecked()
self.selep.update(self.filSueno, self.filSedentario, self.filLigero, self.filModerado)
self.setBounds()
self.limpiarLayout()
self.updateView()
self.configureComboBox()
def filtrarLigera(self):
print "Filtrar ligera"
self.filLigero = self.cbLigera.isChecked()
self.selep.update(self.filSueno, self.filSedentario, self.filLigero, self.filModerado)
self.setBounds()
self.limpiarLayout()
self.updateView()
self.configureComboBox()
def filtrarModerada(self):
print "Filtrar moderada"
self.filModerado = self.cbModerada.isChecked()
self.selep.update(self.filSueno, self.filSedentario, self.filLigero, self.filModerado)
self.setBounds()
self.limpiarLayout()
self.updateView()
self.configureComboBox()
def retroceder(self):
if (self.epActual > 0):
self.epActual -= 1
print "episodios", self.epActual, "y", self.epActual+1
self.limpiarLayout()
self.updateView()
def avanzar(self):
if (self.epActual < len(self.selep.epFiltro) - 2):
self.epActual += 1
print "episodios", self.epActual, "y", self.epActual+1
self.limpiarLayout()
self.updateView()
def cbx_izqListener(self):
self.epActual = self.cbx_izq.currentIndex()
print "episodios", self.epActual
self.limpiarLayout()
self.updateView()
def cbx_derListener(self):
self.epActual = self.cbx_der.currentIndex()
print "episodios", self.epActual
self.limpiarLayout()
self.updateView()
if __name__ == '__main__':
import sys
from PyQt4 import QtGui
app = QtGui.QApplication(sys.argv)
main = Main()
main.show()
sys.exit(app.exec_())
| lgpl-3.0 |
anne-urai/RT_RDK | graphicalModels/examples/yike.py | 7 | 1176 | """
Yike's model
============
This is Yike Tang's model for weak lensing.
"""
from matplotlib import rc
rc("font", family="serif", size=12)
rc("text", usetex=True)
import daft
pgm = daft.PGM([5.20, 2.95], origin=[-1.70, 1.65])
pgm.add_node(daft.Node("obs", r"$\epsilon^{\mathrm{obs}}_n$", 2, 3, observed=True))
pgm.add_node(daft.Node("true", r"$\epsilon^{\mathrm{true}}_n$", 1, 3))
pgm.add_edge("true", "obs")
pgm.add_node(daft.Node("alpha", r"$\alpha,\beta$", -0.25, 3))
pgm.add_edge("alpha", "true")
pgm.add_node(daft.Node("shape prior", r"$p(\alpha, \beta)$", -1.25, 3, fixed=True))
pgm.add_edge("shape prior", "alpha")
pgm.add_node(daft.Node("gamma", r"$\gamma_m$", 2, 4))
pgm.add_edge("gamma", "obs")
pgm.add_node(daft.Node("gamma prior", r"$p(\gamma)$", -0.25, 4, fixed=True))
pgm.add_edge("gamma prior", "gamma")
pgm.add_node(daft.Node("sigma", r"$\sigma_{\epsilon}$", 3.25, 3, fixed=True))
pgm.add_edge("sigma", "obs")
pgm.add_plate(daft.Plate([0.5, 2.25, 2, 1.25],
label=r"galaxies $n$"))
pgm.add_plate(daft.Plate([0.25, 1.75, 2.5, 2.75],
label=r"patches $m$"))
pgm.render()
pgm.figure.savefig("yike.pdf")
pgm.figure.savefig("yike.png", dpi=150)
| mit |
darkframemaster/Coding-Analysis | app/local/visualize.py | 2 | 3225 | #!/usr/bin/env python3
__author__='xuehao'
import threading
import os
import logging
import pylab
import matplotlib.pyplot as plt
from ..config import PIC_PATH
from ..config import PIC_REQUEST_PATH
class Draw(object):
def __init__(self, repo_name = ''):
self.repo_name = repo_name
self.save_path = PIC_PATH + repo_name
self.request_path = PIC_REQUEST_PATH + repo_name
try:
os.mkdir(self.save_path)
except:
logging.warning('PIC_PATH already exist!')
def hist(self, figure_name, data=[], buckets=10, x_label='count', y_label='Number range'):
"""
        Use this function to visualize data as a histogram.
Params:
data: The data will be visualized.
buckets: The number of the buckets in x.
x_label: Words will shows up in x.
y_label: Words will shows up in y.
Returns:
save_name: str
The file location of the result picture
"""
try:
os.mkdir(self.save_path + '/hist')
except:
logging.warning('update hist in '+self.save_path)
pylab.hist(data, buckets)
pylab.xlabel(x_label)
pylab.ylabel(y_label)
save_name = self.save_path + '/hist/' + figure_name
pylab.savefig(save_name)
pylab.clf()
return self.request_path + '/hist/' + figure_name + '.png'
def explode(self, figure_name, data=[], explode=[], labels=(), title='a graph'):
"""
        Use this function to visualize data as an exploded pie chart
Params:
data: The data will be visualized.
explode: The distance between each bucket of the data.
explode should be len(data) sequence or None.
labels: The labels shows next to the bucket of the data.
title: The title of the graph.
Returns:
save_name: str
The file location of the result picture
"""
try:
os.mkdir(self.save_path + '/explode')
except:
logging.warning('update explode in '+self.save_path)
#Make the graph square.
pylab.figure(1, figsize=(6,6))
ax = pylab.axes([0.1, 0.1, 0.8, 0.8])
pylab.title(title)
pylab.pie(data, explode = explode, labels = labels,
autopct = '%1.1f%%', startangle = 0)
save_name = self.save_path + '/explode/' + figure_name
pylab.savefig(save_name)
pylab.clf()
return self.request_path + '/explode/' + figure_name + '.png'
def scatter(self, figure_name, data1=[], data2=[], color='indigo', alpha=0.3, edgecolors='white', label='label'):
"""
User this function to visualize data as a scatter
Params:
data1: The data will be list at x axis.
data2: The data will be list at y axis.
color: The point color that shows on the graph
alpha: The color's alpha value.
edgecolors: Edge's color.
label: the label shows in the graph.
Return:
save_name: str
The file location of the result picture.
"""
try:
os.mkdir(self.save_path + '/scatter')
except:
logging.warning('update scatter in '+self.save_path)
if len(data1)==len(data2):
plt.scatter(data1, data2, color = color, alpha = alpha,
edgecolors = edgecolors, label = label)
plt.legend()
save_name = self.save_path + '/scatter/' + figure_name
plt.savefig(save_name)
plt.clf()
return self.request_path + '/scatter/' + figure_name + '.png'
else:
            logging.warning('data1 should be the same length as data2')
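# --- Usage sketch (not part of the original module) ---
# A minimal, hedged illustration of the Draw API above. It assumes the class
# is imported as part of the package (so the relative `..config` imports
# resolve); the repository name and the sample data are arbitrary choices.
#
#   d = Draw('example_repo')
#   url = d.hist('commit_counts', data=[1, 3, 3, 7, 2, 5], buckets=5)
#   print(url)  # request path of the saved histogram picture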
| mit |
mikecroucher/GPy | GPy/models/sparse_gplvm.py | 6 | 1890 | # Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import sys
from .sparse_gp_regression import SparseGPRegression
from ..core import Param
class SparseGPLVM(SparseGPRegression):
"""
Sparse Gaussian Process Latent Variable Model
:param Y: observed data
:type Y: np.ndarray
:param input_dim: latent dimensionality
:type input_dim: int
:param init: initialisation method for the latent space
:type init: 'PCA'|'random'
"""
def __init__(self, Y, input_dim, X=None, kernel=None, init='PCA', num_inducing=10):
if X is None:
from ..util.initialization import initialize_latent
X, fracs = initialize_latent(init, input_dim, Y)
X = Param('latent space', X)
SparseGPRegression.__init__(self, X, Y, kernel=kernel, num_inducing=num_inducing)
self.link_parameter(self.X, 0)
def parameters_changed(self):
super(SparseGPLVM, self).parameters_changed()
self.X.gradient = self.kern.gradients_X_diag(self.grad_dict['dL_dKdiag'], self.X)
self.X.gradient += self.kern.gradients_X(self.grad_dict['dL_dKnm'], self.X, self.Z)
def plot_latent(self, labels=None, which_indices=None,
resolution=50, ax=None, marker='o', s=40,
fignum=None, plot_inducing=True, legend=True,
plot_limits=None,
aspect='auto', updates=False, predict_kwargs={}, imshow_kwargs={}):
assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
from ..plotting.matplot_dep import dim_reduction_plots
return dim_reduction_plots.plot_latent(self, labels, which_indices,
resolution, ax, marker, s,
fignum, plot_inducing, legend,
plot_limits, aspect, updates, predict_kwargs, imshow_kwargs)
| bsd-3-clause |
JackKelly/neuralnilm_prototype | scripts/e374.py | 2 | 7070 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
"""
e370
longer seq
"""
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 500
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
# 'dish washer',
# ['washer dryer', 'washing machine']
],
on_power_thresholds=[5] * 5,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=1024,
# random_window=64,
output_one_appliance=True,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.9,
one_target_per_seq=False,
n_seq_per_batch=16,
subsample_target=4,
include_diff=False,
include_power=True,
# clip_appliance_power=True,
target_is_prediction=False,
# independently_center_inputs = True,
standardise_input=True,
unit_variance_targets=True,
input_padding=2,
lag=0
# classification=True
# reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
updates_func=momentum,
learning_rate=1e-2,
learning_rate_changes_by_iteration={
500: 1e-3,
1500: 1e-4
# 800: 1e-4
# 500: 1e-3
# 4000: 1e-03,
# 6000: 5e-06,
# 7000: 1e-06
# 2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
do_save_activations=True,
# auto_reshape=False,
# plotter=CentralOutputPlotter
# plotter=MDNPlotter
layers_config=[
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 20,
'filter_length': 4,
'stride': 4,
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 80,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': tanh
}
]
)
def exp_a(name):
global source
# source_dict_copy = deepcopy(source_dict)
# source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'].extend([
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': T.nnet.softplus
}
])
net = Net(**net_dict_copy)
return net
def exp_b(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source,
loss_function=partial(scaled_cost3, ignore_inactive=False),
))
net_dict_copy['layers_config'].extend([
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': T.nnet.softplus
}
])
net = Net(**net_dict_copy)
return net
def exp_c(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source,
loss_function=partial(scaled_cost3, ignore_inactive=True),
))
net_dict_copy['layers_config'].extend([
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': T.nnet.softplus
}
])
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
EXPERIMENTS = list('abc')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=2000)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
# raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
| mit |
lux-jwang/goodoos | drawfeature.py | 1 | 2613 | import sys
sys.path.append("./src")
from similarities import CosineSimilarity, JaccardSimilarity
from dataset import get_friends_data, get_user_item_matrix
from models.friendsmodel import FriendsModel
import matplotlib.pyplot as plt
import matplotlib
def get_similarity_matrix(data_set, friend_data):
i_model = FriendsModel(data_set, friend_data)
similarity = CosineSimilarity(i_model)
return similarity
def get_friends_similarity(sim_mat, friend_data):
sim_dict = {}
for ky, values in friend_data.iteritems():
ky_sims = sim_mat.get_similarities(ky,values)
if not ky_sims:
continue
f_id, simz = zip(*ky_sims)
sim_dict[ky] = simz
for item in simz:
if item < 0.1:
print item
return sim_dict
def get_strangers_similarity(sim_mat, friend_data):
sim_dict = {}
i_model = sim_mat.model
for ky in friend_data:
strangers = i_model.get_strangers_in_roster(ky)
ky_sims = sim_mat.get_similarities(ky,strangers)
t_id, simz = zip(*ky_sims)
sim_dict[ky] = simz
return sim_dict
#plt.scatter(txs, tys, color='k', marker=r'$\bigodot$', alpha=0.9,label="strangers")
#plt.scatter(fxs, fys, color='gray', marker=r'$\bullet$', alpha=0.6,label="friends")
def draw_similarity(friend_sims, stranger_sims):
#print friend_sims
matplotlib.rcParams.update({'font.size':28})
fys, fxs=zip(*((x, k) for k in friend_sims for x in friend_sims[k]))
tys, txs=zip(*((xt, kt) for kt in stranger_sims for xt in stranger_sims[kt]))
fig = plt.figure()
ax = fig.add_subplot(111)
#matplotlib.rcParams.update({'font.size':28})
#plt.plot(txs, tys, 'k>',alpha=0.9,label="strangers")
#plt.plot(fxs, fys, 'c*',alpha=0.6,label="friends")
stranger, = plt.plot(txs, tys, ' ', color='lightgrey', marker=r's', alpha=0.9,label="strangers",)
friend, = plt.plot(fxs, fys, ' ',color='k', marker='>', alpha=0.8,label="friends")
plt.title('Cosine Similarity')
plt.axis('tight')
plt.ylabel("Similarity")
plt.xlabel("User ID")
plt.legend(loc='lower right')
plt.show()
if __name__ == '__main__':
raw_data = get_user_item_matrix()
friend_data = get_friends_data()
sim_mat = get_similarity_matrix(raw_data,friend_data)
f_sim = get_friends_similarity(sim_mat,friend_data)
t_sim = get_strangers_similarity(sim_mat,friend_data)
idx = 1
f_sims = {}
t_sims = {}
for ky in f_sim:
f_sims[idx] = f_sim[ky]
t_sims[idx] = t_sim[ky]
idx += 1
draw_similarity(f_sims,t_sims)
| mit |
nikitasingh981/scikit-learn | sklearn/metrics/cluster/unsupervised.py | 9 | 10353 | """Unsupervised evaluation metrics."""
# Authors: Robert Layton <[email protected]>
# Arnaud Fouchet <[email protected]>
# Thierry Guillemot <[email protected]>
# License: BSD 3 clause
import numpy as np
from ...utils import check_random_state
from ...utils import check_X_y
from ...utils.fixes import bincount
from ..pairwise import pairwise_distances
from ...preprocessing import LabelEncoder
def check_number_of_labels(n_labels, n_samples):
if not 1 < n_labels < n_samples:
raise ValueError("Number of labels is %d. Valid values are 2 "
"to n_samples - 1 (inclusive)" % n_labels)
def silhouette_score(X, labels, metric='euclidean', sample_size=None,
random_state=None, **kwds):
"""Compute the mean Silhouette Coefficient of all samples.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``. To clarify, ``b`` is the distance between a sample and the nearest
cluster that the sample is not a part of.
    Note that Silhouette Coefficient is only defined if number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the mean Silhouette Coefficient over all samples.
To obtain the values for each sample, use :func:`silhouette_samples`.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters. Negative values generally indicate that a sample has
been assigned to the wrong cluster, as a different cluster is more similar.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
Predicted labels for each sample.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`metrics.pairwise.pairwise_distances
<sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance
array itself, use ``metric="precomputed"``.
sample_size : int or None
The size of the sample to use when computing the Silhouette Coefficient
on a random subset of the data.
If ``sample_size is None``, no sampling is used.
random_state : int, RandomState instance or None, optional (default=None)
The generator used to randomly select a subset of samples. If int,
random_state is the seed used by the random number generator; If
RandomState instance, random_state is the random number generator; If
None, the random number generator is the RandomState instance used by
`np.random`. Used when ``sample_size is not None``.
**kwds : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : float
Mean Silhouette Coefficient for all samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
if sample_size is not None:
X, labels = check_X_y(X, labels, accept_sparse=['csc', 'csr'])
random_state = check_random_state(random_state)
indices = random_state.permutation(X.shape[0])[:sample_size]
if metric == "precomputed":
X, labels = X[indices].T[indices].T, labels[indices]
else:
X, labels = X[indices], labels[indices]
return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
def silhouette_samples(X, labels, metric='euclidean', **kwds):
"""Compute the Silhouette Coefficient for each sample.
The Silhouette Coefficient is a measure of how well samples are clustered
with samples that are similar to themselves. Clustering models with a high
Silhouette Coefficient are said to be dense, where samples in the same
cluster are similar to each other, and well separated, where samples in
different clusters are not very similar to each other.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``.
    Note that Silhouette Coefficient is only defined if number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the Silhouette Coefficient for each sample.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
label values for each sample
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is
the distance array itself, use "precomputed" as the metric.
**kwds : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a ``scipy.spatial.distance`` metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : array, shape = [n_samples]
Silhouette Coefficient for each samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
X, labels = check_X_y(X, labels, accept_sparse=['csc', 'csr'])
le = LabelEncoder()
labels = le.fit_transform(labels)
check_number_of_labels(len(le.classes_), X.shape[0])
distances = pairwise_distances(X, metric=metric, **kwds)
unique_labels = le.classes_
n_samples_per_label = bincount(labels, minlength=len(unique_labels))
# For sample i, store the mean distance of the cluster to which
# it belongs in intra_clust_dists[i]
intra_clust_dists = np.zeros(distances.shape[0], dtype=distances.dtype)
# For sample i, store the mean distance of the second closest
# cluster in inter_clust_dists[i]
inter_clust_dists = np.inf + intra_clust_dists
for curr_label in range(len(unique_labels)):
# Find inter_clust_dist for all samples belonging to the same
# label.
mask = labels == curr_label
current_distances = distances[mask]
# Leave out current sample.
n_samples_curr_lab = n_samples_per_label[curr_label] - 1
if n_samples_curr_lab != 0:
intra_clust_dists[mask] = np.sum(
current_distances[:, mask], axis=1) / n_samples_curr_lab
# Now iterate over all other labels, finding the mean
# cluster distance that is closest to every sample.
for other_label in range(len(unique_labels)):
if other_label != curr_label:
other_mask = labels == other_label
other_distances = np.mean(
current_distances[:, other_mask], axis=1)
inter_clust_dists[mask] = np.minimum(
inter_clust_dists[mask], other_distances)
sil_samples = inter_clust_dists - intra_clust_dists
sil_samples /= np.maximum(intra_clust_dists, inter_clust_dists)
# score 0 for clusters of size 1, according to the paper
sil_samples[n_samples_per_label.take(labels) == 1] = 0
return sil_samples
def calinski_harabaz_score(X, labels):
"""Compute the Calinski and Harabaz score.
The score is defined as ratio between the within-cluster dispersion and
the between-cluster dispersion.
Read more in the :ref:`User Guide <calinski_harabaz_index>`.
Parameters
----------
X : array-like, shape (``n_samples``, ``n_features``)
List of ``n_features``-dimensional data points. Each row corresponds
to a single data point.
labels : array-like, shape (``n_samples``,)
Predicted labels for each sample.
Returns
-------
score : float
The resulting Calinski-Harabaz score.
References
----------
.. [1] `T. Calinski and J. Harabasz, 1974. "A dendrite method for cluster
analysis". Communications in Statistics
<http://www.tandfonline.com/doi/abs/10.1080/03610927408827101>`_
"""
X, labels = check_X_y(X, labels)
le = LabelEncoder()
labels = le.fit_transform(labels)
n_samples, _ = X.shape
n_labels = len(le.classes_)
check_number_of_labels(n_labels, n_samples)
extra_disp, intra_disp = 0., 0.
mean = np.mean(X, axis=0)
for k in range(n_labels):
cluster_k = X[labels == k]
mean_k = np.mean(cluster_k, axis=0)
extra_disp += len(cluster_k) * np.sum((mean_k - mean) ** 2)
intra_disp += np.sum((cluster_k - mean_k) ** 2)
return (1. if intra_disp == 0. else
extra_disp * (n_samples - n_labels) /
(intra_disp * (n_labels - 1.)))
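# --- Usage sketch (not part of the original module) ---
# A hedged example of the two public metrics defined above, written against
# the public sklearn API (shown as a comment because this module itself uses
# relative imports); the toy data and the number of clusters are arbitrary.
#
#   from sklearn.datasets import make_blobs
#   from sklearn.cluster import KMeans
#   from sklearn.metrics import silhouette_score, calinski_harabaz_score
#   X, _ = make_blobs(n_samples=300, centers=3, random_state=0)
#   labels = KMeans(n_clusters=3, random_state=0).fit_predict(X)
#   print(silhouette_score(X, labels), calinski_harabaz_score(X, labels))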
| bsd-3-clause |
yandex-load/volta | volta/listeners/console/plugin.py | 1 | 1376 | import logging
from volta.common.interfaces import DataListener
logger = logging.getLogger(__name__)
class ConsoleListener(DataListener):
"""
Prints stats to console every second
"""
def __init__(self, config, core):
"""
Args:
config: config to listeners, config.fname should store a name of file
"""
super(ConsoleListener, self).__init__(config, core)
self.closed = None
self.output_fmt = {
'currents': ['ts', 'value'],
'sync': ['sys_uts', 'log_uts', 'app', 'tag', 'message'],
'event': ['sys_uts', 'log_uts', 'app', 'tag', 'message'],
'metric': ['sys_uts', 'log_uts', 'app', 'tag', 'value'],
'fragment': ['sys_uts', 'log_uts', 'app', 'tag', 'message'],
'unknown': ['sys_uts', 'message']
}
self.core.data_session.manager.subscribe(self.put, {'type': 'metrics', 'name': 'current'})
def get_info(self):
""" mock """
pass
def put(self, df):
""" Process data
Args:
df (pandas.DataFrame): dfs w/ data contents,
differs for each data type.
Should be processed differently from each other
"""
if not self.closed:
logger.info("\n%s\n", df.describe())
def close(self):
self.closed = True
| mpl-2.0 |
vermouthmjl/scikit-learn | examples/cluster/plot_affinity_propagation.py | 349 | 2304 | """
=================================================
Demo of affinity propagation clustering algorithm
=================================================
Reference:
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
print(__doc__)
from sklearn.cluster import AffinityPropagation
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=300, centers=centers, cluster_std=0.5,
random_state=0)
##############################################################################
# Compute Affinity Propagation
af = AffinityPropagation(preference=-50).fit(X)
cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_
n_clusters_ = len(cluster_centers_indices)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels, metric='sqeuclidean'))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.close('all')
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
class_members = labels == k
cluster_center = X[cluster_centers_indices[k]]
plt.plot(X[class_members, 0], X[class_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
for x in X[class_members]:
plt.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
KevinHCChen/wireless-aoa | simulations/rayTraceAOA.py | 1 | 4550 | import numpy as np
import matplotlib.pyplot as plt
import scipy
from scipy.ndimage.morphology import binary_dilation, binary_erosion
from matplotlib import colors
import sys
import itertools
from sklearn.cross_validation import LeavePOut, train_test_split
from sklearn.svm import SVC as SVM
from sklearn.svm import SVR
from sklearn.linear_model import LogisticRegression as LR
from sklearn.neighbors import KNeighborsClassifier as KNN
from sklearn.neighbors import KNeighborsRegressor as KNNR
from MLP import MLPClassifier
from MLP import MLPRegressor
from scipy.stats import norm
import math
from Convert2Complex import *
plt.ion()
########################################################################################
#### Utilities
########################################################################################
def angle2c(theta):
return 1j*np.sin(theta)+np.cos(theta)
def steeringVector(theta):
phase_diff = distance_between_rx*np.cos(theta)*2*np.pi/wavelength
return angle2c(np.array([rx*phase_diff for rx in range(rx_num)]))
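# NOTE: `distance_between_rx`, `wavelength` and `rx_num` are assumed to be
# module-level constants defined elsewhere before steeringVector() is called;
# they are not defined anywhere in this script.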
def angleDiff(signal1, signal2):
return np.mod(np.angle(signal1)-np.angle(signal2)+np.pi, 2*np.pi)-np.pi
def angular_diff(samples):
return [angleDiff( samples[i+1], samples[i] ) for i in range(len(samples)-1) ]
def shift2degree(x):
return np.arccos(x)/np.pi*180.
def degree2shift(x):
return (np.cos(x/180.*np.pi))
def identity(x):
return x
_colors = ['b','g','r','m','c','k']
########################################################################################
#### Load Data
########################################################################################
X, Y = loadData('../data/1800TxTry2/','', 1)
#X = np.load('X.npy')
#Y = np.load('Y.npy')
X = X[:800,:]
Y = Y[:800]
rep_factor = 2
X = np.repeat(X,rep_factor,axis=0)
Y = np.repeat(Y,rep_factor,axis=0)
X = X + norm.rvs(0, 1e-8, size=X.shape)
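# work with per-antenna phases and take the difference between adjacent
# receivers: these phase differences are the features that carry the
# angle-of-arrival information (cf. steeringVector above)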
X = np.angle(X)
X = X[:,1:] - X[:,:-1]
########################################################################################
#### Classifier Initialization
########################################################################################
hl_sizes = [(1000,), (100,50), (200,50), (500,50), (1000,50), \
(100,100), (200,100), (500,100), (1000,100), \
(200,100,20), (500,100,20), (1000,100,20)]
regressors = []
#regressors.append( KNNR(n_neighbors=3))
regressors.append(SVR(kernel='linear', C=1e3, gamma=0.1))
regressors.append(SVR(kernel='poly', C=1e3, degree=2))
regressors.append( MLPRegressor(hidden_layer_sizes=(800,50), activation='relu', verbose=False,
algorithm='adam', alpha=0.000, tol=1e-8, early_stopping=True))
#for hl_size in hl_sizes:
# regressors.append( MLPRegressor(hidden_layer_sizes=hl_size, activation='relu', verbose=False,
# algorithm='adam', alpha=0.000, tol=1e-8, early_stopping=True))
########################################################################################
#### Test models
########################################################################################
# expand data (corresponds to nonlinear kernels)
#X = np.hstack([X**p for p in range(1,4)])
#Y = Y/360.
test_size=.2
plt.figure(1); plt.clf();
for i, regressor in enumerate(regressors):
if regressor.__class__.__name__ != "SVR":
Y = Y/360.
trainX, testX, trainY, testY = train_test_split(X,Y, test_size=test_size)
regressor.fit(trainX, trainY)
plt.plot(testY*360., regressor.predict(testX)*360. , _colors[i]+'o', alpha=0.8, label=regressor.__class__.__name__)
#plt.plot(trainY*360., regressor.predict(trainX)*360. , _colors[i]+'.', alpha=0.4)
else:
trainX, testX, trainY, testY = train_test_split(X,Y, test_size=test_size)
regressor.fit(trainX, trainY)
plt.plot(testY, regressor.predict(testX) , _colors[i]+'o', alpha=0.8, label=regressor.__class__.__name__)
#plt.plot(trainY, regressor.predict(trainX) , _colors[i]+'.', alpha=0.4)
#plt.plot(testY, regressor.predict(testX) , 'o', alpha=0.4, label=regressor.__class__.__name__)
#plt.plot(trainY, regressor.predict(trainX) ,'.', alpha=0.4)
print "{} precision = {:.4f}".format(regressor.__class__.__name__, regressor.score(testX, testY))
#print "sizes: %s" % (regressor.hidden_layer_sizes,)
plt.plot(testY*360., testY*360., 'k-.', alpha=1, label='ground truth')
plt.legend(loc='best')
plt.xlim([260,370])
plt.ylim([260,370])
plt.xlabel("Ground Truth AOA")
plt.ylabel("Predicted AOA")
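# Illustrative sketch (not part of the original script): report the mean absolute AOA error in
# degrees for one regressor on a fresh split; assumes the same 1/360 target scaling used above
# and that MLPRegressor/train_test_split are available via the star import.
#
#   import numpy as np
#   trX, teX, trY, teY = train_test_split(X, Y/360., test_size=test_size)
#   mlp = MLPRegressor(hidden_layer_sizes=(800, 50), activation='relu', algorithm='adam',
#                      alpha=0.0, tol=1e-8, early_stopping=True)
#   mlp.fit(trX, trY)
#   print "MAE = {:.2f} degrees".format(360. * np.mean(np.abs(mlp.predict(teX) - teY)))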
| mit |
brandonckelly/carma_pack | src/carmcmc/carma_pack.py | 1 | 68411 | __author__ = 'Brandon C. Kelly'
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import solve
from scipy.optimize import minimize
import samplers
import multiprocessing
import _carmcmc as carmcmcLib
class CarmaModel(object):
"""
Class for performing statistical inference assuming a CARMA(p,q) model.
"""
def __init__(self, time, y, ysig, p=1, q=0):
"""
Constructor for the CarmaModel class.
:param time: The observation times.
:param y: The measured time series.
        :param ysig: The standard deviation in the measurement errors on the time series.
:param p: The order of the autoregressive (AR) polynomial. Default is p = 1.
:param q: The order of the moving average (MA) polynomial. Default is q = 0. Note that p > q.
"""
        if p <= q:
            raise ValueError("Order of AR polynomial, p, must be larger than order of MA polynomial, q.")
        # check that time values are unique and in ascending order
s_idx = np.argsort(time)
t_unique, u_idx = np.unique(time[s_idx], return_index=True)
u_idx = s_idx[u_idx]
# convert input to std::vector<double> extension class
self._time = carmcmcLib.vecD()
self._time.extend(time[u_idx])
self._y = carmcmcLib.vecD()
self._y.extend(y[u_idx])
self._ysig = carmcmcLib.vecD()
self._ysig.extend(ysig[u_idx])
# save parameters
self.time = time[u_idx]
self.y = y[u_idx]
self.ysig = ysig[u_idx]
self.p = p
self.q = q
self.mcmc_sample = None
def run_mcmc(self, nsamples, nburnin=None, ntemperatures=None, nthin=1, init=None):
"""
Run the MCMC sampler. This is actually a wrapper that calls the C++ code that runs the MCMC sampler.
:param nsamples: The number of samples from the posterior to generate.
:param ntemperatures: Number of parallel MCMC chains to run in the parallel tempering algorithm. Default is 1
(no tempering) for p = 1 and max(10, p+q) for p > 1.
:param nburnin: Number of burnin iterations to run. The default is nsamples / 2.
:param nthin: Thinning interval for the MCMC sampler. Default is 1 (no thinning).
:return: Either a CarmaSample or Car1Sample object, depending on the values of self.p. The CarmaSample object
will also be stored as a data member of the CarmaModel object.
"""
if ntemperatures is None:
ntemperatures = max(10, self.p + self.q)
if nburnin is None:
nburnin = nsamples / 2
if init is None:
init = carmcmcLib.vecD()
if self.p == 1:
# Treat the CAR(1) case separately
cppSample = carmcmcLib.run_mcmc_car1(nsamples, nburnin, self._time, self._y, self._ysig,
nthin, init)
# run_mcmc_car1 returns a wrapper around the C++ CAR1 class, convert to python object
sample = Car1Sample(self.time, self.y, self.ysig, cppSample)
else:
cppSample = carmcmcLib.run_mcmc_carma(nsamples, nburnin, self._time, self._y, self._ysig,
self.p, self.q, ntemperatures, False, nthin, init)
# run_mcmc_car returns a wrapper around the C++ CARMA class, convert to a python object
sample = CarmaSample(self.time, self.y, self.ysig, cppSample, q=self.q)
self.mcmc_sample = sample
return sample
def get_mle(self, p, q, ntrials=100, njobs=1):
"""
Return the maximum likelihood estimate (MLE) of the CARMA model parameters. This is done by using the
        L-BFGS-B algorithm from scipy.optimize on ntrials randomly distributed starting values of the parameters. This
        may return NaN for more complex CARMA models, especially if the data are not well-described by a CARMA model.
In addition, the likelihood space can be highly multi-modal, and there is no guarantee that the global MLE will
be found using this procedure.
@param p: The order of the AR polynomial.
@param q: The order of the MA polynomial. Must be q < p.
@param ntrials: The number of random starting values for the optimizer. Default is 100.
@param njobs: The number of processors to use. If njobs = -1, then all of them are used. Default is njobs = 1.
@return: The scipy.optimize.Result object corresponding to the MLE.
"""
if njobs == -1:
njobs = multiprocessing.cpu_count()
args = [(p, q, self.time, self.y, self.ysig)] * ntrials
if njobs == 1:
MLEs = map(_get_mle_single, args)
else:
# use multiple processors
pool = multiprocessing.Pool(njobs)
# warm up the pool
pool.map(int, range(multiprocessing.cpu_count()))
MLEs = pool.map(_get_mle_single, args)
pool.terminate()
best_MLE = MLEs[0]
for MLE in MLEs:
if MLE.fun < best_MLE.fun: # note that MLE.fun is -loglik since we use scipy.optimize.minimize
# new MLE found, save this value
best_MLE = MLE
print best_MLE.message
return best_MLE
def choose_order(self, pmax, qmax=None, pqlist=None, njobs=1, ntrials=100):
"""
Choose the order of the CARMA model by minimizing the AICc(p,q). This first computes the maximum likelihood
estimate on a grid of (p,q) values using self.get_mle, and then choosing the value of (p,q) that minimizes
the AICc. These values of p and q are stored as self.p and self.q.
@param pmax: The maximum order of the AR(p) polynomial to search over.
@param qmax: The maximum order of the MA(q) polynomial to search over. If none, search over all possible values
of q < p.
@param pqlist: A list of (p,q) tuples. If supplied, the (p,q) pairs are used instead of being generated from the
values of pmax and qmax.
@param njobs: The number of processors to use for calculating the MLE. A value of njobs = -1 will use all
available processors.
@param ntrials: The number of random starts to use in the MLE, the default is 100.
@return: A tuple of (MLE, pqlist, AICc). MLE is a scipy.optimize.Result object containing the maximum-likelihood
estimate. pqlist contains the values of (p,q) used in the search, and AICc contains the values of AICc for
each (p,q) pair in pqlist.
"""
        if pmax < 1:
            raise ValueError("Order of AR polynomial must be at least 1.")
        if qmax is None:
            qmax = pmax - 1
        if pmax <= qmax:
            raise ValueError("Order of AR polynomial, p, must be larger than order of MA polynomial, q.")
if pqlist is None:
pqlist = []
for p in xrange(1, pmax+1):
for q in xrange(p):
pqlist.append((p, q))
MLEs = []
for pq in pqlist:
MLE = self.get_mle(pq[0], pq[1], ntrials=ntrials, njobs=njobs)
MLEs.append(MLE)
best_AICc = 1e300
AICc = []
best_MLE = MLEs[0]
print 'p, q, AICc:'
for MLE, pq in zip(MLEs, pqlist):
nparams = 2 + pq[0] + pq[1]
deviance = 2.0 * MLE.fun
this_AICc = 2.0 * nparams + deviance + 2.0 * nparams * (nparams + 1.0) / (self.time.size - nparams - 1.0)
print pq[0], pq[1], this_AICc
AICc.append(this_AICc)
if this_AICc < best_AICc:
# new optimum found, save values
best_MLE = MLE
best_AICc = this_AICc
self.p = pq[0]
self.q = pq[1]
print 'Model with best AICc has p =', self.p, ' and q = ', self.q
return best_MLE, pqlist, AICc
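# Illustrative usage sketch (not part of the original module); parameter values are arbitrary and
# the helpers get_ar_roots/carma_process are defined later in this file.
#
#   import numpy as np
#   t = np.sort(np.random.uniform(0.0, 100.0, 150))
#   y_true = carma_process(t, sigsqr=1.0, ar_roots=get_ar_roots([0.2, 0.05], [0.5]))
#   ysig = 0.1 * np.ones_like(t)
#   y = y_true + ysig * np.random.standard_normal(t.size)
#   model = CarmaModel(t, y, ysig, p=3, q=0)
#   sample = model.run_mcmc(20000)
#   sample.assess_fit()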
def _get_mle_single(args):
p, q, time, y, ysig = args
nsamples = 1
nburnin = 25
nwalkers = 10
# get a CARMA process object by running the MCMC sampler for a very short period. This will provide the initial
# guess and the function to compute the log-posterior
tvec = arrayToVec(time) # convert to std::vector<double> object for input into C++ wrapper
yvec = arrayToVec(y)
ysig_vec = arrayToVec(ysig)
if p == 1:
# Treat the CAR(1) case separately
CarmaProcess = carmcmcLib.run_mcmc_car1(nsamples, nburnin, tvec, yvec, ysig_vec, 1)
else:
CarmaProcess = carmcmcLib.run_mcmc_carma(nsamples, nburnin, tvec, yvec, ysig_vec,
p, q, nwalkers, False, 1)
initial_theta = CarmaProcess.getSamples()
initial_theta = np.array(initial_theta[0])
initial_theta[1] = 1.0 # initial guess for measurement error scale parameter
# set bounds on parameters
ysigma = y.std()
dt = time[1:] - time[:-1]
max_freq = 1.0 / dt.min()
max_freq = 0.9 * max_freq
min_freq = 1.0 / (time.max() - time.min())
theta_bnds = [(ysigma / 10.0, 10.0 * ysigma)]
theta_bnds.append((0.9, 1.1))
theta_bnds.append((None, None))
if p == 1:
theta_bnds.append((np.log(min_freq), np.log(max_freq)))
else:
# monte carlo estimates of bounds on quadratic term parameterization of AR(p) roots
qterm_lbound = min(min_freq ** 2, 2.0 * min_freq)
qterm_lbound = np.log(qterm_lbound)
qterm_ubound = max(max_freq ** 2, 2.0 * max_freq)
qterm_ubound = np.log(qterm_ubound)
theta_bnds.extend([(qterm_lbound, qterm_ubound)] * p)
# no bounds on MA coefficients
if q > 0:
theta_bnds.extend([(None, None)] * q)
CarmaProcess.SetMLE(True) # ignore the prior bounds when calculating CarmaProcess.getLogDensity in C++ code
# make sure initial guess of theta does not violate bounds
for j in xrange(len(initial_theta)):
if theta_bnds[j][0] is not None:
if (initial_theta[j] < theta_bnds[j][0]) or (initial_theta[j] > theta_bnds[j][1]):
initial_theta[j] = np.random.uniform(theta_bnds[j][0], theta_bnds[j][1])
thisMLE = minimize(_carma_loglik, initial_theta, args=(CarmaProcess,), method="L-BFGS-B", bounds=theta_bnds)
return thisMLE
def _carma_loglik(theta, args):
CppCarma = args
theta_vec = carmcmcLib.vecD()
theta_vec.extend(theta)
logdens = CppCarma.getLogDensity(theta_vec)
return -logdens
class CarmaSample(samplers.MCMCSample):
"""
Class for storing and analyzing the MCMC samples of a CARMA(p,q) model.
"""
def __init__(self, time, y, ysig, sampler, q=0, filename=None, MLE=None):
"""
Constructor for the CarmaSample class. In general a CarmaSample object should never be constructed directly,
but should be constructed by calling CarmaModel.run_mcmc().
@param time: The array of time values for the time series.
@param y: The array of measured values for the time series.
@param ysig: The array of measurement noise standard deviations for the time series.
        @param sampler: A C++ object returned by _carmcmc.run_mcmc_carma(). In general this should not be obtained
directly, but a CarmaSample object should be obtained by running CarmaModel.run_mcmc().
@param q: The order of the MA polynomial.
@param filename: A string of the name of the file containing the MCMC samples generated by the C++ carpack.
@param MLE: The maximum-likelihood estimate, obtained as a scipy.optimize.Result object.
"""
self.time = time # The time values of the time series
self.y = y # The measured values of the time series
self.ysig = ysig # The standard deviation of the measurement errors of the time series
self.q = q # order of moving average polynomial
logpost = np.array(sampler.GetLogLikes())
trace = np.array(sampler.getSamples())
super(CarmaSample, self).__init__(filename=filename, logpost=logpost, trace=trace)
# now calculate the AR(p) characteristic polynomial roots, coefficients, MA coefficients, and amplitude of
# driving noise and add them to the MCMC samples
print "Calculating PSD Lorentzian parameters..."
self._ar_roots()
print "Calculating coefficients of AR polynomial..."
self._ar_coefs()
if self.q > 0:
print "Calculating coefficients of MA polynomial..."
self._ma_coefs(trace)
print "Calculating sigma..."
self._sigma_noise()
# add the log-likelihoods
print "Calculating log-likelihoods..."
loglik = np.empty(logpost.size)
sampler.SetMLE(True)
for i in xrange(logpost.size):
std_theta = carmcmcLib.vecD()
std_theta.extend(trace[i, :])
# loglik[i] = logpost[i] - sampler.getLogPrior(std_theta)
loglik[i] = sampler.getLogDensity(std_theta)
self._samples['loglik'] = loglik
# make the parameter names (i.e., the keys) public so the user knows how to get them
self.parameters = self._samples.keys()
self.newaxis()
self.mle = {}
if MLE is not None:
# add maximum a posteriori estimate
self.add_mle(MLE)
def add_mle(self, MLE):
"""
Add the maximum-likelihood estimate to the CarmaSample object. This will convert the MLE to a dictionary, and
add it as a data member of the CarmaSample object. The values can be accessed as self.mle['parameter']. For
example, the MLE of the CARMA process variance is accessed as self.mle['var'].
@param MLE: The maximum-likelihood estimate, returned by CarmaModel.get_mle() or CarmaModel.choose_order().
"""
self.mle = {'loglik': -MLE.fun, 'var': MLE.x[0] ** 2, 'measerr_scale': MLE.x[1], 'mu': MLE.x[2]}
# add AR polynomial roots and PSD lorentzian parameters
quad_coefs = np.exp(MLE.x[3:self.p + 3])
ar_roots = np.zeros(self.p, dtype=complex)
psd_width = np.zeros(self.p)
psd_cent = np.zeros(self.p)
for i in xrange(self.p / 2):
quad1 = quad_coefs[2 * i]
quad2 = quad_coefs[2 * i + 1]
discriminant = quad2 ** 2 - 4.0 * quad1
if discriminant > 0:
sqrt_disc = np.sqrt(discriminant)
else:
sqrt_disc = 1j * np.sqrt(np.abs(discriminant))
ar_roots[2 * i] = -0.5 * (quad2 + sqrt_disc)
ar_roots[2 * i + 1] = -0.5 * (quad2 - sqrt_disc)
psd_width[2 * i] = -np.real(ar_roots[2 * i]) / (2.0 * np.pi)
psd_cent[2 * i] = np.abs(np.imag(ar_roots[2 * i])) / (2.0 * np.pi)
psd_width[2 * i + 1] = -np.real(ar_roots[2 * i + 1]) / (2.0 * np.pi)
psd_cent[2 * i + 1] = np.abs(np.imag(ar_roots[2 * i + 1])) / (2.0 * np.pi)
if self.p % 2 == 1:
# p is odd, so add in root from linear term
ar_roots[-1] = -quad_coefs[-1]
psd_cent[-1] = 0.0
psd_width[-1] = quad_coefs[-1] / (2.0 * np.pi)
self.mle['ar_roots'] = ar_roots
self.mle['psd_width'] = psd_width
self.mle['psd_cent'] = psd_cent
self.mle['ar_coefs'] = np.poly(ar_roots).real
# now calculate the moving average coefficients
if self.q == 0:
self.mle['ma_coefs'] = 1.0
else:
quad_coefs = np.exp(MLE.x[3 + self.p:])
ma_roots = np.empty(quad_coefs.size, dtype=complex)
for i in xrange(self.q / 2):
quad1 = quad_coefs[2 * i]
quad2 = quad_coefs[2 * i + 1]
discriminant = quad2 ** 2 - 4.0 * quad1
if discriminant > 0:
sqrt_disc = np.sqrt(discriminant)
else:
sqrt_disc = 1j * np.sqrt(np.abs(discriminant))
ma_roots[2 * i] = -0.5 * (quad2 + sqrt_disc)
ma_roots[2 * i + 1] = -0.5 * (quad2 - sqrt_disc)
if self.q % 2 == 1:
# q is odd, so add in root from linear term
ma_roots[-1] = -quad_coefs[-1]
ma_coefs = np.poly(ma_roots)
# normalize so constant in polynomial is unity, and reverse order to be consistent with MA
# representation
self.mle['ma_coefs'] = np.real(ma_coefs / ma_coefs[self.q])[::-1]
# finally, calculate sigma, the standard deviation in the driving white noise
unit_var = carma_variance(1.0, self.mle['ar_roots'], np.atleast_1d(self.mle['ma_coefs']))
self.mle['sigma'] = np.sqrt(self.mle['var'] / unit_var.real)
def set_logpost(self, logpost):
"""
Add the input log-posterior MCMC values to the CarmaSample parameter dictionary.
@param logpost: The values of the log-posterior obtained from the MCMC sampler.
"""
self._samples['logpost'] = logpost # log-posterior of the CAR(p) model
def generate_from_trace(self, trace):
"""
Generate the dictionary of MCMC samples for the CARMA process parameters from the input array.
@param trace: An array containing the MCMC samples.
"""
# Figure out how many AR terms we have
self.p = trace.shape[1] - 3 - self.q
names = ['var', 'measerr_scale', 'mu', 'quad_coefs']
if names != self._samples.keys():
idx = 0
# Parameters are not already in the dictionary, add them.
self._samples['var'] = (trace[:, 0] ** 2) # Variance of the CAR(p) process
self._samples['measerr_scale'] = trace[:, 1] # Measurement errors are scaled by this much.
self._samples['mu'] = trace[:, 2] # model mean of time series
# AR(p) polynomial is factored as a product of quadratic terms:
# alpha(s) = (quad_coefs[0] + quad_coefs[1] * s + s ** 2) * ...
self._samples['quad_coefs'] = np.exp(trace[:, 3:self.p + 3])
def generate_from_file(self, filename):
"""
Build the dictionary of parameter samples from an ascii file of MCMC samples from carpack.
:param filename: The name of the file containing the MCMC samples generated by carpack.
"""
        # TODO: put in exceptions to make sure files are read correctly
# Grab the MCMC output
trace = np.genfromtxt(filename[0], skip_header=1)
self.generate_from_trace(trace[:, 0:-1])
self.set_logpost(trace[:, -1])
def _ar_roots(self):
"""
Calculate the roots of the CARMA(p,q) characteristic polynomial and add them to the MCMC samples.
"""
var = self._samples['var']
quad_coefs = self._samples['quad_coefs']
self._samples['ar_roots'] = np.empty((var.size, self.p), dtype=complex)
self._samples['psd_centroid'] = np.empty((var.size, self.p))
self._samples['psd_width'] = np.empty((var.size, self.p))
for i in xrange(self.p / 2):
quad1 = quad_coefs[:, 2 * i]
quad2 = quad_coefs[:, 2 * i + 1]
discriminant = quad2 ** 2 - 4.0 * quad1
sqrt_disc = np.where(discriminant > 0, np.sqrt(discriminant), 1j * np.sqrt(np.abs(discriminant)))
self._samples['ar_roots'][:, 2 * i] = -0.5 * (quad2 + sqrt_disc)
self._samples['ar_roots'][:, 2 * i + 1] = -0.5 * (quad2 - sqrt_disc)
self._samples['psd_width'][:, 2 * i] = -np.real(self._samples['ar_roots'][:, 2 * i]) / (2.0 * np.pi)
self._samples['psd_centroid'][:, 2 * i] = np.abs(np.imag(self._samples['ar_roots'][:, 2 * i])) / \
(2.0 * np.pi)
self._samples['psd_width'][:, 2 * i + 1] = -np.real(self._samples['ar_roots'][:, 2 * i + 1]) / (2.0 * np.pi)
self._samples['psd_centroid'][:, 2 * i + 1] = np.abs(np.imag(self._samples['ar_roots'][:, 2 * i + 1])) / \
(2.0 * np.pi)
if self.p % 2 == 1:
# p is odd, so add in root from linear term
self._samples['ar_roots'][:, -1] = -quad_coefs[:, -1]
self._samples['psd_centroid'][:, -1] = 0.0
self._samples['psd_width'][:, -1] = quad_coefs[:, -1] / (2.0 * np.pi)
def _ma_coefs(self, trace):
"""
Calculate the CARMA(p,q) moving average coefficients and add them to the MCMC samples.
"""
nsamples = trace.shape[0]
if self.q == 0:
self._samples['ma_coefs'] = np.ones((nsamples, 1))
else:
quad_coefs = np.exp(trace[:, 3 + self.p:])
roots = np.empty(quad_coefs.shape, dtype=complex)
for i in xrange(self.q / 2):
quad1 = quad_coefs[:, 2 * i]
quad2 = quad_coefs[:, 2 * i + 1]
discriminant = quad2 ** 2 - 4.0 * quad1
sqrt_disc = np.where(discriminant > 0, np.sqrt(discriminant), 1j * np.sqrt(np.abs(discriminant)))
roots[:, 2 * i] = -0.5 * (quad2 + sqrt_disc)
roots[:, 2 * i + 1] = -0.5 * (quad2 - sqrt_disc)
if self.q % 2 == 1:
# q is odd, so add in root from linear term
roots[:, -1] = -quad_coefs[:, -1]
coefs = np.empty((nsamples, self.q + 1), dtype=complex)
for i in xrange(nsamples):
coefs_i = np.poly(roots[i, :])
# normalize so constant in polynomial is unity, and reverse order to be consistent with MA
# representation
coefs[i, :] = (coefs_i / coefs_i[self.q])[::-1]
self._samples['ma_coefs'] = coefs.real
def _ar_coefs(self):
"""
Calculate the CARMA(p,q) autoregressive coefficients and add them to the MCMC samples.
"""
roots = self._samples['ar_roots']
coefs = np.empty((roots.shape[0], self.p + 1), dtype=complex)
for i in xrange(roots.shape[0]):
coefs[i, :] = np.poly(roots[i, :])
self._samples['ar_coefs'] = coefs.real
def _sigma_noise(self):
"""
Calculate the MCMC samples of the standard deviation of the white noise driving process and add them to the
MCMC samples.
"""
# get the CARMA(p,q) model variance of the time series
var = self._samples['var']
# get the roots of the AR(p) characteristic polynomial
ar_roots = self._samples['ar_roots']
# get the moving average coefficients
ma_coefs = self._samples['ma_coefs']
# calculate the variance of a CAR(p) process, assuming sigma = 1.0
sigma1_variance = np.zeros_like(var) + 0j
for k in xrange(self.p):
denom = -2.0 * ar_roots[:, k].real + 0j
for l in xrange(self.p):
if l != k:
denom *= (ar_roots[:, l] - ar_roots[:, k]) * (np.conjugate(ar_roots[:, l]) + ar_roots[:, k])
ma_sum1 = np.zeros_like(ar_roots[:, 0])
ma_sum2 = ma_sum1.copy()
for l in xrange(ma_coefs.shape[1]):
ma_sum1 += ma_coefs[:, l] * ar_roots[:, k] ** l
ma_sum2 += ma_coefs[:, l] * (-1.0 * ar_roots[:, k]) ** l
numer = ma_sum1 * ma_sum2
sigma1_variance += numer / denom
sigsqr = var / sigma1_variance.real
# add the white noise sigmas to the MCMC samples
self._samples['sigma'] = np.sqrt(sigsqr)
def plot_power_spectrum(self, percentile=68.0, nsamples=None, plot_log=True, color="b", alpha=0.5, sp=None,
doShow=True):
"""
Plot the posterior median and the credibility interval corresponding to percentile of the CARMA(p,q) PSD. This
function returns a tuple containing the lower and upper PSD credibility intervals as a function of frequency,
the median PSD as a function of frequency, and the frequencies.
:rtype : A tuple of numpy arrays, (lower PSD, upper PSD, median PSD, frequencies). If no subplot axes object
is supplied (i.e., if sp = None), then the subplot axes object used will also be returned as the last
element of the tuple.
:param percentile: The percentile of the PSD credibility interval to plot.
:param nsamples: The number of MCMC samples to use to estimate the credibility interval. The default is all
of them. Use less samples for increased speed.
:param plot_log: A boolean. If true, then a logarithmic plot is made.
:param color: The color of the shaded credibility region.
:param alpha: The transparency level.
:param sp: A matplotlib subplot axes object to use.
:param doShow: If true, call plt.show()
"""
sigmas = self._samples['sigma']
ar_coefs = self._samples['ar_coefs']
ma_coefs = self._samples['ma_coefs']
if nsamples is None:
# Use all of the MCMC samples
nsamples = sigmas.shape[0]
else:
            if nsamples > sigmas.shape[0]:
                raise ValueError("nsamples must be less than the total number of MCMC samples.")
nsamples0 = sigmas.shape[0]
index = np.arange(nsamples) * (nsamples0 / nsamples)
sigmas = sigmas[index]
ar_coefs = ar_coefs[index]
ma_coefs = ma_coefs[index]
nfreq = 1000
dt_min = self.time[1:] - self.time[0:self.time.size - 1]
dt_min = dt_min.min()
dt_max = self.time.max() - self.time.min()
# Only plot frequencies corresponding to time scales a factor of 2 shorter and longer than the minimum and
# maximum time scales probed by the time series.
freq_max = 0.5 / dt_min
freq_min = 1.0 / dt_max
frequencies = np.linspace(np.log(freq_min), np.log(freq_max), num=nfreq)
frequencies = np.exp(frequencies)
psd_credint = np.empty((nfreq, 3))
lower = (100.0 - percentile) / 2.0 # lower and upper intervals for credible region
upper = 100.0 - lower
# Compute the PSDs from the MCMC samples
omega = 2.0 * np.pi * 1j * frequencies
ar_poly = np.zeros((nfreq, nsamples), dtype=complex)
ma_poly = np.zeros_like(ar_poly)
for k in xrange(self.p):
# Here we compute:
# alpha(omega) = ar_coefs[0] * omega^p + ar_coefs[1] * omega^(p-1) + ... + ar_coefs[p]
# Note that ar_coefs[0] = 1.0.
argrid, omgrid = np.meshgrid(ar_coefs[:, k], omega)
ar_poly += argrid * (omgrid ** (self.p - k))
ar_poly += ar_coefs[:, self.p]
for k in xrange(ma_coefs.shape[1]):
# Here we compute:
# delta(omega) = ma_coefs[0] + ma_coefs[1] * omega + ... + ma_coefs[q] * omega^q
magrid, omgrid = np.meshgrid(ma_coefs[:, k], omega)
ma_poly += magrid * (omgrid ** k)
psd_samples = np.squeeze(sigmas) ** 2 * np.abs(ma_poly) ** 2 / np.abs(ar_poly) ** 2
# Now compute credibility interval for power spectrum
psd_credint[:, 0] = np.percentile(psd_samples, lower, axis=1)
psd_credint[:, 2] = np.percentile(psd_samples, upper, axis=1)
psd_credint[:, 1] = np.median(psd_samples, axis=1)
        # Plot the power spectra
        make_new_fig = sp is None
        if make_new_fig:
            fig = plt.figure()
            sp = fig.add_subplot(111)
        if plot_log:
            # plot the posterior median first
            sp.loglog(frequencies, psd_credint[:, 1], color=color)
        else:
            sp.plot(frequencies, psd_credint[:, 1], color=color)
        sp.fill_between(frequencies, psd_credint[:, 2], psd_credint[:, 0], facecolor=color, alpha=alpha)
        sp.set_xlim(frequencies.min(), frequencies.max())
        sp.set_xlabel('Frequency')
        sp.set_ylabel('Power Spectrum')
        if doShow:
            plt.show()
        if make_new_fig:
            # the figure was created here, so also return it, as documented above
            return (psd_credint[:, 0], psd_credint[:, 2], psd_credint[:, 1], frequencies, fig)
        else:
            return (psd_credint[:, 0], psd_credint[:, 2], psd_credint[:, 1], frequencies)
def makeKalmanFilter(self, bestfit):
if bestfit == 'map':
# use maximum a posteriori estimate
max_index = self._samples['logpost'].argmax()
sigsqr = (self._samples['sigma'][max_index] ** 2)[0]
mu = self._samples['mu'][max_index][0]
ar_roots = self._samples['ar_roots'][max_index]
ma_coefs = self._samples['ma_coefs'][max_index]
elif bestfit == 'median':
# use posterior median estimate
sigsqr = np.median(self._samples['sigma']) ** 2
mu = np.median(self._samples['mu'])
ar_roots = np.median(self._samples['ar_roots'], axis=0)
ma_coefs = np.median(self._samples['ma_coefs'], axis=0)
elif bestfit == 'mean':
# use posterior mean as the best-fit
sigsqr = np.mean(self._samples['sigma'] ** 2)
mu = np.mean(self._samples['mu'])
ar_roots = np.mean(self._samples['ar_roots'], axis=0)
ma_coefs = np.mean(self._samples['ma_coefs'], axis=0)
else:
# use a random draw from the posterior
random_index = np.random.random_integers(0, self._samples.values()[0].shape[0] - 1)
sigsqr = (self._samples['sigma'][random_index] ** 2)[0]
mu = self._samples['mu'][random_index][0]
ar_roots = self._samples['ar_roots'][random_index]
ma_coefs = self._samples['ma_coefs'][random_index]
# expose C++ Kalman filter class to python
kfilter = carmcmcLib.KalmanFilterp(arrayToVec(self.time),
arrayToVec(self.y - mu),
arrayToVec(self.ysig),
sigsqr,
arrayToVec(ar_roots, carmcmcLib.vecC),
arrayToVec(ma_coefs))
return kfilter, mu
def assess_fit(self, bestfit="map", nplot=256, doShow=True):
"""
Display plots and provide useful information for assessing the quality of the CARMA(p,q) model fit.
:param bestfit: A string specifying how to define 'best-fit'. Can be the maximum a posteriori value (MAP),
the posterior mean ("mean"), or the posterior median ("median").
:param nplot: The number of interpolated time series values to plot.
:param doShow: If true, call pyplot.show(). Else if false, return the matplotlib figure object.
"""
bestfit = bestfit.lower()
        if bestfit not in ['map', 'median', 'mean']:
            raise ValueError("bestfit must be one of 'map', 'median', or 'mean'")
fig = plt.figure()
# compute the marginal mean and variance of the predicted values
time_predict = np.linspace(1.001 * self.time.min(), self.time.max(), nplot)
predicted_mean, predicted_var = self.predict(time_predict, bestfit=bestfit)
predicted_low = predicted_mean - np.sqrt(predicted_var)
predicted_high = predicted_mean + np.sqrt(predicted_var)
# plot the time series and the marginal 1-sigma error bands
plt.subplot(221)
plt.fill_between(time_predict, predicted_low, predicted_high, color='cyan')
plt.plot(time_predict, predicted_mean, '-b', label='Interpolation')
plt.plot(self.time, self.y, 'k.', label='Data')
plt.xlabel('Time')
plt.xlim(self.time.min(), self.time.max())
#plt.legend()
# plot the standardized residuals and compare with the standard normal
kfilter, mu = self.makeKalmanFilter(bestfit)
kfilter.Filter()
kmean = np.asarray(kfilter.GetMean())
kvar = np.asarray(kfilter.GetVar())
standardized_residuals = (self.y - mu - kmean) / np.sqrt(kvar)
plt.subplot(222)
plt.xlabel('Time')
plt.ylabel('Standardized Residuals')
plt.xlim(self.time.min(), self.time.max())
# Now add the histogram of values to the standardized residuals plot
pdf, bin_edges = np.histogram(standardized_residuals, bins=10)
bin_edges = bin_edges[0:pdf.size]
# Stretch the PDF so that it is readable on the residual plot when plotted horizontally
pdf = pdf / float(pdf.max()) * 0.4 * self.time.max()
# Add the histogram to the plot
plt.barh(bin_edges, pdf, height=bin_edges[1] - bin_edges[0])
# now overplot the expected standard normal distribution
expected_pdf = np.exp(-0.5 * bin_edges ** 2)
expected_pdf = expected_pdf / expected_pdf.max() * 0.4 * self.time.max()
plt.plot(expected_pdf, bin_edges, 'DarkOrange', lw=2)
plt.plot(self.time, standardized_residuals, '.k')
# plot the autocorrelation function of the residuals and compare with the 95% confidence intervals for white
# noise
plt.subplot(223)
maxlag = 50
wnoise_upper = 1.96 / np.sqrt(self.time.size)
wnoise_lower = -1.96 / np.sqrt(self.time.size)
plt.fill_between([0, maxlag], wnoise_upper, wnoise_lower, facecolor='grey')
lags, acf, not_needed1, not_needed2 = plt.acorr(standardized_residuals, maxlags=maxlag, lw=2)
plt.xlim(0, maxlag)
plt.xlabel('Time Lag')
plt.ylabel('ACF of Residuals')
# plot the autocorrelation function of the squared residuals and compare with the 95% confidence intervals for
# white noise
plt.subplot(224)
squared_residuals = standardized_residuals ** 2
wnoise_upper = 1.96 / np.sqrt(self.time.size)
wnoise_lower = -1.96 / np.sqrt(self.time.size)
plt.fill_between([0, maxlag], wnoise_upper, wnoise_lower, facecolor='grey')
lags, acf, not_needed1, not_needed2 = plt.acorr(squared_residuals - squared_residuals.mean(), maxlags=maxlag,
lw=2)
plt.xlim(0, maxlag)
plt.xlabel('Time Lag')
plt.ylabel('ACF of Sqrd. Resid.')
plt.tight_layout()
if doShow:
plt.show()
else:
return fig
def predict(self, time, bestfit='map'):
"""
Return the predicted value of the time series and its standard deviation at the input time(s) given the best-fit
value of the CARMA(p,q) model and the measured time series.
:param time: A scalar or numpy array containing the time values to predict the time series at.
:param bestfit: A string specifying how to define 'best-fit'. Can be the Maximum Posterior (MAP), the posterior
mean ("mean"), the posterior median ("median"), or a random sample from the MCMC sampler ("random").
:rtype : A tuple of numpy arrays containing the expected value and variance of the time series at the input
time values.
"""
bestfit = bestfit.lower()
        if bestfit not in ['map', 'median', 'mean', 'random']:
            raise ValueError("bestfit must be one of 'map', 'median', 'mean', or 'random'")
# note that KalmanFilter class assumes the time series has zero mean
kfilter, mu = self.makeKalmanFilter(bestfit)
kfilter.Filter()
if np.isscalar(time):
pred = kfilter.Predict(time)
yhat = pred.first
yhat_var = pred.second
else:
yhat = np.empty(time.size)
yhat_var = np.empty(time.size)
for i in xrange(time.size):
pred = kfilter.Predict(time[i])
yhat[i] = pred.first
yhat_var[i] = pred.second
yhat += mu # add mean back into time series
return yhat, yhat_var
def simulate(self, time, bestfit='map'):
"""
Simulate a time series at the input time(s) given the best-fit value of the CARMA(p,q) model and the measured
time series.
:param time: A scalar or numpy array containing the time values to simulate the time series at.
:param bestfit: A string specifying how to define 'best-fit'. Can be the Maximum Posterior (MAP), the posterior
mean ("mean"), the posterior median ("median"), or a random sample from the MCMC sampler ("random").
:rtype : The time series values simulated at the input values of time.
"""
bestfit = bestfit.lower()
        if bestfit not in ['map', 'median', 'mean', 'random']:
            raise ValueError("bestfit must be one of 'map', 'median', 'mean', or 'random'")
# note that KalmanFilter class assumes the time series has zero mean
kfilter, mu = self.makeKalmanFilter(bestfit)
kfilter.Filter()
vtime = carmcmcLib.vecD()
if np.isscalar(time):
vtime.append(time)
else:
vtime.extend(time)
ysim = np.asarray(kfilter.Simulate(vtime))
ysim += mu # add mean back into time series
return ysim
def DIC(self):
"""
Calculate the Deviance Information Criterion for the model.
The deviance is -2 * log-likelihood, and the DIC is:
DIC = mean(deviance) + 0.5 * variance(deviance)
"""
deviance = -2.0 * self._samples['loglik']
mean_deviance = np.mean(deviance, axis=0)
effect_npar = 0.5 * np.var(deviance, axis=0)
dic = mean_deviance + effect_npar
return dic
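# Illustrative sketch (not part of the original module): typical post-MCMC analysis with a
# CarmaSample returned by CarmaModel.run_mcmc(); names follow the sketch given earlier.
#
#   psd_lo, psd_hi, psd_med, freqs, fig = sample.plot_power_spectrum(percentile=95.0,
#                                                                    nsamples=5000, doShow=False)
#   t_pred = np.linspace(t.min(), t.max(), 500)
#   yhat, yhat_var = sample.predict(t_pred, bestfit='map')
#   print 'DIC =', sample.DIC()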
def arrayToVec(array, arrType=carmcmcLib.vecD):
"""
Convert the input numpy array to a python wrapper of a C++ std::vector<double> object.
"""
vec = arrType()
vec.extend(array)
return vec
class Car1Sample(CarmaSample):
def __init__(self, time, y, ysig, sampler, filename=None):
"""
Constructor for a CAR(1) sample. This is a special case of the CarmaSample class for p = 1. As with the
CarmaSample class, this class should never be constructed directly. Instead, one should obtain a Car1Sample
class by calling CarmaModel.run_mcmc().
@param time: The array of time values for the time series.
@param y: The array of measured time series values.
@param ysig: The standard deviation in the measurement noise for the time series.
@param sampler: A wrapper for an instantiated C++ Car1 object.
@param filename: The name of an ascii file containing the MCMC samples.
"""
self.time = time # The time values of the time series
self.y = y # The measured values of the time series
self.ysig = ysig # The standard deviation of the measurement errors of the time series
self.p = 1 # How many AR terms
self.q = 0 # How many MA terms
logpost = np.array(sampler.GetLogLikes())
trace = np.array(sampler.getSamples())
super(CarmaSample, self).__init__(filename=filename, logpost=logpost, trace=trace)
print "Calculating sigma..."
self._sigma_noise()
# add the log-likelihoods
print "Calculating log-likelihoods..."
loglik = np.empty(logpost.size)
for i in xrange(logpost.size):
std_theta = carmcmcLib.vecD()
std_theta.extend(trace[i, :])
loglik[i] = logpost[i] - sampler.getLogPrior(std_theta)
self._samples['loglik'] = loglik
        # make the parameter names (i.e., the keys) public so the user knows how to get them
self.parameters = self._samples.keys()
self.newaxis()
def generate_from_trace(self, trace):
names = ['sigma', 'measerr_scale', 'mu', 'log_omega']
if names != self._samples.keys():
self._samples['var'] = trace[:, 0] ** 2
self._samples['measerr_scale'] = trace[:, 1]
self._samples['mu'] = trace[:, 2]
self._samples['log_omega'] = trace[:, 3]
def _ar_roots(self):
print "_ar_roots not supported for CAR1"
return
def _ar_coefs(self):
print "_ar_coefs not supported for CAR1"
return
def _sigma_noise(self):
self._samples['sigma'] = np.sqrt(2.0 * self._samples['var'] * np.exp(self._samples['log_omega']))
def makeKalmanFilter(self, bestfit):
if bestfit == 'map':
# use maximum a posteriori estimate
max_index = self._samples['logpost'].argmax()
sigsqr = (self._samples['sigma'][max_index] ** 2)[0]
mu = self._samples['mu'][max_index][0]
log_omega = self._samples['log_omega'][max_index][0]
elif bestfit == 'median':
# use posterior median estimate
sigsqr = np.median(self._samples['sigma']) ** 2
mu = np.median(self._samples['mu'])
log_omega = np.median(self._samples['log_omega'])
else:
# use posterior mean as the best-fit
sigsqr = np.mean(self._samples['sigma'] ** 2)
mu = np.mean(self._samples['mu'])
log_omega = np.mean(self._samples['log_omega'])
kfilter = carmcmcLib.KalmanFilter1(arrayToVec(self.time),
arrayToVec(self.y - mu),
arrayToVec(self.ysig),
sigsqr,
np.exp(log_omega))
return kfilter, mu
def plot_power_spectrum(self, percentile=68.0, nsamples=None, plot_log=True, color="b", alpha=0.5, sp=None,
doShow=True):
"""
Plot the posterior median and the credibility interval corresponding to percentile of the CAR(1) PSD. This
function returns a tuple containing the lower and upper PSD credibility intervals as a function of
frequency, the median PSD as a function of frequency, and the frequencies.
:rtype : A tuple of numpy arrays, (lower PSD, upper PSD, median PSD, frequencies). If no subplot axes object
is supplied (i.e., if sp = None), then the subplot axes object used will also be returned as the last
element of the tuple.
:param percentile: The percentile of the PSD credibility interval to plot.
:param nsamples: The number of MCMC samples to use to estimate the credibility interval. The default is all
of them. Use less samples for increased speed.
:param plot_log: A boolean. If true, then a logarithmic plot is made.
:param color: The color of the shaded credibility region.
:param alpha: The transparency level.
:param sp: A matplotlib subplot axes object to use.
:param doShow: If true, call plt.show()
"""
sigmas = self._samples['sigma']
log_omegas = self._samples['log_omega']
if nsamples is None:
# Use all of the MCMC samples
nsamples = sigmas.shape[0]
else:
            if nsamples > sigmas.shape[0]:
                raise ValueError("nsamples must be less than the total number of MCMC samples.")
nsamples0 = sigmas.shape[0]
index = np.arange(nsamples) * (nsamples0 / nsamples)
sigmas = sigmas[index]
log_omegas = log_omegas[index]
nfreq = 1000
dt_min = self.time[1:] - self.time[0:self.time.size - 1]
dt_min = dt_min.min()
dt_max = self.time.max() - self.time.min()
# Only plot frequencies corresponding to time scales a factor of 2 shorter and longer than the minimum and
# maximum time scales probed by the time series.
freq_max = 0.5 / dt_min
freq_min = 1.0 / dt_max
frequencies = np.linspace(np.log(freq_min), np.log(freq_max), num=nfreq)
frequencies = np.exp(frequencies)
psd_credint = np.empty((nfreq, 3))
lower = (100.0 - percentile) / 2.0 # lower and upper intervals for credible region
upper = 100.0 - lower
numer = sigmas ** 2
omegasq = np.exp(log_omegas) ** 2
for i in xrange(nfreq):
denom = omegasq + (2. * np.pi * frequencies[i]) ** 2
psd_samples = numer / denom
# Now compute credibility interval for power spectrum
psd_credint[i, 0] = np.percentile(psd_samples, lower, axis=0)
psd_credint[i, 2] = np.percentile(psd_samples, upper, axis=0)
psd_credint[i, 1] = np.median(psd_samples, axis=0)
        # Plot the power spectra
        make_new_fig = sp is None
        if make_new_fig:
            fig = plt.figure()
            sp = fig.add_subplot(111)
        if plot_log:
            # plot the posterior median first
            sp.loglog(frequencies, psd_credint[:, 1], color=color)
        else:
            sp.plot(frequencies, psd_credint[:, 1], color=color)
        sp.fill_between(frequencies, psd_credint[:, 2], psd_credint[:, 0], facecolor=color, alpha=alpha)
        sp.set_xlim(frequencies.min(), frequencies.max())
        sp.set_xlabel('Frequency')
        sp.set_ylabel('Power Spectrum')
        if doShow:
            plt.show()
        if make_new_fig:
            # the figure was created here, so also return it, as documented above
            return (psd_credint[:, 0], psd_credint[:, 2], psd_credint[:, 1], frequencies, fig)
        else:
            return (psd_credint[:, 0], psd_credint[:, 2], psd_credint[:, 1], frequencies)
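# Illustrative note (not part of the original module): for a CAR(1) process the PSD plotted above
# is a Lorentzian centered at zero frequency, PSD(f) = sigma^2 / (omega0^2 + (2*pi*f)^2), which is
# exactly what Car1Sample.plot_power_spectrum evaluates sample-by-sample.
#
#   import numpy as np
#   def car1_psd(freq, sigma, omega0):
#       return sigma ** 2 / (omega0 ** 2 + (2.0 * np.pi * freq) ** 2)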
def get_ar_roots(qpo_width, qpo_centroid):
"""
Return the roots of the characteristic AR(p) polynomial of the CARMA(p,q) process, given the lorentzian widths and
centroids.
:rtype : The roots of the autoregressive polynomial, a numpy array.
:param qpo_width: The widths of the lorentzian functions defining the PSD.
:param qpo_centroid: The centroids of the lorentzian functions defining the PSD. For all values of qpo_centroid
that are greater than zero, the complex conjugate of the root will also be added.
"""
ar_roots = []
for i in xrange(len(qpo_centroid)):
ar_roots.append(qpo_width[i] + 1j * qpo_centroid[i])
if qpo_centroid[i] > 1e-10:
# lorentzian is centered at a frequency > 0, so add complex conjugate of this root
ar_roots.append(np.conjugate(ar_roots[-1]))
if len(qpo_width) - len(qpo_centroid) == 1:
# odd number of lorentzian functions, so add in low-frequency component
ar_roots.append(qpo_width[-1] + 1j * 0.0)
ar_roots = np.array(ar_roots)
return -2.0 * np.pi * ar_roots
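# Illustrative sketch (not part of the original module): build the AR roots for a CARMA(3,q) model
# whose PSD has one quasi-periodic Lorentzian (width 0.2, centroid 0.5) plus a low-frequency
# component of width 0.05; widths and centroids are in ordinary frequency units.
#
#   roots = get_ar_roots(qpo_width=[0.2, 0.05], qpo_centroid=[0.5])
#   # roots.size == 3 and every root has a negative real part, so the process is stationary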
def power_spectrum(freq, sigma, ar_coef, ma_coefs=[1.0]):
"""
Return the power spectrum for a CARMA(p,q) process calculated at the input frequencies.
:param freq: The frequencies at which to calculate the PSD.
:param sigma: The standard deviation driving white noise.
:param ar_coef: The CARMA model autoregressive coefficients.
:param ma_coefs: Coefficients of the moving average polynomial
:rtype : The power spectrum at the input frequencies, a numpy array.
"""
    if len(ma_coefs) > len(ar_coef):
        raise ValueError("Size of ma_coefs must be less than or equal to size of ar_coef.")
ma_poly = np.polyval(ma_coefs[::-1], 2.0 * np.pi * 1j * freq) # Evaluate the polynomial in the PSD numerator
ar_poly = np.polyval(ar_coef, 2.0 * np.pi * 1j * freq) # Evaluate the polynomial in the PSD denominator
pspec = sigma ** 2 * np.abs(ma_poly) ** 2 / np.abs(ar_poly) ** 2
return pspec
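# Illustrative sketch (not part of the original module): evaluate the PSD of a CARMA(2,1) model.
# ar_coef must be the full AR characteristic polynomial coefficients (highest order first, with a
# leading 1.0), and ma_coefs start with the constant term, matching the conventions used above.
#
#   import numpy as np
#   freq = np.logspace(-3, 1, 200)
#   ar_coef = np.poly(get_ar_roots([0.2], [0.5]))
#   psd = power_spectrum(freq, sigma=1.0, ar_coef=ar_coef, ma_coefs=[1.0, 0.5])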
def carma_variance(sigsqr, ar_roots, ma_coefs=[1.0], lag=0.0):
"""
Return the autocovariance function of a CARMA(p,q) process.
:param sigsqr: The variance in the driving white noise.
:param ar_roots: The roots of the AR characteristic polynomial.
:param ma_coefs: The moving average coefficients.
:param lag: The lag at which to calculate the autocovariance function.
"""
    if len(ma_coefs) > len(ar_roots):
        raise ValueError("Size of ma_coefs must be less than or equal to size of ar_roots.")
if len(ma_coefs) < len(ar_roots):
# add extra zeros to end of ma_coefs
nmore = len(ar_roots) - len(ma_coefs)
ma_coefs = np.append(ma_coefs, np.zeros(nmore))
sigma1_variance = 0.0 + 0j
p = ar_roots.size
for k in xrange(p):
denom_product = 1.0 + 0j
for l in xrange(p):
if l != k:
denom_product *= (ar_roots[l] - ar_roots[k]) * (np.conjugate(ar_roots[l]) + ar_roots[k])
denom = -2.0 * denom_product * ar_roots[k].real
ma_sum1 = 0.0 + 0j
ma_sum2 = 0.0 + 0j
for l in xrange(p):
ma_sum1 += ma_coefs[l] * ar_roots[k] ** l
ma_sum2 += ma_coefs[l] * (-1.0 * ar_roots[k]) ** l
numer = ma_sum1 * ma_sum2 * np.exp(ar_roots[k] * abs(lag))
sigma1_variance += numer / denom
return sigsqr * sigma1_variance.real
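# Illustrative sketch (not part of the original module): the lag-zero value of carma_variance is
# the process variance, and normalizing by it gives the autocorrelation at a chosen lag.
#
#   import numpy as np
#   roots = get_ar_roots([0.2, 0.05], [0.5])
#   var0 = carma_variance(1.0, roots, ma_coefs=[1.0, 2.0], lag=0.0)
#   acf_at_lag5 = carma_variance(1.0, roots, ma_coefs=[1.0, 2.0], lag=5.0) / var0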
def car1_process(time, sigsqr, tau):
"""
Generate a CAR(1) process.
:param time: The time values at which to generate the CAR(1) process at.
:param sigsqr: The variance in the driving white noise term.
:param tau: The e-folding (mean-reversion) time scale of the CAR(1) process. Note that tau = -1.0 / ar_root.
:rtype : A numpy array containing the simulated CAR(1) process values at time.
"""
marginal_var = sigsqr * tau / 2.0
y = np.zeros(len(time))
y[0] = np.sqrt(marginal_var) * np.random.standard_normal()
for i in range(1, len(time)):
dt = time[i] - time[i-1]
rho = np.exp(-dt / tau)
conditional_var = marginal_var * (1.0 - rho ** 2)
y[i] = rho * y[i-1] + np.sqrt(conditional_var) * np.random.standard_normal()
return y
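# Illustrative sketch (not part of the original module): simulate an irregularly sampled CAR(1)
# process and check that its sample variance is close to the marginal value sigsqr * tau / 2.
#
#   import numpy as np
#   t = np.sort(np.random.uniform(0.0, 1000.0, 2000))
#   y = car1_process(t, sigsqr=2.0, tau=25.0)
#   # np.var(y) should be roughly 2.0 * 25.0 / 2.0 = 25.0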
def carma_process(time, sigsqr, ar_roots, ma_coefs=[1.0]):
"""
Generate a CARMA(p,q) process.
:param time: The time values at which to generate the CARMA(p,q) process at.
:param sigsqr: The variance in the driving white noise term.
:param ar_roots: The roots of the autoregressive characteristic polynomial.
:param ma_coefs: The moving average coefficients.
:rtype : A numpy array containing the simulated CARMA(p,q) process values at time.
"""
    if len(ma_coefs) > len(ar_roots):
        raise ValueError("Size of ma_coefs must be less than or equal to size of ar_roots.")
p = len(ar_roots)
if p == 1:
# generate a CAR(1) process
return car1_process(time, sigsqr, -1.0 / np.asscalar(ar_roots))
if len(ma_coefs) < p:
# add extra zeros to end of ma_coefs
q = len(ma_coefs)
ma_coefs = np.resize(np.array(ma_coefs), len(ar_roots))
ma_coefs[q:] = 0.0
time.sort()
# make sure process is stationary
    if np.any(np.real(ar_roots) >= 0):
        raise ValueError("Process is not stationary, real part of roots must be negative.")
# make sure the roots are unique
tol = 1e-8
roots_grid = np.meshgrid(ar_roots, ar_roots)
roots_grid1 = roots_grid[0].ravel()
roots_grid2 = roots_grid[1].ravel()
diff_roots = np.abs(roots_grid1 - roots_grid2) / np.abs(roots_grid1 + roots_grid2)
    # only the p self-paired (diagonal) entries of diff_roots should fall below the tolerance
    if np.sum(diff_roots < tol) > p:
        raise ValueError("Roots are not unique.")
# Setup the matrix of Eigenvectors for the Kalman Filter transition matrix. This allows us to transform
# quantities into the rotated state basis, which makes the computations for the Kalman filter easier and faster.
EigenMat = np.ones((p, p), dtype=complex)
EigenMat[1, :] = ar_roots
for k in xrange(2, p):
EigenMat[k, :] = ar_roots ** k
# Input vector under the original state space representation
Rvector = np.zeros(p, dtype=complex)
Rvector[-1] = 1.0
# Input vector under rotated state space representation
Jvector = solve(EigenMat, Rvector) # J = inv(E) * R
# Compute the vector of moving average coefficients in the rotated state.
rotated_MA_coefs = ma_coefs.dot(EigenMat)
# Calculate the stationary covariance matrix of the state vector
StateVar = np.empty((p, p), dtype=complex)
for j in xrange(p):
StateVar[:, j] = -sigsqr * Jvector * np.conjugate(Jvector[j]) / (ar_roots + np.conjugate(ar_roots[j]))
# Initialize variance in one-step prediction error and the state vector
PredictionVar = StateVar.copy()
StateVector = np.zeros(p, dtype=complex)
# Convert the current state to matrices for convenience, since we'll be doing some Linear algebra.
StateVector = np.matrix(StateVector).T
StateVar = np.matrix(StateVar)
PredictionVar = np.matrix(PredictionVar)
rotated_MA_coefs = np.matrix(rotated_MA_coefs) # this is a row vector, so no transpose
StateTransition = np.zeros_like(StateVector)
KalmanGain = np.zeros_like(StateVector)
# Initialize the Kalman mean and variance. These are the forecasted values and their variances.
kalman_mean = 0.0
kalman_var = np.real(np.asscalar(rotated_MA_coefs * PredictionVar * rotated_MA_coefs.H))
# simulate the first time series value
y = np.empty_like(time)
y[0] = np.random.normal(kalman_mean, np.sqrt(kalman_var))
# Initialize the innovations, i.e., the KF residuals
innovation = y[0]
for i in xrange(1, time.size):
# First compute the Kalman gain
KalmanGain = PredictionVar * rotated_MA_coefs.H / kalman_var
# update the state vector
StateVector += innovation * KalmanGain
# update the state one-step prediction error variance
PredictionVar -= kalman_var * (KalmanGain * KalmanGain.H)
# predict the next state, do element-wise multiplication
dt = time[i] - time[i - 1]
StateTransition = np.matrix(np.exp(ar_roots * dt)).T
StateVector = np.multiply(StateVector, StateTransition)
# update the predicted state covariance matrix
PredictionVar = np.multiply(StateTransition * StateTransition.H, PredictionVar - StateVar) + StateVar
# now predict the observation and its variance
kalman_mean = np.real(np.asscalar(rotated_MA_coefs * StateVector))
kalman_var = np.real(np.asscalar(rotated_MA_coefs * PredictionVar * rotated_MA_coefs.H))
# simulate the next time series value
y[i] = np.random.normal(kalman_mean, np.sqrt(kalman_var))
# finally, update the innovation
innovation = y[i] - kalman_mean
return y
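# Illustrative sketch (not part of the original module): simulate a CARMA(3,1) process at irregular
# times and compare its sample variance with the analytic value from carma_variance.
#
#   import numpy as np
#   t = np.sort(np.random.uniform(0.0, 500.0, 1000))
#   roots = get_ar_roots([0.2, 0.05], [0.5])
#   y = carma_process(t, sigsqr=1.0, ar_roots=roots, ma_coefs=[1.0, 0.5])
#   # np.var(y) should be close to carma_variance(1.0, roots, ma_coefs=[1.0, 0.5])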
##################
# Deprecated
class KalmanFilterDeprecated(object):
def __init__(self, time, y, yvar, sigsqr, ar_roots, ma_coefs=[1.0]):
"""
Constructor for Kalman Filter class.
:param time: The time values of the time series.
:param y: The mean-subtracted time series.
:param yvar: The variance in the measurement errors on the time series.
:param sigsqr: The variance of the driving white noise term in the CAR(p) process.
:param ar_roots: The roots of the autoregressive characteristic polynomial.
"""
        if len(ma_coefs) > ar_roots.size:
            raise ValueError("Order of MA polynomial cannot be larger than order of AR polynomial.")
self.time = time
self.y = y
self.yvar = yvar
self.sigsqr = sigsqr
self.ar_roots = ar_roots
self.p = ar_roots.size # order of the CARMA(p,q) process
self.q = len(ma_coefs)
self.ma_coefs = np.append(ma_coefs, np.zeros(self.p - self.q))
def reset(self):
"""
Reset the Kalman Filter to its initial state.
"""
# Setup the matrix of Eigenvectors for the Kalman Filter transition matrix. This allows us to transform
# quantities into the rotated state basis, which makes the computations for the Kalman filter easier and faster.
EigenMat = np.ones((self.p, self.p), dtype=complex)
EigenMat[1, :] = self.ar_roots
for k in xrange(2, self.p):
EigenMat[k, :] = self.ar_roots ** k
# Input vector under the original state space representation
Rvector = np.zeros(self.p, dtype=complex)
Rvector[-1] = 1.0
# Input vector under rotated state space representation
Jvector = solve(EigenMat, Rvector) # J = inv(E) * R
# Compute the vector of moving average coefficients in the rotated state.
rotated_MA_coefs = self.ma_coefs.dot(EigenMat)
# Calculate the stationary covariance matrix of the state vector
StateVar = np.empty((self.p, self.p), dtype=complex)
for j in xrange(self.p):
StateVar[:, j] = -self.sigsqr * Jvector * np.conjugate(Jvector[j]) / \
(self.ar_roots + np.conjugate(self.ar_roots[j]))
# Initialize variance in one-step prediction error and the state vector
PredictionVar = StateVar.copy()
StateVector = np.zeros(self.p, dtype=complex)
# Convert the current state to matrices for convenience, since we'll be doing some Linear algebra.
self._StateVector = np.matrix(StateVector).T
self._StateVar = np.matrix(StateVar)
self._PredictionVar = np.matrix(PredictionVar)
self._rotated_MA_coefs = np.matrix(rotated_MA_coefs) # this is a row vector, so no transpose
self._StateTransition = np.zeros_like(self._StateVector)
self._KalmanGain = np.zeros_like(self._StateVector)
# Initialize the Kalman mean and variance. These are the forecasted values and their variances.
self.kalman_mean = np.empty_like(self.time)
self.kalman_var = np.empty_like(self.time)
self.kalman_mean[0] = 0.0
self.kalman_var[0] = np.real(self._rotated_MA_coefs * self._PredictionVar * self._rotated_MA_coefs.H) \
+ self.yvar[0]
# Initialize the innovations, i.e., the KF residuals
self._innovation = self.y[0]
self._current_index = 1
def update(self):
"""
Perform one iteration (update) of the Kalman Filter.
"""
# First compute the Kalman gain
self._KalmanGain = self._PredictionVar * self._rotated_MA_coefs.H / self.kalman_var[self._current_index - 1]
# update the state vector
self._StateVector += self._innovation * self._KalmanGain
# update the state one-step prediction error variance
self._PredictionVar -= self.kalman_var[self._current_index - 1] * (self._KalmanGain * self._KalmanGain.H)
# predict the next state, do element-wise multiplication
dt = self.time[self._current_index] - self.time[self._current_index - 1]
self._StateTransition = np.matrix(np.exp(self.ar_roots * dt)).T
self._StateVector = np.multiply(self._StateVector, self._StateTransition)
# update the predicted state covariance matrix
self._PredictionVar = np.multiply(self._StateTransition * self._StateTransition.H,
self._PredictionVar - self._StateVar) + self._StateVar
# now predict the observation and its variance
self.kalman_mean[self._current_index] = np.real(np.asscalar(self._rotated_MA_coefs * self._StateVector))
self.kalman_var[self._current_index] = \
np.real(np.asscalar(self._rotated_MA_coefs * self._PredictionVar * self._rotated_MA_coefs.H))
self.kalman_var[self._current_index] += self.yvar[self._current_index]
# finally, update the innovation
self._innovation = self.y[self._current_index] - self.kalman_mean[self._current_index]
self._current_index += 1
def filter(self):
"""
Perform the Kalman Filter on all points of the time series. The kalman mean and variance are returned upon
completion, and are stored in the instantiated KalmanFilter object.
"""
self.reset()
for i in xrange(self.time.size - 1):
self.update()
return self.kalman_mean, self.kalman_var
def predict(self, time_predict):
"""
Return the predicted value of a time series and its standard deviation at the input time given the input
values of the CARMA(p,q) model parameters and a measured time series.
:rtype : A tuple containing the predicted value and its variance.
:param time_predict: The time at which to predict the time series.
"""
        if time_predict < self.time.min():
            raise ValueError("backcasting currently not supported: time_predict must be greater than self.time.min()")
self.reset()
# find the index where time[ipredict-1] < time_predict < time[ipredict]
ipredict = np.max(np.where(self.time < time_predict)) + 1
for i in xrange(ipredict - 1):
# run the kalman filter for time < time_predict
self.update()
# predict the value of y[time_predict]
self._KalmanGain = self._PredictionVar * self._rotated_MA_coefs.H / self.kalman_var[ipredict - 1]
self._StateVector += self._innovation * self._KalmanGain
self._PredictionVar -= self.kalman_var[ipredict - 1] * (self._KalmanGain * self._KalmanGain.H)
dt = time_predict - self.time[ipredict - 1]
self._StateTransition = np.matrix(np.exp(self.ar_roots * dt)).T
self._StateVector = np.multiply(self._StateVector, self._StateTransition)
self._PredictionVar = np.multiply(self._StateTransition * self._StateTransition.H,
self._PredictionVar - self._StateVar) + self._StateVar
ypredict_mean = np.asscalar(np.real(self._rotated_MA_coefs * self._StateVector))
ypredict_var = np.asscalar(np.real(self._rotated_MA_coefs * self._PredictionVar * self._rotated_MA_coefs.H))
# start the running statistics for the conditional mean and precision of the predicted time series value, given
# the measured time series
cprecision = 1.0 / ypredict_var
cmean = cprecision * ypredict_mean
if ipredict >= self.time.size:
# we are forecasting (extrapolating) the value, so no need to run interpolation steps below
return ypredict_mean, ypredict_var
# for time > time_predict we need to compute the coefficients for the linear filter, i.e., at time[j]:
# E(y[j]|{y[i]; j<i}) = alpha[j] + beta[j] * ypredict. we do this using recursions similar to the kalman
# filter.
# first set the initial values.
self._KalmanGain = self._PredictionVar * self._rotated_MA_coefs.H / ypredict_var
# initialize the coefficients for predicting the state vector at coefs(time_predict|time_predict)
const_state = self._StateVector - self._KalmanGain * ypredict_mean
slope_state = self._KalmanGain
# update the state one-step prediction error variance
self._PredictionVar -= ypredict_var * (self._KalmanGain * self._KalmanGain.H)
# do coefs(time_predict|time_predict) --> coefs(time[i+1]|time_predict)
dt = self.time[ipredict] - time_predict
self._StateTransition = np.matrix(np.exp(self.ar_roots * dt)).T
const_state = np.multiply(const_state, self._StateTransition)
slope_state = np.multiply(slope_state, self._StateTransition)
# update the predicted state covariance matrix
self._PredictionVar = np.multiply(self._StateTransition * self._StateTransition.H,
self._PredictionVar - self._StateVar) + self._StateVar
# compute the coefficients for the linear filter at time[ipredict], and compute the variance in the predicted
# y[ipredict]
const = np.asscalar(np.real(self._rotated_MA_coefs * const_state))
slope = np.asscalar(np.real(self._rotated_MA_coefs * slope_state))
self.kalman_var[ipredict] = \
np.real(self._rotated_MA_coefs * self._PredictionVar * self._rotated_MA_coefs.H) + \
self.yvar[ipredict]
# update the running conditional mean and variance of the predicted time series value
cprecision += slope ** 2 / self.kalman_var[ipredict]
cmean += slope * (self.y[ipredict] - const) / self.kalman_var[ipredict]
self.const = np.zeros(self.time.size)
self.slope = np.zeros(self.time.size)
self.const[ipredict] = const
self.slope[ipredict] = slope
# now repeat for time > time_predict
for i in xrange(ipredict + 1, self.time.size):
self._KalmanGain = self._PredictionVar * self._rotated_MA_coefs.H / self.kalman_var[i - 1]
# update the state prediction coefficients: coefs(i|i-1) --> coefs(i|i)
const_state += self._KalmanGain * (self.y[i - 1] - const)
slope_state -= self._KalmanGain * slope
# update the state one-step prediction error variance
self._PredictionVar -= self.kalman_var[i - 1] * (self._KalmanGain * self._KalmanGain.H)
# compute the one-step state prediction coefficients: coefs(i|i) --> coefs(i+1|i)
dt = self.time[i] - self.time[i - 1]
self._StateTransition = np.matrix(np.exp(self.ar_roots * dt)).T
const_state = np.multiply(const_state, self._StateTransition)
slope_state = np.multiply(slope_state, self._StateTransition)
# compute the state one-step prediction error variance
self._PredictionVar = np.multiply(self._StateTransition * self._StateTransition.H,
self._PredictionVar - self._StateVar) + self._StateVar
# compute the coefficients for predicting y[i]|y[j],j<i as a function of ypredict
const = np.asscalar(np.real(self._rotated_MA_coefs * const_state))
slope = np.asscalar(np.real(self._rotated_MA_coefs * slope_state))
# compute the variance in predicting y[i]|y[j],j<i
self.kalman_var[i] = \
np.real(self._rotated_MA_coefs * self._PredictionVar * self._rotated_MA_coefs.H) + \
self.yvar[i]
# finally, update the running conditional mean and variance of the predicted time series value
cprecision += slope ** 2 / self.kalman_var[i]
cmean += slope * (self.y[i] - const) / self.kalman_var[i]
self.const[i] = const
self.slope[i] = slope
cvar = 1.0 / cprecision
cmean *= cvar
return cmean, cvar
def simulate(self, time_simulate):
"""
Simulate a time series at the input time values of time_simulate, given the measured time series and input
CARMA(p,q) parameters.
:rtype : A scalar or numpy array, depending on type of time_simulate.
:param time_simulate: The time(s) at which to simulate a random draw of the time series conditional on the
measured time series and the input parameters.
"""
if np.isscalar(time_simulate):
cmean, cvar = self.predict(time_simulate)
ysimulated = np.random.normal(cmean, np.sqrt(cvar))
return ysimulated
else:
# input is array-like, need to simulate values sequentially, adding each value to the measured time series
# as they are simulated
time0 = self.time # save original values
y0 = self.y
yvar0 = self.yvar
ysimulated = np.empty(time_simulate.size)
time_simulate.sort()
for i in xrange(time_simulate.size):
cmean, cvar = self.predict(time_simulate[i])
ysimulated[i] = np.random.normal(cmean, np.sqrt(cvar)) # simulate the time series value
# find the index where time[isimulate-1] < time_simulate < time[isimulate]
isimulate = np.max(np.where(self.time < time_simulate[i])) + 1
# insert the simulated value into the time series array
self.time = np.insert(self.time, isimulate, time_simulate[i])
self.y = np.insert(self.y, isimulate, ysimulated[i])
self.yvar = np.insert(self.yvar, isimulate, 0.0)
# reset measured time series to original values
self.y = y0
self.time = time0
self.yvar = yvar0
return ysimulated
| mit |