repo_name | path | copies | size | content | license
---|---|---|---|---|---
liangz0707/scikit-learn | sklearn/neighbors/tests/test_ball_tree.py | 159 | 10196 | import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.ball_tree import (BallTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
rng = np.random.RandomState(10)
V = rng.rand(3, 3)
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'minkowski': dict(p=3),
'chebyshev': {},
'seuclidean': dict(V=np.random.random(DIMENSION)),
'wminkowski': dict(p=3, w=np.random.random(DIMENSION)),
'mahalanobis': dict(V=V)}
DISCRETE_METRICS = ['hamming',
'canberra',
'braycurtis']
BOOLEAN_METRICS = ['matching', 'jaccard', 'dice', 'kulsinski',
'rogerstanimoto', 'russellrao', 'sokalmichener',
'sokalsneath']
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
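# Brute-force reference used by the tests below: compute the full pairwise
# distance matrix and take the k smallest distances per query row.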
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def test_ball_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
bt = BallTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = bt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_ball_tree_query_boolean_metrics():
np.random.seed(0)
X = np.random.random((40, 10)).round(0)
Y = np.random.random((10, 10)).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in BOOLEAN_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_discrete_metrics():
np.random.seed(0)
X = (4 * np.random.random((40, 10))).round(0)
Y = (4 * np.random.random((10, 10))).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in DISCRETE_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = bt.query_radius([query_pt], r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_ball_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = bt.query_radius([query_pt], r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
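# Brute-force KDE reference: evaluate the kernel on the full pairwise distance
# matrix, scale by kernel_norm, and sum the contributions over the training points.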
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_ball_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
bt = BallTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = bt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true,
atol=atol, rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
bt = BallTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old version of scipy, doesn't accept "
"explicit bandwidth.")
dens_bt = bt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_bt, dens_gkde, decimal=3)
def test_ball_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
bt = BallTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = bt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_ball_tree_pickle():
np.random.seed(0)
X = np.random.random((10, 3))
bt1 = BallTree(X, leaf_size=1)
# Test if BallTree with callable metric is picklable
bt1_pyfunc = BallTree(X, metric=dist_func, leaf_size=1, p=2)
ind1, dist1 = bt1.query(X)
ind1_pyfunc, dist1_pyfunc = bt1_pyfunc.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(bt1, protocol=protocol)
bt2 = pickle.loads(s)
s_pyfunc = pickle.dumps(bt1_pyfunc, protocol=protocol)
bt2_pyfunc = pickle.loads(s_pyfunc)
ind2, dist2 = bt2.query(X)
ind2_pyfunc, dist2_pyfunc = bt2_pyfunc.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1_pyfunc, ind2_pyfunc)
assert_array_almost_equal(dist1_pyfunc, dist2_pyfunc)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
def test_query_haversine():
np.random.seed(0)
X = 2 * np.pi * np.random.random((40, 2))
bt = BallTree(X, leaf_size=1, metric='haversine')
dist1, ind1 = bt.query(X, k=5)
dist2, ind2 = brute_force_neighbors(X, X, k=5, metric='haversine')
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
| bsd-3-clause |
umich-dbgroup/foofah | tests/figure_11_b.py | 1 | 1816 | import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
import csv
test_result = []
with open('./test_result/ted_batch.csv') as csvfile:
csv_result = csv.reader(csvfile, delimiter=',')
test_result = list(csv_result)
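# Each row appears to hold a benchmark id followed by up to five attempts encoded
# as "time:result"; empty cells are recorded as 70 seconds and a result of 's'
# (success) stops further attempts for that benchmark.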
worst_time_list = []
average_time_list = []
row_id = 0
for row in test_result:
if row_id == 0:
row_id += 1
continue
benchmark_id = row[0]
worst_time_temp = []
average_time_temp = []
for i in range(1,6):
if row[i] == '':
worst_time_temp.append(70)
average_time_temp.append(70)
else:
temp = row[i].split(':')
time = temp[0]
result = temp[1]
if result == 's':
worst_time_temp.append(float(time))
average_time_temp.append(float(time))
break
else:
worst_time_temp.append(float(time))
average_time_temp.append(float(time))
worst_time = max(worst_time_temp)
average_time = sum(average_time_temp) / len(average_time_temp)
worst_time_list.append(worst_time)
average_time_list.append(average_time)
worst_time_list.sort()
average_time_list.sort()
x_axis = range(0, 100, 2)
fig, ax = plt.subplots()
ax.plot(x_axis, worst_time_list, color='green', label='worst time')
ax.plot(x_axis, average_time_list, '--', color='red', label='average time')
ax.set_ylim([0, 40])
ax.plot((0, 100), (30, 30), '--', color='black')
ax.set_ylabel('Time (seconds)')
ax.set_xlabel('Percentage of test cases')
ax.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=2, mode="expand", borderaxespad=0.)
plt.savefig('./figures/figure_11_b.png')
plt.close(fig) | mit |
KaydenIvanov/Advanced-Hentai | hentai_list.py | 1 | 3583 | # Copyright (C) 2016 André Augusto Leite de Almeida
# This file is part of Advanced Hentai.
#
# Advanced Hentai is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Advanced Hentai is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import random
import hentai_extension
import npyscreen
import operator
import time
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import datetime as dt
from PIL import Image
def list_tags(dirpath,method="all"):
items = {}
tags_found = set()
images = [os.path.join(dirpath, x) for x in os.listdir(dirpath)]
print(str(len(images)) + " images found")
for x in images:
if method == "tags":
filetags = hentai_extension.get_tags(x)
elif method == "characters":
filetags = hentai_extension.get_characters(x)
elif method == "works":
filetags = hentai_extension.get_copyright(x)
elif method == "author":
filetags = hentai_extension.get_author(x)
elif method == "all":
filetags = hentai_extension.get_tags(x) + hentai_extension.get_author(x) + hentai_extension.get_characters(x) + hentai_extension.get_copyright(x)
for x in filetags:
if x in items:
items[x] += 1
else:
items[x] = 1
tags_found.add(x)
tags_found.remove('')
print(str(len(tags_found)) + " tags found")
sorted_items = sorted(items.items(), key=operator.itemgetter(1), reverse=True)
fw = open('tags', 'w')
for x in sorted_items:
tag = str(x)[1:-1].replace("'",'').replace(",",':') + '\n'
fw.write(tag)
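# Build a cumulative count of images over their file-creation times and plot it
# as a dated curve, saving the figure to graph.png.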
def list_time(dirpath):
item_dates = {}
all_dates = set()
images = [os.path.join(dirpath, x) for x in os.listdir(dirpath)]
for x in images:
date = os.path.getctime(x)
all_dates.add(date)
dates = sorted(all_dates)
for x in dates:
numbers = sum(x > y for y in dates) + 1
item_dates[x] = numbers
sorted_dates = sorted(item_dates.items(), key=operator.itemgetter(1))
final_dates = []
hentai_numbers = []
for x in sorted_dates:
dict = str(x)[1:-1].replace("'", '').replace(",", ':')
date = dict.split(':')[0]
fdate = float(date)
final_dates.append(fdate)
hentai = dict.split(':')[-1]
hentai_numbers.append(hentai)
hentai_numbers.append(len([name for name in os.listdir(dirpath) if os.path.isfile(os.path.join(dirpath, name))]))
final_dates.append(time.time())
finally_dates = [dt.datetime.fromtimestamp(ts) for ts in final_dates]
xfmt = mdates.DateFormatter('%d/%m/%Y')
ax = plt.gca()
plt.xticks(rotation=25)
plt.subplots_adjust(bottom=0.2)
ax.xaxis.set_major_formatter(xfmt)
plt.xlabel("Days")
plt.ylabel("Number of Images")
plt.title('Hentais')
plt.grid(True)
plt.plot_date(finally_dates, hentai_numbers, '-r', label='Hentais')
plt.savefig("graph.png")
img = Image.open('graph.png')
img.show()
| gpl-3.0 |
SebastianRauner/MC-model | doc/conf.py | 3 | 1751 | # -*- coding: utf-8 -*-
extensions = [
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
]
#templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'urbs'
copyright = u'2014, tum-ens'
version = '0.4'
release = '0.4'
exclude_patterns = ['_build']
#pygments_style = 'sphinx'
# HTML output
htmlhelp_basename = 'urbsdoc'
# LaTeX output
latex_elements = {
'papersize': 'a4paper',
'pointsize': '11pt',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'urbs.tex', u'urbs Documentation',
u'tum-ens', 'manual'),
]
# Manual page output
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'urbs', u'urbs Documentation',
[u'tum-ens'], 1)
]
# Texinfo output
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'urbs', u'urbs Documentation',
u'tum-ens', 'urbs', 'A linear optimisation model for distributed energy systems',
'Miscellaneous'),
]
# Epub output
# Bibliographic Dublin Core info.
epub_title = u'urbs'
epub_author = u'tum-ens'
epub_publisher = u'tum-ens'
epub_copyright = u'2014, tum-ens'
epub_exclude_files = ['search.html']
# Intersphinx
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'http://docs.python.org/': None,
'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None),
'matplotlib': ('http://matplotlib.org/', None)}
| gpl-3.0 |
tagomatech/ETL | nbs/nbs.py | 1 | 8695 | # nbs.py
import requests, json
import pandas as pd
import numpy as np
class NBS_Fetcher(object):
"""Returns data from China's National Bureau of Statistics (NBS)."""
def __init__(self, database, product, geo='', measure='01', period='LAST13'):
"""
Parameters
----------
database : str
NBS database to fetch data from. Possible values:
national data, yearly : 'hgnd'
national data, quarterly : 'hgjd'
national data, monthly : 'hgyd'
province data, yearly : 'fsnd'
province data, quarterly : 'fsjd'
province data, monthly : 'fsyd'
city data, yearly : 'csnd'
city data, monthly : 'csyd'
international data, yearly : 'gjnd'
international data, monthly : 'gjyd'
3 main countries data, monthly : 'gjydsdj'
TODO: Only monthly data at the national and province levels were dealt with.
Extend code to other geographical and time granularities
product : str
Crude oil (production or processing) or oil product (output). Possible values:
coal : "A030101"
crude oil : "A030102"
natural gas : "A030103"
coalbed gas : "A030104"
lng : "A030105"
crude oil processing : "A030106" # "Processing Volume of Crude oil" => Runs? Throughput?
gasoline : "A030107"
kerosene : "A030108"
diesel oil : "A030109"
fuel oil : "A03010A"
naphtha : "A03010B"
lpg : "A03010C"
petroleum coke : "A03010D"
asphalt : "A03010E"
coke : "A03010F"
electricity : "A03010G"
thermal power : "A03010H"
hydro-electric power : "A03010I"
nuclear power : "A03010J"
wind power : "A03010K"
solar power : "A03010L"
gas : "A03010M"
geo : str, optional
NBS geographical zone to fetch data for. Possible values:
Provinces:
Beijing : "110000"
Tianjin : "120000"
Hebei : "130000"
Shanxi : "140000"
Inner Mongolia : "150000"
Liaoning : "210000"
Jilin : "220000"
Heilongjiang : "230000"
Shanghai : "310000"
Jiangsu : "320000"
Zhejiang : "330000"
Anhui : "340000"
Fujian : "350000"
Jiangxi : "360000"
Shandong : "370000"
Henan : "410000"
Hubei : "420000"
Hunan : "430000"
Guangdong : "440000"
Guangxi : "450000"
Hainan : "460000"
Chongqing : "500000"
Sichuan : "510000"
Guizhou : "520000"
Yunnan : "530000"
Tibet : "540000"
Shaanxi : "610000"
Gansu : "620000"
Qinghai : "630000"
Ningxia : "640000"
Xinjiang : "650000"
TODO: complete the list above for other geographical levels
measure : str
Data type required. Possible values:
Current Period : "01"
Accumulated : "02"
Growth Rate (The same period last year=100) : "03"
Accumulated Growth Rate(%) : "04"
TODO: check data type are always those in the list above across products
period : str
Timestamp or time range. Includes possible values below:
13 most recent months : "LAST13"
24 most recent months : "LAST24"
36 most recent months : "LAST36"
Specific year : "2014", "2015", etc...
Specific time range : "2013-2015", "201303-2015-12"
etc...
TODO: Review the part of the code that creates the pd.Series() object, as
it is likely to break when only 1 data point is returned
Returns
-------
series
The time series containing the required data
Examples
--------
# Example 1 : China gasoline production, monthly volumes (10000 tons) by month from Jun-18 to May-19
nbs = NBS_Fetcher('hgyd',
'A030107',
measure='01',
period='201806-201905')
data = nbs.get_data()
# Example 2 : Shandong crude oil processing, monthly growth rate for the past 13 months
nbs = NBS_Fetcher('hgyd',
'A030106',
geo='310000',
measure='03',
period='LAST13')
data = nbs.get_data()
"""
self.database = database
self.product = product
self.geo = geo
self.measure = measure
self.period = period
# Structure of json returned from NBS server differ depending on the source database
if self.database[:2] =='hg': # hgyd database (national, monthly data)
self.i = 1
elif self.database[:2] == 'fs': # fsyd database (province, monthly data)
self.i = 2
# URLs
url_root ='http://data.stats.gov.cn/english/easyquery.htm'
self.url_getOtherWds = '{}?m=getOtherWds&dbcode={}&rowcode=zb&colcode=sj&wds=[{{"wdcode":"zb","valuecode":"{}{}"}}]'.format(url_root,
self.database,
self.product,
self.measure)
self.url_QueryData = '{}?m=QueryData&dbcode={}&rowcode=zb&colcode=sj&wds=[{{"wdcode":"reg","valuecode":"{}"}}]&dfwds=[{{"wdcode":"sj","valuecode":"{}"}}]'.format(url_root,
self.database,
self.geo,
self.period)
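# Fetch both endpoints in one session (getOtherWds selects the indicator,
# QueryData returns the values), then build a monthly pandas Series with
# zero-valued data points mapped to NaN.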
def get_data(self):
# Fetch data
with requests.Session() as sess:
r_getOtherWds = sess.get(self.url_getOtherWds)
r_QueryData = sess.get(self.url_QueryData)
cont = r_QueryData.content
# Create json
jso = json.loads(cont.decode('utf8'))
# Create series
acc_timestamps = []
acc_values = []
for j in jso['returndata']['datanodes']:
acc_timestamps.append(j['wds'][self.i]['valuecode'])
if j['data']['data'] == 0:
acc_values.append(np.nan)
else:
acc_values.append(j['data']['data'])
ser = pd.Series(data=acc_values,
index=acc_timestamps)
ser.index = [pd.to_datetime(ind, format='%Y%m') for ind in ser.index]
return ser.sort_index()
| mit |
eric-haibin-lin/mxnet | example/gluon/dc_gan/dcgan.py | 6 | 13046 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Generate MXNet implementation of Deep Convolutional Generative Adversarial Networks"""
import logging
from datetime import datetime
import argparse
import os
import time
import numpy as np
from matplotlib import pyplot as plt
import matplotlib as mpl
import mxnet as mx
from mxnet import gluon
from mxnet.gluon import nn
from mxnet import autograd
from inception_score import get_inception_score
mpl.use('Agg')
def fill_buf(buf, i, img, shape):
"""Reposition the images generated by the generator so that it can be saved as picture matrix.
:param buf: the images metric
:param i: index of each image
:param img: images generated by generator once
:param shape: each image`s shape
:return: Adjust images for output
"""
n = buf.shape[0]//shape[1]
m = buf.shape[1]//shape[0]
sx = (i%m)*shape[0]
sy = (i//m)*shape[1]
buf[sy:sy+shape[1], sx:sx+shape[0], :] = img
def visual(title, X, name):
"""Image visualization and preservation
:param title: title
:param X: images to visualized
:param name: saved picture`s name
:return:
"""
assert len(X.shape) == 4
X = X.transpose((0, 2, 3, 1))
X = np.clip((X - np.min(X))*(255.0/(np.max(X) - np.min(X))), 0, 255).astype(np.uint8)
n = np.ceil(np.sqrt(X.shape[0]))
buff = np.zeros((int(n*X.shape[1]), int(n*X.shape[2]), int(X.shape[3])), dtype=np.uint8)
for i, img in enumerate(X):
fill_buf(buff, i, img, X.shape[1:3])
buff = buff[:, :, ::-1]
plt.imshow(buff)
plt.title(title)
plt.savefig(name)
parser = argparse.ArgumentParser(description='Train a DCgan model for image generation '
'and then use inception_score to metric the result.')
parser.add_argument('--dataset', type=str, default='cifar10', help='dataset to use. options are cifar10 and mnist.')
parser.add_argument('--batch-size', type=int, default=64, help='input batch size, default is 64')
parser.add_argument('--nz', type=int, default=100, help='size of the latent z vector, default is 100')
parser.add_argument('--ngf', type=int, default=64, help='the channel of each generator filter layer, default is 64.')
parser.add_argument('--ndf', type=int, default=64, help='the channel of each discriminator filter layer, '
'default is 64.')
parser.add_argument('--nepoch', type=int, default=25, help='number of epochs to train for, default is 25.')
parser.add_argument('--niter', type=int, default=10, help='save generated images and inception_score per niter iters, '
'default is 10.')
parser.add_argument('--lr', type=float, default=0.0002, help='learning rate, default=0.0002')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument('--netG', default='', help="path to netG (to continue training)")
parser.add_argument('--netD', default='', help="path to netD (to continue training)")
parser.add_argument('--outf', default='./results', help='folder to output images and model checkpoints')
parser.add_argument('--check-point', default=True, help="save results at each epoch or not")
parser.add_argument('--inception_score', type=bool, default=True, help='To record the inception_score, '
'default is True.')
opt = parser.parse_args()
print(opt)
logging.basicConfig(level=logging.DEBUG)
nz = int(opt.nz)
ngf = int(opt.ngf)
ndf = int(opt.ndf)
niter = opt.niter
nc = 3
if opt.cuda:
ctx = mx.gpu(0)
else:
ctx = mx.cpu()
batch_size = opt.batch_size
check_point = bool(opt.check_point)
outf = opt.outf
dataset = opt.dataset
if not os.path.exists(outf):
os.makedirs(outf)
def transformer(data, label):
"""Get the translation of images"""
# resize to 64x64
data = mx.image.imresize(data, 64, 64)
# transpose from (64, 64, 3) to (3, 64, 64)
data = mx.nd.transpose(data, (2, 0, 1))
# normalize to [-1, 1]
data = data.astype(np.float32)/128 - 1
# if image is greyscale, repeat 3 times to get RGB image.
if data.shape[0] == 1:
data = mx.nd.tile(data, (3, 1, 1))
return data, label
# get dataset with the batch_size num each time
def get_dataset(dataset_name):
"""Load the dataset and split it to train/valid data
:param dataset_name: string
Returns:
train_data: int array
training dataset
val_data: int array
valid dataset
"""
# mnist
if dataset == "mnist":
train_data = gluon.data.DataLoader(
gluon.data.vision.MNIST('./data', train=True, transform=transformer),
batch_size, shuffle=True, last_batch='discard')
val_data = gluon.data.DataLoader(
gluon.data.vision.MNIST('./data', train=False, transform=transformer),
batch_size, shuffle=False)
# cifar10
elif dataset == "cifar10":
train_data = gluon.data.DataLoader(
gluon.data.vision.CIFAR10('./data', train=True, transform=transformer),
batch_size, shuffle=True, last_batch='discard')
val_data = gluon.data.DataLoader(
gluon.data.vision.CIFAR10('./data', train=False, transform=transformer),
batch_size, shuffle=False)
return train_data, val_data
def get_netG():
"""Get net G"""
# build the generator
netG = nn.Sequential()
with netG.name_scope():
# input is Z, going into a convolution
netG.add(nn.Conv2DTranspose(ngf * 8, 4, 1, 0, use_bias=False))
netG.add(nn.BatchNorm())
netG.add(nn.Activation('relu'))
# state size. (ngf*8) x 4 x 4
netG.add(nn.Conv2DTranspose(ngf * 4, 4, 2, 1, use_bias=False))
netG.add(nn.BatchNorm())
netG.add(nn.Activation('relu'))
# state size. (ngf*4) x 8 x 8
netG.add(nn.Conv2DTranspose(ngf * 2, 4, 2, 1, use_bias=False))
netG.add(nn.BatchNorm())
netG.add(nn.Activation('relu'))
# state size. (ngf*2) x 16 x 16
netG.add(nn.Conv2DTranspose(ngf, 4, 2, 1, use_bias=False))
netG.add(nn.BatchNorm())
netG.add(nn.Activation('relu'))
# state size. (ngf) x 32 x 32
netG.add(nn.Conv2DTranspose(nc, 4, 2, 1, use_bias=False))
netG.add(nn.Activation('tanh'))
# state size. (nc) x 64 x 64
return netG
def get_netD():
"""Get the netD"""
# build the discriminator
netD = nn.Sequential()
with netD.name_scope():
# input is (nc) x 64 x 64
netD.add(nn.Conv2D(ndf, 4, 2, 1, use_bias=False))
netD.add(nn.LeakyReLU(0.2))
# state size. (ndf) x 32 x 32
netD.add(nn.Conv2D(ndf * 2, 4, 2, 1, use_bias=False))
netD.add(nn.BatchNorm())
netD.add(nn.LeakyReLU(0.2))
# state size. (ndf*2) x 16 x 16
netD.add(nn.Conv2D(ndf * 4, 4, 2, 1, use_bias=False))
netD.add(nn.BatchNorm())
netD.add(nn.LeakyReLU(0.2))
# state size. (ndf*4) x 8 x 8
netD.add(nn.Conv2D(ndf * 8, 4, 2, 1, use_bias=False))
netD.add(nn.BatchNorm())
netD.add(nn.LeakyReLU(0.2))
# state size. (ndf*8) x 4 x 4
netD.add(nn.Conv2D(2, 4, 1, 0, use_bias=False))
# state size. 2 x 1 x 1
return netD
def get_configurations(netG, netD):
"""Get configurations for net"""
# loss
loss = gluon.loss.SoftmaxCrossEntropyLoss()
# initialize the generator and the discriminator
netG.initialize(mx.init.Normal(0.02), ctx=ctx)
netD.initialize(mx.init.Normal(0.02), ctx=ctx)
# trainer for the generator and the discriminator
trainerG = gluon.Trainer(netG.collect_params(), 'adam', {'learning_rate': opt.lr, 'beta1': opt.beta1})
trainerD = gluon.Trainer(netD.collect_params(), 'adam', {'learning_rate': opt.lr, 'beta1': opt.beta1})
return loss, trainerG, trainerD
def ins_save(inception_score):
# draw the inception_score curve
length = len(inception_score)
x = np.arange(0, length)
plt.figure(figsize=(8.0, 6.0))
plt.plot(x, inception_score)
plt.xlabel("iter/100")
plt.ylabel("inception_score")
plt.savefig("inception_score.png")
# main function
def main():
"""Entry point to dcgan"""
print("|------- new changes!!!!!!!!!")
# to get the dataset and net configuration
train_data, val_data = get_dataset(dataset)
netG = get_netG()
netD = get_netD()
loss, trainerG, trainerD = get_configurations(netG, netD)
# set labels
real_label = mx.nd.ones((opt.batch_size,), ctx=ctx)
fake_label = mx.nd.zeros((opt.batch_size,), ctx=ctx)
metric = mx.metric.Accuracy()
print('Training... ')
stamp = datetime.now().strftime('%Y_%m_%d-%H_%M')
iter = 0
# to metric the network
loss_d = []
loss_g = []
inception_score = []
for epoch in range(opt.nepoch):
tic = time.time()
btic = time.time()
for data, _ in train_data:
############################
# (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
###########################
# train with real_t
data = data.as_in_context(ctx)
noise = mx.nd.random.normal(0, 1, shape=(opt.batch_size, nz, 1, 1), ctx=ctx)
with autograd.record():
output = netD(data)
# reshape output from (opt.batch_size, 2, 1, 1) to (opt.batch_size, 2)
output = output.reshape((opt.batch_size, 2))
errD_real = loss(output, real_label)
metric.update([real_label, ], [output, ])
with autograd.record():
fake = netG(noise)
output = netD(fake.detach())
output = output.reshape((opt.batch_size, 2))
errD_fake = loss(output, fake_label)
errD = errD_real + errD_fake
errD.backward()
metric.update([fake_label,], [output,])
trainerD.step(opt.batch_size)
############################
# (2) Update G network: maximize log(D(G(z)))
###########################
with autograd.record():
output = netD(fake)
output = output.reshape((-1, 2))
errG = loss(output, real_label)
errG.backward()
trainerG.step(opt.batch_size)
name, acc = metric.get()
logging.info('discriminator loss = %f, generator loss = %f, binary training acc = %f at iter %d epoch %d'
, mx.nd.mean(errD).asscalar(), mx.nd.mean(errG).asscalar(), acc, iter, epoch)
if iter % niter == 0:
visual('gout', fake.asnumpy(), name=os.path.join(outf, 'fake_img_iter_%d.png' % iter))
visual('data', data.asnumpy(), name=os.path.join(outf, 'real_img_iter_%d.png' % iter))
# record the metric data
loss_d.append(errD)
loss_g.append(errG)
if opt.inception_score:
score, _ = get_inception_score(fake)
inception_score.append(score)
iter = iter + 1
btic = time.time()
name, acc = metric.get()
metric.reset()
logging.info('\nbinary training acc at epoch %d: %s=%f', epoch, name, acc)
logging.info('time: %f', time.time() - tic)
# save check_point
if check_point:
netG.save_parameters(os.path.join(outf, 'generator_epoch_%d.params' %epoch))
netD.save_parameters(os.path.join(outf, 'discriminator_epoch_%d.params' % epoch))
# save parameter
netG.save_parameters(os.path.join(outf, 'generator.params'))
netD.save_parameters(os.path.join(outf, 'discriminator.params'))
# visualization the inception_score as a picture
if opt.inception_score:
ins_save(inception_score)
if __name__ == '__main__':
if opt.inception_score:
print("Use inception_score to metric this DCgan model, the reusult is save as a picture "
"named \"inception_score.png\"!")
main()
| apache-2.0 |
xhochy/arrow | python/pyarrow/filesystem.py | 1 | 14483 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import posixpath
import sys
import urllib.parse
import warnings
from os.path import join as pjoin
import pyarrow as pa
from pyarrow.util import implements, _stringify_path, _is_path_like, _DEPR_MSG
_FS_DEPR_MSG = _DEPR_MSG.format(
"filesystem.LocalFileSystem", "2.0.0", "fs.LocalFileSystem"
)
class FileSystem:
"""
Abstract filesystem interface.
"""
def cat(self, path):
"""
Return contents of file as a bytes object.
Parameters
----------
path : str
File path to read content from.
Returns
-------
contents : bytes
"""
with self.open(path, 'rb') as f:
return f.read()
def ls(self, path):
"""
Return list of file paths.
Parameters
----------
path : str
Directory to list contents from.
"""
raise NotImplementedError
def delete(self, path, recursive=False):
"""
Delete the indicated file or directory.
Parameters
----------
path : str
Path to delete.
recursive : bool, default False
If True, also delete child paths for directories.
"""
raise NotImplementedError
def disk_usage(self, path):
"""
Compute bytes used by all contents under indicated path in file tree.
Parameters
----------
path : str
Can be a file path or directory.
Returns
-------
usage : int
"""
path = _stringify_path(path)
path_info = self.stat(path)
if path_info['kind'] == 'file':
return path_info['size']
total = 0
for root, directories, files in self.walk(path):
for child_path in files:
abspath = self._path_join(root, child_path)
total += self.stat(abspath)['size']
return total
def _path_join(self, *args):
return self.pathsep.join(args)
def stat(self, path):
"""
Information about a filesystem entry.
Returns
-------
stat : dict
"""
raise NotImplementedError('FileSystem.stat')
def rm(self, path, recursive=False):
"""
Alias for FileSystem.delete.
"""
return self.delete(path, recursive=recursive)
def mv(self, path, new_path):
"""
Alias for FileSystem.rename.
"""
return self.rename(path, new_path)
def rename(self, path, new_path):
"""
Rename file, like UNIX mv command.
Parameters
----------
path : str
Path to alter.
new_path : str
Path to move to.
"""
raise NotImplementedError('FileSystem.rename')
def mkdir(self, path, create_parents=True):
"""
Create a directory.
Parameters
----------
path : str
Path to the directory.
create_parents : bool, default True
If the parent directories don't exists create them as well.
"""
raise NotImplementedError
def exists(self, path):
"""
Return True if path exists.
Parameters
----------
path : str
Path to check.
"""
raise NotImplementedError
def isdir(self, path):
"""
Return True if path is a directory.
Parameters
----------
path : str
Path to check.
"""
raise NotImplementedError
def isfile(self, path):
"""
Return True if path is a file.
Parameters
----------
path : str
Path to check.
"""
raise NotImplementedError
def _isfilestore(self):
"""
Returns True if this FileSystem is a unix-style file store with
directories.
"""
raise NotImplementedError
def read_parquet(self, path, columns=None, metadata=None, schema=None,
use_threads=True, use_pandas_metadata=False):
"""
Read Parquet data from path in file system. Can read from a single file
or a directory of files.
Parameters
----------
path : str
Single file path or directory
columns : List[str], optional
Subset of columns to read.
metadata : pyarrow.parquet.FileMetaData
Known metadata to validate files against.
schema : pyarrow.parquet.Schema
Known schema to validate files against. Alternative to metadata
argument.
use_threads : bool, default True
Perform multi-threaded column reads.
use_pandas_metadata : bool, default False
If True and file has custom pandas schema metadata, ensure that
index columns are also loaded.
Returns
-------
table : pyarrow.Table
"""
from pyarrow.parquet import ParquetDataset
dataset = ParquetDataset(path, schema=schema, metadata=metadata,
filesystem=self)
return dataset.read(columns=columns, use_threads=use_threads,
use_pandas_metadata=use_pandas_metadata)
def open(self, path, mode='rb'):
"""
Open file for reading or writing.
"""
raise NotImplementedError
@property
def pathsep(self):
return '/'
class LocalFileSystem(FileSystem):
_instance = None
def __init__(self):
warnings.warn(_FS_DEPR_MSG, DeprecationWarning, stacklevel=2)
super().__init__()
@classmethod
def _get_instance(cls):
if cls._instance is None:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
cls._instance = LocalFileSystem()
return cls._instance
@classmethod
def get_instance(cls):
warnings.warn(_FS_DEPR_MSG, DeprecationWarning, stacklevel=2)
return cls._get_instance()
@implements(FileSystem.ls)
def ls(self, path):
path = _stringify_path(path)
return sorted(pjoin(path, x) for x in os.listdir(path))
@implements(FileSystem.mkdir)
def mkdir(self, path, create_parents=True):
path = _stringify_path(path)
if create_parents:
os.makedirs(path)
else:
os.mkdir(path)
@implements(FileSystem.isdir)
def isdir(self, path):
path = _stringify_path(path)
return os.path.isdir(path)
@implements(FileSystem.isfile)
def isfile(self, path):
path = _stringify_path(path)
return os.path.isfile(path)
@implements(FileSystem._isfilestore)
def _isfilestore(self):
return True
@implements(FileSystem.exists)
def exists(self, path):
path = _stringify_path(path)
return os.path.exists(path)
@implements(FileSystem.open)
def open(self, path, mode='rb'):
"""
Open file for reading or writing.
"""
path = _stringify_path(path)
return open(path, mode=mode)
@property
def pathsep(self):
return os.path.sep
def walk(self, path):
"""
Directory tree generator, see os.walk.
"""
path = _stringify_path(path)
return os.walk(path)
class DaskFileSystem(FileSystem):
"""
Wraps Dask-style filesystem implementations like s3fs, gcsfs, etc.
"""
def __init__(self, fs):
warnings.warn(
"The pyarrow.filesystem.DaskFileSystem/S3FSWrapper are deprecated "
"as of pyarrow 3.0.0, and will be removed in a future version.",
DeprecationWarning, stacklevel=2)
self.fs = fs
@implements(FileSystem.isdir)
def isdir(self, path):
raise NotImplementedError("Unsupported file system API")
@implements(FileSystem.isfile)
def isfile(self, path):
raise NotImplementedError("Unsupported file system API")
@implements(FileSystem._isfilestore)
def _isfilestore(self):
"""
Object Stores like S3 and GCSFS are based on key lookups, not true
file-paths.
"""
return False
@implements(FileSystem.delete)
def delete(self, path, recursive=False):
path = _stringify_path(path)
return self.fs.rm(path, recursive=recursive)
@implements(FileSystem.exists)
def exists(self, path):
path = _stringify_path(path)
return self.fs.exists(path)
@implements(FileSystem.mkdir)
def mkdir(self, path, create_parents=True):
path = _stringify_path(path)
if create_parents:
return self.fs.mkdirs(path)
else:
return self.fs.mkdir(path)
@implements(FileSystem.open)
def open(self, path, mode='rb'):
"""
Open file for reading or writing.
"""
path = _stringify_path(path)
return self.fs.open(path, mode=mode)
def ls(self, path, detail=False):
path = _stringify_path(path)
return self.fs.ls(path, detail=detail)
def walk(self, path):
"""
Directory tree generator, like os.walk.
"""
path = _stringify_path(path)
return self.fs.walk(path)
class S3FSWrapper(DaskFileSystem):
@implements(FileSystem.isdir)
def isdir(self, path):
path = _sanitize_s3(_stringify_path(path))
try:
contents = self.fs.ls(path)
if len(contents) == 1 and contents[0] == path:
return False
else:
return True
except OSError:
return False
@implements(FileSystem.isfile)
def isfile(self, path):
path = _sanitize_s3(_stringify_path(path))
try:
contents = self.fs.ls(path)
return len(contents) == 1 and contents[0] == path
except OSError:
return False
def walk(self, path, refresh=False):
"""
Directory tree generator, like os.walk.
Generator version of what is in s3fs, which yields a flattened list of
files.
"""
path = _sanitize_s3(_stringify_path(path))
directories = set()
files = set()
for key in list(self.fs._ls(path, refresh=refresh)):
path = key['Key']
if key['StorageClass'] == 'DIRECTORY':
directories.add(path)
elif key['StorageClass'] == 'BUCKET':
pass
else:
files.add(path)
# s3fs creates duplicate 'DIRECTORY' entries
files = sorted([posixpath.split(f)[1] for f in files
if f not in directories])
directories = sorted([posixpath.split(x)[1]
for x in directories])
yield path, directories, files
for directory in directories:
yield from self.walk(directory, refresh=refresh)
def _sanitize_s3(path):
if path.startswith('s3://'):
return path.replace('s3://', '')
else:
return path
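# Accept either a legacy pyarrow FileSystem subclass or, when fsspec is importable,
# an fsspec AbstractFileSystem instance; anything else raises OSError.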
def _ensure_filesystem(fs):
fs_type = type(fs)
# If the arrow filesystem was subclassed, assume it supports the full
# interface and return it
if not issubclass(fs_type, FileSystem):
if "fsspec" in sys.modules:
fsspec = sys.modules["fsspec"]
if isinstance(fs, fsspec.AbstractFileSystem):
# for recent fsspec versions that stop inheriting from
# pyarrow.filesystem.FileSystem, still allow fsspec
# filesystems (which should be compatible with our legacy fs)
return fs
raise OSError('Unrecognized filesystem: {}'.format(fs_type))
else:
return fs
def resolve_filesystem_and_path(where, filesystem=None):
"""
Return filesystem from path which could be an HDFS URI, a local URI,
or a plain filesystem path.
"""
if not _is_path_like(where):
if filesystem is not None:
raise ValueError("filesystem passed but where is file-like, so"
" there is nothing to open with filesystem.")
return filesystem, where
if filesystem is not None:
filesystem = _ensure_filesystem(filesystem)
if isinstance(filesystem, LocalFileSystem):
path = _stringify_path(where)
elif not isinstance(where, str):
raise TypeError(
"Expected string path; path-like objects are only allowed "
"with a local filesystem"
)
else:
path = where
return filesystem, path
path = _stringify_path(where)
parsed_uri = urllib.parse.urlparse(path)
if parsed_uri.scheme == 'hdfs' or parsed_uri.scheme == 'viewfs':
# Input is hdfs URI such as hdfs://host:port/myfile.parquet
netloc_split = parsed_uri.netloc.split(':')
host = netloc_split[0]
if host == '':
host = 'default'
else:
host = parsed_uri.scheme + "://" + host
port = 0
if len(netloc_split) == 2 and netloc_split[1].isnumeric():
port = int(netloc_split[1])
fs = pa.hdfs._connect(host=host, port=port)
fs_path = parsed_uri.path
elif parsed_uri.scheme == 'file':
# Input is local URI such as file:///home/user/myfile.parquet
fs = LocalFileSystem._get_instance()
fs_path = parsed_uri.path
else:
# Input is local path such as /home/user/myfile.parquet
fs = LocalFileSystem._get_instance()
fs_path = path
return fs, fs_path
| apache-2.0 |
valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/pandas/tests/test_config.py | 13 | 16910 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import pandas as pd
import unittest
import warnings
import nose
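# These tests exercise the pandas config machinery (register_option, get_option,
# set_option, describe_option, deprecate_option) against a global config that is
# blanked out in setUp and restored in tearDown.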
class TestConfig(unittest.TestCase):
_multiprocess_can_split_ = True
def __init__(self, *args):
super(TestConfig, self).__init__(*args)
from copy import deepcopy
self.cf = pd.core.config
self.gc = deepcopy(getattr(self.cf, '_global_config'))
self.do = deepcopy(getattr(self.cf, '_deprecated_options'))
self.ro = deepcopy(getattr(self.cf, '_registered_options'))
def setUp(self):
setattr(self.cf, '_global_config', {})
setattr(
self.cf, 'options', self.cf.DictWrapper(self.cf._global_config))
setattr(self.cf, '_deprecated_options', {})
setattr(self.cf, '_registered_options', {})
def tearDown(self):
setattr(self.cf, '_global_config', self.gc)
setattr(self.cf, '_deprecated_options', self.do)
setattr(self.cf, '_registered_options', self.ro)
def test_api(self):
# the pandas object exposes the user API
self.assertTrue(hasattr(pd, 'get_option'))
self.assertTrue(hasattr(pd, 'set_option'))
self.assertTrue(hasattr(pd, 'reset_option'))
self.assertTrue(hasattr(pd, 'describe_option'))
def test_is_one_of_factory(self):
v = self.cf.is_one_of_factory([None,12])
v(12)
v(None)
self.assertRaises(ValueError,v,1.1)
def test_register_option(self):
self.cf.register_option('a', 1, 'doc')
# can't register an already registered option
self.assertRaises(KeyError, self.cf.register_option, 'a', 1, 'doc')
# can't register an already registered option
self.assertRaises(KeyError, self.cf.register_option, 'a.b.c.d1', 1,
'doc')
self.assertRaises(KeyError, self.cf.register_option, 'a.b.c.d2', 1,
'doc')
# no python keywords
self.assertRaises(ValueError, self.cf.register_option, 'for', 0)
self.assertRaises(ValueError, self.cf.register_option, 'a.for.b', 0)
# must be valid identifier (ensure attribute access works)
self.assertRaises(ValueError, self.cf.register_option,
'Oh my Goddess!', 0)
# we can register options several levels deep
# without predefining the intermediate steps
# and we can define differently named options
# in the same namespace
self.cf.register_option('k.b.c.d1', 1, 'doc')
self.cf.register_option('k.b.c.d2', 1, 'doc')
def test_describe_option(self):
self.cf.register_option('a', 1, 'doc')
self.cf.register_option('b', 1, 'doc2')
self.cf.deprecate_option('b')
self.cf.register_option('c.d.e1', 1, 'doc3')
self.cf.register_option('c.d.e2', 1, 'doc4')
self.cf.register_option('f', 1)
self.cf.register_option('g.h', 1)
self.cf.register_option('k', 2)
self.cf.deprecate_option('g.h', rkey="k")
self.cf.register_option('l', "foo")
# non-existent keys raise KeyError
self.assertRaises(KeyError, self.cf.describe_option, 'no.such.key')
# we can get the description for any key we registered
self.assertTrue(
'doc' in self.cf.describe_option('a', _print_desc=False))
self.assertTrue(
'doc2' in self.cf.describe_option('b', _print_desc=False))
self.assertTrue(
'precated' in self.cf.describe_option('b', _print_desc=False))
self.assertTrue(
'doc3' in self.cf.describe_option('c.d.e1', _print_desc=False))
self.assertTrue(
'doc4' in self.cf.describe_option('c.d.e2', _print_desc=False))
# if no doc is specified we get a default message
# saying "description not available"
self.assertTrue(
'vailable' in self.cf.describe_option('f', _print_desc=False))
self.assertTrue(
'vailable' in self.cf.describe_option('g.h', _print_desc=False))
self.assertTrue(
'precated' in self.cf.describe_option('g.h', _print_desc=False))
self.assertTrue(
'k' in self.cf.describe_option('g.h', _print_desc=False))
# default is reported
self.assertTrue(
'foo' in self.cf.describe_option('l', _print_desc=False))
# current value is reported
self.assertFalse(
'bar' in self.cf.describe_option('l', _print_desc=False))
self.cf.set_option("l","bar")
self.assertTrue(
'bar' in self.cf.describe_option('l', _print_desc=False))
def test_case_insensitive(self):
self.cf.register_option('KanBAN', 1, 'doc')
self.assertTrue(
'doc' in self.cf.describe_option('kanbaN', _print_desc=False))
self.assertEqual(self.cf.get_option('kanBaN'), 1)
self.cf.set_option('KanBan', 2)
self.assertEqual(self.cf.get_option('kAnBaN'), 2)
# gets of non-existent keys fail
self.assertRaises(KeyError, self.cf.get_option, 'no_such_option')
self.cf.deprecate_option('KanBan')
# testing warning with catch_warning was only added in 2.6
self.assertTrue(self.cf._is_deprecated('kAnBaN'))
def test_get_option(self):
self.cf.register_option('a', 1, 'doc')
self.cf.register_option('b.c', 'hullo', 'doc2')
self.cf.register_option('b.b', None, 'doc2')
# gets of existing keys succeed
self.assertEqual(self.cf.get_option('a'), 1)
self.assertEqual(self.cf.get_option('b.c'), 'hullo')
self.assertTrue(self.cf.get_option('b.b') is None)
# gets of non-existent keys fail
self.assertRaises(KeyError, self.cf.get_option, 'no_such_option')
def test_set_option(self):
self.cf.register_option('a', 1, 'doc')
self.cf.register_option('b.c', 'hullo', 'doc2')
self.cf.register_option('b.b', None, 'doc2')
self.assertEqual(self.cf.get_option('a'), 1)
self.assertEqual(self.cf.get_option('b.c'), 'hullo')
self.assertTrue(self.cf.get_option('b.b') is None)
self.cf.set_option('a', 2)
self.cf.set_option('b.c', 'wurld')
self.cf.set_option('b.b', 1.1)
self.assertEqual(self.cf.get_option('a'), 2)
self.assertEqual(self.cf.get_option('b.c'), 'wurld')
self.assertEqual(self.cf.get_option('b.b'), 1.1)
self.assertRaises(KeyError, self.cf.set_option, 'no.such.key', None)
def test_set_option_empty_args(self):
self.assertRaises(ValueError, self.cf.set_option)
def test_set_option_uneven_args(self):
self.assertRaises(ValueError, self.cf.set_option, 'a.b', 2, 'b.c')
def test_set_option_invalid_single_argument_type(self):
self.assertRaises(ValueError, self.cf.set_option, 2)
def test_set_option_multiple(self):
self.cf.register_option('a', 1, 'doc')
self.cf.register_option('b.c', 'hullo', 'doc2')
self.cf.register_option('b.b', None, 'doc2')
self.assertEqual(self.cf.get_option('a'), 1)
self.assertEqual(self.cf.get_option('b.c'), 'hullo')
self.assertTrue(self.cf.get_option('b.b') is None)
self.cf.set_option('a', '2', 'b.c', None, 'b.b', 10.0)
self.assertEqual(self.cf.get_option('a'), '2')
self.assertTrue(self.cf.get_option('b.c') is None)
self.assertEqual(self.cf.get_option('b.b'), 10.0)
def test_validation(self):
self.cf.register_option('a', 1, 'doc', validator=self.cf.is_int)
self.cf.register_option('b.c', 'hullo', 'doc2',
validator=self.cf.is_text)
self.assertRaises(ValueError, self.cf.register_option, 'a.b.c.d2',
'NO', 'doc', validator=self.cf.is_int)
self.cf.set_option('a', 2) # int is_int
self.cf.set_option('b.c', 'wurld') # str is_str
self.assertRaises(
ValueError, self.cf.set_option, 'a', None) # None not is_int
self.assertRaises(ValueError, self.cf.set_option, 'a', 'ab')
self.assertRaises(ValueError, self.cf.set_option, 'b.c', 1)
def test_reset_option(self):
self.cf.register_option('a', 1, 'doc', validator=self.cf.is_int)
self.cf.register_option('b.c', 'hullo', 'doc2',
validator=self.cf.is_str)
self.assertEqual(self.cf.get_option('a'), 1)
self.assertEqual(self.cf.get_option('b.c'), 'hullo')
self.cf.set_option('a', 2)
self.cf.set_option('b.c', 'wurld')
self.assertEqual(self.cf.get_option('a'), 2)
self.assertEqual(self.cf.get_option('b.c'), 'wurld')
self.cf.reset_option('a')
self.assertEqual(self.cf.get_option('a'), 1)
self.assertEqual(self.cf.get_option('b.c'), 'wurld')
self.cf.reset_option('b.c')
self.assertEqual(self.cf.get_option('a'), 1)
self.assertEqual(self.cf.get_option('b.c'), 'hullo')
def test_reset_option_all(self):
self.cf.register_option('a', 1, 'doc', validator=self.cf.is_int)
self.cf.register_option('b.c', 'hullo', 'doc2',
validator=self.cf.is_str)
self.assertEqual(self.cf.get_option('a'), 1)
self.assertEqual(self.cf.get_option('b.c'), 'hullo')
self.cf.set_option('a', 2)
self.cf.set_option('b.c', 'wurld')
self.assertEqual(self.cf.get_option('a'), 2)
self.assertEqual(self.cf.get_option('b.c'), 'wurld')
self.cf.reset_option("all")
self.assertEqual(self.cf.get_option('a'), 1)
self.assertEqual(self.cf.get_option('b.c'), 'hullo')
def test_deprecate_option(self):
import sys
self.cf.deprecate_option(
'foo') # we can deprecate non-existent options
# testing warning with catch_warning was only added in 2.6
if sys.version_info[:2] < (2, 6):
raise nose.SkipTest("Need py > 2.6")
self.assertTrue(self.cf._is_deprecated('foo'))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
try:
self.cf.get_option('foo')
except KeyError:
pass
else:
self.fail("Nonexistent option didn't raise KeyError")
self.assertEqual(len(w), 1) # should have raised one warning
self.assertTrue(
'deprecated' in str(w[-1])) # we get the default message
self.cf.register_option('a', 1, 'doc', validator=self.cf.is_int)
self.cf.register_option('b.c', 'hullo', 'doc2')
self.cf.register_option('foo', 'hullo', 'doc2')
self.cf.deprecate_option('a', removal_ver='nifty_ver')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.cf.get_option('a')
self.assertEqual(len(w), 1) # should have raised one warning
self.assertTrue(
'eprecated' in str(w[-1])) # we get the default message
self.assertTrue(
'nifty_ver' in str(w[-1])) # with the removal_ver quoted
self.assertRaises(
KeyError, self.cf.deprecate_option, 'a') # can't depr. twice
self.cf.deprecate_option('b.c', 'zounds!')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.cf.get_option('b.c')
self.assertEqual(len(w), 1) # should have raised one warning
self.assertTrue(
'zounds!' in str(w[-1])) # we get the custom message
# test rerouting keys
self.cf.register_option('d.a', 'foo', 'doc2')
self.cf.register_option('d.dep', 'bar', 'doc2')
self.assertEqual(self.cf.get_option('d.a'), 'foo')
self.assertEqual(self.cf.get_option('d.dep'), 'bar')
self.cf.deprecate_option('d.dep', rkey='d.a') # reroute d.dep to d.a
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.assertEqual(self.cf.get_option('d.dep'), 'foo')
self.assertEqual(len(w), 1) # should have raised one warning
self.assertTrue(
'eprecated' in str(w[-1])) # we get the custom message
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.cf.set_option('d.dep', 'baz') # should overwrite "d.a"
self.assertEqual(len(w), 1) # should have raised one warning
self.assertTrue(
'eprecated' in str(w[-1])) # we get the custom message
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.assertEqual(self.cf.get_option('d.dep'), 'baz')
self.assertEqual(len(w), 1) # should have raised one warning
self.assertTrue(
'eprecated' in str(w[-1])) # we get the custom message
def test_config_prefix(self):
with self.cf.config_prefix("base"):
self.cf.register_option('a', 1, "doc1")
self.cf.register_option('b', 2, "doc2")
self.assertEqual(self.cf.get_option('a'), 1)
self.assertEqual(self.cf.get_option('b'), 2)
self.cf.set_option('a', 3)
self.cf.set_option('b', 4)
self.assertEqual(self.cf.get_option('a'), 3)
self.assertEqual(self.cf.get_option('b'), 4)
self.assertEqual(self.cf.get_option('base.a'), 3)
self.assertEqual(self.cf.get_option('base.b'), 4)
self.assertTrue(
'doc1' in self.cf.describe_option('base.a', _print_desc=False))
self.assertTrue(
'doc2' in self.cf.describe_option('base.b', _print_desc=False))
self.cf.reset_option('base.a')
self.cf.reset_option('base.b')
with self.cf.config_prefix("base"):
self.assertEqual(self.cf.get_option('a'), 1)
self.assertEqual(self.cf.get_option('b'), 2)
def test_callback(self):
k = [None]
v = [None]
def callback(key):
k.append(key)
v.append(self.cf.get_option(key))
self.cf.register_option('d.a', 'foo', cb=callback)
self.cf.register_option('d.b', 'foo', cb=callback)
del k[-1], v[-1]
self.cf.set_option("d.a", "fooz")
self.assertEqual(k[-1], "d.a")
self.assertEqual(v[-1], "fooz")
del k[-1], v[-1]
self.cf.set_option("d.b", "boo")
self.assertEqual(k[-1], "d.b")
self.assertEqual(v[-1], "boo")
del k[-1], v[-1]
self.cf.reset_option("d.b")
self.assertEqual(k[-1], "d.b")
def test_set_ContextManager(self):
def eq(val):
self.assertEqual(self.cf.get_option("a"), val)
self.cf.register_option('a', 0)
eq(0)
with self.cf.option_context("a", 15):
eq(15)
with self.cf.option_context("a", 25):
eq(25)
eq(15)
eq(0)
self.cf.set_option("a", 17)
eq(17)
def test_attribute_access(self):
holder = []
def f():
options.b = 1
def f2():
options.display = 1
def f3(key):
holder.append(True)
self.cf.register_option('a', 0)
self.cf.register_option('c', 0, cb=f3)
options = self.cf.options
self.assertEqual(options.a, 0)
with self.cf.option_context("a", 15):
self.assertEqual(options.a, 15)
options.a = 500
self.assertEqual(self.cf.get_option("a"), 500)
self.cf.reset_option("a")
self.assertEqual(options.a, self.cf.get_option("a", 0))
self.assertRaises(KeyError, f)
self.assertRaises(KeyError, f2)
# make sure callback kicks when using this form of setting
options.c = 1
self.assertEqual(len(holder), 1)
def test_option_context_scope(self):
# Ensure that creating a context does not affect the existing
# environment as it is supposed to be used with the `with` statement.
# See https://github.com/pydata/pandas/issues/8514
original_value = 60
context_value = 10
option_name = 'a'
self.cf.register_option(option_name, original_value)
# Ensure creating contexts didn't affect the current context.
ctx = self.cf.option_context(option_name, context_value)
self.assertEqual(self.cf.get_option(option_name), original_value)
# Ensure the correct value is available inside the context.
with ctx:
self.assertEqual(self.cf.get_option(option_name), context_value)
# Ensure the current context is reset
self.assertEqual(self.cf.get_option(option_name), original_value)
| gpl-2.0 |
tmills/uda | scripts/reindex_liblinear.py | 1 | 3199 | #!/usr/bin/env python
from sklearn.datasets import load_svmlight_file, dump_svmlight_file
import numpy as np
import sys
from os.path import dirname, join
from uda_common import read_feature_lookup, read_feature_groups
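## Read an svmlight training file, drop features occurring fewer than a small
## number of times until the count fits the budget implied by the output directory
## name, rewrite the feature lookup and feature group files, and dump the reduced
## matrix back to svmlight format.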
def main(args):
if len(args) < 2:
sys.stderr.write("Error: At least two required arguments: <Training input> <reduced training output file>\n")
sys.exit(-1)
X_train, y_train = load_svmlight_file(args[0])
(num_instances, num_feats) = X_train.shape
data_dir = dirname(args[0])
out_file = args[1]
out_dir = dirname(out_file)
num_feats_allowed = int(out_dir.split("_")[-1].replace('k', '000'))
sys.stderr.write("Argument indicates feature threshold of %d features allowed\n" % (num_feats_allowed))
## Find the low-frequency features we can discard:
feat_counts = X_train.sum(0)
for count in range(1,10):
lowfreq_inds = np.where(feat_counts < count)[1]
num_good_feats = num_feats - len(lowfreq_inds)
sys.stderr.write("Found %d features that occurred >= %d times in the data\n" % (num_good_feats, count))
if num_good_feats < num_feats_allowed:
sys.stderr.write("Breaking at threshold %d\n" % (count))
break
## Get the feature mapping, giving it an offset of -1 to adjust for the fact that
## java wrote the liblinear file starting at index 1 while load_svmlight_file gives it to us starting at
## index 0
old_feat_map = read_feature_lookup(join(data_dir, 'features-lookup.txt'), offset=-1)
old_groups = read_feature_groups(join(data_dir, 'feature-groups.txt'), offset=-1)
new_groups = {}
for domain in old_groups.keys():
new_groups[domain] = []
new_map_file = open(join(out_dir, 'reduced-features-lookup.txt'), 'w')
## Create a reduced feature matrix with only common features:
new_X = np.matrix(np.zeros((num_instances, num_good_feats)))
sys.stderr.write('Building matrix in reduced dimension space\n')
new_ind = 0
for ind in range(num_feats):
feat_name = old_feat_map[ind]
if ind not in lowfreq_inds:
## Use this feature:
# 1) Write its mapping to feature lookup file: (unless it's the bias feature)
if ind != 0:
new_map_file.write('%s : %d\n' % (feat_name, new_ind))
# 2) Add its column to the data matrix:
new_X[:,new_ind] += X_train[:,ind].toarray()
# 3) Add its index to the group mapping file:
for feat_type in old_groups.keys():
if ind in old_groups[feat_type]:
new_groups[feat_type].append(new_ind)
new_ind += 1
new_map_file.close()
sys.stderr.write('Writing reduced feature groups file\n')
new_group_file = open(join(out_dir, 'reduced-feature-groups.txt'), 'w')
for feat_type in new_groups.keys():
new_group_file.write('%s : %s\n' % (feat_type, ','.join(map(str,new_groups[feat_type]))))
new_group_file.close()
sys.stderr.write('Writing new svmlight file\n')
f = open(out_file, 'w')
dump_svmlight_file(new_X, y_train, f)
f.close()
if __name__ == '__main__':
args = sys.argv[1:]
main(args)
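# Rough usage sketch (hypothetical paths, not taken from this repo):
#   python reindex_liblinear.py domain_data/training-data.liblinear domain_data_10k/training-data.liblinear
# Note that main() infers the allowed feature count from the output directory
# name (out_dir.split("_")[-1]), so a suffix like "_10k" means 10000 features.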
| apache-2.0 |
chanceraine/nupic.research | projects/sequence_prediction/mackey_glass/nupic_output.py | 13 | 6035 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Provides two classes with the same signature for writing data out of NuPIC
models.
(This is a component of the One Hot Gym Prediction Tutorial.)
"""
import csv
from collections import deque
from abc import ABCMeta, abstractmethod
# Try to import matplotlib; it is optional (only needed for plot output).
try:
import matplotlib
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.dates import date2num
except ImportError:
pass
WINDOW = 100
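# WINDOW is the number of most recent samples retained for plotting; it is used
# below as the maxlen of the deques that back each line in NuPICPlotOutput.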
class NuPICOutput(object):
__metaclass__ = ABCMeta
def __init__(self, names, showAnomalyScore=False):
self.names = names
self.showAnomalyScore = showAnomalyScore
@abstractmethod
def write(self, timestamps, actualValues, predictedValues,
predictionStep=1):
pass
@abstractmethod
def close(self):
pass
class NuPICFileOutput(NuPICOutput):
def __init__(self, *args, **kwargs):
super(NuPICFileOutput, self).__init__(*args, **kwargs)
self.outputFiles = []
self.outputWriters = []
self.lineCounts = []
headerRow = ['timestamp', 'y', 'prediction']
for name in self.names:
self.lineCounts.append(0)
outputFileName = "out_%s" % name
print "Preparing to output %s data to %s" % (name, outputFileName)
outputFile = open(outputFileName, "w")
self.outputFiles.append(outputFile)
outputWriter = csv.writer(outputFile)
self.outputWriters.append(outputWriter)
outputWriter.writerow(headerRow)
def write(self, timestamps, actualValues, predictedValues,
predictionStep=1):
assert len(timestamps) == len(actualValues) == len(predictedValues)
for index in range(len(self.names)):
timestamp = timestamps[index]
actual = actualValues[index]
prediction = predictedValues[index]
writer = self.outputWriters[index]
if timestamp is not None:
outputRow = [timestamp, actual, prediction]
writer.writerow(outputRow)
self.lineCounts[index] += 1
def close(self):
for index, name in enumerate(self.names):
self.outputFiles[index].close()
print "Done. Wrote %i data lines to %s." % (self.lineCounts[index], name)
class NuPICPlotOutput(NuPICOutput):
def __init__(self, *args, **kwargs):
super(NuPICPlotOutput, self).__init__(*args, **kwargs)
# Turn matplotlib interactive mode on.
plt.ion()
self.dates = []
self.convertedDates = []
self.actualValues = []
self.predictedValues = []
self.actualLines = []
self.predictedLines = []
self.linesInitialized = False
self.graphs = []
plotCount = len(self.names)
plotHeight = max(plotCount * 3, 6)
fig = plt.figure(figsize=(14, plotHeight))
gs = gridspec.GridSpec(plotCount, 1)
for index in range(len(self.names)):
self.graphs.append(fig.add_subplot(gs[index, 0]))
plt.title(self.names[index])
plt.ylabel('KW Energy Consumption')
plt.xlabel('Date')
plt.tight_layout()
def initializeLines(self, timestamps):
for index in range(len(self.names)):
print "initializing %s" % self.names[index]
# graph = self.graphs[index]
self.dates.append(deque([timestamps[index]] * WINDOW, maxlen=WINDOW))
self.convertedDates.append(deque(
[date2num(date) for date in self.dates[index]], maxlen=WINDOW
))
self.actualValues.append(deque([0.0] * WINDOW, maxlen=WINDOW))
self.predictedValues.append(deque([0.0] * WINDOW, maxlen=WINDOW))
actualPlot, = self.graphs[index].plot(
self.dates[index], self.actualValues[index]
)
self.actualLines.append(actualPlot)
predictedPlot, = self.graphs[index].plot(
self.dates[index], self.predictedValues[index]
)
self.predictedLines.append(predictedPlot)
self.linesInitialized = True
def write(self, timestamps, actualValues, predictedValues,
predictionStep=1):
assert len(timestamps) == len(actualValues) == len(predictedValues)
# We need the first timestamp to initialize the lines at the right X value,
# so do that check first.
if not self.linesInitialized:
self.initializeLines(timestamps)
for index in range(len(self.names)):
self.dates[index].append(timestamps[index])
self.convertedDates[index].append(date2num(timestamps[index]))
self.actualValues[index].append(actualValues[index])
self.predictedValues[index].append(predictedValues[index])
# Update data
self.actualLines[index].set_xdata(self.convertedDates[index])
self.actualLines[index].set_ydata(self.actualValues[index])
self.predictedLines[index].set_xdata(self.convertedDates[index])
self.predictedLines[index].set_ydata(self.predictedValues[index])
self.graphs[index].relim()
self.graphs[index].autoscale_view(True, True, True)
plt.draw()
plt.legend(('actual','predicted'), loc=3)
def close(self):
plt.ioff()
plt.show()
NuPICOutput.register(NuPICFileOutput)
NuPICOutput.register(NuPICPlotOutput)
| agpl-3.0 |
rs2/pandas | pandas/tests/indexes/timedeltas/test_timedelta_range.py | 3 | 3258 | import numpy as np
import pytest
from pandas import Timedelta, timedelta_range, to_timedelta
import pandas._testing as tm
from pandas.tseries.offsets import Day, Second
class TestTimedeltas:
def test_timedelta_range(self):
expected = to_timedelta(np.arange(5), unit="D")
result = timedelta_range("0 days", periods=5, freq="D")
tm.assert_index_equal(result, expected)
expected = to_timedelta(np.arange(11), unit="D")
result = timedelta_range("0 days", "10 days", freq="D")
tm.assert_index_equal(result, expected)
expected = to_timedelta(np.arange(5), unit="D") + Second(2) + Day()
result = timedelta_range("1 days, 00:00:02", "5 days, 00:00:02", freq="D")
tm.assert_index_equal(result, expected)
expected = to_timedelta([1, 3, 5, 7, 9], unit="D") + Second(2)
result = timedelta_range("1 days, 00:00:02", periods=5, freq="2D")
tm.assert_index_equal(result, expected)
expected = to_timedelta(np.arange(50), unit="T") * 30
result = timedelta_range("0 days", freq="30T", periods=50)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"periods, freq", [(3, "2D"), (5, "D"), (6, "19H12T"), (7, "16H"), (9, "12H")]
)
def test_linspace_behavior(self, periods, freq):
# GH 20976
result = timedelta_range(start="0 days", end="4 days", periods=periods)
expected = timedelta_range(start="0 days", end="4 days", freq=freq)
tm.assert_index_equal(result, expected)
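# In the parametrization above each freq equals (end - start) / (periods - 1):
# e.g. periods=6 over 4 days gives 96h / 5 = 19h12min, hence "19H12T".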
def test_errors(self):
# not enough params
msg = (
"Of the four parameters: start, end, periods, and freq, "
"exactly three must be specified"
)
with pytest.raises(ValueError, match=msg):
timedelta_range(start="0 days")
with pytest.raises(ValueError, match=msg):
timedelta_range(end="5 days")
with pytest.raises(ValueError, match=msg):
timedelta_range(periods=2)
with pytest.raises(ValueError, match=msg):
timedelta_range()
# too many params
with pytest.raises(ValueError, match=msg):
timedelta_range(start="0 days", end="5 days", periods=10, freq="H")
@pytest.mark.parametrize(
"start, end, freq, expected_periods",
[
("1D", "10D", "2D", (10 - 1) // 2 + 1),
("2D", "30D", "3D", (30 - 2) // 3 + 1),
("2s", "50s", "5s", (50 - 2) // 5 + 1),
# tests that worked before GH 33498:
("4D", "16D", "3D", (16 - 4) // 3 + 1),
("8D", "16D", "40s", (16 * 3600 * 24 - 8 * 3600 * 24) // 40 + 1),
],
)
def test_timedelta_range_freq_divide_end(self, start, end, freq, expected_periods):
# GH 33498 only the cases where `(end % freq) == 0` used to fail
res = timedelta_range(start=start, end=end, freq=freq)
assert Timedelta(start) == res[0]
assert Timedelta(end) >= res[-1]
assert len(res) == expected_periods
def test_timedelta_range_infer_freq(self):
# https://github.com/pandas-dev/pandas/issues/35897
result = timedelta_range("0s", "1s", periods=31)
assert result.freq is None
| bsd-3-clause |
dhermes/google-cloud-python | bigquery/tests/system.py | 2 | 73618 | # Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import concurrent.futures
import csv
import datetime
import decimal
import json
import operator
import os
import time
import unittest
import uuid
import re
import six
import pytest
try:
import pandas
except ImportError: # pragma: NO COVER
pandas = None
try:
import IPython
from IPython.utils import io
from IPython.testing import tools
from IPython.terminal import interactiveshell
except ImportError: # pragma: NO COVER
IPython = None
from google.api_core.exceptions import PreconditionFailed
from google.api_core.exceptions import BadRequest
from google.api_core.exceptions import Conflict
from google.api_core.exceptions import Forbidden
from google.api_core.exceptions import NotFound
from google.api_core.exceptions import TooManyRequests
from google.cloud import bigquery
from google.cloud.bigquery.dataset import Dataset
from google.cloud.bigquery.dataset import DatasetReference
from google.cloud.bigquery.table import Table
from google.cloud._helpers import UTC
from google.cloud.bigquery import dbapi
from google.cloud import storage
from test_utils.retry import RetryErrors
from test_utils.retry import RetryInstanceState
from test_utils.retry import RetryResult
from test_utils.system import unique_resource_id
JOB_TIMEOUT = 120 # 2 minutes
WHERE = os.path.abspath(os.path.dirname(__file__))
# Common table data used for many tests.
ROWS = [
("Phred Phlyntstone", 32),
("Bharney Rhubble", 33),
("Wylma Phlyntstone", 29),
("Bhettye Rhubble", 27),
]
HEADER_ROW = ("Full Name", "Age")
SCHEMA = [
bigquery.SchemaField("full_name", "STRING", mode="REQUIRED"),
bigquery.SchemaField("age", "INTEGER", mode="REQUIRED"),
]
TIME_PARTITIONING_CLUSTERING_FIELDS_SCHEMA = [
bigquery.SchemaField("transaction_time", "TIMESTAMP", mode="REQUIRED"),
bigquery.SchemaField("transaction_id", "INTEGER", mode="REQUIRED"),
bigquery.SchemaField("user_email", "STRING", mode="REQUIRED"),
bigquery.SchemaField("store_code", "STRING", mode="REQUIRED"),
bigquery.SchemaField(
"items",
"RECORD",
mode="REPEATED",
fields=[
bigquery.SchemaField("item_code", "STRING", mode="REQUIRED"),
bigquery.SchemaField("quantity", "INTEGER", mode="REQUIRED"),
bigquery.SchemaField("comments", "STRING", mode="NULLABLE"),
bigquery.SchemaField("expiration_date", "DATE", mode="REQUIRED"),
],
),
]
def _has_rows(result):
return len(result) > 0
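# _has_rows is used below as a RetryResult predicate so that list_rows calls are
# retried until the streaming buffer makes freshly inserted rows visible.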
def _make_dataset_id(prefix):
return "%s%s" % (prefix, unique_resource_id())
def _load_json_schema(filename="data/schema.json"):
from google.cloud.bigquery.table import _parse_schema_resource
json_filename = os.path.join(WHERE, filename)
with open(json_filename, "r") as schema_file:
return _parse_schema_resource(json.load(schema_file))
def _rate_limit_exceeded(forbidden):
"""Predicate: pass only exceptions with 'rateLimitExceeded' as reason."""
return any(error["reason"] == "rateLimitExceeded" for error in forbidden._errors)
# We need to wait to stay within the rate limits.
# The alternative outcome is a 403 Forbidden response from upstream, which
# they return instead of the more appropriate 429.
# See https://cloud.google.com/bigquery/quota-policy
retry_403 = RetryErrors(Forbidden, error_predicate=_rate_limit_exceeded)
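# retry_403 wraps quota-sensitive calls throughout these tests, e.g.:
#   dataset = retry_403(client.create_dataset)(dataset_arg)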
class Config(object):
"""Run-time configuration to be modified at set-up.
This is a mutable stand-in to allow test set-up to modify
global state.
"""
CLIENT = None
CURSOR = None
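# Both attributes are populated in setUpModule() below, so every test shares a
# single client and DB-API cursor.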
def setUpModule():
Config.CLIENT = bigquery.Client()
Config.CURSOR = dbapi.connect(Config.CLIENT).cursor()
class TestBigQuery(unittest.TestCase):
def setUp(self):
self.to_delete = []
def tearDown(self):
def _still_in_use(bad_request):
return any(
error["reason"] == "resourceInUse" for error in bad_request._errors
)
retry_in_use = RetryErrors(BadRequest, error_predicate=_still_in_use)
retry_409_429 = RetryErrors((Conflict, TooManyRequests))
for doomed in self.to_delete:
if isinstance(doomed, storage.Bucket):
retry_409_429(doomed.delete)(force=True)
elif isinstance(doomed, (Dataset, bigquery.DatasetReference)):
retry_in_use(Config.CLIENT.delete_dataset)(doomed, delete_contents=True)
elif isinstance(doomed, (Table, bigquery.TableReference)):
retry_in_use(Config.CLIENT.delete_table)(doomed)
else:
doomed.delete()
def test_get_service_account_email(self):
client = Config.CLIENT
got = client.get_service_account_email()
self.assertIsInstance(got, six.text_type)
self.assertIn("@", got)
def test_create_dataset(self):
DATASET_ID = _make_dataset_id("create_dataset")
dataset = self.temp_dataset(DATASET_ID)
self.assertTrue(_dataset_exists(dataset))
self.assertEqual(dataset.dataset_id, DATASET_ID)
self.assertEqual(dataset.project, Config.CLIENT.project)
def test_get_dataset(self):
dataset_id = _make_dataset_id("get_dataset")
client = Config.CLIENT
dataset_arg = Dataset(client.dataset(dataset_id))
dataset_arg.friendly_name = "Friendly"
dataset_arg.description = "Description"
dataset = retry_403(client.create_dataset)(dataset_arg)
self.to_delete.append(dataset)
dataset_ref = client.dataset(dataset_id)
# Get with a reference.
got = client.get_dataset(dataset_ref)
self.assertEqual(got.friendly_name, "Friendly")
self.assertEqual(got.description, "Description")
# Get with a string.
got = client.get_dataset(dataset_id)
self.assertEqual(got.friendly_name, "Friendly")
self.assertEqual(got.description, "Description")
# Get with a fully-qualified string.
got = client.get_dataset("{}.{}".format(client.project, dataset_id))
self.assertEqual(got.friendly_name, "Friendly")
self.assertEqual(got.description, "Description")
def test_update_dataset(self):
dataset = self.temp_dataset(_make_dataset_id("update_dataset"))
self.assertTrue(_dataset_exists(dataset))
self.assertIsNone(dataset.friendly_name)
self.assertIsNone(dataset.description)
self.assertEqual(dataset.labels, {})
dataset.friendly_name = "Friendly"
dataset.description = "Description"
dataset.labels = {"priority": "high", "color": "blue"}
ds2 = Config.CLIENT.update_dataset(
dataset, ("friendly_name", "description", "labels")
)
self.assertEqual(ds2.friendly_name, "Friendly")
self.assertEqual(ds2.description, "Description")
self.assertEqual(ds2.labels, {"priority": "high", "color": "blue"})
ds2.labels = {
"color": "green", # change
"shape": "circle", # add
"priority": None, # delete
}
ds3 = Config.CLIENT.update_dataset(ds2, ["labels"])
self.assertEqual(ds3.labels, {"color": "green", "shape": "circle"})
# If we try to update using ds2 again, it will fail because the
# previous update changed the ETag.
ds2.description = "no good"
with self.assertRaises(PreconditionFailed):
Config.CLIENT.update_dataset(ds2, ["description"])
def test_list_datasets(self):
datasets_to_create = [
"new" + unique_resource_id(),
"newer" + unique_resource_id(),
"newest" + unique_resource_id(),
]
for dataset_id in datasets_to_create:
self.temp_dataset(dataset_id)
# Retrieve the datasets.
iterator = Config.CLIENT.list_datasets()
all_datasets = list(iterator)
self.assertIsNone(iterator.next_page_token)
created = [
dataset
for dataset in all_datasets
if dataset.dataset_id in datasets_to_create
and dataset.project == Config.CLIENT.project
]
self.assertEqual(len(created), len(datasets_to_create))
def test_list_datasets_w_project(self):
# Retrieve datasets from a different project.
iterator = Config.CLIENT.list_datasets(project="bigquery-public-data")
all_datasets = frozenset([dataset.dataset_id for dataset in iterator])
self.assertIn("usa_names", all_datasets)
def test_create_table(self):
dataset = self.temp_dataset(_make_dataset_id("create_table"))
table_id = "test_table"
table_arg = Table(dataset.table(table_id), schema=SCHEMA)
self.assertFalse(_table_exists(table_arg))
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
self.assertTrue(_table_exists(table))
self.assertEqual(table.table_id, table_id)
def test_create_table_w_time_partitioning_w_clustering_fields(self):
from google.cloud.bigquery.table import TimePartitioning
from google.cloud.bigquery.table import TimePartitioningType
dataset = self.temp_dataset(_make_dataset_id("create_table_tp_cf"))
table_id = "test_table"
table_arg = Table(
dataset.table(table_id), schema=TIME_PARTITIONING_CLUSTERING_FIELDS_SCHEMA
)
self.assertFalse(_table_exists(table_arg))
table_arg.time_partitioning = TimePartitioning(field="transaction_time")
table_arg.clustering_fields = ["user_email", "store_code"]
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
self.assertTrue(_table_exists(table))
self.assertEqual(table.table_id, table_id)
time_partitioning = table.time_partitioning
self.assertEqual(time_partitioning.type_, TimePartitioningType.DAY)
self.assertEqual(time_partitioning.field, "transaction_time")
self.assertEqual(table.clustering_fields, ["user_email", "store_code"])
def test_delete_dataset_with_string(self):
dataset_id = _make_dataset_id("delete_table_true")
dataset_ref = Config.CLIENT.dataset(dataset_id)
retry_403(Config.CLIENT.create_dataset)(Dataset(dataset_ref))
self.assertTrue(_dataset_exists(dataset_ref))
Config.CLIENT.delete_dataset(dataset_id)
self.assertFalse(_dataset_exists(dataset_ref))
def test_delete_dataset_delete_contents_true(self):
dataset_id = _make_dataset_id("delete_table_true")
dataset = retry_403(Config.CLIENT.create_dataset)(
Dataset(Config.CLIENT.dataset(dataset_id))
)
table_id = "test_table"
table_arg = Table(dataset.table(table_id), schema=SCHEMA)
table = retry_403(Config.CLIENT.create_table)(table_arg)
Config.CLIENT.delete_dataset(dataset, delete_contents=True)
self.assertFalse(_table_exists(table))
def test_delete_dataset_delete_contents_false(self):
from google.api_core import exceptions
dataset = self.temp_dataset(_make_dataset_id("delete_table_false"))
table_id = "test_table"
table_arg = Table(dataset.table(table_id), schema=SCHEMA)
retry_403(Config.CLIENT.create_table)(table_arg)
with self.assertRaises(exceptions.BadRequest):
Config.CLIENT.delete_dataset(dataset)
def test_get_table_w_public_dataset(self):
public = "bigquery-public-data"
dataset_id = "samples"
table_id = "shakespeare"
table_ref = DatasetReference(public, dataset_id).table(table_id)
# Get table with reference.
table = Config.CLIENT.get_table(table_ref)
self.assertEqual(table.table_id, table_id)
self.assertEqual(table.dataset_id, dataset_id)
self.assertEqual(table.project, public)
schema_names = [field.name for field in table.schema]
self.assertEqual(schema_names, ["word", "word_count", "corpus", "corpus_date"])
# Get table with string.
table = Config.CLIENT.get_table("{}.{}.{}".format(public, dataset_id, table_id))
self.assertEqual(table.table_id, table_id)
self.assertEqual(table.dataset_id, dataset_id)
self.assertEqual(table.project, public)
def test_list_partitions(self):
table_ref = DatasetReference(
"bigquery-public-data", "ethereum_blockchain"
).table("blocks")
all_rows = Config.CLIENT.list_partitions(table_ref)
self.assertIn("20180801", all_rows)
self.assertGreater(len(all_rows), 1000)
def test_list_tables(self):
dataset_id = _make_dataset_id("list_tables")
dataset = self.temp_dataset(dataset_id)
# Retrieve tables before any are created for the dataset.
iterator = Config.CLIENT.list_tables(dataset)
all_tables = list(iterator)
self.assertEqual(all_tables, [])
self.assertIsNone(iterator.next_page_token)
# Insert some tables to be listed.
tables_to_create = [
"new" + unique_resource_id(),
"newer" + unique_resource_id(),
"newest" + unique_resource_id(),
]
for table_name in tables_to_create:
table = Table(dataset.table(table_name), schema=SCHEMA)
created_table = retry_403(Config.CLIENT.create_table)(table)
self.to_delete.insert(0, created_table)
# Retrieve the tables.
iterator = Config.CLIENT.list_tables(dataset)
all_tables = list(iterator)
self.assertIsNone(iterator.next_page_token)
created = [
table
for table in all_tables
if (table.table_id in tables_to_create and table.dataset_id == dataset_id)
]
self.assertEqual(len(created), len(tables_to_create))
# List tables with a string ID.
iterator = Config.CLIENT.list_tables(dataset_id)
self.assertGreater(len(list(iterator)), 0)
# List tables with a fully-qualified string ID.
iterator = Config.CLIENT.list_tables(
"{}.{}".format(Config.CLIENT.project, dataset_id)
)
self.assertGreater(len(list(iterator)), 0)
def test_update_table(self):
dataset = self.temp_dataset(_make_dataset_id("update_table"))
TABLE_NAME = "test_table"
table_arg = Table(dataset.table(TABLE_NAME), schema=SCHEMA)
self.assertFalse(_table_exists(table_arg))
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
self.assertTrue(_table_exists(table))
self.assertIsNone(table.friendly_name)
self.assertIsNone(table.description)
self.assertEqual(table.labels, {})
table.friendly_name = "Friendly"
table.description = "Description"
table.labels = {"priority": "high", "color": "blue"}
table2 = Config.CLIENT.update_table(
table, ["friendly_name", "description", "labels"]
)
self.assertEqual(table2.friendly_name, "Friendly")
self.assertEqual(table2.description, "Description")
self.assertEqual(table2.labels, {"priority": "high", "color": "blue"})
table2.description = None
table2.labels = {
"color": "green", # change
"shape": "circle", # add
"priority": None, # delete
}
table3 = Config.CLIENT.update_table(table2, ["description", "labels"])
self.assertIsNone(table3.description)
self.assertEqual(table3.labels, {"color": "green", "shape": "circle"})
# If we try to update using table2 again, it will fail because the
# previous update changed the ETag.
table2.description = "no good"
with self.assertRaises(PreconditionFailed):
Config.CLIENT.update_table(table2, ["description"])
def test_update_table_schema(self):
dataset = self.temp_dataset(_make_dataset_id("update_table"))
TABLE_NAME = "test_table"
table_arg = Table(dataset.table(TABLE_NAME), schema=SCHEMA)
self.assertFalse(_table_exists(table_arg))
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
self.assertTrue(_table_exists(table))
voter = bigquery.SchemaField("voter", "BOOLEAN", mode="NULLABLE")
schema = table.schema
schema.append(voter)
table.schema = schema
updated_table = Config.CLIENT.update_table(table, ["schema"])
self.assertEqual(len(updated_table.schema), len(schema))
for found, expected in zip(updated_table.schema, schema):
self.assertEqual(found.name, expected.name)
self.assertEqual(found.field_type, expected.field_type)
self.assertEqual(found.mode, expected.mode)
@staticmethod
def _fetch_single_page(table, selected_fields=None):
iterator = Config.CLIENT.list_rows(table, selected_fields=selected_fields)
page = six.next(iterator.pages)
return list(page)
def _create_table_many_columns(self, rowcount):
# Generate a table of maximum width via CREATE TABLE AS SELECT.
# The first column is named 'rowval' and has a value from 1..rowcount.
# Subsequent columns are named col_<N> and contain the value N*rowval,
# where N is between 1 and 9999 inclusive.
dsname = _make_dataset_id("wide_schema")
dataset = self.temp_dataset(dsname)
table_id = "many_columns"
table_ref = dataset.table(table_id)
self.to_delete.insert(0, table_ref)
colprojections = ",".join(
["r * {} as col_{}".format(n, n) for n in range(1, 10000)]
)
sql = """
CREATE TABLE {}.{}
AS
SELECT
r as rowval,
{}
FROM
UNNEST(GENERATE_ARRAY(1,{},1)) as r
""".format(
dsname, table_id, colprojections, rowcount
)
query_job = Config.CLIENT.query(sql)
query_job.result()
self.assertEqual(query_job.statement_type, "CREATE_TABLE_AS_SELECT")
self.assertEqual(query_job.ddl_operation_performed, "CREATE")
self.assertEqual(query_job.ddl_target_table, table_ref)
return table_ref
def test_query_many_columns(self):
# Test working with the widest schema BigQuery supports, 10k columns.
row_count = 2
table_ref = self._create_table_many_columns(row_count)
rows = list(
Config.CLIENT.query(
"SELECT * FROM `{}.{}`".format(table_ref.dataset_id, table_ref.table_id)
)
)
self.assertEqual(len(rows), row_count)
# check field representations adhere to expected values.
correctwidth = 0
badvals = 0
for r in rows:
vals = r._xxx_values
rowval = vals[0]
if len(vals) == 10000:
correctwidth = correctwidth + 1
for n in range(1, 10000):
if vals[n] != rowval * (n):
badvals = badvals + 1
self.assertEqual(correctwidth, row_count)
self.assertEqual(badvals, 0)
def test_insert_rows_then_dump_table(self):
NOW_SECONDS = 1448911495.484366
NOW = datetime.datetime.utcfromtimestamp(NOW_SECONDS).replace(tzinfo=UTC)
ROWS = [
("Phred Phlyntstone", 32, NOW),
("Bharney Rhubble", 33, NOW + datetime.timedelta(seconds=10)),
("Wylma Phlyntstone", 29, NOW + datetime.timedelta(seconds=20)),
("Bhettye Rhubble", 27, None),
]
ROW_IDS = range(len(ROWS))
dataset = self.temp_dataset(_make_dataset_id("insert_rows_then_dump"))
TABLE_ID = "test_table"
schema = [
bigquery.SchemaField("full_name", "STRING", mode="REQUIRED"),
bigquery.SchemaField("age", "INTEGER", mode="REQUIRED"),
bigquery.SchemaField("now", "TIMESTAMP"),
]
table_arg = Table(dataset.table(TABLE_ID), schema=schema)
self.assertFalse(_table_exists(table_arg))
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
self.assertTrue(_table_exists(table))
errors = Config.CLIENT.insert_rows(table, ROWS, row_ids=ROW_IDS)
self.assertEqual(len(errors), 0)
rows = ()
# Allow for "warm up" before rows visible. See
# https://cloud.google.com/bigquery/streaming-data-into-bigquery#dataavailability
# 8 tries -> 1 + 2 + 4 + 8 + 16 + 32 + 64 = 127 seconds
retry = RetryResult(_has_rows, max_tries=8)
rows = retry(self._fetch_single_page)(table)
row_tuples = [r.values() for r in rows]
by_age = operator.itemgetter(1)
self.assertEqual(sorted(row_tuples, key=by_age), sorted(ROWS, key=by_age))
def test_load_table_from_local_avro_file_then_dump_table(self):
from google.cloud.bigquery.job import SourceFormat
from google.cloud.bigquery.job import WriteDisposition
TABLE_NAME = "test_table_avro"
ROWS = [
("violet", 400),
("indigo", 445),
("blue", 475),
("green", 510),
("yellow", 570),
("orange", 590),
("red", 650),
]
dataset = self.temp_dataset(_make_dataset_id("load_local_then_dump"))
table_ref = dataset.table(TABLE_NAME)
table = Table(table_ref)
self.to_delete.insert(0, table)
with open(os.path.join(WHERE, "data", "colors.avro"), "rb") as avrof:
config = bigquery.LoadJobConfig()
config.source_format = SourceFormat.AVRO
config.write_disposition = WriteDisposition.WRITE_TRUNCATE
job = Config.CLIENT.load_table_from_file(
avrof, table_ref, job_config=config
)
# Retry until done.
job.result(timeout=JOB_TIMEOUT)
self.assertEqual(job.output_rows, len(ROWS))
table = Config.CLIENT.get_table(table)
rows = self._fetch_single_page(table)
row_tuples = [r.values() for r in rows]
by_wavelength = operator.itemgetter(1)
self.assertEqual(
sorted(row_tuples, key=by_wavelength), sorted(ROWS, key=by_wavelength)
)
def test_load_avro_from_uri_then_dump_table(self):
from google.cloud.bigquery.job import CreateDisposition
from google.cloud.bigquery.job import SourceFormat
from google.cloud.bigquery.job import WriteDisposition
table_name = "test_table"
rows = [
("violet", 400),
("indigo", 445),
("blue", 475),
("green", 510),
("yellow", 570),
("orange", 590),
("red", 650),
]
with open(os.path.join(WHERE, "data", "colors.avro"), "rb") as f:
GS_URL = self._write_avro_to_storage(
"bq_load_test" + unique_resource_id(), "colors.avro", f
)
dataset = self.temp_dataset(_make_dataset_id("bq_load_test"))
table_arg = dataset.table(table_name)
table = retry_403(Config.CLIENT.create_table)(Table(table_arg))
self.to_delete.insert(0, table)
config = bigquery.LoadJobConfig()
config.create_disposition = CreateDisposition.CREATE_NEVER
config.source_format = SourceFormat.AVRO
config.write_disposition = WriteDisposition.WRITE_EMPTY
job = Config.CLIENT.load_table_from_uri(GS_URL, table_arg, job_config=config)
job.result(timeout=JOB_TIMEOUT)
self.assertEqual(job.output_rows, len(rows))
table = Config.CLIENT.get_table(table)
fetched = self._fetch_single_page(table)
row_tuples = [r.values() for r in fetched]
self.assertEqual(
sorted(row_tuples, key=lambda x: x[1]), sorted(rows, key=lambda x: x[1])
)
def test_load_table_from_uri_then_dump_table(self):
from google.cloud.bigquery.job import CreateDisposition
from google.cloud.bigquery.job import SourceFormat
from google.cloud.bigquery.job import WriteDisposition
TABLE_ID = "test_table"
GS_URL = self._write_csv_to_storage(
"bq_load_test" + unique_resource_id(), "person_ages.csv", HEADER_ROW, ROWS
)
dataset = self.temp_dataset(_make_dataset_id("load_gcs_then_dump"))
table_arg = Table(dataset.table(TABLE_ID), schema=SCHEMA)
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
config = bigquery.LoadJobConfig()
config.create_disposition = CreateDisposition.CREATE_NEVER
config.skip_leading_rows = 1
config.source_format = SourceFormat.CSV
config.write_disposition = WriteDisposition.WRITE_EMPTY
job = Config.CLIENT.load_table_from_uri(
GS_URL, dataset.table(TABLE_ID), job_config=config
)
# Allow for "warm up" before rows visible. See
# https://cloud.google.com/bigquery/streaming-data-into-bigquery#dataavailability
# 8 tries -> 1 + 2 + 4 + 8 + 16 + 32 + 64 = 127 seconds
retry = RetryInstanceState(_job_done, max_tries=8)
retry(job.reload)()
rows = self._fetch_single_page(table)
row_tuples = [r.values() for r in rows]
by_age = operator.itemgetter(1)
self.assertEqual(sorted(row_tuples, key=by_age), sorted(ROWS, key=by_age))
def test_load_table_from_file_w_explicit_location(self):
# Create a temporary bucket for extract files.
storage_client = storage.Client()
bucket_name = "bq_load_table_eu_extract_test" + unique_resource_id()
bucket = storage_client.bucket(bucket_name)
bucket.location = "eu"
self.to_delete.append(bucket)
bucket.create()
# Create a temporary dataset & table in the EU.
table_bytes = six.BytesIO(b"a,3\nb,2\nc,1\n")
client = Config.CLIENT
dataset = self.temp_dataset(_make_dataset_id("eu_load_file"), location="EU")
table_ref = dataset.table("letters")
job_config = bigquery.LoadJobConfig()
job_config.skip_leading_rows = 0
job_config.schema = [
bigquery.SchemaField("letter", "STRING"),
bigquery.SchemaField("value", "INTEGER"),
]
# Load the file to an EU dataset with an EU load job.
load_job = client.load_table_from_file(
table_bytes, table_ref, location="EU", job_config=job_config
)
load_job.result()
job_id = load_job.job_id
# Can get the job from the EU.
load_job = client.get_job(job_id, location="EU")
self.assertEqual(job_id, load_job.job_id)
self.assertEqual("EU", load_job.location)
self.assertTrue(load_job.exists())
# Cannot get the job from the US.
with self.assertRaises(NotFound):
client.get_job(job_id, location="US")
load_job_us = client.get_job(job_id)
load_job_us._properties["jobReference"]["location"] = "US"
self.assertFalse(load_job_us.exists())
with self.assertRaises(NotFound):
load_job_us.reload()
# Can cancel the job from the EU.
self.assertTrue(load_job.cancel())
load_job = client.cancel_job(job_id, location="EU")
self.assertEqual(job_id, load_job.job_id)
self.assertEqual("EU", load_job.location)
# Cannot cancel the job from the US.
with self.assertRaises(NotFound):
client.cancel_job(job_id, location="US")
with self.assertRaises(NotFound):
load_job_us.cancel()
# Can list the table rows.
table = client.get_table(table_ref)
self.assertEqual(table.num_rows, 3)
rows = [(row.letter, row.value) for row in client.list_rows(table)]
self.assertEqual(list(sorted(rows)), [("a", 3), ("b", 2), ("c", 1)])
# Verify location behavior with queries
query_config = bigquery.QueryJobConfig()
query_config.dry_run = True
query_string = "SELECT * FROM `{}.letters` LIMIT 1".format(dataset.dataset_id)
eu_query = client.query(query_string, location="EU", job_config=query_config)
self.assertTrue(eu_query.done)
# Cannot query from US.
with self.assertRaises(BadRequest):
list(client.query(query_string, location="US", job_config=query_config))
# Cannot copy from US.
with self.assertRaises(BadRequest):
client.copy_table(
table_ref, dataset.table("letters2_us"), location="US"
).result()
# Cannot extract from US.
with self.assertRaises(BadRequest):
client.extract_table(
table_ref, "gs://{}/letters-us.csv".format(bucket_name), location="US"
).result()
def _create_storage(self, bucket_name, blob_name):
storage_client = storage.Client()
# In the **very** rare case the bucket name is reserved, this
# fails with a ConnectionError.
bucket = storage_client.create_bucket(bucket_name)
self.to_delete.append(bucket)
return bucket.blob(blob_name)
def _write_csv_to_storage(self, bucket_name, blob_name, header_row, data_rows):
from google.cloud._testing import _NamedTemporaryFile
blob = self._create_storage(bucket_name, blob_name)
with _NamedTemporaryFile() as temp:
with open(temp.name, "w") as csv_write:
writer = csv.writer(csv_write)
writer.writerow(header_row)
writer.writerows(data_rows)
with open(temp.name, "rb") as csv_read:
blob.upload_from_file(csv_read, content_type="text/csv")
self.to_delete.insert(0, blob)
return "gs://{}/{}".format(bucket_name, blob_name)
def _write_avro_to_storage(self, bucket_name, blob_name, avro_file):
blob = self._create_storage(bucket_name, blob_name)
blob.upload_from_file(avro_file, content_type="application/x-avro-binary")
self.to_delete.insert(0, blob)
return "gs://{}/{}".format(bucket_name, blob_name)
def _load_table_for_extract_table(
self, storage_client, rows, bucket_name, blob_name, table
):
from google.cloud._testing import _NamedTemporaryFile
gs_url = "gs://{}/{}".format(bucket_name, blob_name)
# In the **very** rare case the bucket name is reserved, this
# fails with a ConnectionError.
bucket = storage_client.create_bucket(bucket_name)
self.to_delete.append(bucket)
blob = bucket.blob(blob_name)
with _NamedTemporaryFile() as temp:
with open(temp.name, "w") as csv_write:
writer = csv.writer(csv_write)
writer.writerow(HEADER_ROW)
writer.writerows(rows)
with open(temp.name, "rb") as csv_read:
blob.upload_from_file(csv_read, content_type="text/csv")
self.to_delete.insert(0, blob)
dataset = self.temp_dataset(table.dataset_id)
table_ref = dataset.table(table.table_id)
config = bigquery.LoadJobConfig()
config.autodetect = True
job = Config.CLIENT.load_table_from_uri(gs_url, table_ref, job_config=config)
# TODO(jba): do we need this retry now that we have job.result()?
# Allow for "warm up" before rows visible. See
# https://cloud.google.com/bigquery/streaming-data-into-bigquery#dataavailability
# 8 tries -> 1 + 2 + 4 + 8 + 16 + 32 + 64 = 127 seconds
retry = RetryInstanceState(_job_done, max_tries=8)
retry(job.reload)()
def test_extract_table(self):
from google.cloud.storage import Client as StorageClient
storage_client = StorageClient()
local_id = unique_resource_id()
bucket_name = "bq_extract_test" + local_id
blob_name = "person_ages.csv"
dataset_id = _make_dataset_id("load_gcs_then_extract")
table_id = "test_table"
table_ref = Config.CLIENT.dataset(dataset_id).table(table_id)
table = Table(table_ref)
self.to_delete.insert(0, table)
self._load_table_for_extract_table(
storage_client, ROWS, bucket_name, blob_name, table_ref
)
bucket = storage_client.bucket(bucket_name)
destination_blob_name = "person_ages_out.csv"
destination = bucket.blob(destination_blob_name)
destination_uri = "gs://{}/person_ages_out.csv".format(bucket_name)
job = Config.CLIENT.extract_table(table_ref, destination_uri)
job.result(timeout=100)
self.to_delete.insert(0, destination)
got = destination.download_as_string().decode("utf-8")
self.assertIn("Bharney Rhubble", got)
def test_copy_table(self):
# If we create a new table to copy from, the test won't work
# because the new rows will be stored in the streaming buffer,
# and copy jobs don't read the streaming buffer.
# We could wait for the streaming buffer to empty, but that could
# take minutes. Instead we copy a small public table.
source_dataset = DatasetReference("bigquery-public-data", "samples")
source_ref = source_dataset.table("shakespeare")
dest_dataset = self.temp_dataset(_make_dataset_id("copy_table"))
dest_ref = dest_dataset.table("destination_table")
job_config = bigquery.CopyJobConfig()
job = Config.CLIENT.copy_table(source_ref, dest_ref, job_config=job_config)
job.result()
dest_table = Config.CLIENT.get_table(dest_ref)
self.to_delete.insert(0, dest_table)
# Just check that we got some rows.
got_rows = self._fetch_single_page(dest_table)
self.assertTrue(len(got_rows) > 0)
def test_job_cancel(self):
DATASET_ID = _make_dataset_id("job_cancel")
JOB_ID_PREFIX = "fetch_" + DATASET_ID
TABLE_NAME = "test_table"
QUERY = "SELECT * FROM %s.%s" % (DATASET_ID, TABLE_NAME)
dataset = self.temp_dataset(DATASET_ID)
table_arg = Table(dataset.table(TABLE_NAME), schema=SCHEMA)
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
job = Config.CLIENT.query(QUERY, job_id_prefix=JOB_ID_PREFIX)
job.cancel()
retry = RetryInstanceState(_job_done, max_tries=8)
retry(job.reload)()
# The `cancel` API doesn't leave any reliable traces on
# the status of the job resource, so we can't really assert for
# them here. The best we can do is note that the API call didn't
# raise an error, and that the job completed (in the `retry()`
# above).
def test_get_failed_job(self):
# issue 4246
from google.api_core.exceptions import BadRequest
JOB_ID = "invalid_{}".format(str(uuid.uuid4()))
QUERY = "SELECT TIMESTAMP_ADD(@ts_value, INTERVAL 1 HOUR);"
PARAM = bigquery.ScalarQueryParameter("ts_value", "TIMESTAMP", 1.4810976e9)
job_config = bigquery.QueryJobConfig()
job_config.query_parameters = [PARAM]
with self.assertRaises(BadRequest):
Config.CLIENT.query(QUERY, job_id=JOB_ID, job_config=job_config).result()
job = Config.CLIENT.get_job(JOB_ID)
with self.assertRaises(ValueError):
job.query_parameters
def test_query_w_legacy_sql_types(self):
naive = datetime.datetime(2016, 12, 5, 12, 41, 9)
stamp = "%s %s" % (naive.date().isoformat(), naive.time().isoformat())
zoned = naive.replace(tzinfo=UTC)
examples = [
{"sql": "SELECT 1", "expected": 1},
{"sql": "SELECT 1.3", "expected": 1.3},
{"sql": "SELECT TRUE", "expected": True},
{"sql": 'SELECT "ABC"', "expected": "ABC"},
{"sql": 'SELECT CAST("foo" AS BYTES)', "expected": b"foo"},
{"sql": 'SELECT CAST("%s" AS TIMESTAMP)' % (stamp,), "expected": zoned},
]
for example in examples:
job_config = bigquery.QueryJobConfig()
job_config.use_legacy_sql = True
rows = list(Config.CLIENT.query(example["sql"], job_config=job_config))
self.assertEqual(len(rows), 1)
self.assertEqual(len(rows[0]), 1)
self.assertEqual(rows[0][0], example["expected"])
def _generate_standard_sql_types_examples(self):
naive = datetime.datetime(2016, 12, 5, 12, 41, 9)
naive_microseconds = datetime.datetime(2016, 12, 5, 12, 41, 9, 250000)
stamp = "%s %s" % (naive.date().isoformat(), naive.time().isoformat())
stamp_microseconds = stamp + ".250000"
zoned = naive.replace(tzinfo=UTC)
zoned_microseconds = naive_microseconds.replace(tzinfo=UTC)
numeric = decimal.Decimal("123456789.123456789")
return [
{"sql": "SELECT 1", "expected": 1},
{"sql": "SELECT 1.3", "expected": 1.3},
{"sql": "SELECT TRUE", "expected": True},
{"sql": 'SELECT "ABC"', "expected": "ABC"},
{"sql": 'SELECT CAST("foo" AS BYTES)', "expected": b"foo"},
{"sql": 'SELECT TIMESTAMP "%s"' % (stamp,), "expected": zoned},
{
"sql": 'SELECT TIMESTAMP "%s"' % (stamp_microseconds,),
"expected": zoned_microseconds,
},
{"sql": 'SELECT DATETIME(TIMESTAMP "%s")' % (stamp,), "expected": naive},
{
"sql": 'SELECT DATETIME(TIMESTAMP "%s")' % (stamp_microseconds,),
"expected": naive_microseconds,
},
{"sql": 'SELECT DATE(TIMESTAMP "%s")' % (stamp,), "expected": naive.date()},
{"sql": 'SELECT TIME(TIMESTAMP "%s")' % (stamp,), "expected": naive.time()},
{"sql": 'SELECT NUMERIC "%s"' % (numeric,), "expected": numeric},
{"sql": "SELECT (1, 2)", "expected": {"_field_1": 1, "_field_2": 2}},
{
"sql": "SELECT ((1, 2), (3, 4), 5)",
"expected": {
"_field_1": {"_field_1": 1, "_field_2": 2},
"_field_2": {"_field_1": 3, "_field_2": 4},
"_field_3": 5,
},
},
{"sql": "SELECT [1, 2, 3]", "expected": [1, 2, 3]},
{
"sql": "SELECT ([1, 2], 3, [4, 5])",
"expected": {"_field_1": [1, 2], "_field_2": 3, "_field_3": [4, 5]},
},
{
"sql": "SELECT [(1, 2, 3), (4, 5, 6)]",
"expected": [
{"_field_1": 1, "_field_2": 2, "_field_3": 3},
{"_field_1": 4, "_field_2": 5, "_field_3": 6},
],
},
{
"sql": "SELECT [([1, 2, 3], 4), ([5, 6], 7)]",
"expected": [
{u"_field_1": [1, 2, 3], u"_field_2": 4},
{u"_field_1": [5, 6], u"_field_2": 7},
],
},
{
"sql": "SELECT ARRAY(SELECT STRUCT([1, 2]))",
"expected": [{u"_field_1": [1, 2]}],
},
{"sql": "SELECT ST_GeogPoint(1, 2)", "expected": "POINT(1 2)"},
]
def test_query_w_standard_sql_types(self):
examples = self._generate_standard_sql_types_examples()
for example in examples:
rows = list(Config.CLIENT.query(example["sql"]))
self.assertEqual(len(rows), 1)
self.assertEqual(len(rows[0]), 1)
self.assertEqual(rows[0][0], example["expected"])
def test_query_w_failed_query(self):
from google.api_core.exceptions import BadRequest
with self.assertRaises(BadRequest):
Config.CLIENT.query("invalid syntax;").result()
def test_query_w_wrong_config(self):
from google.cloud.bigquery.job import LoadJobConfig
good_query = "SELECT 1;"
rows = list(Config.CLIENT.query("SELECT 1;").result())
assert rows[0][0] == 1
bad_config = LoadJobConfig()
bad_config.destination = Config.CLIENT.dataset("dset").table("tbl")
with self.assertRaises(Exception):
Config.CLIENT.query(good_query, job_config=bad_config).result()
def test_query_w_timeout(self):
query_job = Config.CLIENT.query(
"SELECT * FROM `bigquery-public-data.github_repos.commits`;",
job_id_prefix="test_query_w_timeout_",
)
with self.assertRaises(concurrent.futures.TimeoutError):
# 1 second is much too short for this query.
query_job.result(timeout=1)
def test_query_statistics(self):
"""
A system test to exercise some of the extended query statistics.
Note: We construct a query that should need at least three stages by
specifying a JOIN query. Exact plan and stats are effectively
non-deterministic, so we're largely interested in confirming values
are present.
"""
job_config = bigquery.QueryJobConfig()
job_config.use_query_cache = False
query_job = Config.CLIENT.query(
"""
SELECT
COUNT(1)
FROM
(
SELECT
year,
wban_number
FROM `bigquery-public-data.samples.gsod`
LIMIT 1000
) lside
INNER JOIN
(
SELECT
year,
state
FROM `bigquery-public-data.samples.natality`
LIMIT 1000
) rside
ON
lside.year = rside.year
""",
location="US",
job_config=job_config,
)
# run the job to completion
query_job.result()
# Assert top-level stats
self.assertFalse(query_job.cache_hit)
self.assertIsNotNone(query_job.destination)
self.assertTrue(query_job.done)
self.assertFalse(query_job.dry_run)
self.assertIsNone(query_job.num_dml_affected_rows)
self.assertEqual(query_job.priority, "INTERACTIVE")
self.assertGreater(query_job.total_bytes_billed, 1)
self.assertGreater(query_job.total_bytes_processed, 1)
self.assertEqual(query_job.statement_type, "SELECT")
self.assertGreater(query_job.slot_millis, 1)
# Make assertions on the shape of the query plan.
plan = query_job.query_plan
self.assertGreaterEqual(len(plan), 3)
first_stage = plan[0]
self.assertIsNotNone(first_stage.start)
self.assertIsNotNone(first_stage.end)
self.assertIsNotNone(first_stage.entry_id)
self.assertIsNotNone(first_stage.name)
self.assertGreater(first_stage.parallel_inputs, 0)
self.assertGreater(first_stage.completed_parallel_inputs, 0)
self.assertGreater(first_stage.shuffle_output_bytes, 0)
self.assertEqual(first_stage.status, "COMPLETE")
# Query plan is a digraph. Ensure it has inter-stage links,
# but not every stage has inputs.
stages_with_inputs = 0
for entry in plan:
if len(entry.input_stages) > 0:
stages_with_inputs = stages_with_inputs + 1
self.assertGreater(stages_with_inputs, 0)
self.assertGreater(len(plan), stages_with_inputs)
def test_dbapi_w_standard_sql_types(self):
examples = self._generate_standard_sql_types_examples()
for example in examples:
Config.CURSOR.execute(example["sql"])
self.assertEqual(Config.CURSOR.rowcount, 1)
row = Config.CURSOR.fetchone()
self.assertEqual(len(row), 1)
self.assertEqual(row[0], example["expected"])
row = Config.CURSOR.fetchone()
self.assertIsNone(row)
def test_dbapi_fetchall(self):
query = "SELECT * FROM UNNEST([(1, 2), (3, 4), (5, 6)])"
for arraysize in range(1, 5):
Config.CURSOR.execute(query)
self.assertEqual(Config.CURSOR.rowcount, 3, "expected 3 rows")
Config.CURSOR.arraysize = arraysize
rows = Config.CURSOR.fetchall()
row_tuples = [r.values() for r in rows]
self.assertEqual(row_tuples, [(1, 2), (3, 4), (5, 6)])
def _load_table_for_dml(self, rows, dataset_id, table_id):
from google.cloud._testing import _NamedTemporaryFile
from google.cloud.bigquery.job import CreateDisposition
from google.cloud.bigquery.job import SourceFormat
from google.cloud.bigquery.job import WriteDisposition
dataset = self.temp_dataset(dataset_id)
greeting = bigquery.SchemaField("greeting", "STRING", mode="NULLABLE")
table_ref = dataset.table(table_id)
table_arg = Table(table_ref, schema=[greeting])
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
with _NamedTemporaryFile() as temp:
with open(temp.name, "w") as csv_write:
writer = csv.writer(csv_write)
writer.writerow(("Greeting",))
writer.writerows(rows)
with open(temp.name, "rb") as csv_read:
config = bigquery.LoadJobConfig()
config.source_format = SourceFormat.CSV
config.skip_leading_rows = 1
config.create_disposition = CreateDisposition.CREATE_NEVER
config.write_disposition = WriteDisposition.WRITE_EMPTY
job = Config.CLIENT.load_table_from_file(
csv_read, table_ref, job_config=config
)
# Retry until done.
job.result(timeout=JOB_TIMEOUT)
self._fetch_single_page(table)
def test_query_w_dml(self):
dataset_name = _make_dataset_id("dml_query")
table_name = "test_table"
self._load_table_for_dml([("Hello World",)], dataset_name, table_name)
query_template = """UPDATE {}.{}
SET greeting = 'Guten Tag'
WHERE greeting = 'Hello World'
"""
query_job = Config.CLIENT.query(
query_template.format(dataset_name, table_name),
job_id_prefix="test_query_w_dml_",
)
query_job.result()
self.assertEqual(query_job.num_dml_affected_rows, 1)
def test_dbapi_w_dml(self):
dataset_name = _make_dataset_id("dml_dbapi")
table_name = "test_table"
self._load_table_for_dml([("Hello World",)], dataset_name, table_name)
query_template = """UPDATE {}.{}
SET greeting = 'Guten Tag'
WHERE greeting = 'Hello World'
"""
Config.CURSOR.execute(
query_template.format(dataset_name, table_name),
job_id="test_dbapi_w_dml_{}".format(str(uuid.uuid4())),
)
self.assertEqual(Config.CURSOR.rowcount, 1)
self.assertIsNone(Config.CURSOR.fetchone())
def test_query_w_query_params(self):
from google.cloud.bigquery.job import QueryJobConfig
from google.cloud.bigquery.query import ArrayQueryParameter
from google.cloud.bigquery.query import ScalarQueryParameter
from google.cloud.bigquery.query import StructQueryParameter
question = "What is the answer to life, the universe, and everything?"
question_param = ScalarQueryParameter(
name="question", type_="STRING", value=question
)
answer = 42
answer_param = ScalarQueryParameter(name="answer", type_="INT64", value=answer)
pi = 3.1415926
pi_param = ScalarQueryParameter(name="pi", type_="FLOAT64", value=pi)
pi_numeric = decimal.Decimal("3.141592654")
pi_numeric_param = ScalarQueryParameter(
name="pi_numeric_param", type_="NUMERIC", value=pi_numeric
)
truthy = True
truthy_param = ScalarQueryParameter(name="truthy", type_="BOOL", value=truthy)
beef = b"DEADBEEF"
beef_param = ScalarQueryParameter(name="beef", type_="BYTES", value=beef)
naive = datetime.datetime(2016, 12, 5, 12, 41, 9)
naive_param = ScalarQueryParameter(name="naive", type_="DATETIME", value=naive)
naive_date_param = ScalarQueryParameter(
name="naive_date", type_="DATE", value=naive.date()
)
naive_time_param = ScalarQueryParameter(
name="naive_time", type_="TIME", value=naive.time()
)
zoned = naive.replace(tzinfo=UTC)
zoned_param = ScalarQueryParameter(name="zoned", type_="TIMESTAMP", value=zoned)
array_param = ArrayQueryParameter(
name="array_param", array_type="INT64", values=[1, 2]
)
struct_param = StructQueryParameter("hitchhiker", question_param, answer_param)
phred_name = "Phred Phlyntstone"
phred_name_param = ScalarQueryParameter(
name="name", type_="STRING", value=phred_name
)
phred_age = 32
phred_age_param = ScalarQueryParameter(
name="age", type_="INT64", value=phred_age
)
phred_param = StructQueryParameter(None, phred_name_param, phred_age_param)
bharney_name = "Bharney Rhubbyl"
bharney_name_param = ScalarQueryParameter(
name="name", type_="STRING", value=bharney_name
)
bharney_age = 31
bharney_age_param = ScalarQueryParameter(
name="age", type_="INT64", value=bharney_age
)
bharney_param = StructQueryParameter(
None, bharney_name_param, bharney_age_param
)
characters_param = ArrayQueryParameter(
name=None, array_type="RECORD", values=[phred_param, bharney_param]
)
hero_param = StructQueryParameter("hero", phred_name_param, phred_age_param)
sidekick_param = StructQueryParameter(
"sidekick", bharney_name_param, bharney_age_param
)
roles_param = StructQueryParameter("roles", hero_param, sidekick_param)
friends_param = ArrayQueryParameter(
name="friends", array_type="STRING", values=[phred_name, bharney_name]
)
with_friends_param = StructQueryParameter(None, friends_param)
top_left_param = StructQueryParameter(
"top_left",
ScalarQueryParameter("x", "INT64", 12),
ScalarQueryParameter("y", "INT64", 102),
)
bottom_right_param = StructQueryParameter(
"bottom_right",
ScalarQueryParameter("x", "INT64", 22),
ScalarQueryParameter("y", "INT64", 92),
)
rectangle_param = StructQueryParameter(
"rectangle", top_left_param, bottom_right_param
)
examples = [
{
"sql": "SELECT @question",
"expected": question,
"query_parameters": [question_param],
},
{
"sql": "SELECT @answer",
"expected": answer,
"query_parameters": [answer_param],
},
{"sql": "SELECT @pi", "expected": pi, "query_parameters": [pi_param]},
{
"sql": "SELECT @pi_numeric_param",
"expected": pi_numeric,
"query_parameters": [pi_numeric_param],
},
{
"sql": "SELECT @truthy",
"expected": truthy,
"query_parameters": [truthy_param],
},
{"sql": "SELECT @beef", "expected": beef, "query_parameters": [beef_param]},
{
"sql": "SELECT @naive",
"expected": naive,
"query_parameters": [naive_param],
},
{
"sql": "SELECT @naive_date",
"expected": naive.date(),
"query_parameters": [naive_date_param],
},
{
"sql": "SELECT @naive_time",
"expected": naive.time(),
"query_parameters": [naive_time_param],
},
{
"sql": "SELECT @zoned",
"expected": zoned,
"query_parameters": [zoned_param],
},
{
"sql": "SELECT @array_param",
"expected": [1, 2],
"query_parameters": [array_param],
},
{
"sql": "SELECT (@hitchhiker.question, @hitchhiker.answer)",
"expected": ({"_field_1": question, "_field_2": answer}),
"query_parameters": [struct_param],
},
{
"sql": "SELECT "
"((@rectangle.bottom_right.x - @rectangle.top_left.x) "
"* (@rectangle.top_left.y - @rectangle.bottom_right.y))",
"expected": 100,
"query_parameters": [rectangle_param],
},
{
"sql": "SELECT ?",
"expected": [
{"name": phred_name, "age": phred_age},
{"name": bharney_name, "age": bharney_age},
],
"query_parameters": [characters_param],
},
{
"sql": "SELECT @roles",
"expected": {
"hero": {"name": phred_name, "age": phred_age},
"sidekick": {"name": bharney_name, "age": bharney_age},
},
"query_parameters": [roles_param],
},
{
"sql": "SELECT ?",
"expected": {"friends": [phred_name, bharney_name]},
"query_parameters": [with_friends_param],
},
]
for example in examples:
jconfig = QueryJobConfig()
jconfig.query_parameters = example["query_parameters"]
query_job = Config.CLIENT.query(
example["sql"],
job_config=jconfig,
job_id_prefix="test_query_w_query_params",
)
rows = list(query_job.result())
self.assertEqual(len(rows), 1)
self.assertEqual(len(rows[0]), 1)
self.assertEqual(rows[0][0], example["expected"])
def test_dbapi_w_query_parameters(self):
examples = [
{
"sql": "SELECT %(boolval)s",
"expected": True,
"query_parameters": {"boolval": True},
},
{
"sql": 'SELECT %(a "very" weird `name`)s',
"expected": True,
"query_parameters": {'a "very" weird `name`': True},
},
{
"sql": "SELECT %(select)s",
"expected": True,
"query_parameters": {"select": True}, # this name is a keyword
},
{"sql": "SELECT %s", "expected": False, "query_parameters": [False]},
{
"sql": "SELECT %(intval)s",
"expected": 123,
"query_parameters": {"intval": 123},
},
{
"sql": "SELECT %s",
"expected": -123456789,
"query_parameters": [-123456789],
},
{
"sql": "SELECT %(floatval)s",
"expected": 1.25,
"query_parameters": {"floatval": 1.25},
},
{
"sql": "SELECT LOWER(%(strval)s)",
"query_parameters": {"strval": "I Am A String"},
"expected": "i am a string",
},
{
"sql": "SELECT DATE_SUB(%(dateval)s, INTERVAL 1 DAY)",
"query_parameters": {"dateval": datetime.date(2017, 4, 2)},
"expected": datetime.date(2017, 4, 1),
},
{
"sql": "SELECT TIME_ADD(%(timeval)s, INTERVAL 4 SECOND)",
"query_parameters": {"timeval": datetime.time(12, 34, 56)},
"expected": datetime.time(12, 35, 0),
},
{
"sql": ("SELECT DATETIME_ADD(%(datetimeval)s, INTERVAL 53 SECOND)"),
"query_parameters": {
"datetimeval": datetime.datetime(2012, 3, 4, 5, 6, 7)
},
"expected": datetime.datetime(2012, 3, 4, 5, 7, 0),
},
{
"sql": "SELECT TIMESTAMP_TRUNC(%(zoned)s, MINUTE)",
"query_parameters": {
"zoned": datetime.datetime(2012, 3, 4, 5, 6, 7, tzinfo=UTC)
},
"expected": datetime.datetime(2012, 3, 4, 5, 6, 0, tzinfo=UTC),
},
{
"sql": "SELECT TIMESTAMP_TRUNC(%(zoned)s, MINUTE)",
"query_parameters": {
"zoned": datetime.datetime(2012, 3, 4, 5, 6, 7, 250000, tzinfo=UTC)
},
"expected": datetime.datetime(2012, 3, 4, 5, 6, 0, tzinfo=UTC),
},
]
for example in examples:
msg = "sql: {} query_parameters: {}".format(
example["sql"], example["query_parameters"]
)
Config.CURSOR.execute(example["sql"], example["query_parameters"])
self.assertEqual(Config.CURSOR.rowcount, 1, msg=msg)
row = Config.CURSOR.fetchone()
self.assertEqual(len(row), 1, msg=msg)
self.assertEqual(row[0], example["expected"], msg=msg)
row = Config.CURSOR.fetchone()
self.assertIsNone(row, msg=msg)
def test_large_query_w_public_data(self):
PUBLIC = "bigquery-public-data"
DATASET_ID = "samples"
TABLE_NAME = "natality"
LIMIT = 1000
SQL = "SELECT * from `{}.{}.{}` LIMIT {}".format(
PUBLIC, DATASET_ID, TABLE_NAME, LIMIT
)
query_job = Config.CLIENT.query(SQL)
rows = list(query_job)
self.assertEqual(len(rows), LIMIT)
def test_query_future(self):
query_job = Config.CLIENT.query("SELECT 1")
iterator = query_job.result(timeout=JOB_TIMEOUT)
row_tuples = [r.values() for r in iterator]
self.assertEqual(row_tuples, [(1,)])
def test_query_iter(self):
import types
query_job = Config.CLIENT.query("SELECT 1")
self.assertIsInstance(iter(query_job), types.GeneratorType)
row_tuples = [r.values() for r in query_job]
self.assertEqual(row_tuples, [(1,)])
@unittest.skipIf(pandas is None, "Requires `pandas`")
def test_query_results_to_dataframe(self):
QUERY = """
SELECT id, author, time_ts, dead
from `bigquery-public-data.hacker_news.comments`
LIMIT 10
"""
df = Config.CLIENT.query(QUERY).result().to_dataframe()
self.assertIsInstance(df, pandas.DataFrame)
self.assertEqual(len(df), 10) # verify the number of rows
column_names = ["id", "author", "time_ts", "dead"]
self.assertEqual(list(df), column_names) # verify the column names
exp_datatypes = {
"id": int,
"author": six.text_type,
"time_ts": pandas.Timestamp,
"dead": bool,
}
for index, row in df.iterrows():
for col in column_names:
# all the schema fields are nullable, so None is acceptable
if not row[col] is None:
self.assertIsInstance(row[col], exp_datatypes[col])
def test_insert_rows_nested_nested(self):
# See #2951
SF = bigquery.SchemaField
schema = [
SF("string_col", "STRING", mode="NULLABLE"),
SF(
"record_col",
"RECORD",
mode="NULLABLE",
fields=[
SF("nested_string", "STRING", mode="NULLABLE"),
SF("nested_repeated", "INTEGER", mode="REPEATED"),
SF(
"nested_record",
"RECORD",
mode="NULLABLE",
fields=[SF("nested_nested_string", "STRING", mode="NULLABLE")],
),
],
),
]
record = {
"nested_string": "another string value",
"nested_repeated": [0, 1, 2],
"nested_record": {"nested_nested_string": "some deep insight"},
}
to_insert = [("Some value", record)]
table_id = "test_table"
dataset = self.temp_dataset(_make_dataset_id("issue_2951"))
table_arg = Table(dataset.table(table_id), schema=schema)
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
Config.CLIENT.insert_rows(table, to_insert)
retry = RetryResult(_has_rows, max_tries=8)
rows = retry(self._fetch_single_page)(table)
row_tuples = [r.values() for r in rows]
self.assertEqual(row_tuples, to_insert)
def test_insert_rows_nested_nested_dictionary(self):
# See #2951
SF = bigquery.SchemaField
schema = [
SF("string_col", "STRING", mode="NULLABLE"),
SF(
"record_col",
"RECORD",
mode="NULLABLE",
fields=[
SF("nested_string", "STRING", mode="NULLABLE"),
SF("nested_repeated", "INTEGER", mode="REPEATED"),
SF(
"nested_record",
"RECORD",
mode="NULLABLE",
fields=[SF("nested_nested_string", "STRING", mode="NULLABLE")],
),
],
),
]
record = {
"nested_string": "another string value",
"nested_repeated": [0, 1, 2],
"nested_record": {"nested_nested_string": "some deep insight"},
}
to_insert = [{"string_col": "Some value", "record_col": record}]
table_id = "test_table"
dataset = self.temp_dataset(_make_dataset_id("issue_2951"))
table_arg = Table(dataset.table(table_id), schema=schema)
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
Config.CLIENT.insert_rows(table, to_insert)
retry = RetryResult(_has_rows, max_tries=8)
rows = retry(self._fetch_single_page)(table)
row_tuples = [r.values() for r in rows]
expected_rows = [("Some value", record)]
self.assertEqual(row_tuples, expected_rows)
def test_create_table_rows_fetch_nested_schema(self):
table_name = "test_table"
dataset = self.temp_dataset(_make_dataset_id("create_table_nested_schema"))
schema = _load_json_schema()
table_arg = Table(dataset.table(table_name), schema=schema)
table = retry_403(Config.CLIENT.create_table)(table_arg)
self.to_delete.insert(0, table)
self.assertTrue(_table_exists(table))
self.assertEqual(table.table_id, table_name)
to_insert = []
# Data is in "JSON Lines" format, see http://jsonlines.org/
json_filename = os.path.join(WHERE, "data", "characters.jsonl")
with open(json_filename) as rows_file:
for line in rows_file:
to_insert.append(json.loads(line))
errors = Config.CLIENT.insert_rows_json(table, to_insert)
self.assertEqual(len(errors), 0)
retry = RetryResult(_has_rows, max_tries=8)
fetched = retry(self._fetch_single_page)(table)
fetched_tuples = [f.values() for f in fetched]
self.assertEqual(len(fetched), len(to_insert))
for found, expected in zip(sorted(fetched_tuples), to_insert):
self.assertEqual(found[0], expected["Name"])
self.assertEqual(found[1], int(expected["Age"]))
self.assertEqual(found[2], expected["Weight"])
self.assertEqual(found[3], expected["IsMagic"])
self.assertEqual(len(found[4]), len(expected["Spells"]))
for f_spell, e_spell in zip(found[4], expected["Spells"]):
self.assertEqual(f_spell["Name"], e_spell["Name"])
parts = time.strptime(e_spell["LastUsed"], "%Y-%m-%d %H:%M:%S UTC")
e_used = datetime.datetime(*parts[0:6], tzinfo=UTC)
self.assertEqual(f_spell["LastUsed"], e_used)
self.assertEqual(f_spell["DiscoveredBy"], e_spell["DiscoveredBy"])
self.assertEqual(f_spell["Properties"], e_spell["Properties"])
e_icon = base64.standard_b64decode(e_spell["Icon"].encode("ascii"))
self.assertEqual(f_spell["Icon"], e_icon)
parts = time.strptime(expected["TeaTime"], "%H:%M:%S")
e_teatime = datetime.time(*parts[3:6])
self.assertEqual(found[5], e_teatime)
parts = time.strptime(expected["NextVacation"], "%Y-%m-%d")
e_nextvac = datetime.date(*parts[0:3])
self.assertEqual(found[6], e_nextvac)
parts = time.strptime(expected["FavoriteTime"], "%Y-%m-%dT%H:%M:%S")
e_favtime = datetime.datetime(*parts[0:6])
self.assertEqual(found[7], e_favtime)
self.assertEqual(found[8], decimal.Decimal(expected["FavoriteNumber"]))
def _fetch_dataframe(self, query):
return Config.CLIENT.query(query).result().to_dataframe()
@unittest.skipIf(pandas is None, "Requires `pandas`")
def test_nested_table_to_dataframe(self):
from google.cloud.bigquery.job import SourceFormat
from google.cloud.bigquery.job import WriteDisposition
SF = bigquery.SchemaField
schema = [
SF("string_col", "STRING", mode="NULLABLE"),
SF(
"record_col",
"RECORD",
mode="NULLABLE",
fields=[
SF("nested_string", "STRING", mode="NULLABLE"),
SF("nested_repeated", "INTEGER", mode="REPEATED"),
SF(
"nested_record",
"RECORD",
mode="NULLABLE",
fields=[SF("nested_nested_string", "STRING", mode="NULLABLE")],
),
],
),
]
record = {
"nested_string": "another string value",
"nested_repeated": [0, 1, 2],
"nested_record": {"nested_nested_string": "some deep insight"},
}
to_insert = [{"string_col": "Some value", "record_col": record}]
rows = [json.dumps(row) for row in to_insert]
body = six.BytesIO("{}\n".format("\n".join(rows)).encode("ascii"))
table_id = "test_table"
dataset = self.temp_dataset(_make_dataset_id("nested_df"))
table = dataset.table(table_id)
self.to_delete.insert(0, table)
job_config = bigquery.LoadJobConfig()
job_config.write_disposition = WriteDisposition.WRITE_TRUNCATE
job_config.source_format = SourceFormat.NEWLINE_DELIMITED_JSON
job_config.schema = schema
# Load a table using a local JSON file from memory.
Config.CLIENT.load_table_from_file(body, table, job_config=job_config).result()
df = Config.CLIENT.list_rows(table, selected_fields=schema).to_dataframe()
self.assertIsInstance(df, pandas.DataFrame)
self.assertEqual(len(df), 1) # verify the number of rows
exp_columns = ["string_col", "record_col"]
self.assertEqual(list(df), exp_columns) # verify the column names
row = df.iloc[0]
# verify the row content
self.assertEqual(row["string_col"], "Some value")
self.assertEqual(row["record_col"], record)
# verify that nested data can be accessed with indices/keys
self.assertEqual(row["record_col"]["nested_repeated"][0], 0)
self.assertEqual(
row["record_col"]["nested_record"]["nested_nested_string"],
"some deep insight",
)
def test_list_rows_empty_table(self):
from google.cloud.bigquery.table import RowIterator
dataset_id = _make_dataset_id("empty_table")
dataset = self.temp_dataset(dataset_id)
table_ref = dataset.table("empty_table")
table = Config.CLIENT.create_table(bigquery.Table(table_ref))
# It's a bit silly to list rows for an empty table, but this does
# happen as the result of a DDL query from an IPython magic command.
rows = Config.CLIENT.list_rows(table)
self.assertIsInstance(rows, RowIterator)
self.assertEqual(tuple(rows), ())
def test_list_rows_page_size(self):
from google.cloud.bigquery.job import SourceFormat
from google.cloud.bigquery.job import WriteDisposition
num_items = 7
page_size = 3
num_pages, num_last_page = divmod(num_items, page_size)
SF = bigquery.SchemaField
schema = [SF("string_col", "STRING", mode="NULLABLE")]
to_insert = [{"string_col": "item%d" % i} for i in range(num_items)]
rows = [json.dumps(row) for row in to_insert]
body = six.BytesIO("{}\n".format("\n".join(rows)).encode("ascii"))
table_id = "test_table"
dataset = self.temp_dataset(_make_dataset_id("nested_df"))
table = dataset.table(table_id)
self.to_delete.insert(0, table)
job_config = bigquery.LoadJobConfig()
job_config.write_disposition = WriteDisposition.WRITE_TRUNCATE
job_config.source_format = SourceFormat.NEWLINE_DELIMITED_JSON
job_config.schema = schema
# Load a table using a local JSON file from memory.
Config.CLIENT.load_table_from_file(body, table, job_config=job_config).result()
df = Config.CLIENT.list_rows(table, selected_fields=schema, page_size=page_size)
pages = df.pages
for i in range(num_pages):
page = next(pages)
self.assertEqual(page.num_items, page_size)
page = next(pages)
self.assertEqual(page.num_items, num_last_page)
def temp_dataset(self, dataset_id, location=None):
dataset = Dataset(Config.CLIENT.dataset(dataset_id))
if location:
dataset.location = location
dataset = retry_403(Config.CLIENT.create_dataset)(dataset)
self.to_delete.append(dataset)
return dataset
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
@pytest.mark.skipif(IPython is None, reason="Requires `ipython`")
@pytest.mark.usefixtures("ipython_interactive")
def test_bigquery_magic():
ip = IPython.get_ipython()
ip.extension_manager.load_extension("google.cloud.bigquery")
sql = """
SELECT
CONCAT(
'https://stackoverflow.com/questions/',
CAST(id as STRING)) as url,
view_count
FROM `bigquery-public-data.stackoverflow.posts_questions`
WHERE tags like '%google-bigquery%'
ORDER BY view_count DESC
LIMIT 10
"""
with io.capture_output() as captured:
result = ip.run_cell_magic("bigquery", "", sql)
lines = re.split("\n|\r", captured.stdout)
# Removes blanks & terminal code (result of display clearing)
updates = list(filter(lambda x: bool(x) and x != "\x1b[2K", lines))
assert re.match("Executing query with job ID: .*", updates[0])
assert all(re.match("Query executing: .*s", line) for line in updates[1:-1])
assert re.match("Query complete after .*s", updates[-1])
assert isinstance(result, pandas.DataFrame)
assert len(result) == 10 # verify row count
assert list(result) == ["url", "view_count"] # verify column names
def _job_done(instance):
return instance.state.lower() == "done"
def _dataset_exists(ds):
try:
Config.CLIENT.get_dataset(DatasetReference(ds.project, ds.dataset_id))
return True
except NotFound:
return False
def _table_exists(t):
try:
tr = DatasetReference(t.project, t.dataset_id).table(t.table_id)
Config.CLIENT.get_table(tr)
return True
except NotFound:
return False
@pytest.fixture(scope="session")
def ipython():
config = tools.default_config()
config.TerminalInteractiveShell.simple_prompt = True
shell = interactiveshell.TerminalInteractiveShell.instance(config=config)
return shell
@pytest.fixture()
def ipython_interactive(request, ipython):
"""Activate IPython's builtin hooks
for the duration of the test scope.
"""
with ipython.builtin_trap:
yield ipython
| apache-2.0 |
darribas/us_employment_centers | employment_centers_tools.py | 1 | 16633 | '''
Employment center identification tools
...
Copyright (c) 2014, Daniel Arribas-Bel
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* The name of Daniel Arribas-Bel may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
'''
import os
import numpy as np
import pandas as pd
import pysal as ps
import multiprocessing as mp
import matplotlib.pyplot as plt
from itertools import izip,count
from pyGDsandbox.geo_tools import clip_shp
from pyGDsandbox.dataIO import df2dbf
from pysal.contrib.viz import mapping as maps
from matplotlib.colors import colorConverter as cc
class CentFinder():
"""
Identify employment centers out of LISA results
...
Arguments
---------
lisas : ndarray
Input data with the following structure:
lisas[i,:] = (Ii, z_sim, EI_sim, seI_sim, p_sim, q)
w : W
Spatial weights
threshold : float
Significance level
verbose : Boolean
False by default
Attributes
----------
lisas : ndarray
Original input
ps : ndarray
List of length n where every element gets its p_sim if it's
HH/HL, 1.1 otherwise
classes : ndarray
List of length n where every element gets 1 if HH, 2 if HL, 0 otherwise
sClus : dict
Mapping of core tract ID (key) to IDs of tracts in the
center
"""
def __init__(self,lisas,w,threshold,verbose=False):
self.verbose=verbose
self.threshold=threshold
results=lisas
self.lisas=lisas
self.n=lisas.shape[0]
self.w=w
classes=np.zeros(self.n)
ps=np.ones(self.n)+0.1
for i in range(self.n):
if results[i,5]==1.:
ps[i]=results[i,4]
classes[i]=1.
if results[i,5]==4.:
ps[i]=results[i,4]
classes[i]=2.
self.classes=classes
mp=min(izip(ps,count())) # (min,map)
self.ps=ps
if mp[0]>self.threshold:
cores=[]
sClus={}
self.sClus=sClus
self.cores=cores
else:
sClus={} #sClus[candidate]=set([cand])
for i in range(self.n):
if ps[i]<=self.threshold: # si candidato
sClus[w.id_order[i]]=set([w.id_order[i]])
# Check contiguity of the clusters
several=1
if len(sClus)<2:
several=0
if several:
flag=1
while flag:
cores=sClus.keys()#tract_id's
for indi,cl_main in enumerate(cores):
if verbose:
print '\nMAIN: ',cl_main,'\n'
trash=[]
for cl_an in cores:
if cl_main != cl_an: #if not the same cluster
if verbose:
print 'analyzing ',cl_an
for tract in sClus[cl_main]:
sn=set(self.w.neighbors[tract])
if sn.intersection(sClus[cl_an]):
sClus[cl_main]=sClus[cl_main].union(sClus[cl_an])
trash.append(cl_an)
if verbose:
print cl_an,' and ',cl_main,' neigh\n'
break
if trash:
for i in trash:
del sClus[i]
break
elif indi==len(cores)-1:
flag=0
sClusNew={}
newCores=[]
for i in sClus:
minp=('ph',1)
for j in sClus[i]:
if results[w.id_order.index(j),4]<minp[1]:
minp=(j,results[w.id_order.index(j),4])
sClusNew[minp[0]]=sClus[i]
newCores.append(minp)
self.sClus=sClusNew
self.cores=newCores
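# Illustrative sketch, not part of the original pipeline: one hedged way to
# drive CentFinder from a PySAL Local Moran run. The 5% threshold, the
# permutation count and the helper name are arbitrary choices for the example.
def _example_centfinder(values, w, threshold=0.05, permutations=999):
    """Run a Local Moran's I on `values` and feed the results to CentFinder."""
    lisa = ps.Moran_Local(values, w, permutations=permutations)
    # Pack the results in the (Ii, z_sim, EI_sim, seI_sim, p_sim, q) layout
    # that CentFinder expects (the same layout is built in act_on_msa below).
    lisas = np.column_stack([lisa.Is, lisa.z_sim, lisa.EI_sim,
                             lisa.seI_sim, lisa.p_sim, lisa.q])
    finder = CentFinder(lisas, w, threshold)
    return finder.sClus, finder.cores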
class RLabel:
"""Takes 'all' and obtains a pseudo p-value for statistical difference
of the mean between the two groups in 'all'. Allows for testing against
the universe of observations versus against the remaining observations.
Arguments:
* all=[[values_in_group1],[values_in_group2]]
* useAll = When True test group1 against (group1+group2); when False
test group1 against group2
Attributes:
* mean0
* mean1
* permutations
* diff=difference of means of observed groups
* diffs=list of differences of means for simulated groups
* p_sim
"""
def __init__(self,all,permutations=99999,useAll=False):
allT=all[0]+all[1]
self.permutations=permutations
if useAll:
self.mean0,self.mean1=np.mean(all[0]),np.mean(allT)
else:
self.mean0,self.mean1=np.mean(all[0]),np.mean(all[1])
self.diff=self.mean0-self.mean1
self.absDiff=np.abs(self.diff)
sep=len(all[0])
diffs=[self.__calc(allT,sep,useAll) for i in xrange(permutations)]
self.diffs=diffs
self.p_sim=(sum(diffs >= self.absDiff)+1.)/(permutations+1.)
def __calc(self,allT,sep,useAll=False):
np.random.shuffle(allT)
if useAll:
diff = np.abs(np.mean(allT[:sep])-self.mean1)
else:
diff = np.abs(np.mean(allT[:sep])-np.mean(allT[sep:]))
return diff
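# Illustrative sketch, not part of the original pipeline: RLabel runs a
# permutation test on the difference of means between two groups. The toy
# values and group sizes below are made up for the example.
def _example_rlabel(permutations=999):
    group1 = list(np.random.normal(1.0, 1.0, 50))
    group2 = list(np.random.normal(0.0, 1.0, 50))
    r = RLabel([group1, group2], permutations=permutations, useAll=False)
    # r.diff is the observed difference of means, r.p_sim its pseudo p-value
    return r.diff, r.p_sim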
def act_on_msa(empShpOut_paths, thr=0.1, permutations=9999):
'''
Perform operations required at the MSA level
NOTE: besides returning `emp`, the method creates a shapefile and a .gal
file for the MSA (if not present in `out_path`) and a shapefile with
centers in `out_path`
...
Arguments
---------
empShpOut_paths : tuple
Parameters, including:
* emp: DataFrame with MSA data
* shp_path: None/str to shp with all tracts
* out_path: str to out folder
thr : float
[Optional, default to 0.1] Significance level to consider center candidates
permutations : int
[Optional, default to 9999] Number of permutations to
obtain pseudo-significance values
Returns
-------
emp : DataFrame
Table with output information for tracts in msa. This
includes:
* dens_eb
* lisa_i
* lisa_p_sim
* center_id
'''
emp, shp_link, out_link = empShpOut_paths
msa = emp['msa'].min()
# get shape and W
msa_shp_link = out_link + msa + '.shp'
msa_gal_link = msa_shp_link.replace('.shp', '_queen.gal')
try:
fo = ps.open(msa_shp_link)
fo.close()
except:
_ = clip_shp(shp_link, "GISJOIN", list(emp['GISJOIN'].values), \
msa_shp_link)
try:
w = ps.open(msa_gal_link).read()
except:
w = ps.queen_from_shapefile(msa_shp_link, "GISJOIN")
fo = ps.open(msa_gal_link, 'w')
fo.write(w)
fo.close()
print w.weights.keys()[:5]
w.id_order = w.id_order
print w.weights.keys()[:5]
w.transform = 'R'
print w.weights.keys()[:5]
emp = emp.set_index('GISJOIN').reindex(ps.open(\
msa_shp_link.replace('.shp', '.dbf'))\
.by_col('GISJOIN'))\
.fillna(0)
# get EB rate
print w.weights.keys()[:5]
eb = ps.esda.smoothing.Spatial_Empirical_Bayes(\
emp['emp'].values, emp['Shape_area'].values, w)
emp['dens_eb'] = eb.r
emp['dens_eb'] = emp['dens_eb'].fillna(0) #Avoid sliver problem
# LISA
lisa = ps.Moran_Local(emp['dens_eb'].values, w, permutations=permutations)
lisa_pack = pd.DataFrame({'Is': lisa.Is, 'z_sim': lisa.z_sim, \
'EI_sim': lisa.EI_sim, 'seI_sim': lisa.seI_sim, \
'p_sim': lisa.p_sim, 'q': lisa.q})
lisa_pack = lisa_pack[['Is', 'z_sim', 'EI_sim', 'seI_sim', 'p_sim', 'q']]
emp['lisa_i'] = lisa.Is
emp['lisa_p_sim'] = lisa.p_sim
emp['q'] = lisa.q
emp['q'] = emp['q'].map({1: 'HH', 2: 'LH', 3: 'LL', 4: 'HL'})
# Center identification
w.transform = 'O'
c = CentFinder(lisa_pack.values, w, thr)
emp['center_id'] = None
for core in c.sClus:
members = list(c.sClus[core])
emp.ix[members, 'center_id'] = core
# Write out results
if c.sClus:
cent_shp_link = out_link + msa + '_cent.shp'
ids_in_cent = list(emp[emp['center_id'].notnull()].index.values)
_ = clip_shp(msa_shp_link, "GISJOIN", ids_in_cent, cent_shp_link)
_ = df2dbf(emp.reindex(ps.open(cent_shp_link\
.replace('.shp', '.dbf')).by_col('GISJOIN')),
cent_shp_link.replace('.shp', '.dbf'))
emp.index.name = "GISJOIN"
emp.to_csv(out_link + msa + '.csv')
return emp
def load_msa_data(link, y90=False):
"""
Load legacy 1990 and 2000 data
...
Arguments
---------
link : str
Path to original data
y90 : boolean
Flag for 1990 data. If False (default), it assumes the length of
a GISJOIN id is 14 with `G` included; if True, it assumes a
length of 12.
Returns
-------
db : DataFrame
Table indexed to GISJOIN with `emp`, `Shape_area` and `msa` as
columns
"""
def _guess90(id):
id = str(id)
if len(id) == 11: # 48 999 999 999
return 'G' + str(id)
if len(id) == 10: # 06 999 999 999
return 'G0' + str(id)
if len(id) == 13: # 48 999 999 999 00
return 'G' + str(id)
if len(id) == 12: # 06 999 999 999 00
return 'G0' + str(id)
db = pd.read_csv(link, index_col=0)[['emp', 'area']]
db['area'] = db['area'] * 9.2903e-8# Sq. foot to Sq. Km
db['msa'] = 'm' + link.strip('.csv').split('/')[-1]
if y90:
db = db.rename(lambda x: _guess90(x))\
.rename(columns={'area': 'Shape_area'})
else:
db = db.rename(lambda x: 'G' + str(x).zfill(13))\
.rename(columns={'area': 'Shape_area'})
return db
def msafy(cty, cty2msa):
'''
Helper function to assign MSA to a county
...
Arguments
---------
cty : str
County to assign a MSA
cty2msa : dict
Mapping of counties to MSAs
Returns
-------
MSA/None
'''
try:
return cty2msa[cty]
except:
return None
def evol_tab(db):
'''
Build table of evolution. Counts how many MSAs there are in every possible
combination for the three periods in time (1990, 2000, 2010)
...
Arguments
---------
db : DataFrame
Tract table with at least MSA, year and center identifiers as
columns
Returns
-------
tab : DataFrame
List with MSA counts indexed on the three types of MSAs
(no_centers, monocentric, polycentric) across the three years
'''
g = db.groupby(['msa', 'year']).apply(\
lambda x: x.groupby('center_id').ngroups)
simp = g.apply(_monopoly)
tab = simp.unstack().groupby([1990, 2000, 2010]).size()
return tab
def _monopoly(c):
if c == 0:
return 'empty'
elif c == 1:
return 'monocentric'
else:
return 'polycentric'
q_names = {0: 'Insignificant', 1: 'HH', 2: 'LH', 3: 'LL', 4: 'HL'}
q_mapper = {0: cc.to_rgba('0.3'), 1: (0.75, 0, 0, 1), \
2: (1.0, 0.7529411764705882, 0.8, 1), \
3: cc.to_rgba('blue'), \
4: (0, 0.8, 1, 1)}
def plot_lisa(lisa, st, msa, outfile=None, thr=0.05, title=''):
'''
Plot LISA results for MSAs on background map of US states
NOTE: shapefiles hardcoded linked to paths inside the function
...
Arguments
---------
lisa : Moran_Local
LISA object from PySAL
st : str
Path to states shape
msa : str
Path to MSA points shape
outfile : str
[Optional] Path to png to be written
thr : float
[Optional] Significance value to identify clusters
title : str
[Optional] Title for the figure
Returns
-------
None
'''
sig = (lisa.p_sim < thr) * 1
vals = pd.Series(lisa.q * sig)
states = ps.open(st)
pts = ps.open(msa)
fig = plt.figure(figsize=(9, 5))
base = maps.map_poly_shp(states)
base.set_facecolor('0.85')
base.set_linewidth(0.75)
base.set_edgecolor('0.95')
msas = pd.np.array([pt for pt in ps.open(msa)])
sizes = vals.apply(lambda x: 4 if x==0 else 50)
colors = vals.map(q_mapper)
colors = pd.np.array(list(colors))
pts = []
for clas in q_mapper:
i = vals[vals==clas].index
p = plt.scatter(msas[i, 0], msas[i, 1], s=sizes[i], \
c=colors[i, :], label=q_names[clas])
p.set_linewidth(0)
pts.append(p)
plt.legend(loc=3, ncol=2, fontsize=14, scatterpoints=1, frameon=False)
ax = maps.setup_ax([base] + pts)
#ax = maps.setup_ax(pts)
fig.add_axes(ax)
if title:
plt.title(title)
if outfile:
plt.savefig(outfile)
else:
plt.show()
return None
def load_soc_ec(link):
msa = 'm' + link.split('/')[-1].strip('m').strip('.csv')
db = pd.read_csv(link, index_col=0).rename(_guess)
db['msa'] = msa
return db
def _guess(id):
id = str(id)
if len(id) == 11: # 48 999 999 999
return 'G' + str(id)
if len(id) == 10: # 06 999 999 999
return 'G0' + str(id)
if len(id) == 13: # 48 999 999 999 00
return 'G' + str(id)
if len(id) == 12: # 06 999 999 999 00
return 'G0' + str(id)
def do_rl(msas, years, perms=99):
g90_00 = msas.groupby(years)
out = []
for g in g90_00:
id, g = g
sub = []
for var in g.drop([1990, 2000, 2010], axis=1):
g1 = g[var]
rest = msas.ix[msas.index - g1.index, var]
all = [list(g1.values), list(rest.values)]
r = RLabel(all, permutations=perms, useAll=True)
cell = str(r.mean0) + _sign(r) + _signify(r.p_sim)
s = pd.Series(cell, index=[var])
sub.append(s)
sub = pd.concat(sub)
sub.name = id
out.append(sub)
out = pd.concat(out, axis=1).T
out.index = pd.MultiIndex.from_tuples(out.index, \
names=years)
return out
def _sign(r):
if (r.mean0 - r.mean1) > 0 and r.p_sim < 0.1:
return '+'
elif (r.mean0 - r.mean1) <= 0 and r.p_sim < 0.1:
return '-'
else:
return ''
def _signify(p):
if p < 0.01:
return '***'
elif 0.01 <= p < 0.05:
return '**'
elif 0.05 <= p < 0.1:
return '*'
elif p >= 0.1:
return ''
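# Illustrative sketch, not part of the original pipeline: the input `do_rl`
# expects is one row per MSA, a center-type column per year plus the
# socio-economic variables to test. The values below are made up, and the
# sketch assumes the legacy pandas API (`.ix`) the module already relies on.
def _example_do_rl():
    msas = pd.DataFrame({1990: ['monocentric', 'polycentric', 'monocentric'],
                         2000: ['monocentric', 'polycentric', 'polycentric'],
                         2010: ['polycentric', 'polycentric', 'polycentric'],
                         'income': [1.0, 2.0, 1.5],
                         'employment': [10.0, 20.0, 15.0]},
                        index=['m1', 'm2', 'm3'])
    return do_rl(msas, [1990, 2000, 2010], perms=99)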
| bsd-3-clause |
astocko/statsmodels | statsmodels/datasets/scotland/data.py | 25 | 2989 | """Taxation Powers Vote for the Scottish Parliament 1997 dataset."""
__docformat__ = 'restructuredtext'
COPYRIGHT = """Used with express permission from the original author,
who retains all rights."""
TITLE = "Taxation Powers Vote for the Scottish Parliamant 1997"
SOURCE = """
Jeff Gill's `Generalized Linear Models: A Unified Approach`
http://jgill.wustl.edu/research/books.html
"""
DESCRSHORT = """Taxation Powers' Yes Vote for Scottish Parliamanet-1997"""
DESCRLONG = """
This data is based on the example in Gill and describes the proportion of
voters who voted Yes to grant the Scottish Parliament taxation powers.
The data are divided into 32 council districts. This example's explanatory
variables include the amount of council tax collected in pounds sterling as
of April 1997 per two adults before adjustments, the female percentage of
total claims for unemployment benefits as of January, 1998, the standardized
mortality rate (UK is 100), the percentage of labor force participation,
regional GDP, the percentage of children aged 5 to 15, and an interaction term
between female unemployment and the council tax.
The original source files and variable information are included in
/scotland/src/
"""
NOTE = """::
Number of Observations - 32 (1 for each Scottish district)
Number of Variables - 8
Variable name definitions::
YES - Proportion voting yes to granting taxation powers to the
Scottish parliament.
COUTAX - Amount of council tax collected in pounds sterling as of
April '97
UNEMPF - Female percentage of total unemployment benefits claims as of
January 1998
MOR - The standardized mortality rate (UK is 100)
ACT - Labor force participation (Short for active)
GDP - GDP per county
AGE - Percentage of children aged 5 to 15 in the county
COUTAX_FEMALEUNEMP - Interaction between COUTAX and UNEMPF
Council district names are included in the data file, though are not
returned by load.
"""
import numpy as np
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""
Load the Scotvote data and returns a Dataset instance.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray(data, endog_idx=0, dtype=float)
def load_pandas():
"""
Load the Scotvote data and returns a Dataset instance.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray_pandas(data, endog_idx=0, dtype=float)
def _get_data():
filepath = dirname(abspath(__file__))
data = np.recfromtxt(open(filepath + '/scotvote.csv',"rb"), delimiter=",",
names=True, dtype=float, usecols=(1,2,3,4,5,6,7,8))
return data
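# Illustrative usage sketch, not part of the original module, assuming the
# statsmodels.api entry point: Gill models the Yes-vote proportion with a
# Gamma-family GLM, which would look roughly like this.
def _example_usage():
    import statsmodels.api as sm
    data = sm.datasets.scotland.load()
    exog = sm.add_constant(data.exog, prepend=False)
    model = sm.GLM(data.endog, exog, family=sm.families.Gamma())
    return model.fit().params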
| bsd-3-clause |
wdurhamh/statsmodels | statsmodels/datasets/cpunish/data.py | 25 | 2597 | """US Capital Punishment dataset."""
__docformat__ = 'restructuredtext'
COPYRIGHT = """Used with express permission from the original author,
who retains all rights."""
TITLE = __doc__
SOURCE = """
Jeff Gill's `Generalized Linear Models: A Unified Approach`
http://jgill.wustl.edu/research/books.html
"""
DESCRSHORT = """Number of state executions in 1997"""
DESCRLONG = """This data describes the number of times capital punishment is implemented
at the state level for the year 1997. The outcome variable is the number of
executions. There were executions in 17 states.
Included in the data are explanatory variables for median per capita income
in dollars, the percent of the population classified as living in poverty,
the percent of Black citizens in the population, the rate of violent
crimes per 100,000 residents for 1996, a dummy variable indicating
whether the state is in the South, and (an estimate of) the proportion
of the population with a college degree of some kind.
"""
NOTE = """::
Number of Observations - 17
Number of Variables - 7
Variable name definitions::
EXECUTIONS - Executions in 1997
INCOME - Median per capita income in 1996 dollars
PERPOVERTY - Percent of the population classified as living in poverty
PERBLACK - Percent of black citizens in the population
VC100k96 - Rate of violent crimes per 100,000 residents for 1996
SOUTH - SOUTH == 1 indicates a state in the South
DEGREE - An estimate of the proportion of the state population with a
college degree of some kind
State names are included in the data file, though not returned by load.
"""
from numpy import recfromtxt, column_stack, array
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""
Load the cpunish data and return a Dataset class.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray(data, endog_idx=0, dtype=float)
def load_pandas():
"""
Load the cpunish data and return a Dataset class.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray_pandas(data, endog_idx=0, dtype=float)
def _get_data():
filepath = dirname(abspath(__file__))
data = recfromtxt(open(filepath + '/cpunish.csv', 'rb'), delimiter=",",
names=True, dtype=float, usecols=(1,2,3,4,5,6,7))
return data
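# Illustrative usage sketch, not part of the original module, assuming the
# statsmodels.api entry point: execution counts are a natural fit for a
# Poisson-family GLM, as in Gill's treatment of this dataset.
def _example_usage():
    import statsmodels.api as sm
    data = sm.datasets.cpunish.load()
    exog = sm.add_constant(data.exog, prepend=False)
    model = sm.GLM(data.endog, exog, family=sm.families.Poisson())
    return model.fit().params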
| bsd-3-clause |
SKIRT/PTS | modeling/truncation/analytics.py | 1 | 21605 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.modeling.truncation.analytics Contains the TruncationAnalyticsCalculator class.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
import numpy as np
from collections import defaultdict
import matplotlib.pyplot as plt
from scipy import interpolate
# Import the relevant PTS classes and modules
from ...magic.region.list import SkyRegionList, PixelRegionList
from .component import TruncationComponent
from ...core.tools import filesystem as fs
from ...core.basics.log import log
from ...magic.dist_ellipse import distance_ellipse
from ...core.basics.range import RealRange
from ...core.basics.map import Map
from ...magic.core.mask import intersection
from ...core.units.parsing import parse_quantity
from pts.core.tools.utils import lazyproperty
# -----------------------------------------------------------------
mask_names = ["padded", "bad"]
# -----------------------------------------------------------------
class TruncationAnalyticsCalculator(TruncationComponent):
"""
This class...
"""
def __init__(self, *args, **kwargs):
"""
The constructor ...
:param kwargs:
:return:
"""
# Call the constructor of the base class
super(TruncationAnalyticsCalculator, self).__init__(*args, **kwargs)
# --- Attributes ---
# The statistics for each image
self.statistics = dict()
# The frames and error maps
self.frames = None
self.errormaps = None
self.masks = None
# The sky ellipses
self.sky_ellipses = dict()
# Truncation ellipse
self.ellipses = defaultdict(dict)
# Paths
self.paths = dict()
# The remote host (if needed)
self.remote = None
# The remote cache path
self.remote_truncation_path = None
# Cache paths
self.cache_paths = dict()
# -----------------------------------------------------------------
def _run(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# 2. Load the data
self.load_data()
# 3. Create directories
self.create_directories()
# 4. Find the best radius for the truncation
self.calculate_statistics()
# 5. Create the ellipses
self.create_ellipses()
# 6. Writing
self.write()
# 7. Plotting
self.plot()
# -----------------------------------------------------------------
def setup(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# Call the setup function of the base class
super(TruncationAnalyticsCalculator, self).setup(**kwargs)
# Setup the remote
#self.remote = Remote(host_id=self.environment.cache_host_id)
# Create the cache directory
#directory_name = cached_directory_name_for_single_command(self.environment, self.command_name())
#self.remote_truncation_path = fs.join(self.remote.home_directory, directory_name)
#if self.config.cache:
# if not self.remote.is_directory(self.remote_truncation_path): self.remote.create_directory(self.remote_truncation_path)
# -----------------------------------------------------------------
@property
def nframes(self):
"""
This function ...
:return:
"""
return len(self.frames)
# -----------------------------------------------------------------
def load_data(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Loading the data ...")
# Get the frames
self.frames = self.dataset.get_framelist()
# Get the error maps
self.errormaps = self.dataset.get_errormaplist()
# Loop over all prepared images, get the images
self.masks = dict()
for name in self.dataset.names:
# Get the mask
mask = self.dataset.get_image_masks_union(name, mask_names, strict=False)
# Set the mask
if mask is None: continue
self.masks[name] = mask
# -----------------------------------------------------------------
def create_directories(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Creating a directory for each image ...")
# Loop over the image
for name in self.frames.names:
# Create directory
path = fs.create_directory_in(self.truncation_analytics_path, name)
# Set path
self.paths[name] = path
# Determine remote cache path
#remote_path = fs.join(self.remote_truncation_path, name)
# Set path
#self.cache_paths[name] = remote_path
# Create directory
#if self.config.cache and not self.remote.is_directory(remote_path): self.remote.create_directory(remote_path)
# -----------------------------------------------------------------
def calculate_statistics(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Getting the statistics as a function of radius ...")
# Get the angle
center = self.disk_ellipse.center # in sky coordinates
semimajor = self.disk_ellipse.semimajor
semiminor = self.disk_ellipse.semiminor
angle = self.disk_ellipse.angle
# Determine the ratio of semimajor and semiminor
ratio = semiminor / semimajor
# Loop over all prepared images
for name in self.frames.names:
# Get the image
frame = self.dataset.get_frame(name)
# Get the mask
mask = self.dataset.get_image_masks_union(name, mask_names, strict=False)
# Convert center to pixel coordinates
center_pix = center.to_pixel(frame.wcs)
# Create distance ellipse frame
distance_frame = distance_ellipse(frame.shape, center_pix, ratio, angle)
radius_list = []
signal_to_noise_list = []
nmasked_list = []
# Loop over the radii
min_distance = np.min(distance_frame)
max_distance = np.max(distance_frame)
step = (max_distance - min_distance) / float(self.config.nbins)
# Set the first range
radius_range = RealRange(min_distance, min_distance + step)
# Loop, shifting ranges of radius
while True:
# Check the range
if radius_range.min > max_distance: break
# Get the average radius
radius_center = radius_range.center
above_min_mask = distance_frame >= radius_range.min
below_max_mask = distance_frame < radius_range.max
# Make a mask of the pixels corresponding to the current radius range
#range_mask = radius_range.min <= distance_frame < radius_range.max
range_mask = intersection(above_min_mask, below_max_mask)
# Calculate the mean signal to noise in the pixels
signal_to_noises = self.frames[name][range_mask] / self.errormaps[name][range_mask]
# Calculate the mean signal to noise
signal_to_noise = np.mean(signal_to_noises)
# Make a mask of all the pixels below the center radius
below_mask = distance_frame < radius_center
# Calculate the number of masked pixels
nmasked = np.sum(mask[below_mask])
ntotal = np.sum(below_mask)
rel_nmasked = nmasked / ntotal
# Add point
radius_list.append(radius_center)
signal_to_noise_list.append(signal_to_noise)
nmasked_list.append(rel_nmasked)
# Shift the range
radius_range += step
# Set the statistics for this image
statistics = Map()
statistics.radii = radius_list
statistics.snr = signal_to_noise_list
statistics.nmasked = nmasked_list
self.statistics[name] = statistics
# -----------------------------------------------------------------
@lazyproperty
def factors(self):
"""
This function ...
:return:
"""
return self.config.factor_range.linear(self.config.factor_nvalues, as_list=True)
# -----------------------------------------------------------------
def create_ellipses(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Creating ellipses ...")
# Loop over the different scale factors
for factor in self.factors:
# Get the scaled ellipse
sky_ellipse = self.disk_ellipse * factor
# Add the sky ellipse
self.sky_ellipses[factor] = sky_ellipse
# Loop over the frames
for name in self.frames.names:
# Convert to pixel ellipse
pixel_ellipse = sky_ellipse.to_pixel(self.frames[name].wcs)
# Add the ellipse
self.ellipses[name][factor] = pixel_ellipse
# -----------------------------------------------------------------
def write(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing ...")
# Write the truncation ellipse
self.write_ellipses()
# Write the truncated images
#self.write_images()
# Write low-res truncated image
self.write_lowres_images()
# -----------------------------------------------------------------
def write_ellipses(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing the ellipses ...")
# Write sky ellipses
self.write_sky_ellipses()
# Write image ellipses
self.write_image_ellipses()
# -----------------------------------------------------------------
def write_sky_ellipses(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing the truncation ellipse region ...")
# Determine the path to the region file
path = fs.join(self.truncation_path, "ellipses.reg")
# Create the region list
regions = SkyRegionList()
# Loop over the ellipses
for factor in self.sky_ellipses:
# Add ellipse
ellipse = self.sky_ellipses[factor]
ellipse.meta["text"] = str(factor)
regions.append(ellipse)
# Write
regions.saveto(path)
# -----------------------------------------------------------------
def write_image_ellipses(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing the image ellipses ...")
# Loop over the images
for name in self.ellipses:
# Get the path
path = fs.join(self.paths[name], "ellipses.reg")
# Create region list
regions = PixelRegionList()
# Loop over the ellipses
for factor in self.ellipses[name]:
# Add ellipse
ellipse = self.ellipses[name][factor]
ellipse.meta["text"] = str(factor)
regions.append(ellipse)
# Write
regions.saveto(path)
# -----------------------------------------------------------------
# def write_images(self):
#
# """
# This function ...
# :return:
# """
#
# # Inform the user
# log.info("Writing the truncated images ...")
#
# # Loop over the the images
# index = 0
# for name in self.frames.names:
#
# # Debugging
# index += 1
# progress = float(index) / float(self.nframes)
# log.debug("Writing truncated images for the " + name + " image (" + str(index) + " of " + str(self.nframes) + ") ...")
#
# # Loop over the factors
# for factor in self.factors:
#
# # Determine the local path
# filename = str(factor) + ".fits"
# path = fs.join(self.paths[name], filename)
#
# # Determine the remote path
# remote_path = fs.join(self.cache_paths[name], filename)
#
# # Already existing
# if fs.is_file(path):
#
# # Debugging
# log.debug("Truncated " + name + " image with factor " + str(factor) + "is already present: not creating it again")
#
# # Cache if requested
# if self.config.cache:
#
# # Upload
# self.remote.upload_file_to(path, self.cache_paths[name], remove=True)
#
# # Debugging
# log.debug("Truncated " + name + " image with factor " + str(factor) + " has been cached to '" + remote_path + "'")
#
# # Already present remotely
# elif self.remote.is_file(remote_path):
#
# # Debugging
# log.debug("Truncated " + name + " image with factor " + str(factor) + " is already present and cached on remote host '" + self.remote.host_id + "'")
#
# # Not yet present, create truncated image (and cache)
# else:
#
# # Debugging
# log.debug("Creating the truncated " + name + " image with factor " + str(factor) + "...")
#
# # Get the pixel ellipse
# ellipse = self.ellipses[name][factor]
#
# # Convert into mask
# mask = ellipse.to_mask(self.frames[name].xsize, self.frames[name].ysize, invert=True)
#
# # Truncate the frame
# frame = self.frames[name]
# frame[mask] = self.config.truncated_value
#
# # Save
# frame.saveto(path)
#
# # Cache
# if self.config.cache:
#
# # Upload, and remove local file
# remote_path = self.remote.upload_file_to(path, self.cache_paths[name], remove=True)
#
# # Debugging
# log.debug("Truncated " + name + " image with factor " + str(factor) + " has been cached to '" + remote_path + "'")
# -----------------------------------------------------------------
def write_lowres_images(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing low-resolution truncated images ...")
# Define maximum pixelscale (don't rebin to coordinate systems with a higher pixelscale)
max_pixelscale = parse_quantity("15 arcsec")
# Find the name of the image with the lowest spatial resolution
#reference_name = self.frames.highest_pixelscale_name
reference_name = self.frames.highest_pixelscale_name_below(max_pixelscale)
# Create masks for different factors
masks = dict()
# Loop over the factors
for factor in self.factors:
# Get the pixel ellipse
ellipse = self.ellipses[reference_name][factor]
# Convert into mask
mask = ellipse.to_mask(self.frames[reference_name].xsize, self.frames[reference_name].ysize, invert=True)
masks[factor] = mask
# Debugging
log.debug("Rebinning the maps to lowest resolution ...")
# Rebin
self.frames.rebin_to_name(reference_name)
# Loop over the the images
index = 0
for name in self.frames.names:
# Debugging
index += 1
progress = float(index) / float(self.nframes)
log.debug("Writing low-resolution truncated images for the " + name + " image (" + str(index) + " of " + str(self.nframes) + ") ...")
# Loop over the factors
for factor in self.factors:
# Determine the lowres path
lowres_path = fs.create_directory_in(self.paths[name], "lowres")
# Determine the local path
filename = str(factor) + ".fits"
path = fs.join(lowres_path, filename)
# Check if present
if fs.is_file(path):
log.debug("Low-resolution truncated " + name + " image with factor " + str(factor) + " is already present: not creating it again")
else:
# Debugging
log.debug("Creating the low-resolution truncated " + name + " image with factor " + str(factor) + "...")
# Get the frame and rebin
frame = self.frames[name]
#frame.rebin(reference_wcs)
# Get the mask
mask = masks[factor]
# Truncate the low-res frame
frame[mask] = self.config.truncated_value
# Save
frame.saveto(path)
# -----------------------------------------------------------------
def plot(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Plotting ...")
# Plot the curves
self.plot_snr()
# One plot for all images
self.plot_snr_at_ellipses()
# Plot nmasked pixels
self.plot_nmasked()
# -----------------------------------------------------------------
def plot_snr(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Plotting the snr curves ...")
# Loop over the frame names
for name in self.statistics:
# Get x and y
radii = self.statistics[name].radii
snr = self.statistics[name].snr
# Create plot
plt.figure()
plt.plot(radii, snr)
# Add vertical lines
for factor in self.ellipses[name]:
radius = self.ellipses[name][factor].major
plt.axvline(x=radius)
# Determine the path
path = fs.join(self.paths[name], "snr.pdf")
# Save the figure
plt.savefig(path)
plt.close()
# -----------------------------------------------------------------
def plot_snr_at_ellipses(self):
"""
This function ...
:return:
"""
data = defaultdict(list)
# Loop over the frame names
for name in self.statistics:
# Get x and y
radii = self.statistics[name].radii
snr = self.statistics[name].snr
interpfunc = interpolate.interp1d(radii, snr, kind='linear')
for factor in self.ellipses[name]:
radius = self.ellipses[name][factor].major
# Get corresponding snr
try: snr = interpfunc(radius)
except ValueError: snr = 0.0 # ValueError: A value in x_new is below the interpolation range.
data[factor].append(snr)
# Create plot
plt.figure()
# Add the data to the plot
for factor in data:
snrs = data[factor]
for snr in snrs: plt.plot([factor], [snr], marker='o', markersize=3, color="red")
# Determine the path
path = fs.join(self.truncation_path, "snrs.pdf")
# Save the figure
plt.savefig(path)
plt.close()
# -----------------------------------------------------------------
def plot_nmasked(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Plotting the nmasked pixels curves ...")
# Loop over the frame names
for name in self.statistics:
# Get x and y
radii = self.statistics[name].radii
nmasked = self.statistics[name].nmasked
# Create plot
plt.figure()
plt.plot(radii, nmasked)
# Add vertical lines
for factor in self.ellipses[name]:
radius = self.ellipses[name][factor].major
plt.axvline(x=radius)
# Determine the path
path = fs.join(self.paths[name], "nmasked.pdf")
# Save the figure
plt.savefig(path)
plt.close()
# -----------------------------------------------------------------
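# Illustrative sketch, not part of the original class: stripped of the PTS
# data structures, calculate_statistics reduces to binning an elliptical
# distance frame and averaging the signal-to-noise per radius bin. All inputs
# are assumed to be plain numpy arrays of equal shape.
def _example_radial_snr(frame, errors, distance_frame, nbins=20):
    radii, snr = [], []
    edges = np.linspace(distance_frame.min(), distance_frame.max(), nbins + 1)
    for low, high in zip(edges[:-1], edges[1:]):
        in_bin = (distance_frame >= low) & (distance_frame < high)
        if not in_bin.any():
            continue
        radii.append(0.5 * (low + high))
        snr.append(np.mean(frame[in_bin] / errors[in_bin]))
    return radii, snr
# -----------------------------------------------------------------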
| agpl-3.0 |
yunfeilu/scikit-learn | examples/decomposition/plot_pca_vs_fa_model_selection.py | 142 | 4467 | """
===============================================================
Model selection with Probabilistic PCA and Factor Analysis (FA)
===============================================================
Probabilistic PCA and Factor Analysis are probabilistic models.
The consequence is that the likelihood of new data can be used
for model selection and covariance estimation.
Here we compare PCA and FA with cross-validation on low rank data corrupted
with homoscedastic noise (noise variance
is the same for each feature) or heteroscedastic noise (noise variance
is different for each feature). In a second step we compare the model
likelihood to the likelihoods obtained from shrinkage covariance estimators.
One can observe that with homoscedastic noise both FA and PCA succeed
in recovering the size of the low rank subspace. The likelihood with PCA
is higher than FA in this case. However PCA fails and overestimates
the rank when heteroscedastic noise is present. Under appropriate
circumstances the low rank models are more likely than shrinkage models.
The automatic estimation from
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
by Thomas P. Minka is also compared.
"""
print(__doc__)
# Authors: Alexandre Gramfort
# Denis A. Engemann
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.covariance import ShrunkCovariance, LedoitWolf
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
###############################################################################
# Create the data
n_samples, n_features, rank = 1000, 50, 10
sigma = 1.
rng = np.random.RandomState(42)
U, _, _ = linalg.svd(rng.randn(n_features, n_features))
X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)
# Adding homoscedastic noise
X_homo = X + sigma * rng.randn(n_samples, n_features)
# Adding heteroscedastic noise
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas
###############################################################################
# Fit the models
n_components = np.arange(0, n_features, 5) # options for n_components
def compute_scores(X):
pca = PCA()
fa = FactorAnalysis()
pca_scores, fa_scores = [], []
for n in n_components:
pca.n_components = n
fa.n_components = n
pca_scores.append(np.mean(cross_val_score(pca, X)))
fa_scores.append(np.mean(cross_val_score(fa, X)))
return pca_scores, fa_scores
def shrunk_cov_score(X):
shrinkages = np.logspace(-2, 0, 30)
cv = GridSearchCV(ShrunkCovariance(), {'shrinkage': shrinkages})
return np.mean(cross_val_score(cv.fit(X).best_estimator_, X))
def lw_score(X):
return np.mean(cross_val_score(LedoitWolf(), X))
for X, title in [(X_homo, 'Homoscedastic Noise'),
(X_hetero, 'Heteroscedastic Noise')]:
pca_scores, fa_scores = compute_scores(X)
n_components_pca = n_components[np.argmax(pca_scores)]
n_components_fa = n_components[np.argmax(fa_scores)]
pca = PCA(n_components='mle')
pca.fit(X)
n_components_pca_mle = pca.n_components_
print("best n_components by PCA CV = %d" % n_components_pca)
print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
print("best n_components by PCA MLE = %d" % n_components_pca_mle)
plt.figure()
plt.plot(n_components, pca_scores, 'b', label='PCA scores')
plt.plot(n_components, fa_scores, 'r', label='FA scores')
plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-')
plt.axvline(n_components_pca, color='b',
label='PCA CV: %d' % n_components_pca, linestyle='--')
plt.axvline(n_components_fa, color='r',
label='FactorAnalysis CV: %d' % n_components_fa, linestyle='--')
plt.axvline(n_components_pca_mle, color='k',
label='PCA MLE: %d' % n_components_pca_mle, linestyle='--')
# compare with other covariance estimators
plt.axhline(shrunk_cov_score(X), color='violet',
label='Shrunk Covariance MLE', linestyle='-.')
plt.axhline(lw_score(X), color='orange',
label='LedoitWolf MLE', linestyle='-.')
plt.xlabel('nb of components')
plt.ylabel('CV scores')
plt.legend(loc='lower right')
plt.title(title)
plt.show()
| bsd-3-clause |
moutai/scikit-learn | sklearn/utils/multiclass.py | 40 | 12966 |
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
from ..utils.fixes import array_equal
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_indicator(y):
return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
_FN_UNIQUE_LABELS = {
'binary': _unique_multiclass,
'multiclass': _unique_multiclass,
'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
"""Extract an ordered array of unique labels
We don't allow:
- mix of multilabel and multiclass (single label) targets
- mix of label indicator matrix and anything else,
(because there are no explicit labels)
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes,
Returns
-------
out : numpy array of shape [n_unique_labels]
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
if not ys:
raise ValueError('No argument has been passed.')
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == set(["binary", "multiclass"]):
ys_types = set(["multiclass"])
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (label_type == "multilabel-indicator" and
len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
for y in ys)) > 1):
raise ValueError("Multi-label binary indicator input with "
"different numbers of labels")
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %s" % repr(ys))
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# Check that we don't mix string type with number type
if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
raise ValueError("Mix of label input types (string and number)")
return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
out : bool,
Return ``True``, if ``y`` is in a multilabel format, else ```False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel([[1], [0, 2], []])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
if hasattr(y, '__array__'):
y = np.asarray(y)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.unique(y.data).size == 1 and
(y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(np.unique(y.data))))
else:
labels = np.unique(y)
return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(labels))
def check_classification_targets(y):
"""Ensure that target y is of a non-regression type.
Only the following target types (as defined in type_of_target) are allowed:
'binary', 'multiclass', 'multiclass-multioutput',
'multilabel-indicator', 'multilabel-sequences'
Parameters
----------
y : array-like
"""
y_type = type_of_target(y)
if y_type not in ['binary', 'multiclass', 'multiclass-multioutput',
'multilabel-indicator', 'multilabel-sequences']:
raise ValueError("Unknown label type: %r" % y_type)
def type_of_target(y):
"""Determine the type of data indicated by target `y`
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multiclass-multioutput'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, string_types))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
if is_multilabel(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# Known to fail in numpy 1.3 for array of arrays
return 'unknown'
# The old sequence of sequences format
try:
if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], string_types)):
raise ValueError('You appear to be using a legacy multi-label data'
' representation. Sequence of sequences are no'
' longer supported; use a binary array or sparse'
' matrix instead.')
except IndexError:
pass
# Invalid inputs
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], string_types)):
return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"]
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown' # [[]]
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# check float and contains non-integer float values
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
return 'continuous' + suffix
if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
return 'binary' # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not array_equal(clf.classes_, unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
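# Illustrative sketch (not part of the original module): a minimal, hypothetical
# estimator-like class showing the intended call pattern for
# _check_partial_fit_first_call. The class and attribute names below are made
# up for demonstration only.
class _PartialFitDemo(object):
    def partial_fit(self, X, y, classes=None):
        if _check_partial_fit_first_call(self, classes):
            # First call: ``classes_`` has just been set from ``classes``;
            # allocate per-class state (here, simple per-class counters).
            self.counts_ = dict.fromkeys(self.classes_.tolist(), 0)
        # Subsequent calls reuse ``classes_`` and only update the state.
        for label in y:
            self.counts_[label] += 1
        return self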
def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data
Parameters
----------
y : array-like or sparse matrix of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape = (n_samples,), optional
Sample weights.
Returns
-------
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
n_classes : list of integers of size n_outputs
Number of classes in each column
class_prior : list of size n_outputs of arrays of size (n_classes,)
Class distribution of each column.
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
zeros_samp_weight_sum = (np.sum(sample_weight) -
np.sum(nz_samp_weight))
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
return_inverse=True)
class_prior_k = bincount(y_k, weights=nz_samp_weight)
# An explicit zero was found, combine its weight with the weight
# of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
# If there is an implicit zero and it is not in classes and
# class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0,
zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
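# Illustrative helper (not part of the original module): a tiny, hand-checkable
# demonstration of class_distribution on a dense multioutput target. The
# function name is made up and exists purely as a usage sketch.
def _class_distribution_demo():
    y_demo = np.array([[1, 0],
                       [2, 0],
                       [1, 3]])
    classes, n_classes, class_prior = class_distribution(y_demo)
    # Column 0 holds labels {1, 2} with counts (2, 1); column 1 holds {0, 3}
    # with counts (2, 1), so both priors are [2/3, 1/3].
    assert n_classes == [2, 2]
    return classes, n_classes, class_prior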
| bsd-3-clause |
Hiyorimi/scikit-image | skimage/feature/tests/test_hog.py | 11 | 7584 | import os
import numpy as np
from scipy import ndimage as ndi
import skimage as si
from skimage import color
from skimage import data
from skimage import feature
from skimage import img_as_float
from skimage import draw
from numpy.testing import (assert_raises,
assert_almost_equal,
)
def test_histogram_of_oriented_gradients_output_size():
img = img_as_float(data.astronaut()[:256, :].mean(axis=2))
fd = feature.hog(img, orientations=9, pixels_per_cell=(8, 8),
cells_per_block=(1, 1))
assert len(fd) == 9 * (256 // 8) * (512 // 8)
def test_histogram_of_oriented_gradients_output_correctness():
img = color.rgb2gray(data.astronaut())
correct_output = np.load(os.path.join(si.data_dir, 'astronaut_GRAY_hog.npy'))
output = feature.hog(img, orientations=9, pixels_per_cell=(8, 8),
cells_per_block=(3, 3), feature_vector=True,
transform_sqrt=False, visualise=False)
assert_almost_equal(output, correct_output)
def test_hog_image_size_cell_size_mismatch():
image = data.camera()[:150, :200]
fd = feature.hog(image, orientations=9, pixels_per_cell=(8, 8),
cells_per_block=(1, 1))
assert len(fd) == 9 * (150 // 8) * (200 // 8)
def test_hog_color_image_unsupported_error():
image = np.zeros((20, 20, 3))
assert_raises(ValueError, feature.hog, image)
def test_hog_basic_orientations_and_data_types():
# scenario:
# 1) create image (with float values) where upper half is filled by
# zeros, bottom half by 100
# 2) create unsigned integer version of this image
# 3) calculate feature.hog() for both images, both with 'transform_sqrt'
# option enabled and disabled
# 4) verify that all results are equal where expected
# 5) verify that computed feature vector is as expected
# 6) repeat the scenario for 90, 180 and 270 degrees rotated images
# size of testing image
width = height = 35
image0 = np.zeros((height, width), dtype='float')
image0[height // 2:] = 100
for rot in range(4):
# rotate by 0, 90, 180 and 270 degrees
image_float = np.rot90(image0, rot)
# create uint8 image from image_float
image_uint8 = image_float.astype('uint8')
(hog_float, hog_img_float) = feature.hog(
image_float, orientations=4, pixels_per_cell=(8, 8),
cells_per_block=(1, 1), visualise=True, transform_sqrt=False)
(hog_uint8, hog_img_uint8) = feature.hog(
image_uint8, orientations=4, pixels_per_cell=(8, 8),
cells_per_block=(1, 1), visualise=True, transform_sqrt=False)
(hog_float_norm, hog_img_float_norm) = feature.hog(
image_float, orientations=4, pixels_per_cell=(8, 8),
cells_per_block=(1, 1), visualise=True, transform_sqrt=True)
(hog_uint8_norm, hog_img_uint8_norm) = feature.hog(
image_uint8, orientations=4, pixels_per_cell=(8, 8),
cells_per_block=(1, 1), visualise=True, transform_sqrt=True)
# set to True to enable manual debugging with graphical output,
# must be False for automatic testing
if False:
import matplotlib.pyplot as plt
plt.figure()
plt.subplot(2, 3, 1)
plt.imshow(image_float)
plt.colorbar()
plt.title('image')
plt.subplot(2, 3, 2)
plt.imshow(hog_img_float)
plt.colorbar()
plt.title('HOG result visualisation (float img)')
plt.subplot(2, 3, 5)
plt.imshow(hog_img_uint8)
plt.colorbar()
plt.title('HOG result visualisation (uint8 img)')
plt.subplot(2, 3, 3)
plt.imshow(hog_img_float_norm)
plt.colorbar()
plt.title('HOG result (transform_sqrt) visualisation (float img)')
plt.subplot(2, 3, 6)
plt.imshow(hog_img_uint8_norm)
plt.colorbar()
plt.title('HOG result (transform_sqrt) visualisation (uint8 img)')
plt.show()
# results (features and visualisation) for float and uint8 images must
# be almost equal
assert_almost_equal(hog_float, hog_uint8)
assert_almost_equal(hog_img_float, hog_img_uint8)
# resulting features should be almost equal when 'transform_sqrt' is enabled
# or disabled (for current simple testing image)
assert_almost_equal(hog_float, hog_float_norm, decimal=4)
assert_almost_equal(hog_float, hog_uint8_norm, decimal=4)
# reshape resulting feature vector to matrix with 4 columns (each
# corresponding to one of 4 directions); only one direction should
# contain nonzero values (this is manually determined for testing
# image)
actual = np.max(hog_float.reshape(-1, 4), axis=0)
if rot in [0, 2]:
# image is rotated by 0 and 180 degrees
desired = [0, 0, 1, 0]
elif rot in [1, 3]:
# image is rotated by 90 and 270 degrees
desired = [1, 0, 0, 0]
else:
raise Exception('Result is not determined for this rotation.')
assert_almost_equal(actual, desired, decimal=2)
def test_hog_orientations_circle():
# scenario:
# 1) create image with blurred circle in the middle
# 2) calculate feature.hog()
# 3) verify that the resulting feature vector contains uniformly
# distributed values for all orientations, i.e. no orientation is
# lost or emphasized
# 4) repeat the scenario for other 'orientations' option
# size of testing image
width = height = 100
image = np.zeros((height, width))
rr, cc = draw.circle(int(height / 2), int(width / 2), int(width / 3))
image[rr, cc] = 100
image = ndi.gaussian_filter(image, 2)
for orientations in range(2, 15):
(hog, hog_img) = feature.hog(image, orientations=orientations,
pixels_per_cell=(8, 8),
cells_per_block=(1, 1), visualise=True,
transform_sqrt=False)
# set to True to enable manual debugging with graphical output,
# must be False for automatic testing
if False:
import matplotlib.pyplot as plt
plt.figure()
plt.subplot(1, 2, 1)
plt.imshow(image)
plt.colorbar()
plt.title('image_float')
plt.subplot(1, 2, 2)
plt.imshow(hog_img)
plt.colorbar()
plt.title('HOG result visualisation, '
'orientations=%d' % (orientations))
plt.show()
# reshape resulting feature vector to matrix with N columns (each
# column corresponds to one direction),
hog_matrix = hog.reshape(-1, orientations)
# compute mean values in the resulting feature vector for each
# direction, these values should be almost equal to the global mean
# value (since the image contains a circle), i.e., all directions have
# same contribution to the result
actual = np.mean(hog_matrix, axis=0)
desired = np.mean(hog_matrix)
assert_almost_equal(actual, desired, decimal=1)
def test_hog_normalise_none_error_raised():
img = np.array([1, 2, 3])
assert_raises(ValueError, feature.hog, img, normalise=True)
if __name__ == '__main__':
np.testing.run_module_suite()
| bsd-3-clause |
iproduct/course-social-robotics | 11-dnn-keras/venv/Lib/site-packages/mpl_toolkits/axes_grid1/axes_grid.py | 1 | 23602 | from numbers import Number
import functools
import numpy as np
import matplotlib as mpl
from matplotlib import cbook
import matplotlib.ticker as ticker
from matplotlib.gridspec import SubplotSpec
from .axes_divider import Size, SubplotDivider, Divider
from .mpl_axes import Axes
def _tick_only(ax, bottom_on, left_on):
bottom_off = not bottom_on
left_off = not left_on
ax.axis["bottom"].toggle(ticklabels=bottom_off, label=bottom_off)
ax.axis["left"].toggle(ticklabels=left_off, label=left_off)
class CbarAxesBase:
def __init__(self, *args, orientation, **kwargs):
self.orientation = orientation
self._default_label_on = True
self._locator = None # deprecated.
super().__init__(*args, **kwargs)
@cbook._rename_parameter("3.2", "locator", "ticks")
def colorbar(self, mappable, *, ticks=None, **kwargs):
if self.orientation in ["top", "bottom"]:
orientation = "horizontal"
else:
orientation = "vertical"
if mpl.rcParams["mpl_toolkits.legacy_colorbar"]:
cbook.warn_deprecated(
"3.2", message="Since %(since)s, mpl_toolkits's own colorbar "
"implementation is deprecated; it will be removed "
"%(removal)s. Set the 'mpl_toolkits.legacy_colorbar' rcParam "
"to False to use Matplotlib's default colorbar implementation "
"and suppress this deprecation warning.")
if ticks is None:
ticks = ticker.MaxNLocator(5) # For backcompat.
from .colorbar import Colorbar
cb = Colorbar(
self, mappable, orientation=orientation, ticks=ticks, **kwargs)
self._cbid = mappable.callbacksSM.connect(
'changed', cb.update_normal)
mappable.colorbar = cb
self._locator = cb.cbar_axis.get_major_locator()
else:
cb = mpl.colorbar.colorbar_factory(
self, mappable, orientation=orientation, ticks=ticks, **kwargs)
self._cbid = mappable.colorbar_cid # deprecated.
self._locator = cb.locator # deprecated.
self._config_axes()
return cb
cbid = cbook._deprecate_privatize_attribute(
"3.3", alternative="mappable.colorbar_cid")
locator = cbook._deprecate_privatize_attribute(
"3.3", alternative=".colorbar().locator")
def _config_axes(self):
"""Make an axes patch and outline."""
ax = self
ax.set_navigate(False)
ax.axis[:].toggle(all=False)
b = self._default_label_on
ax.axis[self.orientation].toggle(all=b)
def toggle_label(self, b):
self._default_label_on = b
axis = self.axis[self.orientation]
axis.toggle(ticklabels=b, label=b)
def cla(self):
super().cla()
self._config_axes()
class CbarAxes(CbarAxesBase, Axes):
pass
class Grid:
"""
A grid of Axes.
In Matplotlib, the axes location (and size) is specified in normalized
figure coordinates. This may not be ideal for images that need to be
displayed with a given aspect ratio; for example, it is difficult to
display multiple images of the same size with some fixed padding between
them. AxesGrid can be used in such a case.
"""
_defaultAxesClass = Axes
@cbook._delete_parameter("3.3", "add_all")
def __init__(self, fig,
rect,
nrows_ncols,
ngrids=None,
direction="row",
axes_pad=0.02,
add_all=True,
share_all=False,
share_x=True,
share_y=True,
label_mode="L",
axes_class=None,
*,
aspect=False,
):
"""
Parameters
----------
fig : `.Figure`
The parent figure.
rect : (float, float, float, float) or int
The axes position, as a ``(left, bottom, width, height)`` tuple or
as a three-digit subplot position code (e.g., "121").
nrows_ncols : (int, int)
Number of rows and columns in the grid.
ngrids : int or None, default: None
If not None, only the first *ngrids* axes in the grid are created.
direction : {"row", "column"}, default: "row"
Whether axes are created in row-major ("row by row") or
column-major order ("column by column").
axes_pad : float or (float, float), default: 0.02
Padding or (horizontal padding, vertical padding) between axes, in
inches.
add_all : bool, default: True
Whether to add the axes to the figure using `.Figure.add_axes`.
This parameter is deprecated.
share_all : bool, default: False
Whether all axes share their x- and y-axis. Overrides *share_x*
and *share_y*.
share_x : bool, default: True
Whether all axes of a column share their x-axis.
share_y : bool, default: True
Whether all axes of a row share their y-axis.
label_mode : {"L", "1", "all"}, default: "L"
Determines which axes will get tick labels:
- "L": All axes on the left column get vertical tick labels;
all axes on the bottom row get horizontal tick labels.
- "1": Only the bottom left axes is labelled.
- "all": all axes are labelled.
axes_class : subclass of `matplotlib.axes.Axes`, default: None
aspect : bool, default: False
Whether the axes aspect ratio follows the aspect ratio of the data
limits.
"""
self._nrows, self._ncols = nrows_ncols
if ngrids is None:
ngrids = self._nrows * self._ncols
else:
if not 0 < ngrids <= self._nrows * self._ncols:
raise Exception("")
self.ngrids = ngrids
self._horiz_pad_size, self._vert_pad_size = map(
Size.Fixed, np.broadcast_to(axes_pad, 2))
cbook._check_in_list(["column", "row"], direction=direction)
self._direction = direction
if axes_class is None:
axes_class = self._defaultAxesClass
elif isinstance(axes_class, (list, tuple)):
cls, kwargs = axes_class
axes_class = functools.partial(cls, **kwargs)
kw = dict(horizontal=[], vertical=[], aspect=aspect)
if isinstance(rect, (str, Number, SubplotSpec)):
self._divider = SubplotDivider(fig, rect, **kw)
elif len(rect) == 3:
self._divider = SubplotDivider(fig, *rect, **kw)
elif len(rect) == 4:
self._divider = Divider(fig, rect, **kw)
else:
raise Exception("")
rect = self._divider.get_position()
axes_array = np.full((self._nrows, self._ncols), None, dtype=object)
for i in range(self.ngrids):
col, row = self._get_col_row(i)
if share_all:
sharex = sharey = axes_array[0, 0]
else:
sharex = axes_array[0, col] if share_x else None
sharey = axes_array[row, 0] if share_y else None
axes_array[row, col] = axes_class(
fig, rect, sharex=sharex, sharey=sharey)
self.axes_all = axes_array.ravel().tolist()
self.axes_column = axes_array.T.tolist()
self.axes_row = axes_array.tolist()
self.axes_llc = self.axes_column[0][-1]
self._init_locators()
if add_all:
for ax in self.axes_all:
fig.add_axes(ax)
self.set_label_mode(label_mode)
def _init_locators(self):
h = []
h_ax_pos = []
for _ in range(self._ncols):
if h:
h.append(self._horiz_pad_size)
h_ax_pos.append(len(h))
sz = Size.Scaled(1)
h.append(sz)
v = []
v_ax_pos = []
for _ in range(self._nrows):
if v:
v.append(self._vert_pad_size)
v_ax_pos.append(len(v))
sz = Size.Scaled(1)
v.append(sz)
for i in range(self.ngrids):
col, row = self._get_col_row(i)
locator = self._divider.new_locator(
nx=h_ax_pos[col], ny=v_ax_pos[self._nrows - 1 - row])
self.axes_all[i].set_axes_locator(locator)
self._divider.set_horizontal(h)
self._divider.set_vertical(v)
def _get_col_row(self, n):
if self._direction == "column":
col, row = divmod(n, self._nrows)
else:
row, col = divmod(n, self._ncols)
return col, row
# Good to propagate __len__ if we have __getitem__
def __len__(self):
return len(self.axes_all)
def __getitem__(self, i):
return self.axes_all[i]
def get_geometry(self):
"""
Return the number of rows and columns of the grid as (nrows, ncols).
"""
return self._nrows, self._ncols
def set_axes_pad(self, axes_pad):
"""
Set the padding between the axes.
Parameters
----------
axes_pad : (float, float)
The padding (horizontal pad, vertical pad) in inches.
"""
self._horiz_pad_size.fixed_size = axes_pad[0]
self._vert_pad_size.fixed_size = axes_pad[1]
def get_axes_pad(self):
"""
Return the axes padding.
Returns
-------
hpad, vpad
Padding (horizontal pad, vertical pad) in inches.
"""
return (self._horiz_pad_size.fixed_size,
self._vert_pad_size.fixed_size)
def set_aspect(self, aspect):
"""Set the aspect of the SubplotDivider."""
self._divider.set_aspect(aspect)
def get_aspect(self):
"""Return the aspect of the SubplotDivider."""
return self._divider.get_aspect()
def set_label_mode(self, mode):
"""
Define which axes have tick labels.
Parameters
----------
mode : {"L", "1", "all"}
The label mode:
- "L": All axes on the left column get vertical tick labels;
all axes on the bottom row get horizontal tick labels.
- "1": Only the bottom left axes is labelled.
- "all": all axes are labelled.
"""
if mode == "all":
for ax in self.axes_all:
_tick_only(ax, False, False)
elif mode == "L":
# left-most axes
for ax in self.axes_column[0][:-1]:
_tick_only(ax, bottom_on=True, left_on=False)
# lower-left axes
ax = self.axes_column[0][-1]
_tick_only(ax, bottom_on=False, left_on=False)
for col in self.axes_column[1:]:
# axes with no labels
for ax in col[:-1]:
_tick_only(ax, bottom_on=True, left_on=True)
# bottom
ax = col[-1]
_tick_only(ax, bottom_on=False, left_on=True)
elif mode == "1":
for ax in self.axes_all:
_tick_only(ax, bottom_on=True, left_on=True)
ax = self.axes_llc
_tick_only(ax, bottom_on=False, left_on=False)
def get_divider(self):
return self._divider
def set_axes_locator(self, locator):
self._divider.set_locator(locator)
def get_axes_locator(self):
return self._divider.get_locator()
def get_vsize_hsize(self):
return self._divider.get_vsize_hsize()
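# Illustrative sketch (not part of the original module): a minimal use of Grid
# to lay out a 2 x 3 block of axes with fixed padding. The helper name and the
# plotted data are made up for demonstration only.
def _grid_usage_demo():
    import matplotlib.pyplot as plt
    fig = plt.figure(figsize=(5.5, 3.5))
    grid = Grid(fig, 111, nrows_ncols=(2, 3), axes_pad=0.25, label_mode="L")
    for i, ax in enumerate(grid):  # Grid supports indexing and iteration
        ax.plot([0, 1], [0, i])
    return fig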
class ImageGrid(Grid):
# docstring inherited
_defaultCbarAxesClass = CbarAxes
@cbook._delete_parameter("3.3", "add_all")
def __init__(self, fig,
rect,
nrows_ncols,
ngrids=None,
direction="row",
axes_pad=0.02,
add_all=True,
share_all=False,
aspect=True,
label_mode="L",
cbar_mode=None,
cbar_location="right",
cbar_pad=None,
cbar_size="5%",
cbar_set_cax=True,
axes_class=None,
):
"""
Parameters
----------
fig : `.Figure`
The parent figure.
rect : (float, float, float, float) or int
The axes position, as a ``(left, bottom, width, height)`` tuple or
as a three-digit subplot position code (e.g., "121").
nrows_ncols : (int, int)
Number of rows and columns in the grid.
ngrids : int or None, default: None
If not None, only the first *ngrids* axes in the grid are created.
direction : {"row", "column"}, default: "row"
Whether axes are created in row-major ("row by row") or
column-major order ("column by column"). This also affects the
order in which axes are accessed using indexing (``grid[index]``).
axes_pad : float or (float, float), default: 0.02in
Padding or (horizontal padding, vertical padding) between axes, in
inches.
add_all : bool, default: True
Whether to add the axes to the figure using `.Figure.add_axes`.
This parameter is deprecated.
share_all : bool, default: False
Whether all axes share their x- and y-axis.
aspect : bool, default: True
Whether the axes aspect ratio follows the aspect ratio of the data
limits.
label_mode : {"L", "1", "all"}, default: "L"
Determines which axes will get tick labels:
- "L": All axes on the left column get vertical tick labels;
all axes on the bottom row get horizontal tick labels.
- "1": Only the bottom left axes is labelled.
- "all": all axes are labelled.
cbar_mode : {"each", "single", "edge", None}, default: None
Whether to create a colorbar for "each" axes, a "single" colorbar
for the entire grid, colorbars only for axes on the "edge"
determined by *cbar_location*, or no colorbars. The colorbars are
stored in the :attr:`cbar_axes` attribute.
cbar_location : {"left", "right", "bottom", "top"}, default: "right"
cbar_pad : float, default: None
Padding between the image axes and the colorbar axes.
cbar_size : size specification (see `.Size.from_any`), default: "5%"
Colorbar size.
cbar_set_cax : bool, default: True
If True, each axes in the grid has a *cax* attribute that is bound
to associated *cbar_axes*.
axes_class : subclass of `matplotlib.axes.Axes`, default: None
"""
self._colorbar_mode = cbar_mode
self._colorbar_location = cbar_location
self._colorbar_pad = cbar_pad
self._colorbar_size = cbar_size
# The colorbar axes are created in _init_locators().
if add_all:
super().__init__(
fig, rect, nrows_ncols, ngrids,
direction=direction, axes_pad=axes_pad,
share_all=share_all, share_x=True, share_y=True, aspect=aspect,
label_mode=label_mode, axes_class=axes_class)
else: # Only show deprecation in that case.
super().__init__(
fig, rect, nrows_ncols, ngrids,
direction=direction, axes_pad=axes_pad, add_all=add_all,
share_all=share_all, share_x=True, share_y=True, aspect=aspect,
label_mode=label_mode, axes_class=axes_class)
if add_all:
for ax in self.cbar_axes:
fig.add_axes(ax)
if cbar_set_cax:
if self._colorbar_mode == "single":
for ax in self.axes_all:
ax.cax = self.cbar_axes[0]
elif self._colorbar_mode == "edge":
for index, ax in enumerate(self.axes_all):
col, row = self._get_col_row(index)
if self._colorbar_location in ("left", "right"):
ax.cax = self.cbar_axes[row]
else:
ax.cax = self.cbar_axes[col]
else:
for ax, cax in zip(self.axes_all, self.cbar_axes):
ax.cax = cax
def _init_locators(self):
# Slightly abusing this method to inject colorbar creation into init.
if self._colorbar_pad is None:
# horizontal or vertical arrangement?
if self._colorbar_location in ("left", "right"):
self._colorbar_pad = self._horiz_pad_size.fixed_size
else:
self._colorbar_pad = self._vert_pad_size.fixed_size
self.cbar_axes = [
self._defaultCbarAxesClass(
self.axes_all[0].figure, self._divider.get_position(),
orientation=self._colorbar_location)
for _ in range(self.ngrids)]
cb_mode = self._colorbar_mode
cb_location = self._colorbar_location
h = []
v = []
h_ax_pos = []
h_cb_pos = []
if cb_mode == "single" and cb_location in ("left", "bottom"):
if cb_location == "left":
sz = self._nrows * Size.AxesX(self.axes_llc)
h.append(Size.from_any(self._colorbar_size, sz))
h.append(Size.from_any(self._colorbar_pad, sz))
locator = self._divider.new_locator(nx=0, ny=0, ny1=-1)
elif cb_location == "bottom":
sz = self._ncols * Size.AxesY(self.axes_llc)
v.append(Size.from_any(self._colorbar_size, sz))
v.append(Size.from_any(self._colorbar_pad, sz))
locator = self._divider.new_locator(nx=0, nx1=-1, ny=0)
for i in range(self.ngrids):
self.cbar_axes[i].set_visible(False)
self.cbar_axes[0].set_axes_locator(locator)
self.cbar_axes[0].set_visible(True)
for col, ax in enumerate(self.axes_row[0]):
if h:
h.append(self._horiz_pad_size)
if ax:
sz = Size.AxesX(ax, aspect="axes", ref_ax=self.axes_all[0])
else:
sz = Size.AxesX(self.axes_all[0],
aspect="axes", ref_ax=self.axes_all[0])
if (cb_location == "left"
and (cb_mode == "each"
or (cb_mode == "edge" and col == 0))):
h_cb_pos.append(len(h))
h.append(Size.from_any(self._colorbar_size, sz))
h.append(Size.from_any(self._colorbar_pad, sz))
h_ax_pos.append(len(h))
h.append(sz)
if (cb_location == "right"
and (cb_mode == "each"
or (cb_mode == "edge" and col == self._ncols - 1))):
h.append(Size.from_any(self._colorbar_pad, sz))
h_cb_pos.append(len(h))
h.append(Size.from_any(self._colorbar_size, sz))
v_ax_pos = []
v_cb_pos = []
for row, ax in enumerate(self.axes_column[0][::-1]):
if v:
v.append(self._vert_pad_size)
if ax:
sz = Size.AxesY(ax, aspect="axes", ref_ax=self.axes_all[0])
else:
sz = Size.AxesY(self.axes_all[0],
aspect="axes", ref_ax=self.axes_all[0])
if (cb_location == "bottom"
and (cb_mode == "each"
or (cb_mode == "edge" and row == 0))):
v_cb_pos.append(len(v))
v.append(Size.from_any(self._colorbar_size, sz))
v.append(Size.from_any(self._colorbar_pad, sz))
v_ax_pos.append(len(v))
v.append(sz)
if (cb_location == "top"
and (cb_mode == "each"
or (cb_mode == "edge" and row == self._nrows - 1))):
v.append(Size.from_any(self._colorbar_pad, sz))
v_cb_pos.append(len(v))
v.append(Size.from_any(self._colorbar_size, sz))
for i in range(self.ngrids):
col, row = self._get_col_row(i)
locator = self._divider.new_locator(nx=h_ax_pos[col],
ny=v_ax_pos[self._nrows-1-row])
self.axes_all[i].set_axes_locator(locator)
if cb_mode == "each":
if cb_location in ("right", "left"):
locator = self._divider.new_locator(
nx=h_cb_pos[col], ny=v_ax_pos[self._nrows - 1 - row])
elif cb_location in ("top", "bottom"):
locator = self._divider.new_locator(
nx=h_ax_pos[col], ny=v_cb_pos[self._nrows - 1 - row])
self.cbar_axes[i].set_axes_locator(locator)
elif cb_mode == "edge":
if (cb_location == "left" and col == 0
or cb_location == "right" and col == self._ncols - 1):
locator = self._divider.new_locator(
nx=h_cb_pos[0], ny=v_ax_pos[self._nrows - 1 - row])
self.cbar_axes[row].set_axes_locator(locator)
elif (cb_location == "bottom" and row == self._nrows - 1
or cb_location == "top" and row == 0):
locator = self._divider.new_locator(nx=h_ax_pos[col],
ny=v_cb_pos[0])
self.cbar_axes[col].set_axes_locator(locator)
if cb_mode == "single":
if cb_location == "right":
sz = self._nrows * Size.AxesX(self.axes_llc)
h.append(Size.from_any(self._colorbar_pad, sz))
h.append(Size.from_any(self._colorbar_size, sz))
locator = self._divider.new_locator(nx=-2, ny=0, ny1=-1)
elif cb_location == "top":
sz = self._ncols * Size.AxesY(self.axes_llc)
v.append(Size.from_any(self._colorbar_pad, sz))
v.append(Size.from_any(self._colorbar_size, sz))
locator = self._divider.new_locator(nx=0, nx1=-1, ny=-2)
if cb_location in ("right", "top"):
for i in range(self.ngrids):
self.cbar_axes[i].set_visible(False)
self.cbar_axes[0].set_axes_locator(locator)
self.cbar_axes[0].set_visible(True)
elif cb_mode == "each":
for i in range(self.ngrids):
self.cbar_axes[i].set_visible(True)
elif cb_mode == "edge":
if cb_location in ("right", "left"):
count = self._nrows
else:
count = self._ncols
for i in range(count):
self.cbar_axes[i].set_visible(True)
for j in range(i + 1, self.ngrids):
self.cbar_axes[j].set_visible(False)
else:
for i in range(self.ngrids):
self.cbar_axes[i].set_visible(False)
self.cbar_axes[i].set_position([1., 1., 0.001, 0.001],
which="active")
self._divider.set_horizontal(h)
self._divider.set_vertical(v)
AxesGrid = ImageGrid
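# Illustrative sketch (not part of the original module): an ImageGrid with a
# single shared colorbar on the right. The helper name and the random images
# are made up; it only shows how cbar_mode and the per-axes ``cax`` attribute
# are meant to be used together.
def _image_grid_usage_demo():
    import matplotlib.pyplot as plt
    fig = plt.figure(figsize=(6, 3))
    grid = ImageGrid(fig, 111, nrows_ncols=(1, 3), axes_pad=0.1,
                     cbar_mode="single", cbar_location="right")
    rng = np.random.RandomState(0)
    for ax in grid:
        im = ax.imshow(rng.rand(8, 8), vmin=0, vmax=1)
    ax.cax.colorbar(im)  # every axes shares the same colorbar axes
    return fig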
| gpl-2.0 |
gengho/Car2know | setup.py | 1 | 1310 | """A setuptools based setup module.
"""
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.MD'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='car2know',
version='1.0.0',
description='analyse the influx and outflux of Car2Go in different blocks in Seattle',
long_description=long_description,
url='https://github.com/gengho/Car2know',
author='Geng Zeng, Yuxuan Cheng, He Zhe, Xiasen Wang',
author_email='[email protected], [email protected],[email protected],[email protected]',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Data analysis and visualization',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
],
keywords='car2go data visualization geoinformatics',
packages=find_packages(exclude=['Car2know', 'docs', 'data']),
install_requires=['shapely','geopandas','pandas','pysal','scipy','numpy','urllib2'],
)
"""
Ref:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
| mit |
aladagemre/windrose-drawer | windrose.py | 1 | 20507 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__version__ = '1.4'
__author__ = 'Lionel Roubeyrie'
__mail__ = '[email protected]'
__license__ = 'CeCILL-B'
import matplotlib
import matplotlib.cm as cm
import numpy as np
from matplotlib.patches import Rectangle, Polygon
from matplotlib.ticker import ScalarFormatter, AutoLocator
from matplotlib.text import Text, FontProperties
from matplotlib.projections.polar import PolarAxes
from numpy.lib.twodim_base import histogram2d
import matplotlib.pyplot as plt
from pylab import poly_between
RESOLUTION = 100
ZBASE = -1000 #The starting zorder for all drawing, negative to have the grid on
class WindroseAxes(PolarAxes):
"""
Create a windrose axes
"""
def __init__(self, *args, **kwargs):
"""
See Axes base class for args and kwargs documentation
"""
#Uncomment to have the possibility to change the resolution directly
#when the instance is created
#self.RESOLUTION = kwargs.pop('resolution', 100)
PolarAxes.__init__(self, *args, **kwargs)
self.set_aspect('equal', adjustable='box', anchor='C')
self.radii_angle = 67.5
self.cla()
def cla(self):
"""
Clear the current axes
"""
PolarAxes.cla(self)
self.theta_angles = np.arange(0, 360, 45)
self.theta_labels = ['E', 'N-E', 'N', 'N-W', 'W', 'S-W', 'S', 'S-E']
self.set_thetagrids(angles=self.theta_angles, labels=self.theta_labels)
self._info = {'dir' : list(),
'bins' : list(),
'table' : list()}
self.patches_list = list()
def _colors(self, cmap, n):
'''
Returns a list of n colors based on the colormap cmap
'''
return [cmap(i) for i in np.linspace(0.0, 1.0, n)]
def set_radii_angle(self, **kwargs):
"""
Set the radii labels angle
"""
null = kwargs.pop('labels', None)
angle = kwargs.pop('angle', None)
if angle is None:
angle = self.radii_angle
self.radii_angle = angle
print self.get_rmax()
radii = np.linspace(0.1, self.get_rmax(), 6)
radii_labels = ["%.1f%%" % r for r in radii]
radii_labels[0] = "" #Removing label 0
# radii_labels = ["" for r in radii]
null = self.set_rgrids(radii=radii, labels=radii_labels,
angle=self.radii_angle, **kwargs)
def _update(self):
self.set_rmax(rmax=np.max(np.sum(self._info['table'], axis=0)))
self.set_radii_angle(angle=self.radii_angle)
def legend(self, loc='lower left', **kwargs):
"""
Sets the legend location and its properties.
The location codes are
'best' : 0,
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
If none of these are suitable, loc can be a 2-tuple giving x,y
in axes coords, ie,
loc = (0, 1) is left top
loc = (0.5, 0.5) is center, center
and so on. The following kwargs are supported:
isaxes=True # whether this is an axes legend
prop = FontProperties(size='smaller') # the font property
pad = 0.2 # the fractional whitespace inside the legend border
shadow # if True, draw a shadow behind legend
labelsep = 0.005 # the vertical space between the legend entries
handlelen = 0.05 # the length of the legend lines
handletextsep = 0.02 # the space between the legend line and legend text
axespad = 0.02 # the border between the axes and legend edge
"""
def get_handles():
handles = list()
for p in self.patches_list:
if isinstance(p, matplotlib.patches.Polygon) or \
isinstance(p, matplotlib.patches.Rectangle):
color = p.get_facecolor()
elif isinstance(p, matplotlib.lines.Line2D):
color = p.get_color()
else:
raise AttributeError("Can't handle patches")
handles.append(Rectangle((0, 0), 0.2, 0.2,
facecolor=color, edgecolor='black'))
return handles
def get_labels():
labels = np.copy(self._info['bins'])
labels = ["[%.1f : %0.1f[" %(labels[i], labels[i+1]) \
for i in range(len(labels)-1)]
return labels
null = kwargs.pop('labels', None)
null = kwargs.pop('handles', None)
handles = get_handles()
labels = get_labels()
self.legend_ = matplotlib.legend.Legend(self, handles, labels,
loc, **kwargs)
return self.legend_
def _init_plot(self, dir, var, **kwargs):
"""
Internal method used by all plotting commands
"""
#self.cla()
null = kwargs.pop('zorder', None)
#Init of the bins array if not set
bins = kwargs.pop('bins', None)
if bins is None:
bins = np.linspace(np.min(var), np.max(var), 6)
if isinstance(bins, int):
bins = np.linspace(np.min(var), np.max(var), bins)
bins = np.asarray(bins)
nbins = len(bins)
#Number of sectors
nsector = kwargs.pop('nsector', None)
if nsector is None:
nsector = 16
#Sets the colors table based on the colormap or the "colors" argument
colors = kwargs.pop('colors', None)
cmap = kwargs.pop('cmap', None)
if colors is not None:
if isinstance(colors, str):
colors = [colors]*nbins
if isinstance(colors, (tuple, list)):
if len(colors) != nbins:
raise ValueError("colors and bins must have same length")
else:
if cmap is None:
cmap = cm.jet
colors = self._colors(cmap, nbins)
#Building the angles list
angles = np.arange(0, -2*np.pi, -2*np.pi/nsector) + np.pi/2
normed = kwargs.pop('normed', False)
blowto = kwargs.pop('blowto', False)
#Set the global information dictionary
self._info['dir'], self._info['bins'], self._info['table'] = histogram(dir, var, bins, nsector, normed, blowto)
return bins, nbins, nsector, colors, angles, kwargs
def contour(self, dir, var, **kwargs):
"""
Plot a windrose in linear mode. For each var bin, a line will be
drawn on the axes, as a segment between each sector (center to center).
Each line can be formatted (color, width, ...) like with the standard
pylab plot command.
Mandatory:
* dir : 1D array - directions the wind blows from, North centred
* var : 1D array - values of the variable to compute. Typically the wind
speeds
Optional:
* nsector: integer - number of sectors used to compute the windrose
table. If not set, nsectors=16, then each sector will be 360/16=22.5°,
and the resulting computed table will be aligned with the cardinal
points.
* bins : 1D array or integer - number of bins, or a sequence of
bins variable. If not set, bins=6, then
bins=linspace(min(var), max(var), 6)
* blowto : bool. If True, the windrose will be pi rotated,
to show where the wind blows to (useful for a pollutant rose).
* colors : string or tuple - one string color ('k' or 'black'), in this
case all bins will be plotted in this color; a tuple of matplotlib
color args (string, float, rgb, etc), different levels will be plotted
in different colors in the order specified.
* cmap : a cm Colormap instance from matplotlib.cm.
- if cmap == None and colors == None, a default Colormap is used.
others kwargs : see help(pylab.plot)
"""
bins, nbins, nsector, colors, angles, kwargs = self._init_plot(dir, var,
**kwargs)
#closing lines
angles = np.hstack((angles, angles[-1]-2*np.pi/nsector))
vals = np.hstack((self._info['table'],
np.reshape(self._info['table'][:,0],
(self._info['table'].shape[0], 1))))
offset = 0
for i in range(nbins):
val = vals[i,:] + offset
offset += vals[i, :]
zorder = ZBASE + nbins - i
patch = self.plot(angles, val, color=colors[i], zorder=zorder,
**kwargs)
self.patches_list.extend(patch)
self._update()
def contourf(self, dir, var, **kwargs):
"""
Plot a windrose in filled mode. For each var bin, a line will be
drawn on the axes, as a segment between each sector (center to center).
Each line can be formatted (color, width, ...) like with the standard
pylab plot command.
Mandatory:
* dir : 1D array - directions the wind blows from, North centred
* var : 1D array - values of the variable to compute. Typically the wind
speeds
Optional:
* nsector: integer - number of sectors used to compute the windrose
table. If not set, nsectors=16, then each sector will be 360/16=22.5°,
and the resulting computed table will be aligned with the cardinal
points.
* bins : 1D array or integer - number of bins, or a sequence of
bins variable. If not set, bins=6, then
bins=linspace(min(var), max(var), 6)
* blowto : bool. If True, the windrose will be pi rotated,
to show where the wind blows to (useful for a pollutant rose).
* colors : string or tuple - one string color ('k' or 'black'), in this
case all bins will be plotted in this color; a tuple of matplotlib
color args (string, float, rgb, etc), different levels will be plotted
in different colors in the order specified.
* cmap : a cm Colormap instance from matplotlib.cm.
- if cmap == None and colors == None, a default Colormap is used.
others kwargs : see help(pylab.plot)
"""
bins, nbins, nsector, colors, angles, kwargs = self._init_plot(dir, var,
**kwargs)
null = kwargs.pop('facecolor', None)
null = kwargs.pop('edgecolor', None)
#closing lines
angles = np.hstack((angles, angles[-1]-2*np.pi/nsector))
vals = np.hstack((self._info['table'],
np.reshape(self._info['table'][:,0],
(self._info['table'].shape[0], 1))))
offset = 0
for i in range(nbins):
val = vals[i,:] + offset
offset += vals[i, :]
zorder = ZBASE + nbins - i
xs, ys = poly_between(angles, 0, val)
patch = self.fill(xs, ys, facecolor=colors[i],
edgecolor=colors[i], zorder=zorder, **kwargs)
self.patches_list.extend(patch)
def bar(self, dir, var, **kwargs):
"""
Plot a windrose in bar mode. For each var bin and for each sector,
a colored bar will be drawn on the axes.
Mandatory:
* dir : 1D array - directions the wind blows from, North centred
* var : 1D array - values of the variable to compute. Typically the wind
speeds
Optional:
* nsector: integer - number of sectors used to compute the windrose
table. If not set, nsectors=16, then each sector will be 360/16=22.5°,
and the resulting computed table will be aligned with the cardinal
points.
* bins : 1D array or integer - number of bins, or a sequence of
bins variable. If not set, bins=6 between min(var) and max(var).
* blowto : bool. If True, the windrose will be pi rotated,
to show where the wind blows to (useful for a pollutant rose).
* colors : string or tuple - one string color ('k' or 'black'), in this
case all bins will be plotted in this color; a tuple of matplotlib
color args (string, float, rgb, etc), different levels will be plotted
in different colors in the order specified.
* cmap : a cm Colormap instance from matplotlib.cm.
- if cmap == None and colors == None, a default Colormap is used.
edgecolor : string - The string color each edge bar will be plotted.
Default : no edgecolor
* opening : float - between 0.0 and 1.0, to control the space between
each sector (1.0 for no space)
"""
bins, nbins, nsector, colors, angles, kwargs = self._init_plot(dir, var,
**kwargs)
null = kwargs.pop('facecolor', None)
edgecolor = kwargs.pop('edgecolor', None)
if edgecolor is not None:
if not isinstance(edgecolor, str):
raise ValueError('edgecolor must be a string color')
opening = kwargs.pop('opening', None)
if opening is None:
opening = 0.8
dtheta = 2*np.pi/nsector
opening = dtheta*opening
for j in range(nsector):
offset = 0
for i in range(nbins):
if i > 0:
offset += self._info['table'][i-1, j]
val = self._info['table'][i, j]
zorder = ZBASE + nbins - i
patch = Rectangle((angles[j]-opening/2, offset), opening, val,
facecolor=colors[i], edgecolor=edgecolor, zorder=zorder,
**kwargs)
self.add_patch(patch)
if j == 0:
self.patches_list.append(patch)
self._update()
def box(self, dir, var, **kwargs):
"""
Plot a windrose in proportional bar mode. For each var bin and for each
sector, a colored bar will be drawn on the axes.
Mandatory:
* dir : 1D array - directions the wind blows from, North centred
* var : 1D array - values of the variable to compute. Typically the wind
speeds
Optional:
* nsector: integer - number of sectors used to compute the windrose
table. If not set, nsectors=16, then each sector will be 360/16=22.5°,
and the resulting computed table will be aligned with the cardinal
points.
* bins : 1D array or integer - number of bins, or a sequence of
bins variable. If not set, bins=6 between min(var) and max(var).
* blowto : bool. If True, the windrose will be pi rotated,
to show where the wind blows to (useful for a pollutant rose).
* colors : string or tuple - one string color ('k' or 'black'), in this
case all bins will be plotted in this color; a tuple of matplotlib
color args (string, float, rgb, etc), different levels will be plotted
in different colors in the order specified.
* cmap : a cm Colormap instance from matplotlib.cm.
- if cmap == None and colors == None, a default Colormap is used.
edgecolor : string - The string color each edge bar will be plotted.
Default : no edgecolor
"""
bins, nbins, nsector, colors, angles, kwargs = self._init_plot(dir, var,
**kwargs)
null = kwargs.pop('facecolor', None)
edgecolor = kwargs.pop('edgecolor', None)
if edgecolor is not None:
if not isinstance(edgecolor, str):
raise ValueError('edgecolor must be a string color')
opening = np.linspace(0.0, np.pi/16, nbins)
for j in range(nsector):
offset = 0
for i in range(nbins):
if i > 0:
offset += self._info['table'][i-1, j]
val = self._info['table'][i, j]
zorder = ZBASE + nbins - i
patch = Rectangle((angles[j]-opening[i]/2, offset), opening[i],
val, facecolor=colors[i], edgecolor=edgecolor,
zorder=zorder, **kwargs)
self.add_patch(patch)
if j == 0:
self.patches_list.append(patch)
self._update()
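# Illustrative sketch (not part of the original module): building a stacked-bar
# windrose directly on a WindroseAxes instance, mirroring the __main__ example
# at the end of this file. The helper name and the random data are made up.
def _windrose_axes_demo():
    fig = plt.figure(figsize=(6, 6), dpi=80, facecolor='w', edgecolor='w')
    ax = WindroseAxes(fig, [0.1, 0.1, 0.8, 0.8])
    fig.add_axes(ax)
    wd = np.random.random(500) * 360 # directions the wind blows from (degrees)
    ws = np.random.random(500) * 6 # wind speeds
    ax.bar(wd, ws, normed=True, opening=0.8, edgecolor='white')
    ax.legend()
    return ax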
def histogram(dir, var, bins, nsector, normed=False, blowto=False):
"""
Returns an array where, for each sector of wind
(centred on the north), we have the number of times the wind comes with a
particular var (speed, pollutant concentration, ...).
* dir : 1D array - directions the wind blows from, North centred
* var : 1D array - values of the variable to compute. Typically the wind
speeds
* bins : list - list of var categories against which the table is computed
* nsector : integer - number of sectors
* normed : boolean - If True, the resulting table is normed in percent.
* blowto : boolean - Normally a windrose is computed with directions
the wind blows from. If True, the table will be reversed (useful for
a pollutant rose)
"""
if len(var) != len(dir):
raise ValueError, "var and dir must have same length"
angle = 360./nsector
dir_bins = np.arange(-angle/2, 360. + angle, angle, dtype=np.float)
dir_edges = dir_bins.tolist()
dir_edges.pop(-1)
dir_edges[0] = dir_edges.pop(-1)
dir_bins[0] = 0.
var_bins = bins.tolist()
var_bins.append(np.inf)
if blowto:
dir = dir + 180.
dir[dir>=360.] = dir[dir>=360.] - 360
table = histogram2d(x=var, y=dir, bins=[var_bins, dir_bins],
normed=False)[0]
# add the last value to the first to have the table of North winds
table[:,0] = table[:,0] + table[:,-1]
# and remove the last col
table = table[:, :-1]
if normed:
table = table*100/table.sum()
return dir_edges, var_bins, table
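# Illustrative sketch (not part of the original module): calling histogram()
# directly to get the (speed bin x direction sector) frequency table that the
# plotting methods use internally. The variable names are made up.
def _histogram_demo():
    wd = np.random.random(100) * 360 # directions the wind blows from
    ws = np.random.random(100) * 10 # wind speeds
    dir_edges, var_bins, table = histogram(wd, ws, bins=np.arange(0, 10, 2),
                                           nsector=16, normed=True)
    # table has shape (len(var_bins) - 1, nsector) and sums to 100 (percent)
    return dir_edges, var_bins, table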
def wrcontour(dir, var, **kwargs):
fig = plt.figure()
rect = [0.1, 0.1, 0.8, 0.8]
ax = WindroseAxes(fig, rect)
fig.add_axes(ax)
ax.contour(dir, var, **kwargs)
l = ax.legend(axespad=-0.10)
plt.setp(l.get_texts(), fontsize=8)
plt.draw()
plt.show()
return ax
def wrcontourf(dir, var, **kwargs):
fig = plt.figure()
rect = [0.1, 0.1, 0.8, 0.8]
ax = WindroseAxes(fig, rect)
fig.add_axes(ax)
ax.contourf(dir, var, **kwargs)
l = ax.legend(axespad=-0.10)
plt.setp(l.get_texts(), fontsize=8)
plt.draw()
plt.show()
return ax
def wrbox(dir, var, **kwargs):
fig = plt.figure()
rect = [0.1, 0.1, 0.8, 0.8]
ax = WindroseAxes(fig, rect)
fig.add_axes(ax)
ax.box(dir, var, **kwargs)
l = ax.legend(axespad=-0.10)
plt.setp(l.get_texts(), fontsize=8)
plt.draw()
plt.show()
return ax
def wrbar(dir, var, **kwargs):
fig = plt.figure()
rect = [0.1, 0.1, 0.8, 0.8]
ax = WindroseAxes(fig, rect)
fig.add_axes(ax)
ax.bar(dir, var, **kwargs)
l = ax.legend(axespad=-0.10)
plt.setp(l.get_texts(), fontsize=8)
plt.draw()
plt.show()
return ax
def clean(dir, var):
'''
Remove masked values from the two arrays: if a direction value is masked,
the corresponding var value is also removed in the cleaning process (and vice-versa)
'''
dirmask = dir.mask==False
varmask = var.mask==False
ind = dirmask*varmask
return dir[ind], var[ind]
if __name__=='__main__':
from pylab import figure, show, setp, random, grid, draw
vv=random(500)*6
dv=random(500)*360
fig = figure(figsize=(8, 8), dpi=80, facecolor='w', edgecolor='w')
rect = [0.1, 0.1, 0.8, 0.8]
ax = WindroseAxes(fig, rect, axisbg='w')
fig.add_axes(ax)
# ax.contourf(dv, vv, bins=np.arange(0,8,1), cmap=cm.hot)
# ax.contour(dv, vv, bins=np.arange(0,8,1), colors='k')
# ax.bar(dv, vv, normed=True, opening=0.8, edgecolor='white')
ax.box(dv, vv, normed=True)
l = ax.legend(axespad=-0.10)
setp(l.get_texts(), fontsize=8)
draw()
#print ax._info
show()
| gpl-3.0 |
ooovector/qtlab_replacement | resonator_quality_factor_fit.py | 1 | 2043 | import pandas as pd
import numpy as np
from resonator_tools import circuit
#resonator_tools = imp.load_source('circuit', 'C:/python27/lib/site-packages/resonator_tools/circuit.py').load_module()
def resonator_quality_factor_fit(measurement, sweep_parameter_values, sweep_parameter_name='power', resonator_type='notch_port', delay=None, use_calibrate=False):
fitresults = []
sweep_parameter = measurement.datasets['S-parameter'].parameters[0].values
f_data = measurement.datasets['S-parameter'].parameters[1].values
z_data = measurement.datasets['S-parameter'].data
if use_calibrate:
max_power_id = np.argmax(sweep_parameter)
if resonator_type == 'notch_port':
fitter = circuit.notch_port(f_data = f_data, z_data_raw=z_data[max_power_id,:])
else:
fitter = circuit.reflection_port(f_data = f_data, z_data_raw=z_data[max_power_id,:])
delay, amp_norm, alpha, fr, Ql, A2, frcal = \
fitter.do_calibration(f_data, z_data[max_power_id,:],ignoreslope=True,guessdelay=False)
for power_id, power in enumerate(sweep_parameter_values):
try:
if use_calibrate:
fitter.z_data = fitter.do_normalization(fitter.f_data,z_data[power_id,:],delay,amp_norm,alpha,A2,frcal)
fitter.fitresults = fitter.circlefit(fitter.f_data,fitter.z_data,fr,Ql,refine_results=True,calc_errors=True)
else:
if resonator_type == 'notch_port':
#print ('notch_port')
fitter = circuit.notch_port(f_data = f_data, z_data_raw=z_data[power_id,:])
elif resonator_type == 'reflection_port':
#print ('reflection_port')
fitter = circuit.reflection_port(f_data = f_data, z_data_raw=z_data[power_id,:])
#print (power_id)
fitter.autofit(electric_delay=delay)
#print (fitter.fitresults)
fitter.fitresults[sweep_parameter_name] = power
fitter.fitresults['single_photon_limit'] = fitter.get_single_photon_limit()
fitresults.append(fitter.fitresults.copy())
#fitter.plotall()
#break
except:
pass
#plt.figure(power_id)
#fitter.plotall()
#print(fitter.fitresults)
return pd.DataFrame(fitresults) | gpl-3.0 |
wmvanvliet/mne-python | mne/viz/tests/test_3d.py | 1 | 35352 | # Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# Mainak Jas <[email protected]>
# Mark Wronkiewicz <[email protected]>
#
# License: Simplified BSD
import os.path as op
from pathlib import Path
import sys
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose
import pytest
import matplotlib.pyplot as plt
from matplotlib.colors import Colormap
from mne import (make_field_map, pick_channels_evoked, read_evokeds,
read_trans, read_dipole, SourceEstimate,
make_sphere_model, use_coil_def,
setup_volume_source_space, read_forward_solution,
convert_forward_solution, MixedSourceEstimate)
from mne.source_estimate import _BaseVolSourceEstimate
from mne.io import (read_raw_ctf, read_raw_bti, read_raw_kit, read_info,
read_raw_nirx)
from mne.io._digitization import write_dig
from mne.io.pick import pick_info
from mne.io.constants import FIFF
from mne.minimum_norm import apply_inverse
from mne.viz import (plot_sparse_source_estimates, plot_source_estimates,
snapshot_brain_montage, plot_head_positions,
plot_alignment, plot_sensors_connectivity,
plot_brain_colorbar, link_brains, mne_analyze_colormap)
from mne.viz._3d import _process_clim, _linearize_map, _get_map_ticks
from mne.viz.utils import _fake_click
from mne.utils import (requires_nibabel, traits_test,
catch_logging, run_subprocess, modified_env)
from mne.datasets import testing
from mne.source_space import read_source_spaces
from mne.bem import read_bem_solution, read_bem_surfaces
data_dir = testing.data_path(download=False)
subjects_dir = op.join(data_dir, 'subjects')
trans_fname = op.join(data_dir, 'MEG', 'sample',
'sample_audvis_trunc-trans.fif')
src_fname = op.join(data_dir, 'subjects', 'sample', 'bem',
'sample-oct-6-src.fif')
dip_fname = op.join(data_dir, 'MEG', 'sample', 'sample_audvis_trunc_set1.dip')
ctf_fname = op.join(data_dir, 'CTF', 'testdata_ctf.ds')
nirx_fname = op.join(data_dir, 'NIRx', 'nirscout',
'nirx_15_2_recording_w_short')
io_dir = op.join(op.abspath(op.dirname(__file__)), '..', '..', 'io')
base_dir = op.join(io_dir, 'tests', 'data')
evoked_fname = op.join(base_dir, 'test-ave.fif')
fwd_fname = op.join(data_dir, 'MEG', 'sample',
'sample_audvis_trunc-meg-vol-7-fwd.fif')
fwd_fname2 = op.join(data_dir, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
base_dir = op.join(io_dir, 'bti', 'tests', 'data')
pdf_fname = op.join(base_dir, 'test_pdf_linux')
config_fname = op.join(base_dir, 'test_config_linux')
hs_fname = op.join(base_dir, 'test_hs_linux')
sqd_fname = op.join(io_dir, 'kit', 'tests', 'data', 'test.sqd')
coil_3d = """# custom cube coil def
1 9999 1 8 3e-03 0.000e+00 "QuSpin ZFOPM 3mm cube"
0.1250 -0.750e-03 -0.750e-03 -0.750e-03 0.000 0.000 1.000
0.1250 -0.750e-03 0.750e-03 -0.750e-03 0.000 0.000 1.000
0.1250 0.750e-03 -0.750e-03 -0.750e-03 0.000 0.000 1.000
0.1250 0.750e-03 0.750e-03 -0.750e-03 0.000 0.000 1.000
0.1250 -0.750e-03 -0.750e-03 0.750e-03 0.000 0.000 1.000
0.1250 -0.750e-03 0.750e-03 0.750e-03 0.000 0.000 1.000
0.1250 0.750e-03 -0.750e-03 0.750e-03 0.000 0.000 1.000
0.1250 0.750e-03 0.750e-03 0.750e-03 0.000 0.000 1.000
"""
def test_plot_head_positions():
"""Test plotting of head positions."""
info = read_info(evoked_fname)
pos = np.random.RandomState(0).randn(4, 10)
pos[:, 0] = np.arange(len(pos))
destination = (0., 0., 0.04)
with pytest.warns(None): # old MPL will cause a warning
plot_head_positions(pos)
plot_head_positions(pos, mode='field', info=info,
destination=destination)
plot_head_positions([pos, pos]) # list support
pytest.raises(ValueError, plot_head_positions, ['pos'])
pytest.raises(ValueError, plot_head_positions, pos[:, :9])
pytest.raises(ValueError, plot_head_positions, pos, 'foo')
with pytest.raises(ValueError, match='shape'):
plot_head_positions(pos, axes=1.)
@testing.requires_testing_data
@traits_test
@pytest.mark.slowtest
def test_plot_sparse_source_estimates(renderer_interactive, brain_gc):
"""Test plotting of (sparse) source estimates."""
sample_src = read_source_spaces(src_fname)
# dense version
vertices = [s['vertno'] for s in sample_src]
n_time = 5
n_verts = sum(len(v) for v in vertices)
stc_data = np.zeros((n_verts * n_time))
stc_size = stc_data.size
stc_data[(np.random.rand(stc_size // 20) * stc_size).astype(int)] = \
np.random.RandomState(0).rand(stc_data.size // 20)
stc_data.shape = (n_verts, n_time)
stc = SourceEstimate(stc_data, vertices, 1, 1)
colormap = 'mne_analyze'
brain = plot_source_estimates(
stc, 'sample', colormap=colormap, background=(1, 1, 0),
subjects_dir=subjects_dir, colorbar=True, clim='auto')
brain.close()
del brain
with pytest.raises(TypeError, match='figure must be'):
plot_source_estimates(
stc, 'sample', figure='foo', hemi='both', clim='auto',
subjects_dir=subjects_dir)
# now do sparse version
vertices = sample_src[0]['vertno']
inds = [111, 333]
stc_data = np.zeros((len(inds), n_time))
stc_data[0, 1] = 1.
stc_data[1, 4] = 2.
vertices = [vertices[inds], np.empty(0, dtype=np.int64)]
stc = SourceEstimate(stc_data, vertices, 1, 1)
surf = plot_sparse_source_estimates(sample_src, stc, bgcolor=(1, 1, 1),
opacity=0.5, high_resolution=False)
if renderer_interactive._get_3d_backend() == 'mayavi':
import mayavi # noqa: F401 analysis:ignore
assert isinstance(surf, mayavi.modules.surface.Surface)
@testing.requires_testing_data
@traits_test
@pytest.mark.slowtest
def test_plot_evoked_field(renderer):
"""Test plotting evoked field."""
evoked = read_evokeds(evoked_fname, condition='Left Auditory',
baseline=(-0.2, 0.0))
evoked = pick_channels_evoked(evoked, evoked.ch_names[::10]) # speed
for t in ['meg', None]:
with pytest.warns(RuntimeWarning, match='projection'):
maps = make_field_map(evoked, trans_fname, subject='sample',
subjects_dir=subjects_dir, n_jobs=1,
ch_type=t)
fig = evoked.plot_field(maps, time=0.1)
if renderer._get_3d_backend() == 'mayavi':
import mayavi # noqa: F401 analysis:ignore
assert isinstance(fig, mayavi.core.scene.Scene)
@pytest.mark.slowtest # can be slow on OSX
@testing.requires_testing_data
@traits_test
def test_plot_alignment(tmpdir, renderer):
"""Test plotting of -trans.fif files and MEG sensor layouts."""
# generate fiducials file for testing
tempdir = str(tmpdir)
fiducials_path = op.join(tempdir, 'fiducials.fif')
fid = [{'coord_frame': 5, 'ident': 1, 'kind': 1,
'r': [-0.08061612, -0.02908875, -0.04131077]},
{'coord_frame': 5, 'ident': 2, 'kind': 1,
'r': [0.00146763, 0.08506715, -0.03483611]},
{'coord_frame': 5, 'ident': 3, 'kind': 1,
'r': [0.08436285, -0.02850276, -0.04127743]}]
write_dig(fiducials_path, fid, 5)
renderer.backend._close_all()
evoked = read_evokeds(evoked_fname)[0]
sample_src = read_source_spaces(src_fname)
bti = read_raw_bti(pdf_fname, config_fname, hs_fname, convert=True,
preload=False).info
infos = dict(
Neuromag=evoked.info,
CTF=read_raw_ctf(ctf_fname).info,
BTi=bti,
KIT=read_raw_kit(sqd_fname).info,
)
for system, info in infos.items():
meg = ['helmet', 'sensors']
if system == 'KIT':
meg.append('ref')
fig = plot_alignment(info, read_trans(trans_fname), subject='sample',
subjects_dir=subjects_dir, meg=meg)
rend = renderer.backend._Renderer(fig=fig)
rend.close()
# KIT ref sensor coil def is defined
renderer.backend._close_all()
info = infos['Neuromag']
pytest.raises(TypeError, plot_alignment, 'foo', trans_fname,
subject='sample', subjects_dir=subjects_dir)
pytest.raises(OSError, plot_alignment, info, trans_fname,
subject='sample', subjects_dir=subjects_dir, src='foo')
pytest.raises(ValueError, plot_alignment, info, trans_fname,
subject='fsaverage', subjects_dir=subjects_dir,
src=sample_src)
sample_src.plot(subjects_dir=subjects_dir, head=True, skull=True,
brain='white')
renderer.backend._close_all()
# no-head version
renderer.backend._close_all()
# all coord frames
plot_alignment(info) # works: surfaces='auto' default
for coord_frame in ('meg', 'head', 'mri'):
fig = plot_alignment(info, meg=['helmet', 'sensors'], dig=True,
coord_frame=coord_frame, trans=Path(trans_fname),
subject='sample', mri_fiducials=fiducials_path,
subjects_dir=subjects_dir, src=src_fname)
renderer.backend._close_all()
# EEG only with strange options
evoked_eeg_ecog_seeg = evoked.copy().pick_types(meg=False, eeg=True)
evoked_eeg_ecog_seeg.info['projs'] = [] # "remove" avg proj
evoked_eeg_ecog_seeg.set_channel_types({'EEG 001': 'ecog',
'EEG 002': 'seeg'})
with pytest.warns(RuntimeWarning, match='Cannot plot MEG'):
with catch_logging() as log:
plot_alignment(evoked_eeg_ecog_seeg.info, subject='sample',
trans=trans_fname, subjects_dir=subjects_dir,
surfaces=['white', 'outer_skin', 'outer_skull'],
meg=['helmet', 'sensors'],
eeg=['original', 'projected'], ecog=True, seeg=True,
verbose=True)
log = log.getvalue()
assert '1 ECoG location' in log
assert '1 sEEG location' in log
renderer.backend._close_all()
sphere = make_sphere_model(info=evoked.info, r0='auto', head_radius='auto')
bem_sol = read_bem_solution(op.join(subjects_dir, 'sample', 'bem',
'sample-1280-1280-1280-bem-sol.fif'))
bem_surfs = read_bem_surfaces(op.join(subjects_dir, 'sample', 'bem',
'sample-1280-1280-1280-bem.fif'))
sample_src[0]['coord_frame'] = 4 # hack for coverage
plot_alignment(info, subject='sample', eeg='projected',
meg='helmet', bem=sphere, dig=True,
surfaces=['brain', 'inner_skull', 'outer_skull',
'outer_skin'])
plot_alignment(info, trans_fname, subject='sample', meg='helmet',
subjects_dir=subjects_dir, eeg='projected', bem=sphere,
surfaces=['head', 'brain'], src=sample_src)
assert all(surf['coord_frame'] == FIFF.FIFFV_COORD_MRI
for surf in bem_sol['surfs'])
plot_alignment(info, trans_fname, subject='sample', meg=[],
subjects_dir=subjects_dir, bem=bem_sol, eeg=True,
surfaces=['head', 'inflated', 'outer_skull', 'inner_skull'])
assert all(surf['coord_frame'] == FIFF.FIFFV_COORD_MRI
for surf in bem_sol['surfs'])
plot_alignment(info, trans_fname, subject='sample',
meg=True, subjects_dir=subjects_dir,
surfaces=['head', 'inner_skull'], bem=bem_surfs)
# single-layer BEM can still plot head surface
assert bem_surfs[-1]['id'] == FIFF.FIFFV_BEM_SURF_ID_BRAIN
bem_sol_homog = read_bem_solution(op.join(subjects_dir, 'sample', 'bem',
'sample-1280-bem-sol.fif'))
for use_bem in (bem_surfs[-1:], bem_sol_homog):
with catch_logging() as log:
plot_alignment(info, trans_fname, subject='sample',
meg=True, subjects_dir=subjects_dir,
surfaces=['head', 'inner_skull'], bem=use_bem,
verbose=True)
log = log.getvalue()
assert 'not find the surface for head in the provided BEM model' in log
# sphere model
sphere = make_sphere_model('auto', 'auto', evoked.info)
src = setup_volume_source_space(sphere=sphere)
plot_alignment(info, eeg='projected', meg='helmet', bem=sphere,
src=src, dig=True, surfaces=['brain', 'inner_skull',
'outer_skull', 'outer_skin'])
sphere = make_sphere_model('auto', None, evoked.info) # one layer
# if you ask for a brain surface with a 1-layer sphere model it's an error
with pytest.raises(RuntimeError, match='Sphere model does not have'):
fig = plot_alignment(subject='sample', subjects_dir=subjects_dir,
surfaces=['brain'], bem=sphere)
# but you can ask for a specific brain surface, and
# no info is permitted
fig = plot_alignment(trans=trans_fname, subject='sample', meg=False,
coord_frame='mri', subjects_dir=subjects_dir,
surfaces=['white'], bem=sphere, show_axes=True)
renderer.backend._close_all()
if renderer._get_3d_backend() == 'mayavi':
import mayavi # noqa: F401 analysis:ignore
assert isinstance(fig, mayavi.core.scene.Scene)
# 3D coil with no defined draw (ConvexHull)
info_cube = pick_info(info, [0])
info['dig'] = None
info_cube['chs'][0]['coil_type'] = 9999
with pytest.raises(RuntimeError, match='coil definition not found'):
plot_alignment(info_cube, meg='sensors', surfaces=())
coil_def_fname = op.join(tempdir, 'temp')
with open(coil_def_fname, 'w') as fid:
fid.write(coil_3d)
with use_coil_def(coil_def_fname):
plot_alignment(info_cube, meg='sensors', surfaces=(), dig=True)
# one layer bem with skull surfaces:
with pytest.raises(RuntimeError, match='Sphere model does not.*boundary'):
plot_alignment(info=info, trans=trans_fname,
subject='sample', subjects_dir=subjects_dir,
surfaces=['brain', 'head', 'inner_skull'], bem=sphere)
# wrong eeg value:
with pytest.raises(ValueError, match='Invalid value for the .eeg'):
plot_alignment(info=info, trans=trans_fname,
subject='sample', subjects_dir=subjects_dir, eeg='foo')
# wrong meg value:
with pytest.raises(ValueError, match='Invalid value for the .meg'):
plot_alignment(info=info, trans=trans_fname,
subject='sample', subjects_dir=subjects_dir, meg='bar')
# multiple brain surfaces:
with pytest.raises(ValueError, match='Only one brain surface can be plot'):
plot_alignment(info=info, trans=trans_fname,
subject='sample', subjects_dir=subjects_dir,
surfaces=['white', 'pial'])
with pytest.raises(TypeError, match='surfaces.*must be'):
plot_alignment(info=info, trans=trans_fname,
subject='sample', subjects_dir=subjects_dir,
surfaces=[1])
with pytest.raises(ValueError, match='Unknown surface type'):
plot_alignment(info=info, trans=trans_fname,
subject='sample', subjects_dir=subjects_dir,
surfaces=['foo'])
with pytest.raises(TypeError, match="must be an instance of "):
plot_alignment(info=info, trans=trans_fname,
subject='sample', subjects_dir=subjects_dir,
surfaces=dict(brain='super clear'))
with pytest.raises(ValueError, match="must be between 0 and 1"):
plot_alignment(info=info, trans=trans_fname,
subject='sample', subjects_dir=subjects_dir,
surfaces=dict(brain=42))
fwd_fname = op.join(data_dir, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
fwd = read_forward_solution(fwd_fname)
plot_alignment(subject='sample', subjects_dir=subjects_dir,
trans=trans_fname, fwd=fwd,
surfaces='white', coord_frame='head')
fwd = convert_forward_solution(fwd, force_fixed=True)
plot_alignment(subject='sample', subjects_dir=subjects_dir,
trans=trans_fname, fwd=fwd,
surfaces='white', coord_frame='head')
# surfaces as dict
plot_alignment(subject='sample', coord_frame='head',
subjects_dir=subjects_dir,
surfaces={'white': 0.4, 'outer_skull': 0.6, 'head': None})
# fNIRS (default is pairs)
info = read_raw_nirx(nirx_fname).info
with catch_logging() as log:
plot_alignment(info, subject='fsaverage', surfaces=(), verbose=True)
log = log.getvalue()
assert '26 fNIRS pairs' in log
assert '26 fNIRS locations' not in log
assert '26 fNIRS sources' not in log
assert '26 fNIRS detectors' not in log
with catch_logging() as log:
plot_alignment(info, subject='fsaverage', surfaces=(), verbose=True,
fnirs=['channels', 'sources', 'detectors'])
log = log.getvalue()
assert '26 fNIRS pairs' not in log
assert '26 fNIRS locations' in log
assert '26 fNIRS sources' in log
assert '26 fNIRS detectors' in log
renderer.backend._close_all()
@pytest.mark.slowtest # can be slow on OSX
@testing.requires_testing_data
@traits_test
def test_process_clim_plot(renderer_interactive, brain_gc):
"""Test functionality for determining control points with stc.plot."""
sample_src = read_source_spaces(src_fname)
kwargs = dict(subjects_dir=subjects_dir, smoothing_steps=1,
time_viewer=False, show_traces=False)
vertices = [s['vertno'] for s in sample_src]
n_time = 5
n_verts = sum(len(v) for v in vertices)
stc_data = np.random.RandomState(0).rand((n_verts * n_time))
stc_data.shape = (n_verts, n_time)
stc = SourceEstimate(stc_data, vertices, 1, 1, 'sample')
# Test for simple use cases
brain = stc.plot(**kwargs)
assert brain.data['center'] is None
brain.close()
brain = stc.plot(clim=dict(pos_lims=(10, 50, 90)), **kwargs)
assert brain.data['center'] == 0.
brain.close()
brain = stc.plot(colormap='hot', clim='auto', **kwargs)
brain.close()
brain = stc.plot(colormap='mne', clim='auto', **kwargs)
brain.close()
brain = stc.plot(clim=dict(kind='value', lims=(10, 50, 90)), figure=99,
**kwargs)
brain.close()
with pytest.raises(TypeError, match='must be a'):
stc.plot(clim='auto', figure=[0], **kwargs)
# Test for correct clim values
with pytest.raises(ValueError, match='monotonically'):
stc.plot(clim=dict(kind='value', pos_lims=[0, 1, 0]), **kwargs)
with pytest.raises(ValueError, match=r'.*must be \(3,\)'):
stc.plot(colormap='mne', clim=dict(pos_lims=(5, 10, 15, 20)), **kwargs)
with pytest.raises(ValueError, match="'value', 'values', and 'percent'"):
stc.plot(clim=dict(pos_lims=(5, 10, 15), kind='foo'), **kwargs)
with pytest.raises(ValueError, match='must be "auto" or dict'):
stc.plot(colormap='mne', clim='foo', **kwargs)
with pytest.raises(TypeError, match='must be an instance of'):
plot_source_estimates('foo', clim='auto', **kwargs)
with pytest.raises(ValueError, match='hemi'):
stc.plot(hemi='foo', clim='auto', **kwargs)
with pytest.raises(ValueError, match='Exactly one'):
stc.plot(clim=dict(lims=[0, 1, 2], pos_lims=[0, 1, 2], kind='value'),
**kwargs)
# Test handling of degenerate data: thresholded maps
stc._data.fill(0.)
with pytest.warns(RuntimeWarning, match='All data were zero'):
brain = plot_source_estimates(stc, **kwargs)
brain.close()
def _assert_mapdata_equal(a, b):
__tracebackhide__ = True
assert set(a.keys()) == {'clim', 'colormap', 'transparent'}
assert a.keys() == b.keys()
assert a['transparent'] == b['transparent'], 'transparent'
aa, bb = a['clim'], b['clim']
assert aa.keys() == bb.keys(), 'clim keys'
assert aa['kind'] == bb['kind'] == 'value'
key = 'pos_lims' if 'pos_lims' in aa else 'lims'
assert_array_equal(aa[key], bb[key], err_msg=key)
assert isinstance(a['colormap'], Colormap), 'Colormap'
assert isinstance(b['colormap'], Colormap), 'Colormap'
assert a['colormap'].name == b['colormap'].name
def test_process_clim_round_trip():
"""Test basic input-output support."""
# With some negative data
out = _process_clim('auto', 'auto', True, -1.)
want = dict(
colormap=mne_analyze_colormap([0, 0.5, 1], 'matplotlib'),
clim=dict(kind='value', pos_lims=[1, 1, 1]),
transparent=True,)
_assert_mapdata_equal(out, want)
out2 = _process_clim(**out)
_assert_mapdata_equal(out, out2)
_linearize_map(out) # smoke test
ticks = _get_map_ticks(out)
assert_allclose(ticks, [-1, 0, 1])
# With some positive data
out = _process_clim('auto', 'auto', True, 1.)
want = dict(
colormap=plt.get_cmap('hot'),
clim=dict(kind='value', lims=[1, 1, 1]),
transparent=True,)
_assert_mapdata_equal(out, want)
out2 = _process_clim(**out)
_assert_mapdata_equal(out, out2)
_linearize_map(out)
ticks = _get_map_ticks(out)
assert_allclose(ticks, [1])
# With some actual inputs
clim = dict(kind='value', pos_lims=[0, 0.5, 1])
out = _process_clim(clim, 'auto', True)
want = dict(
colormap=mne_analyze_colormap([0, 0.5, 1], 'matplotlib'),
clim=clim, transparent=True)
_assert_mapdata_equal(out, want)
_linearize_map(out)
ticks = _get_map_ticks(out)
assert_allclose(ticks, [-1, -0.5, 0, 0.5, 1])
clim = dict(kind='value', pos_lims=[0.25, 0.5, 1])
out = _process_clim(clim, 'auto', True)
want = dict(
colormap=mne_analyze_colormap([0, 0.5, 1], 'matplotlib'),
clim=clim, transparent=True)
_assert_mapdata_equal(out, want)
_linearize_map(out)
ticks = _get_map_ticks(out)
assert_allclose(ticks, [-1, -0.5, -0.25, 0, 0.25, 0.5, 1])
@testing.requires_testing_data
@requires_nibabel()
def test_stc_mpl():
"""Test plotting source estimates with matplotlib."""
sample_src = read_source_spaces(src_fname)
vertices = [s['vertno'] for s in sample_src]
n_time = 5
n_verts = sum(len(v) for v in vertices)
stc_data = np.ones((n_verts * n_time))
stc_data.shape = (n_verts, n_time)
stc = SourceEstimate(stc_data, vertices, 1, 1, 'sample')
with pytest.warns(RuntimeWarning, match='not included'):
stc.plot(subjects_dir=subjects_dir, time_unit='s', views='ven',
hemi='rh', smoothing_steps=2, subject='sample',
backend='matplotlib', spacing='oct1', initial_time=0.001,
colormap='Reds')
fig = stc.plot(subjects_dir=subjects_dir, time_unit='ms', views='dor',
hemi='lh', smoothing_steps=2, subject='sample',
backend='matplotlib', spacing='ico2', time_viewer=True,
colormap='mne')
time_viewer = fig.time_viewer
_fake_click(time_viewer, time_viewer.axes[0], (0.5, 0.5)) # change t
time_viewer.canvas.key_press_event('ctrl+right')
time_viewer.canvas.key_press_event('left')
pytest.raises(ValueError, stc.plot, subjects_dir=subjects_dir,
hemi='both', subject='sample', backend='matplotlib')
pytest.raises(ValueError, stc.plot, subjects_dir=subjects_dir,
time_unit='ss', subject='sample', backend='matplotlib')
@pytest.mark.timeout(60) # can sometimes take > 60 sec
@testing.requires_testing_data
@requires_nibabel()
@pytest.mark.parametrize('coord_frame, idx, show_all, title',
[('head', 'gof', True, 'Test'),
('mri', 'amplitude', False, None)])
def test_plot_dipole_mri_orthoview(coord_frame, idx, show_all, title):
"""Test mpl dipole plotting."""
dipoles = read_dipole(dip_fname)
trans = read_trans(trans_fname)
fig = dipoles.plot_locations(trans=trans, subject='sample',
subjects_dir=subjects_dir,
coord_frame=coord_frame, idx=idx,
show_all=show_all, title=title,
mode='orthoview')
fig.canvas.scroll_event(0.5, 0.5, 1) # scroll up
fig.canvas.scroll_event(0.5, 0.5, -1) # scroll down
fig.canvas.key_press_event('up')
fig.canvas.key_press_event('down')
fig.canvas.key_press_event('a') # some other key
ax = fig.add_subplot(211)
with pytest.raises(TypeError, match='instance of Axes3D'):
dipoles.plot_locations(trans, 'sample', subjects_dir, ax=ax)
@testing.requires_testing_data
def test_plot_dipole_orientations(renderer):
"""Test dipole plotting in 3d."""
dipoles = read_dipole(dip_fname)
trans = read_trans(trans_fname)
for coord_frame, mode in zip(['head', 'mri'],
['arrow', 'sphere']):
dipoles.plot_locations(trans=trans, subject='sample',
subjects_dir=subjects_dir,
mode=mode, coord_frame=coord_frame)
renderer.backend._close_all()
@testing.requires_testing_data
@traits_test
def test_snapshot_brain_montage(renderer):
"""Test snapshot brain montage."""
info = read_info(evoked_fname)
fig = plot_alignment(
info, trans=None, subject='sample', subjects_dir=subjects_dir)
xyz = np.vstack([ich['loc'][:3] for ich in info['chs']])
ch_names = [ich['ch_name'] for ich in info['chs']]
xyz_dict = dict(zip(ch_names, xyz))
xyz_dict[info['chs'][0]['ch_name']] = [1, 2] # Set one ch to only 2 vals
# Make sure wrong types are checked
pytest.raises(TypeError, snapshot_brain_montage, fig, xyz)
# All chs must have 3 position values
pytest.raises(ValueError, snapshot_brain_montage, fig, xyz_dict)
# Make sure we raise error if the figure has no scene
pytest.raises(ValueError, snapshot_brain_montage, None, info)
@pytest.mark.slowtest # can be slow on OSX
@testing.requires_testing_data
@pytest.mark.parametrize('pick_ori', ('vector', None))
@pytest.mark.parametrize('kind', ('surface', 'volume', 'mixed'))
def test_plot_source_estimates(renderer_interactive, all_src_types_inv_evoked,
pick_ori, kind, brain_gc):
"""Test plotting of scalar and vector source estimates."""
backend = renderer_interactive._get_3d_backend()
is_pyvista = backend != 'mayavi'
invs, evoked = all_src_types_inv_evoked
inv = invs[kind]
with pytest.warns(None): # PCA mag
stc = apply_inverse(evoked, inv, pick_ori=pick_ori)
stc.data[1] *= -1 # make it signed
meth_key = 'plot_3d' if isinstance(stc, _BaseVolSourceEstimate) else 'plot'
stc.subject = 'sample'
meth = getattr(stc, meth_key)
kwargs = dict(subjects_dir=subjects_dir,
time_viewer=False, show_traces=False, # for speed
smoothing_steps=1, verbose='error', src=inv['src'],
volume_options=dict(resolution=None), # for speed
)
if pick_ori != 'vector':
kwargs['surface'] = 'white'
kwargs['backend'] = backend
# Mayavi can't handle non-surface
if kind != 'surface' and not is_pyvista:
with pytest.raises(RuntimeError, match='PyVista'):
meth(**kwargs)
return
brain = meth(**kwargs)
brain.close()
del brain
these_kwargs = kwargs.copy()
these_kwargs['show_traces'] = 'foo'
with pytest.raises(ValueError, match='show_traces'):
meth(**these_kwargs)
del these_kwargs
if pick_ori == 'vector':
with pytest.raises(ValueError, match='use "pos_lims"'):
meth(**kwargs, clim=dict(pos_lims=[1, 2, 3]))
if kind in ('volume', 'mixed'):
with pytest.raises(TypeError, match='when stc is a mixed or vol'):
these_kwargs = kwargs.copy()
these_kwargs.pop('src')
meth(**these_kwargs)
with pytest.raises(ValueError, match='cannot be used'):
these_kwargs = kwargs.copy()
these_kwargs.update(show_traces=True, time_viewer=False)
meth(**these_kwargs)
if not is_pyvista:
with pytest.raises(ValueError, match='view_layout must be'):
meth(view_layout='horizontal', **kwargs)
# flatmaps (mostly a lot of error checking)
these_kwargs = kwargs.copy()
these_kwargs.update(surface='flat', views='auto')
if kind == 'surface' and pick_ori != 'vector' and is_pyvista:
with pytest.raises(FileNotFoundError, match='flatmap'):
meth(**these_kwargs) # sample does not have them
fs_stc = stc.copy()
fs_stc.subject = 'fsaverage' # this is wrong, but don't have to care
flat_meth = getattr(fs_stc, meth_key)
these_kwargs.pop('src')
if pick_ori == 'vector':
pass # can't even pass "surface" variable
elif kind != 'surface':
with pytest.raises(TypeError, match='SourceEstimate when a flatmap'):
flat_meth(**these_kwargs)
elif not is_pyvista:
with pytest.raises(RuntimeError, match='PyVista 3D backend.*flatmap'):
flat_meth(**these_kwargs)
else:
brain = flat_meth(**these_kwargs)
brain.close()
del brain
these_kwargs.update(surface='inflated', views='flat')
with pytest.raises(ValueError, match='surface="flat".*views="flat"'):
flat_meth(**these_kwargs)
# just test one for speed
if kind != 'mixed':
return
assert is_pyvista
brain = meth(
views=['lat', 'med', 'ven'], hemi='lh',
view_layout='horizontal', **kwargs)
brain.close()
assert brain._subplot_shape == (1, 3)
del brain
these_kwargs = kwargs.copy()
these_kwargs['volume_options'] = dict(blending='foo')
with pytest.raises(ValueError, match='mip'):
meth(**these_kwargs)
these_kwargs['volume_options'] = dict(badkey='foo')
with pytest.raises(ValueError, match='unknown'):
meth(**these_kwargs)
# with resampling (actually downsampling but it's okay)
these_kwargs['volume_options'] = dict(resolution=20., surface_alpha=0.)
brain = meth(**these_kwargs)
brain.close()
del brain
@pytest.mark.slowtest
@testing.requires_testing_data
def test_plot_sensors_connectivity(renderer):
"""Test plotting of sensors connectivity."""
from mne import io, pick_types
data_path = data_dir
raw_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc_raw.fif')
raw = io.read_raw_fif(raw_fname)
picks = pick_types(raw.info, meg='grad', eeg=False, stim=False,
eog=True, exclude='bads')
n_channels = len(picks)
con = np.random.RandomState(42).randn(n_channels, n_channels)
info = raw.info
with pytest.raises(TypeError):
plot_sensors_connectivity(info='foo', con=con,
picks=picks)
with pytest.raises(ValueError):
plot_sensors_connectivity(info=info, con=con[::2, ::2],
picks=picks)
plot_sensors_connectivity(info=info, con=con, picks=picks)
@pytest.mark.parametrize('orientation', ('horizontal', 'vertical'))
@pytest.mark.parametrize('diverging', (True, False))
@pytest.mark.parametrize('lims', ([0.5, 1, 10], [0, 1, 10]))
def test_brain_colorbar(orientation, diverging, lims):
"""Test brain colorbar plotting."""
_, ax = plt.subplots()
clim = dict(kind='value')
if diverging:
clim['pos_lims'] = lims
else:
clim['lims'] = lims
plot_brain_colorbar(ax, clim, orientation=orientation)
if orientation == 'vertical':
have, empty = ax.get_yticklabels, ax.get_xticklabels
else:
have, empty = ax.get_xticklabels, ax.get_yticklabels
if diverging:
if lims[0] == 0:
ticks = list(-np.array(lims[1:][::-1])) + lims
else:
ticks = list(-np.array(lims[::-1])) + [0] + lims
else:
ticks = lims
plt.draw()
assert_array_equal(
[float(h.get_text().replace('−', '-')) for h in have()], ticks)
assert_array_equal(empty(), [])
@pytest.mark.slowtest # slow-ish on Travis OSX
@testing.requires_testing_data
@traits_test
def test_mixed_sources_plot_surface(renderer_interactive):
"""Test plot_surface() for mixed source space."""
src = read_source_spaces(fwd_fname2)
N = np.sum([s['nuse'] for s in src]) # number of sources
T = 2 # number of time points
S = 3 # number of source spaces
rng = np.random.RandomState(0)
data = rng.randn(N, T)
vertno = S * [np.arange(N // S)]
stc = MixedSourceEstimate(data, vertno, 0, 1)
brain = stc.surface().plot(views='lat', hemi='split',
subject='fsaverage', subjects_dir=subjects_dir,
colorbar=False)
brain.close()
del brain
@testing.requires_testing_data
@traits_test
@pytest.mark.slowtest
def test_link_brains(renderer_interactive):
"""Test plotting linked brains."""
sample_src = read_source_spaces(src_fname)
vertices = [s['vertno'] for s in sample_src]
n_time = 5
n_verts = sum(len(v) for v in vertices)
stc_data = np.zeros((n_verts * n_time))
stc_size = stc_data.size
stc_data[(np.random.rand(stc_size // 20) * stc_size).astype(int)] = \
np.random.RandomState(0).rand(stc_data.size // 20)
stc_data.shape = (n_verts, n_time)
stc = SourceEstimate(stc_data, vertices, 1, 1)
colormap = 'mne_analyze'
brain = plot_source_estimates(
stc, 'sample', colormap=colormap,
background=(1, 1, 0),
subjects_dir=subjects_dir, colorbar=True,
clim='auto'
)
if renderer_interactive._get_3d_backend() == 'mayavi':
with pytest.raises(NotImplementedError, match='backend is pyvista'):
link_brains(brain)
else:
with pytest.raises(ValueError, match='is empty'):
link_brains([])
with pytest.raises(TypeError, match='type is Brain'):
link_brains('foo')
link_brains(brain, time=True, camera=True)
def test_renderer(renderer):
"""Test that renderers are available on demand."""
backend = renderer.get_3d_backend()
cmd = [sys.executable, '-uc',
'import mne; mne.viz.create_3d_figure((800, 600)); '
'backend = mne.viz.get_3d_backend(); '
'assert backend == %r, backend' % (backend,)]
with modified_env(MNE_3D_BACKEND=backend):
run_subprocess(cmd)
| bsd-3-clause |
devanshdalal/scikit-learn | sklearn/preprocessing/data.py | 15 | 68211 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Eric Martin <[email protected]>
# Giorgio Patrini <[email protected]>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
from itertools import combinations_with_replacement as combinations_w_r
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.extmath import _incremental_mean_and_var
from ..utils.fixes import bincount
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale,
mean_variance_axis, incr_mean_variance_axis,
min_max_axis)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
def _handle_zeros_in_scale(scale, copy=True):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == .0:
scale = 1.
return scale
elif isinstance(scale, np.ndarray):
if copy:
# New array to avoid side-effects
scale = scale.copy()
scale[scale == 0.0] = 1.0
return scale
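# A minimal illustration (the `_demo_handle_zeros_in_scale` helper below is
# hypothetical and not part of the upstream module): zero entries in a scale
# vector correspond to constant features and are replaced by 1.0 so that a
# later division leaves those features unchanged instead of producing NaNs.
def _demo_handle_zeros_in_scale():
    scale_vec = np.array([2.0, 0.0, 0.5])
    safe_scale = _handle_zeros_in_scale(scale_vec)
    # The zero is replaced by 1.0; the other entries are untouched.
    assert np.array_equal(safe_scale, np.array([2.0, 1.0, 0.5]))
    return safe_scale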
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : {array-like, sparse matrix}
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
        set to False to perform inplace scaling and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSC matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSC matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSC matrix.
See also
--------
    StandardScaler: Performs scaling to unit variance using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
""" # noqa
X = check_array(X, accept_sparse='csc', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if with_std:
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var, copy=False)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
if with_mean:
mean_ = np.mean(X, axis)
if with_std:
scale_ = np.std(X, axis)
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
scale_ = _handle_zeros_in_scale(scale_, copy=False)
Xr /= scale_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# scale_ is very small so that mean_2 = mean_1/scale_ > 0, even
# if mean_1 was close to zero. The problem is thus essentially
# due to the lack of precision of mean_. A solution is then to
# subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
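# A minimal usage sketch (the `_demo_scale_usage` helper is hypothetical, not
# part of the upstream module): `scale` centers each column and divides by the
# per-column standard deviation, so the result has ~zero mean and unit std.
def _demo_scale_usage():
    X_demo = np.array([[1., 10.], [2., 20.], [3., 30.]])
    X_std = scale(X_demo)
    assert np.allclose(X_std.mean(axis=0), 0.)
    assert np.allclose(X_std.std(axis=0), 1.)
    return X_std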
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range : tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
        Set to False to perform inplace scaling and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
data_min_ : ndarray, shape (n_features,)
Per feature minimum seen in the data
.. versionadded:: 0.17
*data_min_*
data_max_ : ndarray, shape (n_features,)
Per feature maximum seen in the data
.. versionadded:: 0.17
*data_max_*
data_range_ : ndarray, shape (n_features,)
Per feature range ``(data_max_ - data_min_)`` seen in the data
.. versionadded:: 0.17
*data_range_*
See also
--------
minmax_scale: Equivalent function without the object oriented API.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.min_
del self.n_samples_seen_
del self.data_min_
del self.data_max_
del self.data_range_
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of min and max on X for later scaling.
All of X is processed as a single batch. This is intended for cases
        when `fit` is not feasible due to a very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : array-like, shape [n_samples, n_features]
            The data used to compute the per-feature minimum and maximum
            used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
if sparse.issparse(X):
raise TypeError("MinMaxScaler does no support sparse input. "
"You may consider to use MaxAbsScaler instead.")
X = check_array(X, copy=self.copy, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
data_min = np.min(X, axis=0)
data_max = np.max(X, axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next steps
else:
data_min = np.minimum(self.data_min_, data_min)
data_max = np.maximum(self.data_max_, data_max)
self.n_samples_seen_ += X.shape[0]
data_range = data_max - data_min
self.scale_ = ((feature_range[1] - feature_range[0]) /
_handle_zeros_in_scale(data_range))
self.min_ = feature_range[0] - data_min * self.scale_
self.data_min_ = data_min
self.data_max_ = data_max
self.data_range_ = data_range
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed. It cannot be sparse.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES)
X -= self.min_
X /= self.scale_
return X
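# A minimal usage sketch (the `_demo_minmax_scaler_usage` helper is
# hypothetical): MinMaxScaler maps the per-column training minimum/maximum
# onto ``feature_range`` and ``inverse_transform`` undoes the mapping.
def _demo_minmax_scaler_usage():
    X_demo = np.array([[1., 20.], [2., 30.], [3., 40.]])
    scaler = MinMaxScaler(feature_range=(0., 1.))
    X_scaled = scaler.fit_transform(X_demo)
    assert np.allclose(X_scaled.min(axis=0), 0.)
    assert np.allclose(X_scaled.max(axis=0), 1.)
    assert np.allclose(scaler.inverse_transform(X_scaled), X_demo)
    return X_scaled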
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
.. versionadded:: 0.17
*minmax_scale* function interface
to :class:`sklearn.preprocessing.MinMaxScaler`.
Parameters
----------
feature_range : tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
See also
--------
    MinMaxScaler: Performs scaling to a given range using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
""" # noqa
# Unlike the scaler object, this function allows 1d input.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, copy=False, ensure_2d=False, warn_on_dtype=True,
dtype=FLOAT_DTYPES)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
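# A minimal sketch (the `_demo_minmax_scale_1d` helper is hypothetical):
# unlike the MinMaxScaler estimator, the minmax_scale function also accepts
# 1-D input, reshaping it internally and raveling the result.
def _demo_minmax_scale_1d():
    x = np.array([5., 10., 15.])
    x_scaled = minmax_scale(x)
    assert np.allclose(x_scaled, [0., 0.5, 1.])
    return x_scaled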
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
    individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
    than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
This scaler can also be applied to sparse CSR or CSC matrices by passing
`with_mean=False` to avoid breaking the sparsity structure of the data.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_*
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
var_ : array of floats with shape [n_features]
The variance for each feature in the training set. Used to compute
`scale_`
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
See also
--------
scale: Equivalent function without the object oriented API.
:class:`sklearn.decomposition.PCA`
Further removes the linear correlation across features with 'whiten=True'.
""" # noqa
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.mean_
del self.var_
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of mean and std on X for later scaling.
All of X is processed as a single batch. This is intended for cases
        when `fit` is not feasible due to a very large number of `n_samples`
or because X is read from a continuous stream.
The algorithm for incremental mean and std is given in Equation 1.5a,b
in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms
for computing the sample variance: Analysis and recommendations."
        The American Statistician 37.3 (1983): 242-247.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES)
# Even in the case of `with_mean=False`, we update the mean anyway
# This is needed for the incremental computation of the var
# See incr_mean_variance_axis and _incremental_mean_variance_axis
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.with_std:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_, self.var_ = mean_variance_axis(X, axis=0)
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
self.mean_, self.var_, self.n_samples_seen_ = \
incr_mean_variance_axis(X, axis=0,
last_mean=self.mean_,
last_var=self.var_,
last_n=self.n_samples_seen_)
else:
self.mean_ = None
self.var_ = None
else:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_ = .0
self.n_samples_seen_ = 0
if self.with_std:
self.var_ = .0
else:
self.var_ = None
self.mean_, self.var_, self.n_samples_seen_ = \
_incremental_mean_and_var(X, self.mean_, self.var_,
self.n_samples_seen_)
if self.with_std:
self.scale_ = _handle_zeros_in_scale(np.sqrt(self.var_))
else:
self.scale_ = None
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.scale_ is not None:
inplace_column_scale(X, 1 / self.scale_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.scale_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.scale_ is not None:
inplace_column_scale(X, self.scale_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.scale_
if self.with_mean:
X += self.mean_
return X
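# A minimal sketch (the `_demo_standard_scaler_partial_fit` helper is
# hypothetical): StandardScaler supports incremental fitting, so two
# partial_fit calls over halves of the data should recover (up to floating
# point error) the same statistics as a single fit on the full array.
def _demo_standard_scaler_partial_fit():
    rng_demo = np.random.RandomState(0)
    X_full = rng_demo.rand(100, 3)
    batch = StandardScaler().fit(X_full)
    incremental = StandardScaler()
    incremental.partial_fit(X_full[:50])
    incremental.partial_fit(X_full[50:])
    assert np.allclose(batch.mean_, incremental.mean_)
    assert np.allclose(batch.var_, incremental.var_)
    return incremental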
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
This estimator scales and translates each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
.. versionadded:: 0.17
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
max_abs_ : ndarray, shape (n_features,)
Per feature maximum absolute value.
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
See also
--------
maxabs_scale: Equivalent function without the object oriented API.
"""
def __init__(self, copy=True):
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.max_abs_
def fit(self, X, y=None):
"""Compute the maximum absolute value to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
            The data used to compute the per-feature maximum absolute value
            used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of max absolute value of X for later scaling.
All of X is processed as a single batch. This is intended for cases
        when `fit` is not feasible due to a very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
            The data used to compute the per-feature maximum absolute value
            used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
max_abs = np.maximum(np.abs(mins), np.abs(maxs))
else:
max_abs = np.abs(X).max(axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
max_abs = np.maximum(self.max_abs_, max_abs)
self.n_samples_seen_ += X.shape[0]
self.max_abs_ = max_abs
self.scale_ = _handle_zeros_in_scale(max_abs)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : {array-like, sparse matrix}
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : {array-like, sparse matrix}
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
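# A minimal sketch (the `_demo_maxabs_scaler_sparse` helper is hypothetical):
# MaxAbsScaler only divides each column by its maximum absolute value, so it
# preserves sparsity and accepts CSR/CSC input directly.
def _demo_maxabs_scaler_sparse():
    X_dense = np.array([[1., -2.], [0., 4.], [-3., 0.]])
    X_sparse = sparse.csr_matrix(X_dense)
    X_scaled = MaxAbsScaler().fit_transform(X_sparse)
    # Each column now has maximum absolute value 1 and zeros stay zero.
    assert np.allclose(np.abs(X_scaled.toarray()).max(axis=0), 1.)
    assert X_scaled.nnz == X_sparse.nnz
    return X_scaled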
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
See also
--------
    MaxAbsScaler: Performs scaling to the [-1, 1] range using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
""" # noqa
# Unlike the scaler object, this function allows 1d input.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, accept_sparse=('csr', 'csc'), copy=False,
ensure_2d=False, dtype=FLOAT_DTYPES)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MaxAbsScaler(copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the quantile range (defaults to IQR: Interquartile Range).
    The IQR is the range between the 1st quartile (25th percentile)
    and the 3rd quartile (75th percentile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
.. versionadded:: 0.17
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0
        Default: (25.0, 75.0) = (1st quartile, 3rd quartile) = IQR
Quantile range used to calculate ``scale_``.
.. versionadded:: 0.18
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
.. versionadded:: 0.17
*scale_* attribute.
See also
--------
robust_scale: Equivalent function without the object oriented API.
:class:`sklearn.decomposition.PCA`
Further removes the linear correlation across features with
'whiten=True'.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
https://en.wikipedia.org/wiki/Median_(statistics)
https://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True,
quantile_range=(25.0, 75.0), copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.quantile_range = quantile_range
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q_min, q_max = self.quantile_range
if not 0 <= q_min <= q_max <= 100:
raise ValueError("Invalid quantile range: %s" %
str(self.quantile_range))
q = np.percentile(X, self.quantile_range, axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
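# A minimal sketch (the `_demo_robust_scaler_outlier` helper is hypothetical):
# because RobustScaler uses the median and the interquartile range, a single
# extreme outlier barely moves the learned statistics.
def _demo_robust_scaler_outlier():
    rng_demo = np.random.RandomState(42)
    X_clean = rng_demo.normal(size=(100, 1))
    X_outlier = X_clean.copy()
    X_outlier[0, 0] = 1e6  # one extreme value
    clean = RobustScaler().fit(X_clean)
    dirty = RobustScaler().fit(X_outlier)
    assert np.allclose(clean.center_, dirty.center_, atol=0.2)
    assert np.allclose(clean.scale_, dirty.scale_, atol=0.2)
    return dirty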
def robust_scale(X, axis=0, with_centering=True, with_scaling=True,
quantile_range=(25.0, 75.0), copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
        If True, scale the data to the interquartile range.
quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0
        Default: (25.0, 75.0) = (1st quartile, 3rd quartile) = IQR
Quantile range used to calculate ``scale_``.
.. versionadded:: 0.18
copy : boolean, optional, default is True
        set to False to perform inplace scaling and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
    `with_centering=False` (in that case, only quantile-range scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
RobustScaler: Performs centering and scaling using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
quantile_range=quantile_range, copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0., 0., 1.],
[ 1., 2., 3., 4., 6., 9.],
[ 1., 4., 5., 16., 20., 25.]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0.],
[ 1., 2., 3., 6.],
[ 1., 4., 5., 20.]])
Attributes
----------
powers_ : array, shape (n_output_features, n_input_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<sphx_glr_auto_examples_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
        return np.vstack([bincount(c, minlength=self.n_input_features_)
                          for c in combinations])
def get_feature_names(self, input_features=None):
"""
Return feature names for output features
Parameters
----------
input_features : list of string, length n_features, optional
String names for input features if available. By default,
"x0", "x1", ... "xn_features" is used.
Returns
-------
output_feature_names : list of string, length n_output_features
"""
powers = self.powers_
if input_features is None:
input_features = ['x%d' % i for i in range(powers.shape[1])]
feature_names = []
for row in powers:
inds = np.where(row)[0]
if len(inds):
name = " ".join("%s^%d" % (input_features[ind], exp)
if exp != 1 else input_features[ind]
for ind, exp in zip(inds, row[inds]))
else:
name = "1"
feature_names.append(name)
return feature_names
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X, dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
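# A minimal sketch (the `_demo_polynomial_feature_names` helper is
# hypothetical): ``powers_`` stores the exponent of every input feature in
# each output column, and ``get_feature_names`` renders the same mapping as
# strings.
def _demo_polynomial_feature_names():
    X_demo = np.arange(6).reshape(3, 2)
    poly = PolynomialFeatures(degree=2).fit(X_demo)
    names = poly.get_feature_names(['a', 'b'])
    assert names == ['1', 'a', 'b', 'a^2', 'a b', 'b^2']
    assert poly.powers_.shape == (poly.n_output_features_,
                                  poly.n_input_features_)
    return names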
def normalize(X, norm='l2', axis=1, copy=True, return_norm=False):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
        unnecessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
return_norm : boolean, default False
whether to return the computed norms
Returns
-------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Normalized input X.
norms : array, shape [n_samples] if axis=1 else [n_features]
An array of norms along given axis for X.
When X is sparse, a NotImplementedError will be raised
for norm 'l1' or 'l2'.
See also
--------
Normalizer: Performs normalization using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if return_norm and norm in ('l1', 'l2'):
raise NotImplementedError("return_norm=True is not implemented "
"for sparse matrices with norm 'l1' "
"or norm 'l2'")
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms_elementwise = norms.repeat(np.diff(X.indptr))
mask = norms_elementwise != 0
X.data[mask] /= norms_elementwise[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms, copy=False)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
if return_norm:
return X, norms
else:
return X
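# A minimal usage sketch (not part of the original module) of the function
# above: each row is scaled to unit L2 norm and, with return_norm=True, the
# original row lengths are returned as well. The helper name is hypothetical.
def _demo_normalize():
    import numpy as np
    X = np.array([[4.0, 3.0], [1.0, 0.0]])
    X_l2, norms = normalize(X, norm='l2', return_norm=True)
    assert np.allclose(norms, [5.0, 1.0])      # original Euclidean lengths
    return X_l2                                # [[0.8, 0.6], [1.0, 0.0]]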
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering for instance. For instance the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
normalize: Equivalent function without the object oriented API.
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
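# A minimal usage sketch (not part of the original module) illustrating the
# cosine-similarity remark in the docstring: after l2 normalization the dot
# product of two rows equals their cosine similarity. Helper name hypothetical.
def _demo_normalizer_cosine():
    import numpy as np
    X = np.array([[3.0, 4.0], [1.0, 1.0]])
    Xn = Normalizer(norm='l2').fit_transform(X)
    cos = Xn[0].dot(Xn[1])
    expected = X[0].dot(X[1]) / (np.linalg.norm(X[0]) * np.linalg.norm(X[1]))
    assert np.isclose(cos, expected)
    return cos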
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
        (if the input is already a numpy array or a scipy.sparse CSR / CSC
        matrix).
See also
--------
Binarizer: Performs binarization using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
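# A minimal usage sketch (not part of the original module): values strictly
# greater than the threshold become 1, everything else becomes 0. The helper
# name is hypothetical.
def _demo_binarize():
    import numpy as np
    X = np.array([[1.0, -1.0, 2.0], [0.0, 0.5, -0.3]])
    return binarize(X, threshold=0.0)          # [[1, 0, 1], [0, 1, 0]]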
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
binarize: Equivalent function without the object oriented API.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
    function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
    normalizes to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K, dtype=FLOAT_DTYPES)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K, copy=copy, dtype=FLOAT_DTYPES)
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
@property
def _pairwise(self):
return True
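# A minimal usage sketch (not part of the original module): for a linear kernel
# K = X X^T, centering K with KernelCenterer matches computing the kernel on
# column-centered data, as claimed in the class docstring. Helper name is
# hypothetical.
def _demo_kernel_centerer():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(5, 3)
    K = X.dot(X.T)
    K_centered = KernelCenterer().fit(K).transform(K)
    X_centered = X - X.mean(axis=0)
    assert np.allclose(K_centered, X_centered.dot(X_centered.T))
    return K_centered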
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : {array, sparse matrix}, shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'], dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
X = check_array(X, accept_sparse='csc', copy=copy, dtype=FLOAT_DTYPES)
if isinstance(selected, six.string_types) and selected == "all":
return transform(X)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
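# A minimal usage sketch (not part of the original module) of the private
# helper above: the transform is applied to the selected columns only and the
# untouched columns are stacked to the right. Helper name is hypothetical.
def _demo_transform_selected():
    import numpy as np
    X = np.array([[1., 10.], [2., 20.], [3., 30.]])
    # Double only column 0 -> [[2., 10.], [4., 20.], [6., 30.]]
    return _transform_selected(X, lambda Z: 2 * Z, selected=[0])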
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Note: a one-hot encoding of y labels should use a LabelBinarizer
instead.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : number of categorical values per feature.
Each feature value should be in ``range(n_values)``
- array : ``n_values[i]`` is the number of categorical values in
``X[:, i]``. Each feature value should be
in ``range(n_values[i])``
categorical_features : "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
        Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and four samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'numpy.float64'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
sklearn.preprocessing.LabelBinarizer : binarizes labels in a one-vs-all
fashion.
sklearn.preprocessing.MultiLabelBinarizer : transforms between iterable of
iterables and a multilabel format, e.g. a (samples x classes) binary
matrix indicating the presence of a class label.
sklearn.preprocessing.LabelEncoder : encodes labels with values between 0
and n_classes-1.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float64, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape [n_samples, n_feature]
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
                                % type(self.n_values))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
# We use only those categorical features of X that are known using fit.
        # i.e. less than n_values_, using the mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X.ravel()[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
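# A minimal usage sketch (not part of the original module) of the
# handle_unknown behaviour described above: a category unseen during fit is
# silently encoded as all zeros when handle_unknown='ignore'. Helper name is
# hypothetical.
def _demo_one_hot_unknown():
    import numpy as np
    enc = OneHotEncoder(handle_unknown='ignore', sparse=False)
    enc.fit(np.array([[0, 1], [1, 2]]))
    # Category 3 in the second column was never seen during fit.
    return enc.transform(np.array([[1, 3]]))   # [[0., 1., 0., 0.]]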
| bsd-3-clause |
elijah513/scikit-learn | sklearn/tree/tree.py | 113 | 34767 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array, check_random_state, compute_sample_weight
from ..utils.validation import NotFittedError
from ._tree import Criterion
from ._tree import Splitter
from ._tree import DepthFirstTreeBuilder, BestFirstTreeBuilder
from ._tree import Tree
from . import _tree
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy}
CRITERIA_REG = {"mse": _tree.MSE, "friedman_mse": _tree.FriedmanMSE}
DENSE_SPLITTERS = {"best": _tree.BestSplitter,
"presort-best": _tree.PresortBestSplitter,
"random": _tree.RandomSplitter}
SPARSE_SPLITTERS = {"best": _tree.BestSparseSplitter,
"random": _tree.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
class_weight=None):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.class_weight = class_weight
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity;
            # [:, np.newaxis] does not preserve it.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1, int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# Set min_samples_split sensibly
min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional
(default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
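# A minimal usage sketch (not part of the original module): on a toy problem
# the leaves are pure, so predict_proba returns one-hot probabilities. The
# helper name is hypothetical.
def _demo_tree_classifier():
    import numpy as np
    X = np.array([[0.], [1.], [2.], [3.]])
    y = np.array([0, 0, 1, 1])
    clf = DecisionTreeClassifier(random_state=0).fit(X, y)
    return clf.predict_proba(np.array([[0.5], [2.5]]))   # [[1, 0], [0, 1]]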
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error, which is equal to
variance reduction as feature selection criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
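# A minimal usage sketch (not part of the original module): a depth-1
# regression tree splits once and predicts the mean target of each side. The
# helper name is hypothetical.
def _demo_tree_regressor():
    import numpy as np
    X = np.array([[0.], [1.], [2.], [3.]])
    y = np.array([0.0, 0.0, 1.0, 1.0])
    reg = DecisionTreeRegressor(max_depth=1, random_state=0).fit(X, y)
    return reg.predict(np.array([[0.5], [2.5]]))   # approximately [0., 1.]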
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
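# A minimal usage sketch (not part of the original module). As the docstring
# warns, extra-trees are meant to be used inside ensembles; this hypothetical
# helper assumes sklearn.ensemble.BaggingRegressor is available and wraps the
# single tree in a small bagged ensemble.
def _demo_extra_tree_in_ensemble():
    import numpy as np
    from sklearn.ensemble import BaggingRegressor
    rng = np.random.RandomState(0)
    X = rng.rand(100, 3)
    y = X[:, 0] + 0.1 * rng.randn(100)
    model = BaggingRegressor(ExtraTreeRegressor(random_state=0),
                             n_estimators=10, random_state=0)
    return model.fit(X, y).predict(X[:5])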
| bsd-3-clause |
devanshdalal/scikit-learn | benchmarks/bench_sgd_regression.py | 61 | 5612 | """
Benchmark for SGD regression
Compares SGD regression against coordinate descent and Ridge
on synthetic data.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import gc
from time import time
from sklearn.linear_model import Ridge, SGDRegressor, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.datasets.samples_generator import make_regression
if __name__ == "__main__":
list_n_samples = np.linspace(100, 10000, 5).astype(np.int)
list_n_features = [10, 100, 1000]
n_test = 1000
noise = 0.1
alpha = 0.01
sgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
elnet_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
ridge_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
asgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
for i, n_train in enumerate(list_n_samples):
for j, n_features in enumerate(list_n_features):
X, y, coef = make_regression(
n_samples=n_train + n_test, n_features=n_features,
noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
print("=======================")
print("Round %d %d" % (i, j))
print("n_features:", n_features)
print("n_samples:", n_train)
# Shuffle data
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
print("- benchmarking ElasticNet")
clf = ElasticNet(alpha=alpha, l1_ratio=0.5, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
elnet_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
elnet_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.01, power_t=0.25)
tstart = time()
clf.fit(X_train, y_train)
sgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
sgd_results[i, j, 1] = time() - tstart
gc.collect()
print("n_iter", n_iter)
print("- benchmarking A-SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.002, power_t=0.05,
average=(n_iter * n_train // 2))
tstart = time()
clf.fit(X_train, y_train)
asgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
asgd_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking RidgeRegression")
clf = Ridge(alpha=alpha, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
ridge_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
ridge_results[i, j, 1] = time() - tstart
# Plot results
i = 0
m = len(list_n_features)
plt.figure('scikit-learn SGD regression benchmark results',
figsize=(5 * 2, 4 * m))
for j in range(m):
plt.subplot(m, 2, i + 1)
plt.plot(list_n_samples, np.sqrt(elnet_results[:, j, 0]),
label="ElasticNet")
plt.plot(list_n_samples, np.sqrt(sgd_results[:, j, 0]),
label="SGDRegressor")
plt.plot(list_n_samples, np.sqrt(asgd_results[:, j, 0]),
label="A-SGDRegressor")
plt.plot(list_n_samples, np.sqrt(ridge_results[:, j, 0]),
label="Ridge")
plt.legend(prop={"size": 10})
plt.xlabel("n_train")
plt.ylabel("RMSE")
plt.title("Test error - %d features" % list_n_features[j])
i += 1
plt.subplot(m, 2, i + 1)
plt.plot(list_n_samples, np.sqrt(elnet_results[:, j, 1]),
label="ElasticNet")
plt.plot(list_n_samples, np.sqrt(sgd_results[:, j, 1]),
label="SGDRegressor")
plt.plot(list_n_samples, np.sqrt(asgd_results[:, j, 1]),
label="A-SGDRegressor")
plt.plot(list_n_samples, np.sqrt(ridge_results[:, j, 1]),
label="Ridge")
plt.legend(prop={"size": 10})
plt.xlabel("n_train")
plt.ylabel("Time [sec]")
plt.title("Training time - %d features" % list_n_features[j])
i += 1
plt.subplots_adjust(hspace=.30)
plt.show()
| bsd-3-clause |
ChengeLi/VehicleTracking | fit_extrapolate.py | 1 | 17722 | #!/usr/bin/env python
import os
import math
import pdb,glob
import numpy as np
from scipy.io import loadmat,savemat
from scipy.sparse import csr_matrix
import matplotlib.pyplot as plt
from scipy.interpolate import spline
from scipy.interpolate import interp1d
from scipy.interpolate import InterpolatedUnivariateSpline
from sklearn.cluster import KMeans
from mpl_toolkits.mplot3d import Axes3D
from sets import Set
from warpTrj2parallel import loadWarpMtx
import statsmodels.api as sm
import cv2
import pdb
# from DataPathclass import *
# DataPathobj = DataPath(dataSource,VideoIndex)
# from parameterClass import *
# Parameterobj = parameter(dataSource,VideoIndex)
def warpTrj_using_Mtx(x_mtx,y_mtx,warpingMtx,limitX,limitY):
'warp the trj and save to warpped'
xyTupleMtx = np.zeros((x_mtx.shape[0],x_mtx.shape[1],2))
xyTupleMtx[:,:,0] = np.array(x_mtx,dtype='float32') #first dim is X!
xyTupleMtx[:,:,1] = np.array(y_mtx,dtype='float32')
warpped_xyTupleMtx = cv2.perspectiveTransform(np.array([xyTupleMtx.reshape((-1,2))],dtype='float32'), np.array(warpingMtx,dtype='float32'))[0,:,:].reshape((-1,Parameterobj.trunclen,2))
# warpped_x_mtx = np.int16(warpped_xyTupleMtx[:,:,0])
# warpped_y_mtx = np.int16(warpped_xyTupleMtx[:,:,1])
warpped_x_mtx = warpped_xyTupleMtx[:,:,0]
warpped_y_mtx = warpped_xyTupleMtx[:,:,1]
"""how to deal with out of range?????"""
warpped_x_mtx[warpped_x_mtx<0] = 0
warpped_y_mtx[warpped_y_mtx<0] = 0
warpped_x_mtx[warpped_x_mtx>=limitX] = limitX
warpped_y_mtx[warpped_y_mtx>=limitY] = limitY
warpped_x_mtx[x_mtx==0]=0
warpped_y_mtx[y_mtx==0]=0
return warpped_x_mtx,warpped_y_mtx
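# A minimal sketch (not part of the original script) of the core
# cv2.perspectiveTransform call used above, here with an identity homography so
# the warped points equal the inputs; assumes OpenCV is installed.
def _demo_perspective_transform():
    import numpy as np
    import cv2
    pts = np.array([[10.0, 20.0], [30.0, 40.0]], dtype='float32')
    H = np.eye(3, dtype='float32')
    warped = cv2.perspectiveTransform(np.array([pts]), H)[0]
    return warped                               # same as pts for identity H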
def filteringCriterion(xk,yk,xspd,yspd):
minspdth = Parameterobj.minspdth
transth = Parameterobj.transth
speed = np.abs(xspd)+np.abs(yspd)
livelong = len(xk)>Parameterobj.livelong_thresh # chk if trj is long enough
loc_change_th = Parameterobj.loc_change
if not livelong:
return False
else:
notStationary = sum(speed<3) < transth
moving1 = np.max(speed)>minspdth # check if it is a moving point
moving2 = (np.abs(np.sum(xspd))>=1e-2) and (np.abs(np.sum(yspd))>=1e-2)
loc_change = (np.max(xk)-np.min(xk)>=loc_change_th) or (np.max(yk)-np.min(yk)>=loc_change_th)
# if (np.sum(xspd)<=1e-2 and np.sum(xaccelerate)<=1e-1) or (np.sum(yspd)<=1e-2 and np.sum(yaccelerate)<=1e-1):
# if len(xspd)<=3 and (np.sum(xspd)<=1e-2) and (np.sum(xaccelerate)<=1e-1):
# if ((np.abs(np.sum(xspd))<=1e-2) and (np.abs(np.sum(yspd))<=1e-2)) or ((np.max(xk)-np.min(xk)<=5) and (np.max(yk)-np.min(yk)<=5)):
return bool(livelong*notStationary*moving1*moving2*loc_change)
def polyFitTrj(x,y,t,goodTrj):
    p3 = []  # polynomial coefficients, order 3
    stuffer = np.max(t)  # padding value marking unused entries in t
    for kk in goodTrj:
# p3.append(np.polyfit(x[kk,:][x[kk,:]!=0], y[kk,:][y[kk,:]!=0], 3)) # fit a poly line to the last K points
#### p3.append(np.polyfit( y[kk,:][y[kk,:]!=0], x[kk,:][x[kk,:]!=0],3))
# p3.append(np.polyfit(x[kk,:][x[kk,:]!=0], y[kk,:][y[kk,:]!=0], 2))
p3.append(np.polyfit(x[kk,:][t[kk,:]!=stuffer], y[kk,:][t[kk,:]!=stuffer], 2))
outlierID =[]
p3 = np.array(p3)
goodTrj = np.array(goodTrj)
"""Filtering based on curve shape, outlier of p3 discarded.
what abnormal trjs are you filtering out??? plot those bad outlier trjs"""
"""Maybe we should skip this??? draw me!"""
"""you will keep some very ziggy/jumy trjs"""
# for ii in range(p3.shape[1]):
# data = p3[:,ii]
# outlierID = outlierID+ list(np.where(np.isnan(data)==True)[0])
# mu,std = fitGaussian(data[np.ones(len(data), dtype=bool)-np.isnan(data)])
# outlierID = outlierID + list(np.where(data>=mu+std)[0])+list(np.where(data<=mu-std)[0])
# # print p3[outlierID,:]
# outlierID = np.unique(outlierID)
# TFid = np.ones(len(goodTrj),'bool')
# TFid[outlierID] = False
# goodTrj = goodTrj[TFid]
# p3 = p3[TFid]
return np.array(p3)
def filtering(x,y,xspd_mtx,yspd_mtx,t):
badTrj = []
goodTrj = []
for kk in range(x.shape[0]):
stuffer=np.max(t)
xk = x[kk,:][t[kk,:]!=stuffer] #use t to indicate
yk = y[kk,:][t[kk,:]!=stuffer]
# xaccelerate = np.diff(xspd)
# yaccelerate = np.diff(yspd)
xspd = xspd_mtx[kk,:][t[kk,:]!=stuffer][1:]
yspd = yspd_mtx[kk,:][t[kk,:]!=stuffer][1:]
# print sum(xspd)
# print sum(accelerate)
satisfyCriterion = filteringCriterion(xk,yk,xspd,yspd)
if not satisfyCriterion:
badTrj.append(kk)
# plt.plot(x[kk,:][x[kk,:]!=0],y[kk,:][y[kk,:]!=0])
else:
goodTrj.append(kk)
return np.array(goodTrj)
# extrapolate original trj
def extraPolate(xk, yk):
# positions to inter/extrapolate
# y_extraPosistion = np.linspace(start_Y, end_Y, 2)
y_extraPosistion = range(start_Y, end_Y, 1)
# spline order: 1 linear, 2 quadratic, 3 cubic ...
order = 1
# do inter/extrapolation
spline = InterpolatedUnivariateSpline(yk, xk, k=order)
x_extraPosistion = spline(y_extraPosistion)
# example showing the interpolation for linear, quadratic and cubic interpolation
# plt.figure()
plt.plot(x_extraPosistion, y_extraPosistion)
plt.draw()
# pdb.set_trace()
# for order in range(1, 4):
# s = InterpolatedUnivariateSpline(xi, yi, k=order)
# y = s(x)
# plt.plot(x, y)
plt.show()
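# A minimal sketch (not part of the original script) of the linear
# extrapolation idea above: a k=1 spline fitted on (y, x) pairs and evaluated
# outside the observed y range continues the straight line.
def _demo_extrapolation():
    import numpy as np
    from scipy.interpolate import InterpolatedUnivariateSpline
    yk = np.array([0.0, 1.0, 2.0, 3.0])
    xk = 2.0 * yk + 1.0
    spline_fit = InterpolatedUnivariateSpline(yk, xk, k=1)
    return spline_fit(np.array([4.0, 5.0]))     # approximately [9., 11.]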
def smooth(xk, yk):
# # x_smooth = np.linspace(xk.min(), xk.max(), 200)
# # y_smooth = spline(xk, yk, x_smooth,order = 2)
# y_smooth = np.linspace(yk.min(), yk.max(), 200)
# # x_smooth = spline(yk, xk, y_smooth, order = 1)
# s = InterpolatedUnivariateSpline(yk, xk, k=2)
# x_smooth = s(y_smooth)
# plt.plot(x_smooth, y_smooth, linewidth=1)
f1 = interp1d(xk, yk, kind='linear', axis=-1, copy=True, bounds_error=True, fill_value=np.nan, assume_sorted=False)
x_smooth_per_pixel = np.arange(xk.min(), xk.max(),0.5)
y_smooth_per_pixel = f1(x_smooth_per_pixel)
x_smooth_same_len = np.linspace(x_smooth_per_pixel.min(), x_smooth_per_pixel.max(),len(xk))
f2 = interp1d(x_smooth_per_pixel, y_smooth_per_pixel, kind='slinear', axis=-1, copy=True, bounds_error=True, fill_value=np.nan, assume_sorted=False)
y_smooth_same_len = f2(x_smooth_same_len)
# plt.plot(x_smooth, y_smooth, linewidth=1)
# plt.draw()
# pdb.set_trace()
return x_smooth_same_len, y_smooth_same_len
# k-means on polynomial coefs
def kmeansPolyCoeff(p3):
np.random.seed(5)
estimators = {'k_means_20': KMeans(n_clusters=20),
'k_means_8': KMeans(n_clusters=8),
'k_means_bad_init': KMeans(n_clusters=30, n_init=1,
init='random')}
fignum = 1
for name, est in estimators.items():
est.fit(p3)
labels = est.labels_
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.title(str(name))
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
ax.scatter(p3[:1000, 0], p3[:1000, 1], p3[:1000, 2], c=labels[:1000].astype(np.float))
"""plot the raw trjs not the coefficients. see the mean center trjs, what are they look like"""
# ax.w_xaxis.set_ticklabels([])
# ax.w_yaxis.set_ticklabels([])
# ax.w_zaxis.set_ticklabels([])
fignum = fignum + 1
def readData(matfile):
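    """Load the x/y/t track matrices from one KLT .mat file.

    The sparse 'xtracks', 'ytracks' and 'Ttracks' entries are densified; the
    raw loadmat dictionary is returned as well.
    """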
# print "Processing truncation...", str(matidx+1)
ptstrj = loadmat(matfile)
x = csr_matrix(ptstrj['xtracks'], shape=ptstrj['xtracks'].shape).toarray()
y = csr_matrix(ptstrj['ytracks'], shape=ptstrj['ytracks'].shape).toarray()
t = csr_matrix(ptstrj['Ttracks'], shape=ptstrj['Ttracks'].shape).toarray()
return x,y,t,ptstrj
def lowessSmooth(xk,yk):
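    """Temporally smooth a trajectory with LOWESS applied to x(t) and y(t)
    separately (frac = max(2/len, 0.1)), falling back to the raw values
    wherever LOWESS returns NaN."""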
## fit (x,y) # use smooth() func to do spatial smooth
# lowessXY = sm.nonparametric.lowess(yk, xk, frac=0.1)
# plt.figure()
# plt.plot(xk, yk, '+')
# plt.plot(lowessXY[:, 0], lowessXY[:, 1])
# plt.show()
    # fit x(t) and y(t) separately
lowessX = sm.nonparametric.lowess(xk,range(len(xk)), frac=max(2.0/len(xk),0.1))
# plt.figure('smooth X(t)')
# plt.plot(range(len(xk)), xk, '+')
# plt.plot(lowessX[:, 0], lowessX[:, 1])
# plt.show()
xk_smooth = lowessX[:, 1]
lowessY = sm.nonparametric.lowess(yk,range(len(yk)), frac=max(2.0/len(xk),0.1))
# plt.figure('smooth Y(t)')
# plt.plot(range(len(yk)), yk, '+')
# plt.plot(lowessY[:, 0], lowessY[:, 1])
# plt.show()
yk_smooth = lowessY[:, 1]
# if np.sum(np.isnan(xk_smooth))>0:
# print 'X nan!!'
# if np.sum(np.isnan(yk_smooth))>0:
# print 'Y nan!!'
"""lowess returns nan and does not warn if there are too few neighbors!"""
xk_smooth[np.isnan(xk_smooth)] = xk[np.isnan(xk_smooth)]
yk_smooth[np.isnan(yk_smooth)] = yk[np.isnan(yk_smooth)]
return xk_smooth, yk_smooth
def getSpdMtx(dataMtx_withnan):
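    """Frame-to-frame speed matrix: row-wise first differences with a leading
    column prepended; all NaNs (including the leading column) are set to 0."""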
spdMtx = np.hstack((np.ones((dataMtx_withnan.shape[0],1))*np.nan,np.diff(dataMtx_withnan)))
spdMtx[np.isnan(spdMtx)]=0 # change every nan to 0
return spdMtx
def getSmoothMtx(x,y,t):
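    """Build spatially and temporally smoothed matrices for all trajectories.

    Rows that are too short, span too few pixels or fail the pre-filtering
    criterion are skipped (left as zeros / NaNs). Returns the spatial-smooth
    x/y matrices, the temporal-smooth x/y matrices (NaNs reset to 0 for
    sparsity) and the speed matrices derived from the temporal smoothing.
    """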
x_spatial_smooth_mtx = np.zeros(x.shape)
y_spatial_smooth_mtx = np.zeros(y.shape)
x_time_smooth_mtx = np.ones(x.shape)*np.nan
y_time_smooth_mtx = np.ones(y.shape)*np.nan
for kk in range(0,x.shape[0],1):
# print 'processing', kk, 'th trj'
# xk = x[kk,:][x[kk,:]!=0]
# yk = y[kk,:][y[kk,:]!=0]d
stuffer=np.max(t)
xk = x[kk,:][t[kk,:]!=stuffer] #use t to indicate
yk = y[kk,:][t[kk,:]!=stuffer]
        if len(xk)>Parameterobj.livelong_thresh and (min(xk.max()-xk.min(), yk.max()-yk.min())>Parameterobj.loc_change): # range span >=2 pixels # longer than 5, otherwise all zero out
if len(xk)!=len(yk):
pdb.set_trace()
"""since trjs are too many, pre-filter out bad ones first before smoothing!!"""
"""prefiltering using not very precise speed before smooth"""
if not filteringCriterion(xk,yk,np.diff(xk),np.diff(yk)):
continue
x_spatial_smooth, y_spatial_smooth = smooth(xk, yk)
x_time_smooth, y_time_smooth = lowessSmooth(xk, yk)
# x_spatial_smooth_mtx[kk,:][x[kk,:]!=0]=x_spatial_smooth
# y_spatial_smooth_mtx[kk,:][y[kk,:]!=0]=y_spatial_smooth
# x_time_smooth_mtx[kk,:][x[kk,:]!=0]=x_time_smooth
# y_time_smooth_mtx[kk,:][y[kk,:]!=0]=y_time_smooth
x_spatial_smooth_mtx[kk,:][t[kk,:]!=stuffer]=x_spatial_smooth
y_spatial_smooth_mtx[kk,:][t[kk,:]!=stuffer]=y_spatial_smooth
x_time_smooth_mtx[kk,:][t[kk,:]!=stuffer]=x_time_smooth
y_time_smooth_mtx[kk,:][t[kk,:]!=stuffer]=y_time_smooth
xspd_smooth_mtx = getSpdMtx(x_time_smooth_mtx)
yspd_smooth_mtx = getSpdMtx(y_time_smooth_mtx)
x_time_smooth_mtx[np.isnan(x_time_smooth_mtx)]=0 # change nan back to zero for sparsity
y_time_smooth_mtx[np.isnan(y_time_smooth_mtx)]=0
return x_spatial_smooth_mtx,y_spatial_smooth_mtx,x_time_smooth_mtx,y_time_smooth_mtx, xspd_smooth_mtx,yspd_smooth_mtx
def plotTrj(x,y,t,p3=[],Trjchoice=[]):
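    """Interactively plot the quadratic fits of the selected trajectories.

    Trjchoice picks the rows to draw (all rows by default); for each selected
    row the quadratic given by the coefficients in p3 is evaluated over the
    trajectory's x-range and plotted, with pdb breakpoints pausing after each
    curve.
    """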
if Trjchoice==[]:
Trjchoice=range(x.shape[0])
plt.ion()
plt.figure()
for ii in range(0,len(Trjchoice),1):
kk = Trjchoice[ii]
# xk = x[kk,:][x[kk,:]!=0]
# yk = y[kk,:][y[kk,:]!=0]
stuffer=np.max(t)
xk = x[kk,:][t[kk,:]!=stuffer] #use t to indicate
yk = y[kk,:][t[kk,:]!=stuffer]
if len(xk)>=Parameterobj.livelong_thresh and (min(xk.max()-xk.min(), yk.max()-yk.min())>2): # range span >=2 pixels
# plt.plot(xk)
# plt.plot(yk)
# plt.plot(xk, yk)
# extraPolate(xk, yk)
'''1'''
x_fit = np.linspace(xk.min(), xk.max(), 200)
# y_fit = pow(x_fit,3)*p3[ii,0] + pow(x_fit,2)*p3[ii,1] + pow(x_fit,1)*p3[ii,2]+ p3[ii,3]
y_fit = pow(x_fit,2)*p3[ii,0]+pow(x_fit,1)*p3[ii,1]+ p3[ii,2]
x_range = xk.max()-xk.min()
x_fit_extra = np.linspace(max(0,xk.min()-x_range*0.50), min(xk.max()+x_range*0.50,700), 200)
# y_fit_extra = pow(x_fit_extra,3)*p3[ii,0] + pow(x_fit_extra,2)*p3[ii,1] + pow(x_fit_extra,1)*p3[ii,2]+ p3[ii,3]
y_fit_extra = pow(x_fit_extra,2)*p3[ii,0]+pow(x_fit_extra,1)*p3[ii,1]+ p3[ii,2]
# '''2'''
# y_fit = np.linspace(yk.min(), yk.max(), 200)
# x_fit = pow(y_fit,3)*p3[ii,0] + pow(y_fit,2)*p3[ii,1] + pow(y_fit,1)*p3[ii,2]+ p3[ii,3]
# plt.plot(x_fit_extra, y_fit_extra,'r')
# plt.plot(x_fit, y_fit,'g')
plt.plot(x_fit, y_fit)
plt.draw()
pdb.set_trace()
plt.show()
pdb.set_trace()
def saveSmoothMat(x_smooth_mtx,y_smooth_mtx,xspd_smooth_mtx,yspd_smooth_mtx,goodTrj,ptstrj,matfile,p3 = None):
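    """Save the smoothed, filtered trajectories to the smooth output folder.

    Only the rows in goodTrj are kept; positions and speeds are stored as
    sparse CSR matrices together with the matching metadata from the original
    mat file, and perspective-warped coordinates are added when
    Parameterobj.useWarpped is set.
    """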
print "saving smooth new trj:", matfile
"""only keep the goodTrj, delete all bad ones"""
ptstrjNew = {}
goodTrj.astype(int)
ptstrjNew['xtracks'] = csr_matrix(x_smooth_mtx[goodTrj,:])
ptstrjNew['ytracks'] = csr_matrix(y_smooth_mtx[goodTrj,:])
ptstrjNew['Ttracks'] = ptstrj['Ttracks'][goodTrj,:]
ptstrjNew['trjID'] = ptstrj['trjID'][:,goodTrj]
ptstrjNew['Huetracks'] = ptstrj['Huetracks'][goodTrj,:]
if Parameterobj.useSBS:
ptstrjNew['fg_blob_index'] = ptstrj['fg_blob_index'][goodTrj,:]
ptstrjNew['fg_blob_center_X'] = ptstrj['fg_blob_center_X'][goodTrj,:]
ptstrjNew['fg_blob_center_Y'] = ptstrj['fg_blob_center_Y'][goodTrj,:]
# ptstrjNew['polyfitCoef'] = p3
ptstrjNew['xspd'] = csr_matrix(xspd_smooth_mtx[goodTrj,:])
ptstrjNew['yspd'] = csr_matrix(yspd_smooth_mtx[goodTrj,:])
ptstrjNew['Xdir'] = np.sum(xspd_smooth_mtx[goodTrj,:],1)>=0
ptstrjNew['Ydir'] = np.sum(yspd_smooth_mtx[goodTrj,:],1)>=0
if Parameterobj.useWarpped:
_, _, warpingMtx, limitX, limitY = loadWarpMtx()
warpped_x_mtx,warpped_y_mtx = warpTrj_using_Mtx(x_smooth_mtx[goodTrj,:],y_smooth_mtx[goodTrj,:],warpingMtx,limitX, limitY)
ptstrjNew['xtracks_warpped'] = csr_matrix(warpped_x_mtx)
ptstrjNew['ytracks_warpped'] = csr_matrix(warpped_y_mtx)
warpped_xspd_mtx = getSpdMtx(warpped_x_mtx)
warpped_yspd_mtx = getSpdMtx(warpped_y_mtx)
ptstrjNew['xspd_warpped'] = csr_matrix(warpped_xspd_mtx)
ptstrjNew['yspd_warpped'] = csr_matrix(warpped_yspd_mtx)
ptstrjNew['Xdir_warpped'] = np.sum(warpped_xspd_mtx,1)>=0
ptstrjNew['Ydir_warpped'] = np.sum(warpped_yspd_mtx,1)>=0
# plt.figure()
# ax1 = plt.subplot2grid((1,3),(0, 0))
# ax2 = plt.subplot2grid((1,3),(0, 1))
# ax3 = plt.subplot2grid((1,3),(0, 2))
"""visualize before and after warping"""
# if Parameterobj.useWarpped:
# # bkg = cv2.imread('/media/My Book/DOT Video/2015-06-20_08h/frames2/00000000.jpg')
# # im = plt.imshow(bkg[:,:,::-1])
# for ii in range(len(goodTrj)):
# print ii
# xraw = x_smooth_mtx[goodTrj,:][ii,:]
# yraw = y_smooth_mtx[goodTrj,:][ii,:]
# start = min(np.min(np.where(xraw!=0)[0]),np.min(np.where(yraw!=0)[0]))
# end = max(np.max(np.where(xraw!=0)[0]),np.max(np.where(yraw!=0)[0]))
# xraw = xraw[start:end+1]
# yraw = yraw[start:end+1]
# xnew = warpped_x_mtx[ii,:][start:end+1]
# ynew = warpped_y_mtx[ii,:][start:end+1]
# plt.subplot(121)
# plt.axis('off')
# plt.plot(xraw,yraw,color = 'red',linewidth=2)
# plt.title('tracklets before perspective transformation', fontsize=10)
# plt.subplot(122)
# plt.ylim(700,0) ## flip the Y axis
# plt.plot(xnew,ynew,color = 'black',linewidth=2)
# plt.title('tracklets after perspective transformation', fontsize=10)
# plt.draw()
# plt.axis('off')
# parentPath = os.path.dirname(matfile)
# smoothPath = os.path.join(parentPath,'smooth/')
# if not os.path.exists(smoothPath):
# os.mkdir(smoothPath)
# onlyFileName = matfile[len(parentPath)+1:]
onlyFileName = matfile[len(DataPathobj.kltpath):]
savename = os.path.join(DataPathobj.smoothpath,onlyFileName)
savemat(savename,ptstrjNew)
# if __name__ == '__main__':
def fit_extrapolate_main(dataSource,VideoIndex):
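    """Smooth, filter and save all KLT trajectory .mat files of one video.

    Sets up the global DataPathobj / Parameterobj configuration, then for each
    not-yet-processed klt mat file: reads the tracks, builds the smoothed
    matrices, drops all-zero rows, filters out bad trajectories and writes the
    result with saveSmoothMat().
    """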
# define start and end regions
    # Canal video's dimensions: (528, 704, 3)
    #   start region: Y <= 100
    #   end region:   Y >= 500
import DataPathclass
global DataPathobj
DataPathobj = DataPathclass.DataPath(dataSource,VideoIndex)
import parameterClass
global Parameterobj
Parameterobj = parameterClass.parameter(dataSource,VideoIndex)
start_Y = 100;
end_Y = 500;
# matfilepath = '/Users/Chenge/Desktop/testklt/'
matfilepath = DataPathobj.kltpath
matfiles = sorted(glob.glob(matfilepath + '*.mat'))
# matfiles = sorted(glob.glob(matfilepath + 'klt_*.mat'))
# matfiles = sorted(glob.glob(matfilepath + 'sim*.mat'))
start_position = 0
matfiles = matfiles[start_position:]
existingFiles = sorted(glob.glob(DataPathobj.smoothpath+'*.mat'))
existingFileNames = []
for jj in range(len(existingFiles)):
existingFileNames.append(int(existingFiles[jj][-7:-4]))
# for matidx,matfile in enumerate(matfiles):
for matidx in range(len(matfiles)):
if (matidx+1) in existingFileNames:
print "alredy processed ", str(matidx+1)
continue
matfile = matfiles[matidx]
# "if consecutive points are similar to each other, merge them, using one to represent"
# didn't do this, smooth and resample instead
print "reading data", matfile
x,y,t,ptstrj = readData(matfile)
print "get spatial and temporal smooth matrix"
x_spatial_smooth_mtx,y_spatial_smooth_mtx,x_time_smooth_mtx,y_time_smooth_mtx, xspd_smooth_mtx,yspd_smooth_mtx = getSmoothMtx(x,y,t)
"""delete all-zero rows"""
good_index_before_filtering = np.where(np.sum(x_spatial_smooth_mtx,1)!=0)
x_spatial_smooth_mtx = x_spatial_smooth_mtx[good_index_before_filtering,:][0,:,:]
y_spatial_smooth_mtx = y_spatial_smooth_mtx[good_index_before_filtering,:][0,:,:]
x_time_smooth_mtx = x_time_smooth_mtx[good_index_before_filtering,:][0,:,:]
y_time_smooth_mtx = y_time_smooth_mtx[good_index_before_filtering,:][0,:,:]
xspd_smooth_mtx = xspd_smooth_mtx[good_index_before_filtering,:][0,:,:]
yspd_smooth_mtx = yspd_smooth_mtx[good_index_before_filtering,:][0,:,:]
t = t[good_index_before_filtering,:][0,:,:]
# plotTrj(x_smooth_mtx,y_smooth_mtx)
print "filtering out bad trajectories"
goodTrj = filtering(x_spatial_smooth_mtx,y_spatial_smooth_mtx,xspd_smooth_mtx,yspd_smooth_mtx,t)
# kmeansPolyCoeff(p3)
# plotTrj(x_spatial_smooth_mtx,y_spatial_smooth_mtx,t,Trjchoice = goodTrj)
print "saving=======!!"
saveSmoothMat(x_time_smooth_mtx,y_time_smooth_mtx,xspd_smooth_mtx,yspd_smooth_mtx,goodTrj,ptstrj,matfile)
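# Minimal usage sketch (hypothetical data-source name and video index; the
# corresponding DataPathclass / parameterClass configuration must exist):
# fit_extrapolate_main('Canal', 1)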
| mit |
dsm054/pandas | pandas/core/indexes/datetimes.py | 1 | 60759 | # pylint: disable=E1101
from __future__ import division
from datetime import datetime, time, timedelta
import operator
import warnings
import numpy as np
from pytz import utc
from pandas._libs import (
Timestamp, index as libindex, join as libjoin, lib, tslib as libts)
from pandas._libs.tslibs import (
ccalendar, conversion, fields, parsing, timezones)
import pandas.compat as compat
from pandas.util._decorators import Appender, Substitution, cache_readonly
from pandas.core.dtypes.common import (
_INT64_DTYPE, _NS_DTYPE, ensure_int64, is_datetime64_dtype,
is_datetime64_ns_dtype, is_datetimetz, is_dtype_equal, is_float,
is_integer, is_integer_dtype, is_list_like, is_period_dtype, is_scalar,
is_string_like, pandas_dtype)
import pandas.core.dtypes.concat as _concat
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.dtypes.missing import isna
from pandas.core.arrays import datetimelike as dtl
from pandas.core.arrays.datetimes import (
DatetimeArrayMixin as DatetimeArray, _to_m8)
from pandas.core.base import _shared_docs
import pandas.core.common as com
from pandas.core.indexes.base import Index, _index_shared_docs
from pandas.core.indexes.datetimelike import (
DatelikeOps, DatetimeIndexOpsMixin, TimelikeOps, wrap_array_method,
wrap_field_accessor)
from pandas.core.indexes.numeric import Int64Index
from pandas.core.ops import get_op_result_name
import pandas.core.tools.datetimes as tools
from pandas.tseries import offsets
from pandas.tseries.frequencies import Resolution, to_offset
from pandas.tseries.offsets import CDay, prefix_mapping
def _new_DatetimeIndex(cls, d):
""" This is called upon unpickling, rather than the default which doesn't
have arguments and breaks __new__ """
# data are already in UTC
# so need to localize
tz = d.pop('tz', None)
result = cls.__new__(cls, verify_integrity=False, **d)
if tz is not None:
result = result.tz_localize('UTC').tz_convert(tz)
return result
class DatetimeIndex(DatetimeArray, DatelikeOps, TimelikeOps,
DatetimeIndexOpsMixin, Int64Index):
"""
Immutable ndarray of datetime64 data, represented internally as int64, and
which can be boxed to Timestamp objects that are subclasses of datetime and
carry metadata such as frequency information.
Parameters
----------
data : array-like (1-dimensional), optional
Optional datetime-like data to construct index with
copy : bool
Make a copy of input ndarray
freq : string or pandas offset object, optional
One of pandas date offset strings or corresponding objects. The string
'infer' can be passed in order to set the frequency of the index as the
inferred frequency upon creation
start : starting value, datetime-like, optional
If data is None, start is used as the start point in generating regular
timestamp data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end time, datetime-like, optional
If periods is none, generated index will extend to first conforming
time on or just past end argument
closed : string or None, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
tz : pytz.timezone or dateutil.tz.tzfile
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
When clocks moved backward due to DST, ambiguous times may arise.
For example in Central European Time (UTC+01), when going from 03:00
DST to 02:00 non-DST, 02:30:00 local time occurs both at 00:30:00 UTC
and at 01:30:00 UTC. In such a situation, the `ambiguous` parameter
dictates how ambiguous times should be handled.
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False signifies a
non-DST time (note that this flag is only applicable for ambiguous
times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous times
name : object
Name to be stored in the index
dayfirst : bool, default False
If True, parse dates in `data` with the day first order
yearfirst : bool, default False
If True parse dates in `data` with the year first order
Attributes
----------
year
month
day
hour
minute
second
microsecond
nanosecond
date
time
timetz
dayofyear
weekofyear
week
dayofweek
weekday
quarter
tz
freq
freqstr
is_month_start
is_month_end
is_quarter_start
is_quarter_end
is_year_start
is_year_end
is_leap_year
inferred_freq
Methods
-------
normalize
strftime
snap
tz_convert
tz_localize
round
floor
ceil
to_period
to_perioddelta
to_pydatetime
to_series
to_frame
month_name
day_name
Notes
-----
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
See Also
---------
Index : The base pandas Index type
TimedeltaIndex : Index of timedelta64 data
PeriodIndex : Index of Period data
pandas.to_datetime : Convert argument to datetime
"""
_typ = 'datetimeindex'
_join_precedence = 10
def _join_i8_wrapper(joinf, **kwargs):
return DatetimeIndexOpsMixin._join_i8_wrapper(joinf, dtype='M8[ns]',
**kwargs)
_inner_indexer = _join_i8_wrapper(libjoin.inner_join_indexer_int64)
_outer_indexer = _join_i8_wrapper(libjoin.outer_join_indexer_int64)
_left_indexer = _join_i8_wrapper(libjoin.left_join_indexer_int64)
_left_indexer_unique = _join_i8_wrapper(
libjoin.left_join_indexer_unique_int64, with_indexers=False)
_engine_type = libindex.DatetimeEngine
_tz = None
_freq = None
_comparables = ['name', 'freqstr', 'tz']
_attributes = ['name', 'freq', 'tz']
# dummy attribute so that datetime.__eq__(DatetimeArray) defers
# by returning NotImplemented
timetuple = None
# define my properties & methods for delegation
_bool_ops = ['is_month_start', 'is_month_end',
'is_quarter_start', 'is_quarter_end', 'is_year_start',
'is_year_end', 'is_leap_year']
_object_ops = ['weekday_name', 'freq', 'tz']
_field_ops = ['year', 'month', 'day', 'hour', 'minute', 'second',
'weekofyear', 'week', 'weekday', 'dayofweek',
'dayofyear', 'quarter', 'days_in_month',
'daysinmonth', 'microsecond',
'nanosecond']
_other_ops = ['date', 'time', 'timetz']
_datetimelike_ops = _field_ops + _object_ops + _bool_ops + _other_ops
_datetimelike_methods = ['to_period', 'tz_localize',
'tz_convert',
'normalize', 'strftime', 'round', 'floor',
'ceil', 'month_name', 'day_name']
_is_numeric_dtype = False
_infer_as_myclass = True
# --------------------------------------------------------------------
# Constructors
def __new__(cls, data=None,
freq=None, start=None, end=None, periods=None, tz=None,
normalize=False, closed=None, ambiguous='raise',
dayfirst=False, yearfirst=False, dtype=None,
copy=False, name=None, verify_integrity=True):
if data is None:
# TODO: Remove this block and associated kwargs; GH#20535
result = cls._generate_range(start, end, periods,
freq=freq, tz=tz, normalize=normalize,
closed=closed, ambiguous=ambiguous)
result.name = name
return result
if is_scalar(data):
raise TypeError("{cls}() must be called with a "
"collection of some kind, {data} was passed"
.format(cls=cls.__name__, data=repr(data)))
# - Cases checked above all return/raise before reaching here - #
# This allows to later ensure that the 'copy' parameter is honored:
if isinstance(data, Index):
ref_to_data = data._data
else:
ref_to_data = data
if name is None and hasattr(data, 'name'):
name = data.name
freq, freq_infer = dtl.maybe_infer_freq(freq)
# if dtype has an embedded tz, capture it
tz = dtl.validate_tz_from_dtype(dtype, tz)
if not isinstance(data, (np.ndarray, Index, ABCSeries, DatetimeArray)):
# other iterable of some kind
if not isinstance(data, (list, tuple)):
data = list(data)
data = np.asarray(data, dtype='O')
elif isinstance(data, ABCSeries):
data = data._values
# data must be Index or np.ndarray here
if not (is_datetime64_dtype(data) or is_datetimetz(data) or
is_integer_dtype(data) or lib.infer_dtype(data) == 'integer'):
data = tools.to_datetime(data, dayfirst=dayfirst,
yearfirst=yearfirst)
if isinstance(data, DatetimeArray):
if tz is None:
tz = data.tz
elif data.tz is None:
data = data.tz_localize(tz, ambiguous=ambiguous)
else:
# the tz's must match
if not timezones.tz_compare(tz, data.tz):
msg = ('data is already tz-aware {0}, unable to '
'set specified tz: {1}')
raise TypeError(msg.format(data.tz, tz))
subarr = data._data
if freq is None:
freq = data.freq
verify_integrity = False
elif issubclass(data.dtype.type, np.datetime64):
if data.dtype != _NS_DTYPE:
data = conversion.ensure_datetime64ns(data)
if tz is not None:
# Convert tz-naive to UTC
tz = timezones.maybe_get_tz(tz)
data = conversion.tz_localize_to_utc(data.view('i8'), tz,
ambiguous=ambiguous)
subarr = data.view(_NS_DTYPE)
else:
# must be integer dtype otherwise
# assume this data are epoch timestamps
if data.dtype != _INT64_DTYPE:
data = data.astype(np.int64, copy=False)
subarr = data.view(_NS_DTYPE)
assert isinstance(subarr, np.ndarray), type(subarr)
assert subarr.dtype == 'M8[ns]', subarr.dtype
subarr = cls._simple_new(subarr, name=name, freq=freq, tz=tz)
if dtype is not None:
if not is_dtype_equal(subarr.dtype, dtype):
# dtype must be coerced to DatetimeTZDtype above
if subarr.tz is not None:
raise ValueError("cannot localize from non-UTC data")
if verify_integrity and len(subarr) > 0:
if freq is not None and not freq_infer:
cls._validate_frequency(subarr, freq, ambiguous=ambiguous)
if freq_infer:
subarr.freq = to_offset(subarr.inferred_freq)
return subarr._deepcopy_if_needed(ref_to_data, copy)
@classmethod
def _simple_new(cls, values, name=None, freq=None, tz=None,
dtype=None, **kwargs):
"""
        we require that we have a dtype compat for the values;
        if we are passed a non-dtype compat, then coerce using the constructor
"""
# DatetimeArray._simple_new will accept either i8 or M8[ns] dtypes
assert isinstance(values, np.ndarray), type(values)
result = super(DatetimeIndex, cls)._simple_new(values, freq, tz,
**kwargs)
result.name = name
result._reset_identity()
return result
# --------------------------------------------------------------------
@property
def _values(self):
# tz-naive -> ndarray
# tz-aware -> DatetimeIndex
if self.tz is not None:
return self
else:
return self.values
@property
def tz(self):
# GH 18595
return self._tz
@tz.setter
def tz(self, value):
# GH 3746: Prevent localizing or converting the index by setting tz
raise AttributeError("Cannot directly set timezone. Use tz_localize() "
"or tz_convert() as appropriate")
@property
def size(self):
# TODO: Remove this when we have a DatetimeTZArray
# Necessary to avoid recursion error since DTI._values is a DTI
# for TZ-aware
return self._ndarray_values.size
@property
def shape(self):
# TODO: Remove this when we have a DatetimeTZArray
# Necessary to avoid recursion error since DTI._values is a DTI
# for TZ-aware
return self._ndarray_values.shape
@property
def nbytes(self):
# TODO: Remove this when we have a DatetimeTZArray
# Necessary to avoid recursion error since DTI._values is a DTI
# for TZ-aware
return self._ndarray_values.nbytes
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return libts.ints_to_pydatetime(self.asi8, self.tz)
@cache_readonly
def _is_dates_only(self):
"""Return a boolean if we are only dates (and don't have a timezone)"""
from pandas.io.formats.format import _is_dates_only
return _is_dates_only(self.values) and self.tz is None
@property
def _formatter_func(self):
from pandas.io.formats.format import _get_format_datetime64
formatter = _get_format_datetime64(is_dates_only=self._is_dates_only)
return lambda x: "'%s'" % formatter(x, tz=self.tz)
def __reduce__(self):
        # we use a special reduce here because we need
# to simply set the .tz (and not reinterpret it)
d = dict(data=self._data)
d.update(self._get_attributes_dict())
return _new_DatetimeIndex, (self.__class__, d), None
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
super(DatetimeIndex, self).__setstate__(state)
elif isinstance(state, tuple):
# < 0.15 compat
if len(state) == 2:
nd_state, own_state = state
data = np.empty(nd_state[1], dtype=nd_state[2])
np.ndarray.__setstate__(data, nd_state)
self.name = own_state[0]
self._freq = own_state[1]
self._tz = timezones.tz_standardize(own_state[2])
# provide numpy < 1.7 compat
if nd_state[2] == 'M8[us]':
new_state = np.ndarray.__reduce__(data.astype('M8[ns]'))
np.ndarray.__setstate__(data, new_state[2])
else: # pragma: no cover
data = np.empty(state)
np.ndarray.__setstate__(data, state)
self._data = data
self._reset_identity()
else:
raise Exception("invalid pickle state")
_unpickle_compat = __setstate__
def _convert_for_op(self, value):
""" Convert value to be insertable to ndarray """
if self._has_same_tz(value):
return _to_m8(value)
raise ValueError('Passed item and index have different timezone')
def _maybe_update_attributes(self, attrs):
""" Update Index attributes (e.g. freq) depending on op """
freq = attrs.get('freq', None)
if freq is not None:
# no need to infer if freq is None
attrs['freq'] = 'infer'
return attrs
def _format_native_types(self, na_rep='NaT', date_format=None, **kwargs):
from pandas.io.formats.format import _get_format_datetime64_from_values
format = _get_format_datetime64_from_values(self, date_format)
return libts.format_array_from_datetime(self.asi8,
tz=self.tz,
format=format,
na_rep=na_rep)
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
dtype = pandas_dtype(dtype)
if (is_datetime64_ns_dtype(dtype) and
not is_dtype_equal(dtype, self.dtype)):
# GH 18951: datetime64_ns dtype but not equal means different tz
new_tz = getattr(dtype, 'tz', None)
if getattr(self.dtype, 'tz', None) is None:
return self.tz_localize(new_tz)
return self.tz_convert(new_tz)
elif is_period_dtype(dtype):
return self.to_period(freq=dtype.freq)
return super(DatetimeIndex, self).astype(dtype, copy=copy)
def _get_time_micros(self):
values = self.asi8
if self.tz is not None and self.tz is not utc:
values = self._local_timestamps()
return fields.get_time_micros(values)
def to_series(self, keep_tz=False, index=None, name=None):
"""
Create a Series with both index and values equal to the index keys
useful with map for returning an indexer based on an index
Parameters
----------
keep_tz : optional, defaults False
return the data keeping the timezone.
If keep_tz is True:
If the timezone is not set, the resulting
Series will have a datetime64[ns] dtype.
            Otherwise the Series will have a datetime64[ns, tz] dtype; the
tz will be preserved.
If keep_tz is False:
Series will have a datetime64[ns] dtype. TZ aware
objects will have the tz removed.
index : Index, optional
index of resulting Series. If None, defaults to original index
name : string, optional
name of resulting Series. If None, defaults to name of original
index
Returns
-------
Series
"""
from pandas import Series
if index is None:
index = self._shallow_copy()
if name is None:
name = self.name
if keep_tz and self.tz is not None:
# preserve the tz & copy
values = self.copy(deep=True)
else:
values = self.values.copy()
return Series(values, index=index, name=name)
def snap(self, freq='S'):
"""
Snap time stamps to nearest occurring frequency
"""
# Superdumb, punting on any optimizing
freq = to_offset(freq)
snapped = np.empty(len(self), dtype=_NS_DTYPE)
for i, v in enumerate(self):
s = v
if not freq.onOffset(s):
t0 = freq.rollback(s)
t1 = freq.rollforward(s)
if abs(s - t0) < abs(t1 - s):
s = t0
else:
s = t1
snapped[i] = s
# we know it conforms; skip check
return DatetimeIndex(snapped, freq=freq, verify_integrity=False)
# TODO: what about self.name? if so, use shallow_copy?
def unique(self, level=None):
# Override here since IndexOpsMixin.unique uses self._values.unique
# For DatetimeIndex with TZ, that's a DatetimeIndex -> recursion error
# So we extract the tz-naive DatetimeIndex, unique that, and wrap the
# result with out TZ.
if self.tz is not None:
naive = type(self)(self._ndarray_values, copy=False)
else:
naive = self
result = super(DatetimeIndex, naive).unique(level=level)
return self._shallow_copy(result.values)
def union(self, other):
"""
        Specialized union for DatetimeIndex objects. If combining
        overlapping ranges with the same DateOffset, this will be much
        faster than Index.union
Parameters
----------
other : DatetimeIndex or array-like
Returns
-------
y : Index or DatetimeIndex
"""
self._assert_can_do_setop(other)
if len(other) == 0 or self.equals(other) or len(self) == 0:
return super(DatetimeIndex, self).union(other)
if not isinstance(other, DatetimeIndex):
try:
other = DatetimeIndex(other)
except TypeError:
pass
this, other = self._maybe_utc_convert(other)
if this._can_fast_union(other):
return this._fast_union(other)
else:
result = Index.union(this, other)
if isinstance(result, DatetimeIndex):
result._tz = timezones.tz_standardize(this.tz)
if (result.freq is None and
(this.freq is not None or other.freq is not None)):
result.freq = to_offset(result.inferred_freq)
return result
def union_many(self, others):
"""
A bit of a hack to accelerate unioning a collection of indexes
"""
this = self
for other in others:
if not isinstance(this, DatetimeIndex):
this = Index.union(this, other)
continue
if not isinstance(other, DatetimeIndex):
try:
other = DatetimeIndex(other)
except TypeError:
pass
this, other = this._maybe_utc_convert(other)
if this._can_fast_union(other):
this = this._fast_union(other)
else:
tz = this.tz
this = Index.union(this, other)
if isinstance(this, DatetimeIndex):
this._tz = timezones.tz_standardize(tz)
return this
def join(self, other, how='left', level=None, return_indexers=False,
sort=False):
"""
See Index.join
"""
if (not isinstance(other, DatetimeIndex) and len(other) > 0 and
other.inferred_type not in ('floating', 'integer', 'mixed-integer',
'mixed-integer-float', 'mixed')):
try:
other = DatetimeIndex(other)
except (TypeError, ValueError):
pass
this, other = self._maybe_utc_convert(other)
return Index.join(this, other, how=how, level=level,
return_indexers=return_indexers, sort=sort)
def _maybe_utc_convert(self, other):
this = self
if isinstance(other, DatetimeIndex):
if self.tz is not None:
if other.tz is None:
raise TypeError('Cannot join tz-naive with tz-aware '
'DatetimeIndex')
elif other.tz is not None:
raise TypeError('Cannot join tz-naive with tz-aware '
'DatetimeIndex')
if not timezones.tz_compare(self.tz, other.tz):
this = self.tz_convert('UTC')
other = other.tz_convert('UTC')
return this, other
def _wrap_joined_index(self, joined, other):
name = get_op_result_name(self, other)
if (isinstance(other, DatetimeIndex) and
self.freq == other.freq and
self._can_fast_union(other)):
joined = self._shallow_copy(joined)
joined.name = name
return joined
else:
tz = getattr(other, 'tz', None)
return self._simple_new(joined, name, tz=tz)
def _can_fast_union(self, other):
if not isinstance(other, DatetimeIndex):
return False
freq = self.freq
if freq is None or freq != other.freq:
return False
if not self.is_monotonic or not other.is_monotonic:
return False
if len(self) == 0 or len(other) == 0:
return True
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
right_start = right[0]
left_end = left[-1]
# Only need to "adjoin", not overlap
try:
return (right_start == left_end + freq) or right_start in left
except (ValueError):
# if we are comparing a freq that does not propagate timezones
# this will raise
return False
def _fast_union(self, other):
if len(other) == 0:
return self.view(type(self))
if len(self) == 0:
return other.view(type(self))
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
left_end = left[-1]
right_end = right[-1]
# TODO: consider re-implementing freq._should_cache for fastpath
# concatenate dates
if left_end < right_end:
loc = right.searchsorted(left_end, side='right')
right_chunk = right.values[loc:]
dates = _concat._concat_compat((left.values, right_chunk))
return self._shallow_copy(dates)
else:
return left
def _wrap_setop_result(self, other, result):
name = get_op_result_name(self, other)
if not timezones.tz_compare(self.tz, other.tz):
raise ValueError('Passed item and index have different timezone')
return self._shallow_copy(result, name=name, freq=None, tz=self.tz)
def intersection(self, other):
"""
Specialized intersection for DatetimeIndex objects. May be much faster
than Index.intersection
Parameters
----------
other : DatetimeIndex or array-like
Returns
-------
y : Index or DatetimeIndex
"""
self._assert_can_do_setop(other)
if self.equals(other):
return self._get_reconciled_name_object(other)
if not isinstance(other, DatetimeIndex):
try:
other = DatetimeIndex(other)
except (TypeError, ValueError):
pass
result = Index.intersection(self, other)
if isinstance(result, DatetimeIndex):
if result.freq is None:
result.freq = to_offset(result.inferred_freq)
return result
elif (other.freq is None or self.freq is None or
other.freq != self.freq or
not other.freq.isAnchored() or
(not self.is_monotonic or not other.is_monotonic)):
result = Index.intersection(self, other)
result = self._shallow_copy(result._values, name=result.name,
tz=result.tz, freq=None)
if result.freq is None:
result.freq = to_offset(result.inferred_freq)
return result
if len(self) == 0:
return self
if len(other) == 0:
return other
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
end = min(left[-1], right[-1])
start = right[0]
if end < start:
return type(self)(data=[])
else:
lslice = slice(*left.slice_locs(start, end))
left_chunk = left.values[lslice]
return self._shallow_copy(left_chunk)
def _parsed_string_to_bounds(self, reso, parsed):
"""
Calculate datetime bounds for parsed time string and its resolution.
Parameters
----------
reso : Resolution
Resolution provided by parsed string.
parsed : datetime
Datetime from parsed string.
Returns
-------
lower, upper: pd.Timestamp
"""
if reso == 'year':
return (Timestamp(datetime(parsed.year, 1, 1), tz=self.tz),
Timestamp(datetime(parsed.year, 12, 31, 23,
59, 59, 999999), tz=self.tz))
elif reso == 'month':
d = ccalendar.get_days_in_month(parsed.year, parsed.month)
return (Timestamp(datetime(parsed.year, parsed.month, 1),
tz=self.tz),
Timestamp(datetime(parsed.year, parsed.month, d, 23,
59, 59, 999999), tz=self.tz))
elif reso == 'quarter':
qe = (((parsed.month - 1) + 2) % 12) + 1 # two months ahead
d = ccalendar.get_days_in_month(parsed.year, qe) # at end of month
return (Timestamp(datetime(parsed.year, parsed.month, 1),
tz=self.tz),
Timestamp(datetime(parsed.year, qe, d, 23, 59,
59, 999999), tz=self.tz))
elif reso == 'day':
st = datetime(parsed.year, parsed.month, parsed.day)
return (Timestamp(st, tz=self.tz),
Timestamp(Timestamp(st + offsets.Day(),
tz=self.tz).value - 1))
elif reso == 'hour':
st = datetime(parsed.year, parsed.month, parsed.day,
hour=parsed.hour)
return (Timestamp(st, tz=self.tz),
Timestamp(Timestamp(st + offsets.Hour(),
tz=self.tz).value - 1))
elif reso == 'minute':
st = datetime(parsed.year, parsed.month, parsed.day,
hour=parsed.hour, minute=parsed.minute)
return (Timestamp(st, tz=self.tz),
Timestamp(Timestamp(st + offsets.Minute(),
tz=self.tz).value - 1))
elif reso == 'second':
st = datetime(parsed.year, parsed.month, parsed.day,
hour=parsed.hour, minute=parsed.minute,
second=parsed.second)
return (Timestamp(st, tz=self.tz),
Timestamp(Timestamp(st + offsets.Second(),
tz=self.tz).value - 1))
elif reso == 'microsecond':
st = datetime(parsed.year, parsed.month, parsed.day,
parsed.hour, parsed.minute, parsed.second,
parsed.microsecond)
return (Timestamp(st, tz=self.tz), Timestamp(st, tz=self.tz))
else:
raise KeyError
def _partial_date_slice(self, reso, parsed, use_lhs=True, use_rhs=True):
is_monotonic = self.is_monotonic
if (is_monotonic and reso in ['day', 'hour', 'minute', 'second'] and
self._resolution >= Resolution.get_reso(reso)):
# These resolution/monotonicity validations came from GH3931,
# GH3452 and GH2369.
# See also GH14826
raise KeyError
if reso == 'microsecond':
# _partial_date_slice doesn't allow microsecond resolution, but
# _parsed_string_to_bounds allows it.
raise KeyError
t1, t2 = self._parsed_string_to_bounds(reso, parsed)
stamps = self.asi8
if is_monotonic:
# we are out of range
if (len(stamps) and ((use_lhs and t1.value < stamps[0] and
t2.value < stamps[0]) or
((use_rhs and t1.value > stamps[-1] and
t2.value > stamps[-1])))):
raise KeyError
# a monotonic (sorted) series can be sliced
left = stamps.searchsorted(
t1.value, side='left') if use_lhs else None
right = stamps.searchsorted(
t2.value, side='right') if use_rhs else None
return slice(left, right)
lhs_mask = (stamps >= t1.value) if use_lhs else True
rhs_mask = (stamps <= t2.value) if use_rhs else True
        # try to find the dates
return (lhs_mask & rhs_mask).nonzero()[0]
def _maybe_promote(self, other):
if other.inferred_type == 'date':
other = DatetimeIndex(other)
return self, other
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
if isinstance(key, datetime):
# needed to localize naive datetimes
if self.tz is not None:
key = Timestamp(key, tz=self.tz)
return self.get_value_maybe_box(series, key)
if isinstance(key, time):
locs = self.indexer_at_time(key)
return series.take(locs)
try:
return com.maybe_box(self, Index.get_value(self, series, key),
series, key)
except KeyError:
try:
loc = self._get_string_slice(key)
return series[loc]
except (TypeError, ValueError, KeyError):
pass
try:
return self.get_value_maybe_box(series, key)
except (TypeError, ValueError, KeyError):
raise KeyError(key)
def get_value_maybe_box(self, series, key):
# needed to localize naive datetimes
if self.tz is not None:
key = Timestamp(key, tz=self.tz)
elif not isinstance(key, Timestamp):
key = Timestamp(key)
values = self._engine.get_value(com.values_from_object(series),
key, tz=self.tz)
return com.maybe_box(self, values, series, key)
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label
Returns
-------
loc : int
"""
if tolerance is not None:
# try converting tolerance now, so errors don't get swallowed by
# the try/except clauses below
tolerance = self._convert_tolerance(tolerance, np.asarray(key))
if isinstance(key, datetime):
# needed to localize naive datetimes
key = Timestamp(key, tz=self.tz)
return Index.get_loc(self, key, method, tolerance)
elif isinstance(key, timedelta):
# GH#20464
raise TypeError("Cannot index {cls} with {other}"
.format(cls=type(self).__name__,
other=type(key).__name__))
if isinstance(key, time):
if method is not None:
raise NotImplementedError('cannot yet lookup inexact labels '
'when key is a time object')
return self.indexer_at_time(key)
try:
return Index.get_loc(self, key, method, tolerance)
except (KeyError, ValueError, TypeError):
try:
return self._get_string_slice(key)
except (TypeError, KeyError, ValueError):
pass
try:
stamp = Timestamp(key, tz=self.tz)
return Index.get_loc(self, stamp, method, tolerance)
except KeyError:
raise KeyError(key)
except ValueError as e:
# list-like tolerance size must match target index size
if 'list-like' in str(e):
raise e
raise KeyError(key)
def _maybe_cast_slice_bound(self, label, side, kind):
"""
If label is a string, cast it to datetime according to resolution.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'ix', 'loc', 'getitem'}
Returns
-------
label : object
Notes
-----
Value of `side` parameter should be validated in caller.
"""
assert kind in ['ix', 'loc', 'getitem', None]
if is_float(label) or isinstance(label, time) or is_integer(label):
self._invalid_indexer('slice', label)
if isinstance(label, compat.string_types):
freq = getattr(self, 'freqstr',
getattr(self, 'inferred_freq', None))
_, parsed, reso = parsing.parse_time_string(label, freq)
lower, upper = self._parsed_string_to_bounds(reso, parsed)
# lower, upper form the half-open interval:
# [parsed, parsed + 1 freq)
# because label may be passed to searchsorted
# the bounds need swapped if index is reverse sorted and has a
# length > 1 (is_monotonic_decreasing gives True for empty
# and length 1 index)
if self._is_strictly_monotonic_decreasing and len(self) > 1:
return upper if side == 'left' else lower
return lower if side == 'left' else upper
else:
return label
def _get_string_slice(self, key, use_lhs=True, use_rhs=True):
freq = getattr(self, 'freqstr',
getattr(self, 'inferred_freq', None))
_, parsed, reso = parsing.parse_time_string(key, freq)
loc = self._partial_date_slice(reso, parsed, use_lhs=use_lhs,
use_rhs=use_rhs)
return loc
def slice_indexer(self, start=None, end=None, step=None, kind=None):
"""
Return indexer for specified label slice.
Index.slice_indexer, customized to handle time slicing.
In addition to functionality provided by Index.slice_indexer, does the
following:
- if both `start` and `end` are instances of `datetime.time`, it
invokes `indexer_between_time`
- if `start` and `end` are both either string or None perform
value-based selection in non-monotonic cases.
"""
# For historical reasons DatetimeIndex supports slices between two
# instances of datetime.time as if it were applying a slice mask to
# an array of (self.hour, self.minute, self.seconds, self.microsecond).
if isinstance(start, time) and isinstance(end, time):
if step is not None and step != 1:
raise ValueError('Must have step size of 1 with time slices')
return self.indexer_between_time(start, end)
if isinstance(start, time) or isinstance(end, time):
raise KeyError('Cannot mix time and non-time slice keys')
try:
return Index.slice_indexer(self, start, end, step, kind=kind)
except KeyError:
# For historical reasons DatetimeIndex by default supports
# value-based partial (aka string) slices on non-monotonic arrays,
# let's try that.
if ((start is None or isinstance(start, compat.string_types)) and
(end is None or isinstance(end, compat.string_types))):
mask = True
if start is not None:
start_casted = self._maybe_cast_slice_bound(
start, 'left', kind)
mask = start_casted <= self
if end is not None:
end_casted = self._maybe_cast_slice_bound(
end, 'right', kind)
mask = (self <= end_casted) & mask
indexer = mask.nonzero()[0][::step]
if len(indexer) == len(self):
return slice(None)
else:
return indexer
else:
raise
# --------------------------------------------------------------------
# Wrapping DatetimeArray
_timezone = cache_readonly(DatetimeArray._timezone.fget)
is_normalized = cache_readonly(DatetimeArray.is_normalized.fget)
_resolution = cache_readonly(DatetimeArray._resolution.fget)
year = wrap_field_accessor(DatetimeArray.year)
month = wrap_field_accessor(DatetimeArray.month)
day = wrap_field_accessor(DatetimeArray.day)
hour = wrap_field_accessor(DatetimeArray.hour)
minute = wrap_field_accessor(DatetimeArray.minute)
second = wrap_field_accessor(DatetimeArray.second)
microsecond = wrap_field_accessor(DatetimeArray.microsecond)
nanosecond = wrap_field_accessor(DatetimeArray.nanosecond)
weekofyear = wrap_field_accessor(DatetimeArray.weekofyear)
week = weekofyear
dayofweek = wrap_field_accessor(DatetimeArray.dayofweek)
weekday = dayofweek
weekday_name = wrap_field_accessor(DatetimeArray.weekday_name)
dayofyear = wrap_field_accessor(DatetimeArray.dayofyear)
quarter = wrap_field_accessor(DatetimeArray.quarter)
days_in_month = wrap_field_accessor(DatetimeArray.days_in_month)
daysinmonth = days_in_month
is_month_start = wrap_field_accessor(DatetimeArray.is_month_start)
is_month_end = wrap_field_accessor(DatetimeArray.is_month_end)
is_quarter_start = wrap_field_accessor(DatetimeArray.is_quarter_start)
is_quarter_end = wrap_field_accessor(DatetimeArray.is_quarter_end)
is_year_start = wrap_field_accessor(DatetimeArray.is_year_start)
is_year_end = wrap_field_accessor(DatetimeArray.is_year_end)
is_leap_year = wrap_field_accessor(DatetimeArray.is_leap_year)
tz_localize = wrap_array_method(DatetimeArray.tz_localize, True)
tz_convert = wrap_array_method(DatetimeArray.tz_convert, True)
to_perioddelta = wrap_array_method(DatetimeArray.to_perioddelta,
False)
to_period = wrap_array_method(DatetimeArray.to_period, True)
normalize = wrap_array_method(DatetimeArray.normalize, True)
to_julian_date = wrap_array_method(DatetimeArray.to_julian_date,
False)
month_name = wrap_array_method(DatetimeArray.month_name, True)
day_name = wrap_array_method(DatetimeArray.day_name, True)
# --------------------------------------------------------------------
@Substitution(klass='DatetimeIndex')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if isinstance(value, (np.ndarray, Index)):
value = np.array(value, dtype=_NS_DTYPE, copy=False)
else:
value = _to_m8(value, tz=self.tz)
return self.values.searchsorted(value, side=side)
def is_type_compatible(self, typ):
return typ == self.inferred_type or typ == 'datetime'
@property
def inferred_type(self):
        # b/c datetime is represented as nanoseconds since the epoch, make
        # sure we can't have ambiguous indexing
return 'datetime64'
@property
def is_all_dates(self):
return True
def insert(self, loc, item):
"""
Make new Index inserting new item at location
Parameters
----------
loc : int
item : object
if not either a Python datetime or a numpy integer-like, returned
Index dtype will be object rather than datetime.
Returns
-------
new_index : Index
"""
if is_scalar(item) and isna(item):
# GH 18295
item = self._na_value
freq = None
if isinstance(item, (datetime, np.datetime64)):
self._assert_can_do_op(item)
if not self._has_same_tz(item) and not isna(item):
raise ValueError(
'Passed item and index have different timezone')
# check freq can be preserved on edge cases
if self.size and self.freq is not None:
if ((loc == 0 or loc == -len(self)) and
item + self.freq == self[0]):
freq = self.freq
elif (loc == len(self)) and item - self.freq == self[-1]:
freq = self.freq
item = _to_m8(item, tz=self.tz)
try:
new_dates = np.concatenate((self[:loc].asi8, [item.view(np.int64)],
self[loc:].asi8))
return self._shallow_copy(new_dates, freq=freq)
except (AttributeError, TypeError):
# fall back to object index
if isinstance(item, compat.string_types):
return self.astype(object).insert(loc, item)
raise TypeError(
"cannot insert DatetimeIndex with incompatible label")
def delete(self, loc):
"""
Make a new DatetimeIndex with passed location(s) deleted.
Parameters
----------
loc: int, slice or array of ints
Indicate which sub-arrays to remove.
Returns
-------
new_index : DatetimeIndex
"""
new_dates = np.delete(self.asi8, loc)
freq = None
if is_integer(loc):
if loc in (0, -len(self), -1, len(self) - 1):
freq = self.freq
else:
if is_list_like(loc):
loc = lib.maybe_indices_to_slice(
ensure_int64(np.array(loc)), len(self))
if isinstance(loc, slice) and loc.step in (1, None):
if (loc.start in (0, None) or loc.stop in (len(self), None)):
freq = self.freq
return self._shallow_copy(new_dates, freq=freq)
def indexer_at_time(self, time, asof=False):
"""
Returns index locations of index values at particular time of day
(e.g. 9:30AM).
Parameters
----------
time : datetime.time or string
datetime.time or string in appropriate format ("%H:%M", "%H%M",
"%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p",
"%I%M%S%p").
Returns
-------
values_at_time : array of integers
See Also
--------
indexer_between_time, DataFrame.at_time
"""
from dateutil.parser import parse
if asof:
raise NotImplementedError("'asof' argument is not supported")
if isinstance(time, compat.string_types):
time = parse(time).time()
if time.tzinfo:
# TODO
raise NotImplementedError("argument 'time' with timezone info is "
"not supported")
time_micros = self._get_time_micros()
micros = _time_to_micros(time)
return (micros == time_micros).nonzero()[0]
def indexer_between_time(self, start_time, end_time, include_start=True,
include_end=True):
"""
Return index locations of values between particular times of day
(e.g., 9:00-9:30AM).
Parameters
----------
start_time, end_time : datetime.time, str
datetime.time or string in appropriate format ("%H:%M", "%H%M",
"%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p",
"%I%M%S%p").
include_start : boolean, default True
include_end : boolean, default True
Returns
-------
values_between_time : array of integers
See Also
--------
indexer_at_time, DataFrame.between_time
"""
start_time = tools.to_time(start_time)
end_time = tools.to_time(end_time)
time_micros = self._get_time_micros()
start_micros = _time_to_micros(start_time)
end_micros = _time_to_micros(end_time)
if include_start and include_end:
lop = rop = operator.le
elif include_start:
lop = operator.le
rop = operator.lt
elif include_end:
lop = operator.lt
rop = operator.le
else:
lop = rop = operator.lt
if start_time <= end_time:
join_op = operator.and_
else:
join_op = operator.or_
mask = join_op(lop(start_micros, time_micros),
rop(time_micros, end_micros))
return mask.nonzero()[0]
DatetimeIndex._add_comparison_ops()
DatetimeIndex._add_numeric_methods_disabled()
DatetimeIndex._add_logical_methods_disabled()
DatetimeIndex._add_datetimelike_methods()
def date_range(start=None, end=None, periods=None, freq=None, tz=None,
normalize=False, name=None, closed=None, **kwargs):
"""
Return a fixed frequency DatetimeIndex.
Parameters
----------
start : str or datetime-like, optional
Left bound for generating dates.
end : str or datetime-like, optional
Right bound for generating dates.
periods : integer, optional
Number of periods to generate.
freq : str or DateOffset, default 'D'
Frequency strings can have multiples, e.g. '5H'. See
:ref:`here <timeseries.offset_aliases>` for a list of
frequency aliases.
tz : str or tzinfo, optional
Time zone name for returning localized DatetimeIndex, for example
'Asia/Hong_Kong'. By default, the resulting DatetimeIndex is
timezone-naive.
normalize : bool, default False
Normalize start/end dates to midnight before generating date range.
name : str, default None
Name of the resulting DatetimeIndex.
closed : {None, 'left', 'right'}, optional
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None, the default).
**kwargs
For compatibility. Has no effect on the result.
Returns
-------
rng : DatetimeIndex
See Also
--------
pandas.DatetimeIndex : An immutable container for datetimes.
pandas.timedelta_range : Return a fixed frequency TimedeltaIndex.
pandas.period_range : Return a fixed frequency PeriodIndex.
pandas.interval_range : Return a fixed frequency IntervalIndex.
Notes
-----
Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
exactly three must be specified. If ``freq`` is omitted, the resulting
``DatetimeIndex`` will have ``periods`` linearly spaced elements between
``start`` and ``end`` (closed on both sides).
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
Examples
--------
**Specifying the values**
The next four examples generate the same `DatetimeIndex`, but vary
the combination of `start`, `end` and `periods`.
Specify `start` and `end`, with the default daily frequency.
>>> pd.date_range(start='1/1/2018', end='1/08/2018')
DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',
'2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'],
dtype='datetime64[ns]', freq='D')
Specify `start` and `periods`, the number of periods (days).
>>> pd.date_range(start='1/1/2018', periods=8)
DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',
'2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'],
dtype='datetime64[ns]', freq='D')
Specify `end` and `periods`, the number of periods (days).
>>> pd.date_range(end='1/1/2018', periods=8)
DatetimeIndex(['2017-12-25', '2017-12-26', '2017-12-27', '2017-12-28',
'2017-12-29', '2017-12-30', '2017-12-31', '2018-01-01'],
dtype='datetime64[ns]', freq='D')
Specify `start`, `end`, and `periods`; the frequency is generated
automatically (linearly spaced).
>>> pd.date_range(start='2018-04-24', end='2018-04-27', periods=3)
DatetimeIndex(['2018-04-24 00:00:00', '2018-04-25 12:00:00',
'2018-04-27 00:00:00'],
dtype='datetime64[ns]', freq=None)
**Other Parameters**
Changed the `freq` (frequency) to ``'M'`` (month end frequency).
>>> pd.date_range(start='1/1/2018', periods=5, freq='M')
DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31', '2018-04-30',
'2018-05-31'],
dtype='datetime64[ns]', freq='M')
Multiples are allowed
>>> pd.date_range(start='1/1/2018', periods=5, freq='3M')
DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31',
'2019-01-31'],
dtype='datetime64[ns]', freq='3M')
`freq` can also be specified as an Offset object.
>>> pd.date_range(start='1/1/2018', periods=5, freq=pd.offsets.MonthEnd(3))
DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31',
'2019-01-31'],
dtype='datetime64[ns]', freq='3M')
Specify `tz` to set the timezone.
>>> pd.date_range(start='1/1/2018', periods=5, tz='Asia/Tokyo')
DatetimeIndex(['2018-01-01 00:00:00+09:00', '2018-01-02 00:00:00+09:00',
'2018-01-03 00:00:00+09:00', '2018-01-04 00:00:00+09:00',
'2018-01-05 00:00:00+09:00'],
dtype='datetime64[ns, Asia/Tokyo]', freq='D')
`closed` controls whether to include `start` and `end` that are on the
boundary. The default includes boundary points on either end.
>>> pd.date_range(start='2017-01-01', end='2017-01-04', closed=None)
DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03', '2017-01-04'],
dtype='datetime64[ns]', freq='D')
Use ``closed='left'`` to exclude `end` if it falls on the boundary.
>>> pd.date_range(start='2017-01-01', end='2017-01-04', closed='left')
DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03'],
dtype='datetime64[ns]', freq='D')
Use ``closed='right'`` to exclude `start` if it falls on the boundary.
>>> pd.date_range(start='2017-01-01', end='2017-01-04', closed='right')
DatetimeIndex(['2017-01-02', '2017-01-03', '2017-01-04'],
dtype='datetime64[ns]', freq='D')
"""
if freq is None and com._any_none(periods, start, end):
freq = 'D'
return DatetimeIndex(start=start, end=end, periods=periods,
freq=freq, tz=tz, normalize=normalize, name=name,
closed=closed, **kwargs)
def bdate_range(start=None, end=None, periods=None, freq='B', tz=None,
normalize=True, name=None, weekmask=None, holidays=None,
closed=None, **kwargs):
"""
Return a fixed frequency DatetimeIndex, with business day as the default
frequency
Parameters
----------
start : string or datetime-like, default None
Left bound for generating dates.
end : string or datetime-like, default None
Right bound for generating dates.
periods : integer, default None
Number of periods to generate.
freq : string or DateOffset, default 'B' (business daily)
Frequency strings can have multiples, e.g. '5H'.
tz : string or None
Time zone name for returning localized DatetimeIndex, for example
Asia/Beijing.
normalize : bool, default False
Normalize start/end dates to midnight before generating date range.
name : string, default None
Name of the resulting DatetimeIndex.
weekmask : string or None, default None
Weekmask of valid business days, passed to ``numpy.busdaycalendar``,
only used when custom frequency strings are passed. The default
value None is equivalent to 'Mon Tue Wed Thu Fri'.
.. versionadded:: 0.21.0
holidays : list-like or None, default None
Dates to exclude from the set of valid business days, passed to
``numpy.busdaycalendar``, only used when custom frequency strings
are passed.
.. versionadded:: 0.21.0
closed : string, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None).
**kwargs
For compatibility. Has no effect on the result.
Notes
-----
Of the four parameters: ``start``, ``end``, ``periods``, and ``freq``,
exactly three must be specified. Specifying ``freq`` is a requirement
for ``bdate_range``. Use ``date_range`` if specifying ``freq`` is not
desired.
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
Returns
-------
DatetimeIndex
Examples
--------
Note how the two weekend days are skipped in the result.
>>> pd.bdate_range(start='1/1/2018', end='1/08/2018')
DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',
'2018-01-05', '2018-01-08'],
dtype='datetime64[ns]', freq='B')
"""
if freq is None:
msg = 'freq must be specified for bdate_range; use date_range instead'
raise TypeError(msg)
if is_string_like(freq) and freq.startswith('C'):
try:
weekmask = weekmask or 'Mon Tue Wed Thu Fri'
freq = prefix_mapping[freq](holidays=holidays, weekmask=weekmask)
except (KeyError, TypeError):
msg = 'invalid custom frequency string: {freq}'.format(freq=freq)
raise ValueError(msg)
elif holidays or weekmask:
msg = ('a custom frequency string is required when holidays or '
'weekmask are passed, got frequency {freq}').format(freq=freq)
raise ValueError(msg)
return DatetimeIndex(start=start, end=end, periods=periods,
freq=freq, tz=tz, normalize=normalize, name=name,
closed=closed, **kwargs)
def cdate_range(start=None, end=None, periods=None, freq='C', tz=None,
normalize=True, name=None, closed=None, **kwargs):
"""
Return a fixed frequency DatetimeIndex, with CustomBusinessDay as the
default frequency
.. deprecated:: 0.21.0
Parameters
----------
start : string or datetime-like, default None
Left bound for generating dates
end : string or datetime-like, default None
Right bound for generating dates
periods : integer, default None
Number of periods to generate
freq : string or DateOffset, default 'C' (CustomBusinessDay)
Frequency strings can have multiples, e.g. '5H'
tz : string, default None
Time zone name for returning localized DatetimeIndex, for example
Asia/Beijing
normalize : bool, default False
Normalize start/end dates to midnight before generating date range
name : string, default None
Name of the resulting DatetimeIndex
weekmask : string, Default 'Mon Tue Wed Thu Fri'
weekmask of valid business days, passed to ``numpy.busdaycalendar``
holidays : list
list/array of dates to exclude from the set of valid business days,
passed to ``numpy.busdaycalendar``
closed : string, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
Notes
-----
Of the three parameters: ``start``, ``end``, and ``periods``, exactly two
must be specified.
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
Returns
-------
rng : DatetimeIndex
"""
warnings.warn("cdate_range is deprecated and will be removed in a future "
"version, instead use pd.bdate_range(..., freq='{freq}')"
.format(freq=freq), FutureWarning, stacklevel=2)
if freq == 'C':
holidays = kwargs.pop('holidays', [])
weekmask = kwargs.pop('weekmask', 'Mon Tue Wed Thu Fri')
freq = CDay(holidays=holidays, weekmask=weekmask)
return DatetimeIndex(start=start, end=end, periods=periods, freq=freq,
tz=tz, normalize=normalize, name=name,
closed=closed, **kwargs)
def _time_to_micros(time):
seconds = time.hour * 60 * 60 + 60 * time.minute + time.second
return 1000000 * seconds + time.microsecond
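# Worked example (hypothetical input) for the helper above: for a time of
# 01:02:03.000004, seconds = 1*3600 + 2*60 + 3 = 3723, so the function returns
# 1000000 * 3723 + 4 = 3723000004 microseconds since midnight.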
| bsd-3-clause |
Akshay0724/scikit-learn | examples/gaussian_process/plot_gpr_prior_posterior.py | 104 | 2878 | """
==========================================================================
Illustration of prior and posterior Gaussian process for different kernels
==========================================================================
This example illustrates the prior and posterior of a GPR with different
kernels. Mean, standard deviation, and 10 samples are shown for both prior
and posterior.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import (RBF, Matern, RationalQuadratic,
ExpSineSquared, DotProduct,
ConstantKernel)
kernels = [1.0 * RBF(length_scale=1.0, length_scale_bounds=(1e-1, 10.0)),
1.0 * RationalQuadratic(length_scale=1.0, alpha=0.1),
1.0 * ExpSineSquared(length_scale=1.0, periodicity=3.0,
length_scale_bounds=(0.1, 10.0),
periodicity_bounds=(1.0, 10.0)),
ConstantKernel(0.1, (0.01, 10.0))
* (DotProduct(sigma_0=1.0, sigma_0_bounds=(0.0, 10.0)) ** 2),
1.0 * Matern(length_scale=1.0, length_scale_bounds=(1e-1, 10.0),
nu=1.5)]
for fig_index, kernel in enumerate(kernels):
# Specify Gaussian Process
gp = GaussianProcessRegressor(kernel=kernel)
# Plot prior
plt.figure(fig_index, figsize=(8, 8))
plt.subplot(2, 1, 1)
X_ = np.linspace(0, 5, 100)
y_mean, y_std = gp.predict(X_[:, np.newaxis], return_std=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - y_std, y_mean + y_std,
alpha=0.5, color='k')
y_samples = gp.sample_y(X_[:, np.newaxis], 10)
plt.plot(X_, y_samples, lw=1)
plt.xlim(0, 5)
plt.ylim(-3, 3)
plt.title("Prior (kernel: %s)" % kernel, fontsize=12)
# Generate data and fit GP
rng = np.random.RandomState(4)
X = rng.uniform(0, 5, 10)[:, np.newaxis]
y = np.sin((X[:, 0] - 2.5) ** 2)
gp.fit(X, y)
# Plot posterior
plt.subplot(2, 1, 2)
X_ = np.linspace(0, 5, 100)
y_mean, y_std = gp.predict(X_[:, np.newaxis], return_std=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - y_std, y_mean + y_std,
alpha=0.5, color='k')
y_samples = gp.sample_y(X_[:, np.newaxis], 10)
plt.plot(X_, y_samples, lw=1)
plt.scatter(X[:, 0], y, c='r', s=50, zorder=10)
plt.xlim(0, 5)
plt.ylim(-3, 3)
plt.title("Posterior (kernel: %s)\n Log-Likelihood: %.3f"
% (gp.kernel_, gp.log_marginal_likelihood(gp.kernel_.theta)),
fontsize=12)
plt.tight_layout()
plt.show()
| bsd-3-clause |
CoBiG2/RAD_Tools | skyline_creator.py | 1 | 2134 | #!/usr/bin/python
from collections import OrderedDict
import matplotlib.pyplot as plt
import sys
import numpy as np
from scipy.interpolate import spline
import matplotlib.ticker as ticker
def parse_csv(csv_file):
"""
Parses a csv file with a format similar to the one generated by Tracer.
Example of the expected (comma-separated) columns:
time, mean, median, hpd lower 95, hpd upper 95
"""
csv_data = OrderedDict()
csv_fh = open(csv_file)
next(csv_fh)
for line in csv_fh:
fields = line.split(",")
fields = [float(x) for x in fields]
# Get time as key and median and HPD bounds as value list
csv_data[float(fields[0])] = fields[2:]
return csv_data
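# Illustrative sketch (hypothetical numbers): a data row such as
# "1000.0,2.1,2.0,1.5,2.6" is parsed into csv_data[1000.0] == [2.0, 1.5, 2.6],
# i.e. the time is the key and [median, hpd lower, hpd upper] is the value,
# matching how skyline_plot() below unpacks the value list.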
def skyline_plot(csv_data, output_file):
"""
Creates a skyline style plot from data on a csv_file. This csv file should
be compliant with the one generated by Tracer and is parsed by parse_csv
function.
"""
fig, ax = plt.subplots()
#x_data = list(csv_data.keys())
x_data = np.arange(len(csv_data))
median_data = np.array([x[0] for x in csv_data.values()])
lower_hpd = np.array([x[1] for x in csv_data.values()])
higher_hpd = np.array([x[2] for x in csv_data.values()])
plt.xticks(x_data, ["%.2E" % x for x in csv_data.keys()], rotation=45,
ha="right")
xnew = np.linspace(x_data.min(),x_data.max(), 200)
smooth_median = spline(x_data, median_data, xnew)
smooth_lower = spline(x_data, lower_hpd, xnew)
smooth_higher = spline(x_data, higher_hpd, xnew)
ax.plot(xnew, smooth_median, "--", color="black")
#ax.fill_between(x_data, higher_hpd, lower_hpd, facecolor="blue", alpha=0.5)
ax.plot(xnew, smooth_lower, color="blue")
ax.plot(xnew, smooth_higher, color="blue")
ax.fill_between(xnew, smooth_higher, smooth_lower, facecolor="blue", alpha=0.3)
plt.xlabel("Time")
plt.ylabel("Ne")
plt.tight_layout()
plt.savefig("%s.svg" % (output_file))
def main():
# Get arguments
args = sys.argv
csv_file = args[1]
output_file = args[2]
csv_data = parse_csv(csv_file)
skyline_plot(csv_data, output_file)
main()
| gpl-3.0 |
aidiary/keras_examples | datagen/test_datagen3.py | 1 | 2539 | import os
import glob
import shutil
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
IMAGE_FILE = "../vgg16/elephant.jpg"
def draw_images(datagen, x, result_images):
# Create the output directory
temp_dir = "temp"
os.mkdir(temp_dir)
# Generate 9 images from the generator
# x is a single sample, so batch_size is fixed at 1
g = datagen.flow(x, batch_size=1, save_to_dir=temp_dir, save_prefix='img', save_format='jpg')
for i in range(9):
batch = g.next()
# Draw the generated images in a 3x3 grid
images = glob.glob(os.path.join(temp_dir, "*.jpg"))
fig = plt.figure()
gs = gridspec.GridSpec(3, 3)
gs.update(wspace=0.1, hspace=0.1)
for i in range(9):
img = load_img(images[i])
plt.subplot(gs[i])
plt.imshow(img, aspect='auto')
plt.axis("off")
plt.savefig(result_images)
# Remove the output directory
shutil.rmtree(temp_dir)
if __name__ == '__main__':
# Load the image (as a PIL image)
img = load_img(IMAGE_FILE)
# Convert to a numpy array (row, col, channel)
x = img_to_array(img)
# print(x.shape)
# Convert to a 4D tensor (sample, row, col, channel)
x = np.expand_dims(x, axis=0)
# print(x.shape)
datagen = ImageDataGenerator(rotation_range=90)
draw_images(datagen, x, "result_rotation.jpg")
datagen = ImageDataGenerator(width_shift_range=0.2)
draw_images(datagen, x, "result_width_shift.jpg")
datagen = ImageDataGenerator(height_shift_range=0.2)
draw_images(datagen, x, "result_height_shift.jpg")
datagen = ImageDataGenerator(shear_range=0.78) # pi/4
draw_images(datagen, x, "result_shear.jpg")
datagen = ImageDataGenerator(zoom_range=0.5)
draw_images(datagen, x, "result_zoom.jpg")
datagen = ImageDataGenerator(channel_shift_range=100)
draw_images(datagen, x, "result_channel_shift.jpg")
datagen = ImageDataGenerator(horizontal_flip=True)
draw_images(datagen, x, "result_horizontal_flip.jpg")
datagen = ImageDataGenerator(vertical_flip=True)
draw_images(datagen, x, "result_vertical_flip.jpg")
datagen = ImageDataGenerator(samplewise_center=True)
draw_images(datagen, x, "result_samplewise_center.jpg")
datagen = ImageDataGenerator(samplewise_std_normalization=True)
draw_images(datagen, x, "result_samplewise_std_normalization.jpg")
| mit |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/pandas/io/feather_format.py | 6 | 2938 | """ feather-format compat """
from distutils.version import LooseVersion
from pandas import DataFrame, RangeIndex, Int64Index
from pandas.compat import range
def _try_import():
# since pandas is a dependency of feather
# we need to import on first use
try:
import feather
except ImportError:
# give a nice error message
raise ImportError("the feather-format library is not installed\n"
"you can install via conda\n"
"conda install feather-format -c conda-forge\n"
"or via pip\n"
"pip install feather-format\n")
try:
feather.__version__ >= LooseVersion('0.3.1')
except AttributeError:
raise ImportError("the feather-format library must be >= "
"version 0.3.1\n"
"you can install via conda\n"
"conda install feather-format -c conda-forge"
"or via pip\n"
"pip install feather-format\n")
return feather
def to_feather(df, path):
"""
Write a DataFrame to the feather-format
Parameters
----------
df : DataFrame
path : string
File path
"""
if not isinstance(df, DataFrame):
raise ValueError("feather only support IO with DataFrames")
feather = _try_import()
valid_types = {'string', 'unicode'}
# validate index
# --------------
# validate that we have only a default index
# raise on anything else as we don't serialize the index
if not isinstance(df.index, Int64Index):
raise ValueError("feather does not support serializing {} "
"for the index; you can .reset_index()"
"to make the index into column(s)".format(
type(df.index)))
if not df.index.equals(RangeIndex.from_range(range(len(df)))):
raise ValueError("feather does not support serializing a "
"non-default index for the index; you "
"can .reset_index() to make the index "
"into column(s)")
if df.index.name is not None:
raise ValueError("feather does not serialize index meta-data on a "
"default index")
# validate columns
# ----------------
# must have value column names (strings only)
if df.columns.inferred_type not in valid_types:
raise ValueError("feather must have string column names")
feather.write_dataframe(df, path)
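# Illustrative sketch (hypothetical frame) of the validation above: a frame like
#   DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
# passes (default RangeIndex, string column names), while the same frame after
# .set_index('a') or with integer column labels raises ValueError; callers are
# expected to .reset_index() first, as the error messages suggest.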
def read_feather(path):
"""
Load a feather-format object from the file path
.. versionadded 0.20.0
Parameters
----------
path : string
File path
Returns
-------
type of object stored in file
"""
feather = _try_import()
return feather.read_dataframe(path)
| agpl-3.0 |
ycasg/PyNLO | src/pynlo/interactions/ThreeWaveMixing/DFG_integrand.py | 2 | 28773 | # -*- coding: utf-8 -*-
"""
Difference frequency generation module
Defines:
- The dfg_problem, a class which can be intergrated by the pyNLO ODESolve
- The fftcomputer, which handles FFTs using pyFFTW
- A helper class, dfg_results_interface, which provides a Pulse-class based
wrapper around the dfg results.
Authors: Dan Maser, Gabe Ycas
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import scipy.fftpack as fftpack
from copy import deepcopy
from scipy import constants
from pynlo.light import OneDBeam, OneDBeam_highV_WG
from pynlo.light.DerivedPulses import NoisePulse
from pynlo.light.PulseBase import Pulse
from matplotlib import pyplot as plt
import logging
try:
import pyfftw
PYFFTW_AVAILABLE=True
except:
PYFFTW_AVAILABLE=False
class dfg_problem:
"""
This class defines the integrand for a DFG or OPO parametric interaction.
Following Eqn (8) in Seres & Hebling, "Nonstationary theory of synchronously pumped femtosecond optical parametric oscillators", JOSA B Vol 17 No 5, 2000.
"""
last_calc_z = -1e6
overlap_pump = None
overlap_sgnl = None
overlap_idlr = None
pump_P_to_a = None
sgnl_P_to_a = None
idlr_P_to_a = None
_plot_beam_overlaps = False
_wg_mode = False
_Aeff_squm = 0.0
_pump_center_idx = 0.0
_prev_pp_boundary = None
_next_pp_boundary = None
_pp_sign = 1
_pp_last = 0
def __init__(self, pump_in, sgnl_in, crystal_in,
disable_SPM = False, pump_waist = 10e-6,
apply_gouy_phase = False, plot_beam_overlaps = False,
wg_mode = False, Aeff_squm = None):
""" Initialize DFG problem. The idler field must be derived from the
signal & idler, as its center frequency is exactly the difference-
frequency between pump & signal.
Setting the apply_gouy_phase flag to True enables the calculation of the
wavelength-dependent Gouy phase shift. This is disabled by default because
it is slow (if deemed important it could be sped up by inteprolation, but
the effect of Gouy phase seems small so it might not be worthwhile.) """
self._wg_mode = wg_mode
if self._wg_mode == False:
self.waist = pump_waist
self._plot_beam_overlaps = plot_beam_overlaps
self._calc_gouy = apply_gouy_phase
else:
assert Aeff_squm is not None
self._Aeff_squm = Aeff_squm
self._plot_beam_overlaps = False
self._calc_gouy = apply_gouy_phase
# Create idler Pulse.
# The idler grid must be centered to match DFG of the pump and signal
# center frequencies. The center matching is implicitly used in the
# mixing calculations to conserve energy
idler_cwl_natural = 1.0/(1.0/pump_in.center_wavelength_nm -\
1.0/sgnl_in.center_wavelength_nm)
idlr_in = NoisePulse(center_wavelength_nm = idler_cwl_natural,
frep_MHz = pump_in.frep_MHz,
NPTS = pump_in.NPTS,
time_window_ps = pump_in.time_window_ps)
# Double check that fields do not overlap
if ( max(pump_in.wl_nm) > min(sgnl_in.wl_nm) ):
print ("pump_max: ", max(pump_in.wl_nm))
print ("sgnl min: ", min(sgnl_in.wl_nm))
raise ValueError("Pump and signal field grids overlap.")
if ( max(sgnl_in.wl_nm) > min(idlr_in.wl_nm) ):
print ("sgnl max: ", max(sgnl_in.wl_nm))
print ("idlr min: ", min(idlr_in.wl_nm))
raise ValueError("Signal and idler field grids overlap.")
self.idlr_in = idlr_in
self.pump = deepcopy(pump_in)
self.sgnl = deepcopy(sgnl_in)
self.idlr = deepcopy(idlr_in)
self.crystal = deepcopy(crystal_in)
if self.crystal.mode == 'PP':
self.precompute_poling()
self.disable_SPM = disable_SPM
self.c = constants.speed_of_light
self.eps = constants.epsilon_0
self.frep = sgnl_in.frep_mks
self.veclength = sgnl_in.NPTS
if not pump_in.NPTS == sgnl_in.NPTS == idlr_in.NPTS:
raise ValueError("""Pump, signal, and idler do not have
same length.""")
if self._wg_mode == False:
if self.crystal.mode == 'BPM':
self.pump_beam = OneDBeam(self.waist, this_pulse = self.pump, axis = 'mix')
self.pump_beam.set_waist_to_match_central_waist(self.pump, self.waist, self.crystal)
# Pump beam sets all other beams' confocal parameters
self.sgnl_beam = OneDBeam(self.waist, this_pulse = self.sgnl ,axis = 'o')
self.sgnl_beam.set_waist_to_match_confocal(self.sgnl, self.pump, self.pump_beam, self.crystal)
self.idlr_beam = OneDBeam(self.waist , this_pulse = self.idlr,axis = 'o')
self.idlr_beam.set_waist_to_match_confocal(self.idlr, self.pump, self.pump_beam, self.crystal)
else:
self.pump_beam = OneDBeam(waist_meters = self.waist, this_pulse = self.pump)
self.pump_beam.set_waist_to_match_central_waist(self.pump, self.waist, self.crystal)
self.sgnl_beam = OneDBeam(waist_meters = self.waist, this_pulse = self.sgnl)
self.sgnl_beam.set_waist_to_match_confocal(self.sgnl, self.pump, self.pump_beam, self.crystal)
self.idlr_beam = OneDBeam(waist_meters = self.waist, this_pulse = self.idlr)
self.idlr_beam.set_waist_to_match_confocal(self.idlr, self.pump, self.pump_beam, self.crystal)
else:
# Waveguide mode. Only valid for ZZZ phase matching
assert self.crystal.mode != 'BPM'
self.pump_beam = OneDBeam_highV_WG(Aeff_squm = self._Aeff_squm, this_pulse = self.pump)
self.sgnl_beam = OneDBeam_highV_WG(Aeff_squm = self._Aeff_squm, this_pulse = self.sgnl)
self.idlr_beam = OneDBeam_highV_WG(Aeff_squm = self._Aeff_squm, this_pulse = self.idlr)
self.fftobject = fftcomputer(self.veclength)
self.ystart = np.array(np.hstack((self.pump.AW,
self.sgnl.AW,
self.idlr.AW )),
dtype='complex128')
# Preallocated mixing terms (work spaces)
self.AsAi = np.zeros((self.veclength,), dtype=np.complex128)
self.ApAi = np.zeros((self.veclength,), dtype=np.complex128)
self.ApAs = np.zeros((self.veclength,), dtype=np.complex128)
# Preallocated phasors for adding linear dispersion (work spaces)
self.phi_p = np.zeros((self.veclength,), dtype=np.complex128)
self.phi_s = np.zeros((self.veclength,), dtype=np.complex128)
self.phi_i = np.zeros((self.veclength,), dtype=np.complex128)
# Preallocated outputs (work spaces)
self.dApdZ = np.zeros((self.veclength,), dtype=np.complex128)
self.dAsdZ = np.zeros((self.veclength,), dtype=np.complex128)
self.dAidZ = np.zeros((self.veclength,), dtype=np.complex128)
# Relative wave numbers
self.k_p = self.pump_beam.get_k_in_crystal(pump_in, self.crystal)
self.k_s = self.sgnl_beam.get_k_in_crystal(sgnl_in, self.crystal)
self.k_i = self.idlr_beam.get_k_in_crystal(idlr_in, self.crystal)
self.k_p_0 = self.k_p[int(len(self.k_p)/2.0)]
self.k_s_0 = self.k_s[int(len(self.k_s)/2.0)]
self.k_i_0 = self.k_i[int(len(self.k_i)/2.0)]
self.k_p -= self.k_p_0
self.k_s -= self.k_s_0
self.k_i -= self.k_i_0
self.n_p = self.pump_beam.get_n_in_crystal(self.pump, self.crystal)
self.n_s = self.sgnl_beam.get_n_in_crystal(self.sgnl, self.crystal)
self.n_i = self.idlr_beam.get_n_in_crystal(self.idlr, self.crystal)
self._pump_center_idx = np.argmax(abs(self.pump.AW))
self.approx_pulse_speed = max([max(constants.speed_of_light / self.n_p),
max(constants.speed_of_light / self.n_s),
max(constants.speed_of_light / self.n_i)])
if not self.disable_SPM:
[self.jl_p, self.jl_s, self.jl_i] = np.zeros((3, self.veclength))
def helper_dxdy(self, x, y):
delta = np.diff(y) / np.diff(x)
return np.append(delta,delta[-1])
def vg(self, n, wl):
return self.c / (n - wl * self.helper_dxdy(wl, n))
def gen_jl(self, y):
""" Following Eqn (8) in Seres & Hebling, "Nonstationary theory of
synchronously pumped femtosecond optical parametric oscillators",
JOSA B Vol 17 No 5, 2000. A call to this function updates the
:math: `\chi_3` mixing terms used for four-wave mixing.
Parameters
----------
y : array-like, shape is 3 * NPTS
Concatenated pump, signal, and idler fields
"""
n_p = self.n_p
n_s = self.n_s
n_i = self.n_i
vg_p = self.vg(n_p, self.pump.w_hz)
vg_s = self.vg(n_s, self.sgnl.w_hz)
vg_i = self.vg(n_i, self.idlr.w_hz)
gamma_p = constants.epsilon_0 * 0.5 * n_p**2 * vg_p
gamma_s = constants.epsilon_0 * 0.5 * n_s**2 * vg_s
gamma_i = constants.epsilon_0 * 0.5 * n_i**2 * vg_i
jl = np.zeros((3, self.veclength), dtype = 'complex128')
gamma = [gamma_p, gamma_s, gamma_i]
waist = [self.pump_beam.waist, self.sgnl_beam.waist, self.idlr_beam.waist]
i = 0
for vec1 in [self.Ap, self.As, self.Ai]:
for vec2 in [self.Ap, self.As, self.Ai]:
if np.all(vec1 == vec2):
jl[i] = jl[i] + (1. / (2.*np.pi) * gamma[i] *
self.fftobject.corr(vec2, vec2) * np.sqrt(2. /
(self.c * self.eps * np.pi * waist[i]**2)))
else:
jl[i] = jl[i] + (1. / np.pi * gamma[i] *
self.fftobject.corr(vec2, vec2) * np.sqrt(2. /
(self.c * self.eps * np.pi * waist[i]**2)))
i += 1
[self.jl_p, self.jl_s, self.jl_i] = jl
def poling(self, x):
""" Helper function to get sign of :math: `d_\textrm{eff}` at position
:math: `x` in the crystal. Uses self.crystal's pp function.
For APPLN this is somewhat complicated. The input position x could
be many periods away from the previous value, and in either
direction. One solution would be carefully stepping back and forth,
but this needs to be perfect to prevent numerical errors.
Instead, precompute the domain boundaries and use a big comparison
to check the poling(z)
Returns
-------
x : int
Sign (+1 or -1) of :math: `d_\textrm{eff}`.
"""
if ((self.domain_lb < x) * (x < self.domain_ub)).any():
return -1
else:
return 1
def precompute_poling(self):
z_current = 0
domain_lb = []
domain_ub = []
while z_current < self.crystal.length_mks:
domain_lb.append(z_current+self.crystal.pp(z_current) * 0.5)
domain_ub.append(z_current+self.crystal.pp(z_current) * 1.0)
z_current += self.crystal.pp(z_current)
#print("precomputed",self.crystal.pp(z_current)," at z=",z_current)
if self.crystal.pp(z_current) <= 1e-6:
print("Error: poling period",self.crystal.pp(z_current)," at z=",z_current,"is too small")
self.domain_lb = np.array(domain_lb)
self.domain_ub = np.array(domain_ub)
plt.plot(self.domain_lb)
plt.plot(self.domain_ub)
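# Illustrative sketch (hypothetical numbers, units omitted): for a constant
# poling period of 30, precompute_poling() stores domain_lb = [15, 45, 75, ...]
# and domain_ub = [30, 60, 90, ...], so poling(z) returns -1 for z in the second
# half of each period (e.g. z = 20) and +1 otherwise (e.g. z = 10), flipping the
# sign of d_eff every half period.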
def Ap(self, y):
return y[0 : self.veclength]
def As(self, y):
return y[self.veclength : 2 * self.veclength]
def Ai(self, y):
return y[2 * self.veclength : 3 * self.veclength]
# Integrand:
# State is defined by:
# 1.) fields in crystal
# 2.) values of k
# 3.) electric field->intensity conversion (~ area)
# Output is vector of estimate for d/dz field
def deriv(self, z, y, dydx):
assert not np.isnan(z)
if self.crystal.mode == 'PP':
deff = self.poling(z) * self.crystal.deff
elif self.crystal.mode == 'BPM' or self.crystal.mode == 'simple':
deff = self.crystal.deff
else:
raise AttributeError('Crystal type not known; deff not set.')
# After epic translation of Dopri853 from Numerical Recipes' C++ to
# native Python/NumPy, we can use complex numbers throughout:
t = z / float(self.approx_pulse_speed)
self.phi_p[:] = np.exp(1j * ((self.k_p + self.k_p_0) * z - t * self.pump.W_mks))
self.phi_s[:] = np.exp(1j * ((self.k_s + self.k_s_0) * z - t * self.sgnl.W_mks))
self.phi_i[:] = np.exp(1j * ((self.k_i + self.k_i_0) * z - t * self.idlr.W_mks))
z_to_focus = z - self.crystal.length_mks/2.0
if self._calc_gouy:
self.phi_p *= self.pump_beam.calculate_gouy_phase(z_to_focus, self.n_p)
self.phi_s *= self.sgnl_beam.calculate_gouy_phase(z_to_focus, self.n_s)
self.phi_i *= self.idlr_beam.calculate_gouy_phase(z_to_focus, self.n_i)
if not self.disable_SPM:
self.gen_jl(y)
if self._wg_mode == False:
waist_p = self.pump_beam.calculate_waist(z_to_focus, n_s = self.n_p)
waist_s = self.sgnl_beam.calculate_waist(z_to_focus, n_s = self.n_s)
waist_i = self.idlr_beam.calculate_waist(z_to_focus, n_s = self.n_i)
R_p = self.pump_beam.calculate_R(z_to_focus, n_s = self.n_p)
R_s = self.sgnl_beam.calculate_R(z_to_focus, n_s = self.n_s)
R_i = self.idlr_beam.calculate_R(z_to_focus, n_s = self.n_i)
# Geometric scaling factors (Not used)
# P_to_a is the conversion between average power and "effective intensity"
# The smallest area is used, as this is the part of the field which is
# interacting. The larger fields must be scaled with a mode-match integral.
# The mode-match integral-based scale factor for Gaussian beams is
#      4 * w1**2 * w2**2
#    ----------------------
#     (w1**2 + w2**2)**2
# This is the power coupling, so multiply the 2 larger fields by sqrt(MMI).
#
# Attempting to limit the interaction via mode-match integrals appears to
# (1) not conserve energy and (2) INCREASE the interaction strength.
# I'm not totally sure why, but it seems like the best course of action
# is to match confocal parameters (which is done in __init__, above).
# Overlap integrals are left intact, in case we want to plot them.
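# Numeric illustration of the MMI factor above (hypothetical waists): for equal
# waists w1 == w2 the factor is 4*w**4 / (2*w**2)**2 = 1 (perfect overlap), while
# for w1 = 2*w2 it drops to 4*(4*w2**2)*(w2**2) / (5*w2**2)**2 = 16/25 = 0.64,
# so the two larger fields would be scaled by sqrt(0.64) = 0.8.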
if self._wg_mode == False:
if (np.mean(waist_p) <= np.mean(waist_s)) and (np.mean(waist_p) <= np.mean(waist_i)):
self.pump_P_to_a = self.pump_beam.rtP_to_a_2(self.pump,self.crystal,z_to_focus)
self.sgnl_P_to_a = self.sgnl_beam.rtP_to_a_2(self.sgnl, self.crystal, None, waist_p)
self.idlr_P_to_a = self.idlr_beam.rtP_to_a_2(self.idlr, self.crystal, None, waist_p)
self.overlap_pump = 1.0
self.overlap_sgnl = np.sqrt(self.sgnl_beam.calc_overlap_integral(z_to_focus, self.sgnl, self.pump, self.pump_beam, self.crystal))
self.overlap_idlr = np.sqrt(self.idlr_beam.calc_overlap_integral(z_to_focus, self.idlr, self.pump, self.pump_beam, self.crystal))
elif np.mean(waist_s) <= np.mean(waist_i):
self.sgnl_P_to_a = self.sgnl_beam.rtP_to_a_2(self.sgnl,self.crystal,z_to_focus)
self.pump_P_to_a = self.pump_beam.rtP_to_a_2(self.pump, self.crystal, None, waist_s)
self.idlr_P_to_a = self.idlr_beam.rtP_to_a_2(self.idlr, self.crystal, None, waist_s)
self.overlap_pump = np.sqrt(self.pump_beam.calc_overlap_integral(z_to_focus, self.pump, self.sgnl, self.sgnl_beam, self.crystal))
self.overlap_sgnl = 1.0
self.overlap_idlr = np.sqrt(self.idlr_beam.calc_overlap_integral(z_to_focus, self.idlr, self.sgnl, self.sgnl_beam, self.crystal))
else:
self.idlr_P_to_a = self.idlr_beam.rtP_to_a_2(self.idlr,self.crystal, None, waist_i)
self.sgnl_P_to_a = self.sgnl_beam.rtP_to_a_2(self.sgnl, self.crystal, None, waist_i)
self.pump_P_to_a = self.pump_beam.rtP_to_a_2(self.pump, self.crystal, None, waist_i)
self.overlap_pump = np.sqrt(self.pump_beam.calc_overlap_integral(z_to_focus, self.pump, self.idlr, self.idlr_beam, self.crystal))
self.overlap_sgnl = np.sqrt(self.sgnl_beam.calc_overlap_integral(z_to_focus, self.sgnl, self.idlr, self.idlr_beam, self.crystal))
self.overlap_idlr = 1.0
if self._plot_beam_overlaps and abs(z-self.last_calc_z) > self.crystal.length_mks*0.001:
plt.subplot(131)
plt.plot(z*1e3, np.mean(self.overlap_pump), '.b')
plt.plot(z*1e3, np.mean(self.overlap_sgnl), '.k')
plt.plot(z*1e3, np.mean(self.overlap_idlr), '.r')
plt.subplot(132)
plt.plot(z*1e3, np.mean(waist_p)*1e6, '.b')
plt.plot(z*1e3, np.mean(waist_s)*1e6, '.k')
plt.plot(z*1e3, np.mean(waist_i)*1e6, '.r')
plt.subplot(133)
plt.plot(z*1e3, np.mean(R_p), '.b')
plt.plot(z*1e3, np.mean(R_s), '.k')
plt.plot(z*1e3, np.mean(R_i), '.r')
self.last_calc_z = z
else:
# Life is simple in waveguide mode (for large V number WG)
self.pump_P_to_a = self.pump_beam.rtP_to_a(self.n_p)
self.sgnl_P_to_a = self.sgnl_beam.rtP_to_a(self.n_s)
self.idlr_P_to_a = self.idlr_beam.rtP_to_a(self.n_i)
self.AsAi[:] = np.power(self.phi_p, -1.0)*\
self.fftobject.conv(self.sgnl_P_to_a * self.As(y) * self.phi_s,
self.idlr_P_to_a * self.Ai(y) * self.phi_i)
self.ApAs[:] = np.power(self.phi_i, -1.0)*\
self.fftobject.corr(self.pump_P_to_a * self.Ap(y) * self.phi_p,
self.sgnl_P_to_a * self.As(y) * self.phi_s)
self.ApAi[:] = np.power(self.phi_s, -1.0)*\
self.fftobject.corr(self.pump_P_to_a * self.Ap(y) * self.phi_p,
self.idlr_P_to_a * self.Ai(y) * self.phi_i)
L = self.veclength
# np.sqrt(2 / (c * eps * pi * waist**2)) converts to electric field
#
# From the Seres & Hebling paper,
# das/dz + i k as = F(ap, ai)
# The change of variables is as = As exp[-ikz], so that
#
# das/dz = dAs/dz exp[-ikz] - ik As exp[-ikz]
# das/dz + ik as = ( dAs/dz exp[-ikz] - ik As exp[-ikz] ) + i k As exp[-ikz]
# = dAs/dz exp[-ikz]
# The integration is done in the As variables, to remove the fast k
# dependent term. The procedure is:
# 1) Calculate F(ai(Ai), ap(Ap))
# 2) Multiply by exp[+ikz]
# If the chi-3 terms are included:
if not self.disable_SPM:
logging.warn('Warning: this code has not been updated with correct field-area scaling. Fix it if you use it!')
jpap = self.phi_p**-1 * self.fftobject.conv(self.jl_p, self.Ap(y) * self.phi_p) * \
np.sqrt(2. / (constants.speed_of_light * constants.epsilon_0 * np.pi * waist_p**2))
jsas = self.phi_s**-1 * self.fftobject.conv(self.jl_s, self.As(y) * self.phi_s) * \
np.sqrt(2. / (constants.speed_of_light* constants.epsilon_0 * np.pi * waist_s**2))
jiai = self.phi_i**-1 * self.fftobject.conv(self.jl_i, self.Ai(y) * self.phi_i) * \
np.sqrt(2. / (constants.speed_of_light* constants.epsilon_0 * np.pi * waist_i**2))
dydx[0 :L ] = 1j * 2 * self.AsAi * self.pump.W_mks * deff / (constants.speed_of_light* self.n_p) / \
self.pump_P_to_a -1j * self.pump.w_hz * self.crystal.n2 / (2.*np.pi*self.c) * jpap
dydx[L :2*L] = 1j * 2 * self.ApAi * self.sgnl.W_mks * deff / (constants.speed_of_light* self.n_s) / \
self.sgnl_P_to_a -1j * self.sgnl.w_hz * self.crystal.n2 / (2.*np.pi*self.c) * jsas
dydx[2*L:3*L] = 1j * 2 * self.ApAs * self.idlr.W_mks * deff / (constants.speed_of_light* self.n_i) / \
self.idlr_P_to_a -1j * self.idlr.w_hz * self.crystal.n2 / (2.*np.pi*self.c) * jiai
else:
# Only chi-2:
# pump
dydx[0 :L ] = 1j * 2 * self.AsAi * self.pump.W_mks * deff / (constants.speed_of_light * self.n_p) / \
(self.pump_P_to_a)
# signal
dydx[L :2*L] = 1j * 2 * self.ApAi * self.sgnl.W_mks * deff / (constants.speed_of_light * self.n_s) / \
(self.sgnl_P_to_a)
# idler
dydx[2*L:3*L] = 1j * 2 * self.ApAs * self.idlr.W_mks * deff / (constants.speed_of_light * self.n_i) / \
(self.idlr_P_to_a)
def process_stepper_output(self, solver_out):
""" Post-process output of ODE solver.
The saved data from an ODE solve are the pump, signal, and idler in
the dispersionless reference frame. To see the pulses "as they really
are", this dispersion must be added back in.
Parameters
----------
solver_out
Output class instance from ODESolve
Returns
---------
dfg_results
Instance of dfg_results_interface class
"""
npoints = self.veclength
pump_out = solver_out.ysave[0:solver_out.count, 0 : npoints]
sgnl_out = solver_out.ysave[0:solver_out.count, npoints : 2*npoints]
idlr_out = solver_out.ysave[0:solver_out.count, 2*npoints: 3*npoints]
zs = solver_out.xsave[0:solver_out.count]
print ('Pulse velocity is ~ '+str(self.approx_pulse_speed*1e-12)+'mm/fs' )
print('ks: '+str(self.k_p_0)+' '+str(self.k_s_0)+' '+str(self.k_i_0))
pump_pulse_speed = constants.speed_of_light / self.n_p[self._pump_center_idx]
for i in range(solver_out.count):
z = zs[i]
t = z / pump_pulse_speed
phi_p = np.exp(1j * ((self.k_p + self.k_p_0) * z - t * self.pump.W_mks) )
phi_s = np.exp(1j * ((self.k_s + self.k_s_0) * z - t * self.sgnl.W_mks))
phi_i = np.exp(1j * ((self.k_i + self.k_i_0) * z - t * self.idlr.W_mks))
pump_out[i, :] *= phi_p
sgnl_out[i, :] *= phi_s
idlr_out[i, :] *= phi_i
interface = dfg_results_interface(self, pump_out, sgnl_out, idlr_out, zs)
return interface
def format_overlap_plots(self):
plt.subplot(131)
plt.ylabel('Overlap with smallest beam')
plt.xlabel('Crystal length (mm)')
plt.subplot(132)
plt.ylabel('Beam waist (um)')
plt.xlabel('Crystal length (mm)')
plt.subplot(133)
plt.ylabel('Beam curvature (m)')
plt.xlabel('Crystal length (mm)')
class dfg_results_interface:
"""
Interface to output of DFG solver. This class provides a clean way
of working with the DFG field using the Pulse class.
Notes
-----
After initialization, calling::
get_{pump,sgnl,idlr}(n)
will set the dfg results class' "pulse" instance to the appropriate
field and return it.
Example
-------
To plot the 10th saved signal field, you would call::
p = dfg_results_interface.get_sgnl(10-1)
plt.plot(p.T_ps, abs(p.AT)**2 )
To get the actual position (z [meters]) that this corresponds to,
call::
z = dfg_results_interface.get_z(10-1)
"""
n_saves = 0
pump_field = []
sgnl_field = []
idlr_field = []
def __init__(self, integrand_instance, pump, sgnl, idlr, z):
self.pulse = integrand_instance.pump.create_cloned_pulse()
self.pump_wl = integrand_instance.pump.center_wavelength_nm
self.sgnl_wl = integrand_instance.sgnl.center_wavelength_nm
self.idlr_wl = integrand_instance.idlr.center_wavelength_nm
self.pump_field = pump[:]
self.sgnl_field = sgnl[:]
self.idlr_field = idlr[:]
self.pump_max_field = np.max(abs(pump))
self.sgnl_max_field = np.max(abs(sgnl))
self.idlr_max_field = np.max(abs(idlr))
self.pump_max_temporal = np.max(abs(np.fft.fft(pump)))
self.sgnl_max_temporal = np.max(abs(np.fft.fft(sgnl)))
self.idlr_max_temporal = np.max(abs(np.fft.fft(idlr)))
self.zs = z[:]
self.n_saves = len(z)
print('wls: '+str(self.pump_wl)+' '+str(self.sgnl_wl)+' '+str(self.idlr_wl))
def get_z(self, n):
return self.zs[n]
def get_pump(self, n):
self.pulse.set_AW(self.pump_field[n])
self.pulse.set_center_wavelength_nm(self.pump_wl)
return self.pulse
def get_sgnl(self, n):
self.pulse.set_AW(self.sgnl_field[n])
self.pulse.set_center_wavelength_nm(self.sgnl_wl)
return self.pulse
def get_idlr(self, n):
self.pulse.set_AW(self.idlr_field[n])
self.pulse.set_center_wavelength_nm(self.idlr_wl)
return self.pulse
class fftcomputer:
def __init__(self, gridsize):
self.gridsize = gridsize
if PYFFTW_AVAILABLE:
self.corrin = pyfftw.empty_aligned(gridsize*2,'complex128')
self.corrtransfer = pyfftw.empty_aligned(gridsize*2,'complex128')
self.fft = pyfftw.FFTW(self.corrin,self.corrtransfer,direction='FFTW_FORWARD')
self.backout = pyfftw.empty_aligned(gridsize*2,'complex128')
self.ifft = pyfftw.FFTW(self.corrtransfer,self.backout,direction='FFTW_BACKWARD')
else:
self.corrin = np.zeros( (gridsize*2,), dtype = np.complex128)
self.corrtransfer = np.zeros( (gridsize*2,), dtype = np.complex128)
self.backout = np.zeros( (gridsize*2,), dtype = np.complex128)
def pyfft_style_fft():
self.corrtransfer[:] = np.fft.fft(self.corrin)
return self.corrtransfer
def pyfft_style_ifft():
self.backout[:] = np.fft.ifft(self.corrtransfer)
return self.backout
self.fft = pyfft_style_fft
self.ifft = pyfft_style_ifft
def corr(self, data1, data2):
n = self.gridsize
self.corrin[:] = 0
self.corrin[:n] = data2
temp = np.conjugate(np.copy(self.fft()))
self.corrin[:] = 0
self.corrin[:n] = data1
ans = self.fft()
ans[:] = ans*temp
return fftpack.ifftshift(np.copy(self.ifft()))[(n>>1):n+(n>>1)]
def conv(self, resp, sig):
n = self.gridsize
self.corrin[:] = 0
self.corrin[n:] = resp
temp = np.copy(self.fft())
self.corrin[:] = 0
self.corrin[:n] = sig
ans = self.fft()
ans[:] = ans*temp
return fftpack.ifftshift(np.copy(self.ifft()))[(n>>1):n+(n>>1)]
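# Minimal usage sketch (hypothetical sizes, not part of the original module):
#   fftobj = fftcomputer(1024)
#   a = np.random.randn(1024) + 1j*np.random.randn(1024)
#   b = np.random.randn(1024) + 1j*np.random.randn(1024)
#   conv_ab = fftobj.conv(a, b)   # zero-padded FFT convolution, length 1024
#   corr_ab = fftobj.corr(a, b)   # FFT cross-correlation, length 1024
# Both methods reuse the preallocated (pyFFTW or numpy) buffers, which is why
# dfg_problem.deriv() can call them on every integration step without reallocating.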
| gpl-3.0 |
cowlicks/blaze | blaze/compute/tests/test_mysql_compute.py | 3 | 1619 | from __future__ import absolute_import, print_function, division
from getpass import getuser
import pytest
sa = pytest.importorskip('sqlalchemy')
pytest.importorskip('pymysql')
from odo import odo, drop, discover
import pandas as pd
from blaze import symbol, compute
from blaze.utils import example, normalize
@pytest.yield_fixture(scope='module')
def data():
try:
t = odo(
example('nyc.csv'),
'mysql+pymysql://%s@localhost/test::nyc' % getuser()
)
except sa.exc.OperationalError as e:
pytest.skip(str(e))
else:
try:
yield t.bind
finally:
drop(t)
@pytest.fixture
def db(data):
return symbol('test', discover(data))
def test_agg_sql(db, data):
subset = db.nyc[['pickup_datetime', 'dropoff_datetime', 'passenger_count']]
expr = subset[subset.passenger_count < 4].passenger_count.min()
result = compute(expr, data)
expected = """
select
min(alias.passenger_count) as passenger_count_min
from
(select
nyc.passenger_count as passenger_count
from
nyc
where nyc.passenger_count < %s) as alias
"""
assert normalize(str(result)) == normalize(expected)
def test_agg_compute(db, data):
subset = db.nyc[['pickup_datetime', 'dropoff_datetime', 'passenger_count']]
expr = subset[subset.passenger_count < 4].passenger_count.min()
result = compute(expr, data)
passenger_count = odo(compute(db.nyc.passenger_count, {db: data}), pd.Series)
assert passenger_count[passenger_count < 4].min() == result.scalar()
| bsd-3-clause |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/doc/mpl_toolkits/axes_grid/examples/demo_axes_rgb.py | 8 | 1876 | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.axes_rgb import make_rgb_axes, RGBAxes
def get_demo_image():
from matplotlib.cbook import get_sample_data
f = get_sample_data("axes_grid/bivariate_normal.npy", asfileobj=False)
z = np.load(f)
# z is a numpy array of 15x15
return z, (-3,4,-4,3)
def get_rgb():
Z, extent = get_demo_image()
Z[Z<0] = 0.
Z = Z/Z.max()
R = Z[:13,:13]
G = Z[2:,2:]
B = Z[:13,2:]
return R, G, B
def make_cube(r, g, b):
ny, nx = r.shape
R = np.zeros([ny, nx, 3], dtype="d")
R[:,:,0] = r
G = np.zeros_like(R)
G[:,:,1] = g
B = np.zeros_like(R)
B[:,:,2] = b
RGB = R + G + B
return R, G, B, RGB
def demo_rgb():
fig = plt.figure(1)
fig.clf()
ax = fig.add_subplot(111)
ax_r, ax_g, ax_b = make_rgb_axes(ax, pad=0.02)
#fig.add_axes(ax_r)
#fig.add_axes(ax_g)
#fig.add_axes(ax_b)
r, g, b = get_rgb()
im_r, im_g, im_b, im_rgb = make_cube(r, g, b)
kwargs = dict(origin="lower", interpolation="nearest")
ax.imshow(im_rgb, **kwargs)
ax_r.imshow(im_r, **kwargs)
ax_g.imshow(im_g, **kwargs)
ax_b.imshow(im_b, **kwargs)
def demo_rgb2():
fig = plt.figure(2)
ax = RGBAxes(fig, [0.1, 0.1, 0.8, 0.8], pad=0.0)
#fig.add_axes(ax)
#ax.add_RGB_to_figure()
r, g, b = get_rgb()
kwargs = dict(origin="lower", interpolation="nearest")
ax.imshow_rgb(r, g, b, **kwargs)
ax.RGB.set_xlim(0., 9.5)
ax.RGB.set_ylim(0.9, 10.6)
for ax1 in [ax.RGB, ax.R, ax.G, ax.B]:
for sp1 in ax1.spines.values():
sp1.set_color("w")
for tick in ax1.xaxis.get_major_ticks() + ax1.yaxis.get_major_ticks():
tick.tick1line.set_mec("w")
tick.tick2line.set_mec("w")
return ax
demo_rgb()
ax = demo_rgb2()
plt.show()
| mit |
jpinedaf/pyspeckit | examples/gbt_sdfits_example.py | 8 | 2005 | from __future__ import print_function
try:
from astropy.io import fits as pyfits
except ImportError:
import pyfits
from pylab import *;import numpy,scipy,matplotlib;
import pyspeckit
from pyspeckit.spectrum.readers import gbt
sdfitsfile = '/Users/adam/observations/gbt/h2co_pilot/AGBT09C_049_02.raw.acs.fits'
gbtdata = pyfits.open(sdfitsfile)
objectname = 'G32.80+0.19'
bintable=gbtdata[1]
A049 = gbt.GBTSession(bintable)
whobject = bintable.data['OBJECT'] == objectname
blocks = A049.load_target(objectname).blocks
onMoff1 = blocks['A9OFF1'] - blocks['A9OFF2']
onMoff1on = blocks['A9ON1'] - blocks['A9ON2']
onoffs = dict((name[:-1],blocks[name[:-1]+'1'] - blocks[name[:-1]+'2']) for name in blocks if name[-1]=='1')
av1 = onMoff1.average()
av1.plotter()
av2 = onMoff1on.average()
av2.plotter(axis=av1.plotter.axis,clear=False,color='b')
calonA9 = blocks['A9ON1'].average()
caloffA9 = blocks['A9OFF1'].average()
print("tsys: ",gbt.dcmeantsys(calonA9, caloffA9, calonA9.header['TCAL']))
G32 = reduced_nods = A049.reduce_target(objectname, verbose=True)
print(G32)
G32.average_IFs(debug=True)
G32.average_pols()
G32.spectra['if0'].plotter()
G37 = A049.reduce_target('G37.87-0.40')
print(G37)
G37.average_IFs(debug=True)
G37.spectra['if0'].plotter()
name = 'A9'
num = '1'
av2 = blocks[name+'OFF'+num].average(debug=True)
#while av2.data.mean() > 0.01:
# av2 = blocks[name+'OFF'+num].average(debug=True)
# print(av2)
colors = ['k','b','r','g']
for fd,col in zip(['A9','A13','C25','C29'],colors):
G32.reduced_scans[fd].plotter(figure=figure(4), clear=False,color=col)
G32['if1'].plotter(figure=figure(4), color='magenta', clear=False)
G32['if1fd1'].plotter(figure=figure(4), color='orange', clear=False)
G32['if1fd2'].plotter(figure=figure(4), color='cyan', clear=False)
G32b = pyspeckit.Spectrum('/Users/adam/work/h2co/data/pilot/G32.80+0.19_h2co_Jy.fits')
G32b.xarr.convert_to_unit('Hz')
((G32b+G32b.header['CONTINUU'])*1.91).plotter(figure=figure(4),clear=False,color='purple')
| mit |
bmazin/ARCONS-pipeline | quicklook/h5quicklook.py | 1 | 34191 | #-----------------------------------
# h5quicklook_v10.py
#
# Written by Seth Meeker 07/16/11
#
# standalone program for generating images and pulse height histograms from h5 observation files
# inherits most of its functionality from the arcons GUI's quicklook imaging
# version 6 updated to subtract dark file
# version 7 updated to reimplement different aperture selections and plot histograms with dark counts subtracted
# version 9 updated to standardize orientation of images
# version 10 updated to display images from arbitrarily sized arrays
#
# KNOWN BUG: does not properly rescale histogram when multiple spectra are plotted. Doesn't matter though
# since it makes no sense to plot multiple pixels while the spectra are uncalibrated.
#
# ----------------------------------
#import standard python libraries
import sys
import time
import struct
import os
from os.path import isfile
#import installed libraries
from matplotlib import pylab
from matplotlib import pyplot as plt
from numpy import *
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from tables import *
#import my functions
from h5quicklook_v8_gui import Ui_h5quicklook
c = 3.0E17 #[nm/s]
h = 4.13567E-15 #[ev*s]
bmapFromFile = False
class StartQt4(QMainWindow):
def __init__(self,parent=None):
QWidget.__init__(self, parent)
self.ui = Ui_h5quicklook()
self.ui.setupUi(self)
#set some important permanent parameters (array size, energy range, directories)
#self.nxpix = 32
#self.nypix = 32
#self.total_pix = self.nxpix*self.nypix
#self.imagex = self.ui.tv_image.width()
#self.imagey = self.ui.tv_image.height()
self.imagex = 308
self.imagey = 322
self.position = self.ui.tv_image.pos()
self.Emin = 0.92 #eV corresponds to 1350 nm
self.Emax = 3.18 #eV corresponds to 390 nm (changed from original 4.2 ev)
self.bintype = "wavelength"
self.pix_select_mode = "rect"
#store list of pixels we want in our histogram
self.histogram_pixel = []
self.plot_type = "bars" #change to "points" for histogram plotted as point scatter, anything else for bars
#read in bad pixels from file so they can be "X"d out in the quicklook
self.bad_pix = []
self.flatfile = ""
self.darkfile = ""
self.skyfile = ""
try:
self.beammapfile = os.environ['MKID_BEAMMAP_PATH']#"beamimage.h5"
except KeyError:
self.beammapfile = ""
#load beam map from default beammap directory
if bmapFromFile == True:
self.loadbeammap()
#selection of bins is disabled. Now automatically makes bins the same as the data range for 1 to 1 binning.
self.ui.nbins.setEnabled(False)
#use mouse to select pixel from tv_image, also triggers a new spectrum to be displayed
self.ui.tv_image.mousePressEvent = self.start_pixel_select
self.ui.tv_image.mouseReleaseEvent = self.end_pixel_select
#signal from image thread to make spectrum
#QObject.connect(self.image_thread,SIGNAL("new_spectrum"),self.display_spectra)
self.ui.histogram_plot.canvas.xtitle = "Pulse Heights"
self.ui.histogram_plot.canvas.format_labels()
QObject.connect(self.ui.plot_pushButton, SIGNAL("clicked()"), self.make_plots)
#button signals
QObject.connect(self.ui.mode_buttonGroup, SIGNAL("buttonClicked(int)"), self.set_pix_select_mode)
QObject.connect(self.ui.browse_button, SIGNAL("clicked()"), self.file_dialog)
QObject.connect(self.ui.displayimage_button, SIGNAL("clicked()"), self.make_image)
QObject.connect(self.ui.saveimage_button,SIGNAL("clicked()"), self.save_image)
QObject.connect(self.ui.time_up, SIGNAL("clicked()"), self.time_up)
QObject.connect(self.ui.time_down, SIGNAL("clicked()"), self.time_down)
QObject.connect(self.ui.choosedark,SIGNAL("clicked()"), self.choosedark)
QObject.connect(self.ui.choosesky,SIGNAL("clicked()"), self.choosesky)
QObject.connect(self.ui.histogram_radio, SIGNAL("toggled(bool)"), self.resize_window)
#QObject.connect(self.ui.satpercent, SIGNAL("valueChanged(double)"), self.make_image)
#open dialog to select data path
def resize_window(self):
#if spectrum options button is clicked resize window to show/hide options
if self.ui.histogram_radio.isChecked():
self.resize(850,1060)
else:
self.resize(850,750)
def file_dialog(self):
self.newdatafile = QFileDialog.getOpenFileName(parent=None, caption=QString(str("Choose Obs File")),directory = ".",filter=QString(str("H5 (*.h5)")))
if len(self.newdatafile)!=0:
self.datafile = self.newdatafile
splitname = self.datafile.split("/")
justname = splitname[-1]
self.ui.filename_label.setText(str(justname))
self.blankscene = QGraphicsScene()
self.blankscene.clear()
self.ui.tv_image.setScene(self.blankscene)
self.ui.tv_image.show()
self.ui.histogram_plot.canvas.ax.clear()
self.ui.histogram_plot.canvas.format_labels()
self.ui.histogram_plot.canvas.draw()
if bmapFromFile == False:
self.loadObsBmap()
self.display_obs_time()
self.display_header()
self.get_ut()
def choosedark(self):
self.darkfile = QFileDialog.getOpenFileName(parent=None, caption=QString(str("Choose Dark File")),directory = ".",filter=QString(str("H5 (*.h5)")))
self.darkfile = str(self.darkfile)
print "loading darkfile",str(self.darkfile)
def choosesky(self):
self.skyfile = QFileDialog.getOpenFileName(parent=None, caption=QString(str("Choose Sky File")),directory = ".",filter=QString(str("H5 (*.h5)")))
self.skyfile = str(self.skyfile)
print "loading skyfile",str(self.skyfile)
def save_image(self):
self.savefile = QFileDialog.getSaveFileName(parent=None, caption=QString(str("Save File")), directory = ".")
os.rename(self.imfile, self.savefile)
def loadObsBmap(self):
h5file = openFile(str(self.datafile), mode = "r")
self.bmap = h5file.root.beammap.beamimage.read()
self.nxpix = shape(self.bmap)[1]
self.nypix = shape(self.bmap)[0]
h5file.close()
print "Loaded beammap from obs file"
def get_ut(self):
#try:
h5file = openFile(str(self.datafile), mode = "r")
htable = h5file.root.header.header.read()
h5address = h5file.root.beammap.beamimage.read()[0][0]
h5time = int(h5address.split('t')[1])
try:
self.ut = int(htable["unixtime"])
except ValueError:
print "unixtime not found, checking for deprecated ut field"
self.ut = int(htable["ut"])
if self.ut != h5time:
self.ut = h5time
h5file.close()
for i in xrange(self.nypix):
for j in xrange(self.nxpix):
head = str(self.bmap[i][j])
if head.split('t')[0] == '':
self.bmap[i][j] = head + 't' + str(self.ut)
else:
self.bmap[i][j] = head.split('t')[0] + 't' + str(self.ut)
print "Pixel addresses updated in beammap"
#except:
#print "Unable to get UT from header. Beammap will not have correct time in pixel strings"
def display_obs_time(self):
try:
h5file = openFile(str(self.datafile), mode = "r")
htable = h5file.root.header.header.read()
obstime = int(htable["exptime"])
self.ui.obstime_lcd.display(obstime)
self.ui.endtime_spinbox.setValue(obstime)
h5file.close()
self.ui.increment.setMaximum(obstime)
self.ui.endtime_spinbox.setMaximum(obstime)
self.ui.starttime_spinbox.setMaximum(obstime-1)
except:
print "Unable to load Header, checking beammap"
h5file = openFile(str(self.datafile), mode = "r")
bmap = h5file.root.beammap.beamimage.read()
#bmap = rot90(bmap,2)
h5file.close()
self.nxpix = shape(bmap)[1]
self.nypix = shape(bmap)[0]
self.scalex=float(self.imagex/float(self.nxpix))
self.scaley=float(self.imagey/float(self.nypix))
#if self.nxpix > self.nypix:
# self.scaley = self.scaley * float(self.nypix/float(self.nxpix))
#else:
# self.scalex = self.scalex * float(self.nxpix/float(self.nypix))
self.bad_pix = []
tempbmap = reshape(bmap,self.nxpix*self.nypix)
for i in range(self.nxpix*self.nypix):
if len(concatenate(h5file.root._f_getChild(tempbmap[i])[:])) == 0:
self.bad_pix.append(i)
for i in range(self.nxpix):
for j in range(self.nypix):
try:
photons= h5file.root._f_getChild(bmap[i][j]).read()
obstime = len(photons)
self.ui.obstime_lcd.display(obstime)
self.ui.endtime_spinbox.setValue(obstime)
self.ui.increment.setMaximum(obstime)
self.ui.endtime_spinbox.setMaximum(obstime)
self.ui.starttime_spinbox.setMaximum(obstime-1)
return
except NoSuchNodeError:
continue
print "unable to find any pixels with data. Check beammap is present"
def display_header(self):
h5file = openFile(str(self.datafile), mode = "r")
try:
header = h5file.root.header.header
self.ui.header_info.clear()
#titles = ['LOfreq', 'airmass', 'alt', 'az', 'beammap', 'calfile', 'datadir', 'dec', 'epoch', 'equinox', 'exptime', 'focus', 'inatten', 'instrument', 'jd', 'localtime', 'lst', 'obsalt', 'obslat', 'obslong', 'outatten','parallactic', 'platescl', 'ra', 'target', 'telescope', 'timezone', 'ut']
titles = header.colnames
info = header[0]
for i in range(len(titles)):
self.ui.header_info.append(titles[i] + ":\n" + str(info[i]) + "\n")
except NoSuchNodeError:
self.ui.header_info.clear()
self.ui.header_info.append('No header info')
h5file.close()
def loadbeammap(self):
bmfile = openFile(self.beammapfile, 'r')
#read beammap in to memory to create beam image
self.bmap = bmfile.root.beammap.beamimage.read()
#print self.bmap[0][0]
self.nxpix = shape(self.bmap)[1]
self.nypix = shape(self.bmap)[0]
#print self.nxpix
#print self.nypix
#self.bmap = rot90(self.bmap)
#self.bmap = flipud(self.bmap)
bmfile.close()
print "Beammap loaded from " +str(self.beammapfile)
def set_dark_sub(self):
#Use radio button to set if we want sky subtraction to be on or off
if self.ui.darksub_checkbox.isChecked():
self.dark_subtraction = True
else:
self.dark_subtraction = False
def set_sky_sub(self):
#Use radio button to set if we want sky subtraction to be on or off
if self.ui.skysub_checkbox.isChecked():
self.sky_subtraction = True
else:
self.sky_subtraction = False
def set_flat_sub(self):
pass
def set_image_color(self):
if self.ui.color_checkbox.isChecked():
self.color = True
else:
self.color = False
def unpack_file(self, file):
f = open(file, 'rb')
inputdata = f.read()
f.close()
numbers = struct.unpack((self.nxpix*self.nypix)*'10I', inputdata)
darray = reshape(numbers,((self.nxpix*self.nypix),10))
return darray
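# Illustrative note (hypothetical 32x32 array): for 1024 pixels the format string
# becomes 1024*'10I' (ten unsigned 32-bit ints per pixel), so struct.unpack yields
# 10240 integers, which reshape() then folds into a (1024, 10) array with one row
# of ten values per pixel.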
def make_image(self):
print "Generating Image..."
bmap = self.bmap
self.set_dark_sub()
#self.set_image_color()
self.set_flat_sub()
self.set_sky_sub()
if self.ui.darksub_checkbox.isChecked():
self.dark_subtraction = True
if self.ui.skysub_checkbox.isChecked():
self.sky_subtraction = True
nphot=0
ti = self.ui.starttime_spinbox.value()
tf = self.ui.endtime_spinbox.value()
if ti>tf:
copyti = ti
ti=tf
tf=copyti
print "WARNING: selected ti > tf. They were switched for you."
h5file = openFile(str(self.datafile), 'r')
#bmap = h5file.root.beammap.beamimage.read()
#bmap = rot90(bmap,2)
self.nxpix = shape(bmap)[1]
self.nypix = shape(bmap)[0]
self.scalex=float(self.imagex/float(self.nxpix))
self.scaley=float(self.imagey/float(self.nypix))
#if self.nxpix > self.nypix:
# self.scaley = self.scaley * float(self.nypix/float(self.nxpix))
#else:
# self.scalex = self.scalex * float(self.nxpix/float(self.nypix))
all_photons = []
for j in range(self.nxpix*self.nypix):
all_photons.append([])
if self.dark_subtraction == True:
darkrate = zeros((self.nypix,self.nxpix))
darkh5 = openFile(str(self.darkfile), 'r')
darkbmap = darkh5.root.beammap.beamimage.read()
#darkbmap = rot90(darkbmap,2)
if self.sky_subtraction == True:
skyrate = zeros((self.nypix,self.nxpix))
skyh5 = openFile(str(self.skyfile), 'r')
skybmap = skyh5.root.beammap.beamimage.read()
#skybmap = rot90(skybmap,2)
#for now set dark and sky bmaps to same as observation beammap. Should usually be the case
skybmap = self.bmap
darkbmap = self.bmap
counts = zeros((self.nypix,self.nxpix))
for i in xrange(self.nypix):
for j in xrange(self.nxpix):
if bmap[i][j] == '':
counts[i][j]=0
continue
try:
photons = concatenate(h5file.root._f_getChild(bmap[i][j])[ti:tf])
counts[i][j]=len(photons)
if self.dark_subtraction == True:
dtime = float(len(darkh5.root._f_getChild(darkbmap[i][j])))
photons= concatenate(darkh5.root._f_getChild(darkbmap[i][j])[:])
darkcounts = len(photons)
#darkheights= right_shift(photons,32)%4096
if darkcounts==0:
pass
else:
counts[i][j] -= (darkcounts*(tf-ti)/float(dtime))
#print "dark subtracted..."
else:
pass
if counts[i][j]<0:
counts[i][j]=0
if self.sky_subtraction == True:
if self.dark_subtraction != True:
stime = float(len(skyh5.root._f_getChild(skybmap[i][j])))
photons= concatenate(skyh5.root._f_getChild(skybmap[i][j])[:])
skycounts = len(photons)
if skycounts==0:
pass
else:
counts[i][j] -= (skycounts*(tf-ti)/float(stime))
else:
stime = float(len(skyh5.root._f_getChild(skybmap[i][j])))
photons= concatenate(skyh5.root._f_getChild(skybmap[i][j])[:])
skycounts = len(photons)
if skycounts==0:
pass
else:
skycounts -= (darkcounts*(stime)/float(dtime))
#print skysubtracted
if skycounts <0:
skycounts=0
counts[i][j] -= (skycounts*(tf-ti)/float(stime))
else:
pass
if counts[i][j]<0:
counts[i][j]=0
except NoSuchNodeError:
counts[i][j]=0
#totalhist += subtracted
nphot += counts[i][j]
photon_count = counts
im = photon_count
photon_count = flipud(photon_count)
if self.ui.man_contrast.isChecked():
self.vmax = self.ui.vmax.value()
self.vmin = self.ui.vmin.value()
else:
indices = sort(reshape(photon_count,self.nxpix*self.nypix))
brightest = int((self.ui.satpercent.value()/100.0)*(self.nxpix*self.nypix))
self.vmax = indices[(-1*brightest)]
self.vmin = 0
fig = plt.figure(figsize=(0.01*self.nxpix,0.01*self.nypix), dpi=100, frameon=False)
im = plt.figimage(photon_count, cmap='gray', vmin = self.vmin, vmax = self.vmax)
self.imfile = "TV_frame.png"
plt.savefig(self.imfile, pad_inches=0)
if self.dark_subtraction == True:
darkh5.close()
if self.sky_subtraction == True:
skyh5.close()
h5file.close()
print "done making image."
self.display_image()
if len(self.histogram_pixel) != 0:
self.make_plots()
def make_plots(self):
if self.ui.histogram_radio.isChecked():
self.plot_histogram()
elif self.ui.timestream_radio.isChecked():
self.plot_timestream()
else:
print "Please select plot type"
pass
def time_up(self):
ti = self.ui.starttime_spinbox.value()
tf = self.ui.endtime_spinbox.value()
ti += self.ui.increment.value()
tf += self.ui.increment.value()
self.ui.starttime_spinbox.setValue(ti)
self.ui.endtime_spinbox.setValue(tf)
self.make_image()
def time_down(self):
ti = self.ui.starttime_spinbox.value()
tf = self.ui.endtime_spinbox.value()
ti -= self.ui.increment.value()
tf -= self.ui.increment.value()
self.ui.starttime_spinbox.setValue(ti)
self.ui.endtime_spinbox.setValue(tf)
self.make_image()
def makepixmap(self, imagefile, scalex=1, scaley=1):
'''Given an image file this function converts them to pixmaps to be displayed by QT gui'''
qtimage = QImage(imagefile)
width, height = qtimage.size().width(), qtimage.size().height()
qtimage = qtimage.scaled(width*scalex,height*scaley)
pix = QPixmap.fromImage(qtimage)
return pix
def display_image(self):
#search directory for image
self.imagefile = "./TV_frame.png"
self.ui.tv_image.setGeometry(self.position.x(), self.position.y()-8, self.scalex*(self.nxpix)+4, self.scaley*(self.nypix)+4)
#print self.nxpix
#print self.nypix
#convert to pixmap
if isfile(self.imagefile):
pix = self.makepixmap(self.imagefile, scalex=self.scalex, scaley=self.scaley)
#display pixmap
self.scene = QGraphicsScene()
self.scene.addPixmap(pix)
### BAD PIXELS AND SPECTRA SELECTION NOT IMPLEMENTED ###
if self.ui.bad_pix.isChecked() == True:
for bp in self.bad_pix:
x = bp%(self.nxpix)
y = (self.nypix-1) - bp/(self.nxpix)
self.scene.addLine(self.scalex*x,self.scaley*y,self.scalex*x+(self.scalex),self.scaley*y+(self.scaley),Qt.red)
self.scene.addLine(self.scalex*x,self.scaley*y+(self.scaley),self.scalex*x+(self.scalex),self.scaley*y,Qt.red)
if self.histogram_pixel != []:
for p in self.histogram_pixel:
x = p%(self.nxpix)
y = (self.nypix-1) - p/self.nxpix
self.scene.addRect(self.scalex*(x),self.scaley*(y),(self.scalex),(self.scaley), Qt.blue)
self.ui.tv_image.setScene(self.scene)
self.ui.tv_image.show()
#os.remove(str(imagefile)) #want to keep this around for saving purposes
else:
self.blankscene = QGraphicsScene()
self.blankscene.clear()
self.ui.tv_image.setScene(self.blankscene)
self.ui.tv_image.show()
#def set_plot_mode(self):
#change between spectra plots and signal to noise ratio plots
#if self.ui.plot_snr_radioButton.isChecked():
#self.image_thread.set_plot_mode("snr")
#self.ui.spectra_plot.canvas.ytitle = "Signal to Noise"
#else:
#self.image_thread.set_plot_mode("spectra")
#self.ui.spectra_plot.canvas.ytitle = "Counts"
#pass
def set_pix_select_mode(self):
if self.ui.drag_select_radioButton.isChecked():
self.pix_select_mode = "drag"
elif self.ui.rect_select_radioButton.isChecked():
self.pix_select_mode = "rect"
elif self.ui.circ_select_radioButton.isChecked():
self.pix_select_mode = "circ"
def start_pixel_select(self,event):
#Mouse press returns x,y position of first pixel to be used in spectra
self.startrawx,self.startrawy = event.pos().x(), event.pos().y()
self.startpx = int(self.startrawx/self.scalex)
self.startpy = int((self.nypix) - self.startrawy/self.scaley)
self.startpix = self.nxpix*self.startpy+self.startpx
def end_pixel_select(self,event):
#Mouse release returns x,y position of last pixel to be used in spectra
self.endrawx,self.endrawy = event.pos().x(), event.pos().y()
self.endpx = int(self.endrawx/self.scalex)
self.endpy = int((self.nypix) - self.endrawy/self.scaley)
self.endpix = self.nxpix*self.endpy+self.endpx
self.pixel_list()
def pixel_list(self):
#if click and drag selection is on, add new pixels to the list of all pixels being plotted
if self.pix_select_mode == "drag":
if self.startpix != self.endpix:
#get all pix in box
allx = range(min(self.startpx,self.endpx),max(self.startpx, self.endpx)+1)
ally = range(min(self.startpy,self.endpy),max(self.startpy, self.endpy)+1)
pix = []
for x in allx:
for y in ally:
pix.append(y*self.nxpix+x)
#pix.append((31-y)*32+x)
else:
pix = [self.startpix]
elif self.pix_select_mode == "rect":
#get all pix in box
length = self.ui.rect_x_spinBox.value()
height = self.ui.rect_y_spinBox.value()
allx = range(self.startpx-int(ceil(length/2.0)-1),self.startpx+int(floor(length/2.0))+1)
ally = range(self.startpy-int(ceil(height/2.0)-1),self.startpy+int(floor(height/2.0))+1)
self.histogram_pixel = [] #clear spectrum array
pix=[]
#self.histogram_pixel.append(self.startpix)
for x in allx:
for y in ally:
pix.append(y*self.nxpix+x)
#pix.append((31-y)*32+x)
elif self.pix_select_mode == "circ":
r = self.ui.circ_r_spinBox.value()
length = 2*r
height = length
allx = range(self.startpx-int(ceil(length/2.0)),self.startpx+int(floor(length/2.0))+1)
ally = range(self.startpy-int(ceil(height/2.0)),self.startpy+int(floor(height/2.0))+1)
self.histogram_pixel = [] #clear spectrum array
pix = []
for x in allx:
for y in ally:
if (abs(x-self.startpx))**2+(abs(y-self.startpy))**2 <= (r)**2:
pix.append(y*self.nxpix+x)
#pix.append((31-y)*32+x)
for element in pix:
#check for repeated pixels (clicked for deselection) and out of bounds pixels, remove from total array
if element not in self.histogram_pixel:
if element >= 0 and element <= (self.nxpix*self.nypix-1):# and element not in self.bad_pix:
self.histogram_pixel.append(element)
else:
spot = self.histogram_pixel.index(element)
del(self.histogram_pixel[spot])
#if self.observing == False:
#self.image_thread.update_spectrum(self.bindir)
self.display_image()
self.make_plots()
def plot_timestream(self):
self.set_dark_sub()
self.set_sky_sub()
self.set_flat_sub()
if self.histogram_pixel != []:
nphot=0
ti = self.ui.starttime_spinbox.value()
tf = self.ui.endtime_spinbox.value()
if ti>tf:
copyti = ti
ti=tf
tf=copyti
print "WARNING: selected ti > tf. They were switched for you."
counts = zeros(tf-ti)
subtracted = zeros(tf-ti)
timesteps = xrange(ti,tf)
h5file = openFile(str(self.datafile), 'r')
#bmap = h5file.root.beammap.beamimage.read()
#bmap = rot90(bmap,2)
bmap = self.bmap
if self.dark_subtraction == True:
darkrate = zeros((self.nypix,self.nxpix))
darkh5 = openFile(str(self.darkfile), 'r')
darkbmap = darkh5.root.beammap.beamimage.read()
#darkbmap = rot90(darkbmap,2)
if self.sky_subtraction == True:
skyrate = zeros((self.nypix,self.nxpix))
skyh5 = openFile(str(self.skyfile), 'r')
skybmap = skyh5.root.beammap.beamimage.read()
#skybmap = rot90(skybmap,2)
darkbmap = self.bmap
for t in xrange(tf-ti):
for i in xrange(self.nypix):
for j in xrange(self.nxpix):
if i*self.nxpix+j in self.histogram_pixel:
self.ui.pixelpath.setText(str(bmap[i][j]))
if bmap[i][j] == '':
subtracted[t] += 0
continue
try:
counts[t] += len(h5file.root._f_getChild(bmap[i][j])[ti+t])
if self.dark_subtraction == True:
dtime = float(len(darkh5.root._f_getChild(darkbmap[i][j])))
darkcounts= len(concatenate(darkh5.root._f_getChild(darkbmap[i][j])[:]))
darkrate= darkcounts/dtime
if darkcounts==0:
subtracted[t] = counts[t]
else:
subtracted[t] = counts[t]-darkrate
else:
subtracted[t] = counts[t]
for p in range(len(subtracted)):
if subtracted[p]<0:
subtracted[p]=0
if self.sky_subtraction == True:
if self.dark_subtraction != True:
stime = float(len(skyh5.root._f_getChild(skybmap[i][j])))
skycounts = len(concatenate(skyh5.root._f_getChild(skybmap[i][j])[:]))
skyrate = skycounts/stime
if skycounts==0:
pass
else:
subtracted[t] = subtracted[t]-skyrate
else:
stime = float(len(skyh5.root._f_getChild(skybmap[i][j])))
skycounts = len(concatenate(skyh5.root._f_getChild(skybmap[i][j])[:]))
skyrate = skycounts/stime
if skycounts==0:
pass
else:
subtracted[t] = subtracted[t] - (skyrate-darkrate)
else:
pass
counts[t] = int(subtracted[t])
if counts[t]<0:
counts[t]=0
except NoSuchNodeError:
counts[t]=0
print "plotting timestream of ", tf-ti, " seconds"
self.ui.countlabel.setText(str(sum(counts)))
self.ui.histogram_plot.canvas.ax.clear()
#if self.plot_type == "point":
#photon_hist = histogram(new_photons, bins = nbins, range = (min(new_photons), max(new_photons)))
#self.ui.histogram_plot.canvas.ax.plot(photon_hist[1][1:],photon_hist[0],'o')
#else:
#self.ui.histogram_plot.canvas.ax.hist(new_photons, bins = nbins, range = (min(new_photons),max(new_photons)), histtype='bar')
self.ui.histogram_plot.canvas.ax.plot(timesteps,counts)
#self.ui.histogram_plot.canvas.format_labels()
self.ui.histogram_plot.canvas.draw()
if self.dark_subtraction == True:
darkh5.close()
if self.sky_subtraction == True:
skyh5.close()
h5file.close()
print "done"
def plot_histogram(self):
self.set_dark_sub()
self.set_sky_sub()
self.set_flat_sub()
if self.histogram_pixel != []:
nphot=0
ti = self.ui.starttime_spinbox.value()
tf = self.ui.endtime_spinbox.value()
if ti>tf:
copyti = ti
ti=tf
tf=copyti
print "WARNING: selected ti > tf. They were switched for you."
h5file = openFile(str(self.datafile), 'r')
#bmap = h5file.root.beammap.beamimage.read()
#bmap = rot90(bmap,2)
bmap = self.bmap
all_photons = []
for j in range(self.nxpix*self.nypix):
all_photons.append([])
if self.dark_subtraction == True:
darkrate = zeros((self.nypix,self.nxpix))
darkh5 = openFile(str(self.darkfile), 'r')
darkbmap = darkh5.root.beammap.beamimage.read()
#darkbmap = rot90(darkbmap,2)
if self.sky_subtraction == True:
skyrate = zeros((self.nypix,self.nxpix))
skyh5 = openFile(str(self.skyfile), 'r')
skybmap = skyh5.root.beammap.beamimage.read()
#skybmap = rot90(skybmap,2)
                darkbmap = self.bmap
counts = zeros((self.nypix,self.nxpix))
bins = range(self.ui.nbins.value()+1)
m=-1
for i in xrange(self.nypix):
for j in xrange(self.nxpix):
if i*self.nxpix+j in self.histogram_pixel:
m+=1
self.ui.pixelpath.setText(str(bmap[i][j]))
if bmap[i][j] == '':
counts[i][j]=0
subtracted1 = zeros((self.ui.nbins.value()),dtype=float)
subtracted2 = zeros((self.ui.nbins.value()),dtype=float)
continue
try:
#etime = len(h5file.root._f_getChild(bmap[i][j]))
photons= concatenate(h5file.root._f_getChild(bmap[i][j])[ti:tf])
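                            # decode 12-bit fields from each packed photon word:
                            # bits 32-43 = peak height, 44-55 = parabola fit height, 20-31 = baseline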
peakheights= right_shift(photons,32)%4096
parabheights= right_shift(photons,44)%4096
#npparabheights = array(parabheights, dtype=float)
baseline= right_shift(photons,20)%4096
#npbaseline = array(baseline, dtype=float)
#obsheights = npbaseline-npparabheights
#obsheights = baseline
#for l in xrange(10):
#print peakheights[l], parabheights[l], baseline[l]
if self.ui.topplot.currentText() == "Parabola Fit":
obs1heights = array(parabheights,dtype=float)
elif self.ui.topplot.currentText() == "Baseline":
obs1heights = array(baseline,dtype=float)
else:
obs1heights = array(peakheights,dtype=float)
if self.ui.bottomplot.currentText() == "Parabola Fit":
obs2heights = array(parabheights,dtype=float)
elif self.ui.bottomplot.currentText() == "Peak Height":
obs2heights = array(peakheights,dtype=float)
else:
obs2heights = array(baseline,dtype=float)
#for l in xrange(10):
#print peakheights[l], parabheights[l], baseline[l],"\n",obs1heights[l], obs2heights[l]
if self.ui.checkBox.isChecked():
obs1heights -= array(baseline,dtype=float)
if self.ui.checkBox_2.isChecked():
obs2heights -= array(baseline,dtype=float)
nbins1 = obs1heights.max() - obs1heights.min()
nbins2 = obs2heights.max() - obs2heights.min()
self.ui.nbins.setValue(nbins1)
totalhist1 = zeros((nbins1),dtype=float)
totalhist2 = zeros((nbins2),dtype=float)
if len(obs1heights)==0:
counts[i][j]=0
continue
else:
obs1hist,bins1 = histogram(obs1heights,bins=nbins1,range=(obs1heights.min(),obs1heights.max()))
obs2hist,bins2 = histogram(obs2heights,bins=nbins2,range=(obs2heights.min(),obs2heights.max()))
#print bins
if self.dark_subtraction == True:
dtime = float(len(darkh5.root._f_getChild(darkbmap[i][j])))
photons= concatenate(darkh5.root._f_getChild(darkbmap[i][j])[:])
darkheights= right_shift(photons,32)%4096
if len(darkheights)==0:
subtracted1 = obs1hist
subtracted2 = obs2hist
dark1hist = zeros((self.ui.nbins.value()))
dark2hist = zeros((self.ui.nbins.value()))
else:
dark1hist,bins1 = histogram(darkheights,bins=nbins1,range=(obs1heights.min(),obs1heights.max()))
subtracted1 = obs1hist-(dark1hist*(tf-ti)/float(dtime))
dark2hist,bins2 = histogram(darkheights,bins=nbins2,range=(obs2heights.min(),obs2heights.max()))
subtracted2 = obs2hist-(dark2hist*(tf-ti)/float(dtime))
else:
subtracted1=obs1hist
subtracted2=obs2hist
#for m in xrange(len(obs1hist))
for p in range(len(subtracted1)):
if subtracted1[p]<0:
subtracted1[p]=0
for p in range(len(subtracted2)):
if subtracted2[p]<0:
subtracted2[p]=0
if self.sky_subtraction == True:
if self.dark_subtraction != True:
stime = float(len(skyh5.root._f_getChild(skybmap[i][j])))
photons = concatenate(skyh5.root._f_getChild(skybmap[i][j])[:])
skyheights = right_shift(photons,32)%4096
if len(skyheights)==0:
pass
else:
sky1hist,bins1 = histogram(skyheights,bins=nbins1,range=(obs1heights.min(),obs1heights.max()))
skysubtracted1 = sky1hist
sky2hist,bins2 = histogram(skyheights,bins=nbins2,range=(obs2heights.min(),obs2heights.max()))
skysubtracted2 = sky2hist
for p in range(len(skysubtracted1)):
if skysubtracted1[p] <0:
skysubtracted1[p]=0
for p in range(len(skysubtracted2)):
if skysubtracted2[p] <0:
skysubtracted2[p]=0
subtracted1 = subtracted1-(skysubtracted1*(tf-ti)/float(stime))
subtracted2 = subtracted2-(skysubtracted2*(tf-ti)/float(stime))
else:
stime = float(len(skyh5.root._f_getChild(skybmap[i][j])))
photons = concatenate(skyh5.root._f_getChild(skybmap[i][j])[:])
skyheights = right_shift(photons,32)%4096
if len(skyheights)==0:
pass
else:
sky1hist,bins1 = histogram(skyheights,bins=nbins1,range=(obs1heights.min(),obs1heights.max()))
skysubtracted1 = sky1hist-(dark1hist*(stime)/float(dtime))
sky2hist,bins2 = histogram(skyheights,bins=nbins2,range=(obs2heights.min(),obs2heights.max()))
skysubtracted2 = sky2hist-(dark2hist*(stime)/float(dtime))
for p in range(len(skysubtracted1)):
if skysubtracted1[p] <0:
skysubtracted1[p]=0
for p in range(len(skysubtracted2)):
if skysubtracted2[p] <0:
skysubtracted2[p]=0
subtracted1 = subtracted1-(skysubtracted1*(tf-ti)/float(stime))
subtracted2 = subtracted2-(skysubtracted2*(tf-ti)/float(stime))
else:
pass
counts[i][j] = sum(subtracted1)
if counts[i][j]<0:
counts[i][j]=0
except NoSuchNodeError:
counts[i][j]=0
subtracted1 = zeros((self.ui.nbins.value()))
subtracted2 = zeros((self.ui.nbins.value()))
for p in range(len(subtracted1)):
if subtracted1[p]<0:
subtracted1[p]=0
for p in range(len(subtracted2)):
if subtracted2[p]<0:
subtracted2[p]=0
totalhist1 += subtracted1
totalhist2 += subtracted2
nphot += counts[i][j]
print "plotting histogram of ", nphot, " pulse heights"
self.ui.countlabel.setText(str(nphot))
nbins = self.ui.nbins.value()
self.ui.histogram_plot.canvas.ax.clear()
self.ui.histogram_plot_2.canvas.ax.clear()
#if self.plot_type == "point":
#photon_hist = histogram(new_photons, bins = nbins, range = (min(new_photons), max(new_photons)))
#self.ui.histogram_plot.canvas.ax.plot(photon_hist[1][1:],photon_hist[0],'o')
#else:
#self.ui.histogram_plot.canvas.ax.hist(new_photons, bins = nbins, range = (min(new_photons),max(new_photons)), histtype='bar')
self.ui.histogram_plot.canvas.ax.bar(bins1[:-1],totalhist1, width=(bins1[1]-bins1[0]), bottom=0,linewidth=0)
self.ui.histogram_plot_2.canvas.ax.bar(bins2[:-1],totalhist2, width=(bins2[1]-bins2[0]), bottom=0,linewidth=0)
#self.ui.histogram_plot.canvas.format_labels()
self.ui.histogram_plot.canvas.draw()
self.ui.histogram_plot_2.canvas.draw()
if self.dark_subtraction == True:
darkh5.close()
if self.sky_subtraction == True:
skyh5.close()
h5file.close()
print "done"
def closeEvent(self, event=None):
if isfile(self.imagefile):
os.remove(str(self.imagefile))
if __name__ == "__main__":
app = QApplication(sys.argv)
myapp = StartQt4()
myapp.show()
app.exec_()
| gpl-2.0 |
rajul/tvb-framework | tvb/interfaces/web/mplh5/mplh5_server.py | 2 | 2720 | # -*- coding: utf-8 -*-
#
#
# TheVirtualBrain-Framework Package. This package holds all Data Management, and
# Web-UI helpful to run brain-simulations. To use it, you also need do download
# TheVirtualBrain-Scientific Package (for simulators). See content of the
# documentation-folder for more details. See also http://www.thevirtualbrain.org
#
# (c) 2012-2013, Baycrest Centre for Geriatric Care ("Baycrest")
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by the Free
# Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details. You should have received a copy of the GNU General
# Public License along with this program; if not, you can download it here
# http://www.gnu.org/licenses/old-licenses/gpl-2.0
#
#
# CITATION:
# When using The Virtual Brain for scientific publications, please cite it as follows:
#
# Paula Sanz Leon, Stuart A. Knock, M. Marmaduke Woodman, Lia Domide,
# Jochen Mersmann, Anthony R. McIntosh, Viktor Jirsa (2013)
# The Virtual Brain: a simulator of primate brain network dynamics.
# Frontiers in Neuroinformatics (7:10. doi: 10.3389/fninf.2013.00010)
#
#
"""
.. moduleauthor:: Lia Domide <[email protected]>
"""
import threading
import matplotlib
# Import this, to make sure the build process marks this code as reference.
import mplh5canvas.simple_server
SYNC_EVENT = threading.Event()
class ServerStarter(threading.Thread):
"""
    Handler for starting the MPLH5 server in a different thread.
    A synchronization event is used because we want to start the MPLH5 server
    in a new thread, but the main thread should wait for it; otherwise the
    wrong import of pylab might be used.
"""
logger = None
def run(self):
"""
Start MPLH5 server.
This method needs to be executed as soon as possible, before any import of pylab.
Otherwise the proper mplh5canvas back-end will not be used correctly.
"""
try:
matplotlib.use('module://tvb.interfaces.web.mplh5.mplh5_backend')
self.logger.info("MPLH5 back-end server started.")
except Exception, excep:
self.logger.error("Could not start MatplotLib server side!!!")
self.logger.exception(excep)
SYNC_EVENT.set()
def start_server(logger):
"""Start MPLH5 server in a new thread, to avoid crashes."""
thread = ServerStarter()
thread.logger = logger
thread.start()
SYNC_EVENT.wait()
| gpl-2.0 |
pompiduskus/scikit-learn | examples/cluster/plot_dbscan.py | 346 | 2479 | # -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
##############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
r0k3/arctic | setup.py | 1 | 5262 | #
# Copyright (C) 2015 Man AHL
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
# USA
import logging
from setuptools import setup
from setuptools.extension import Extension
from setuptools import find_packages
from setuptools.command.test import test as TestCommand
from Cython.Build import cythonize
import six
import sys
# Convert Markdown to RST for PyPI
# http://stackoverflow.com/a/26737672
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
changelog = pypandoc.convert('CHANGES.md', 'rst')
except (IOError, ImportError, OSError):
long_description = open('README.md').read()
changelog = open('CHANGES.md').read()
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
logging.basicConfig(format='%(asctime)s %(levelname)s %(name)s %(message)s', level='DEBUG')
# import here, cause outside the eggs aren't loaded
import pytest
args = [self.pytest_args] if isinstance(self.pytest_args, six.string_types) else list(self.pytest_args)
args.extend(['--cov', 'arctic',
'--cov-report', 'xml',
'--cov-report', 'html',
'--junitxml', 'junit.xml',
])
errno = pytest.main(args)
sys.exit(errno)
# Cython lz4
compress = Extension('arctic._compress',
sources=["src/_compress.pyx", "src/lz4.c", "src/lz4hc.c"],
extra_compile_args=['-fopenmp'],
extra_link_args=['-fopenmp'])
setup(
name="arctic",
version="1.25.0",
author="Man AHL Technology",
author_email="[email protected]",
description=("AHL Research Versioned TimeSeries and Tick store"),
license="GPL",
keywords=["ahl", "keyvalue", "tickstore", "mongo", "timeseries", ],
url="https://github.com/manahl/arctic",
packages=find_packages(),
long_description='\n'.join((long_description, changelog)),
cmdclass={'test': PyTest},
ext_modules=cythonize(compress),
setup_requires=["Cython",
"numpy",
"setuptools-git",
],
install_requires=["decorator",
"enum34",
"lz4",
"mockextras",
"pandas",
"pymongo>=3.0",
"python-dateutil",
"pytz",
"tzlocal",
],
tests_require=["mock",
"mockextras",
"pytest",
"pytest-cov",
"pytest-dbfixtures",
"pytest-timeout",
"pytest-xdist",
],
entry_points={'console_scripts': [
'arctic_init_library = arctic.scripts.arctic_init_library:main',
'arctic_list_libraries = arctic.scripts.arctic_list_libraries:main',
'arctic_delete_library = arctic.scripts.arctic_delete_library:main',
'arctic_enable_sharding = arctic.scripts.arctic_enable_sharding:main',
'arctic_copy_data = arctic.scripts.arctic_copy_data:main',
'arctic_create_user = arctic.scripts.arctic_create_user:main',
'arctic_prune_versions = arctic.scripts.arctic_prune_versions:main',
'arctic_fsck = arctic.scripts.arctic_fsck:main',
]
},
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Cython",
"Operating System :: POSIX",
"Operating System :: MacOS",
"Topic :: Database",
"Topic :: Database :: Front-Ends",
"Topic :: Software Development :: Libraries",
],
)
| lgpl-2.1 |
LoLab-VU/pysb | pysb/simulator/cupsoda.py | 5 | 27740 | from pysb.simulator.base import Simulator, SimulatorException, SimulationResult
import pysb
import pysb.bng
import numpy as np
from scipy.constants import N_A
import os
import re
import subprocess
import tempfile
import time
import logging
from pysb.logging import EXTENDED_DEBUG
import shutil
from pysb.pathfinder import get_path
import sympy
import collections
from collections.abc import Iterable
try:
import pandas as pd
except ImportError:
pd = None
try:
import pycuda.driver as cuda
except ImportError:
cuda = None
class CupSodaSimulator(Simulator):
"""An interface for running cupSODA, a CUDA implementation of LSODA.
cupSODA is a graphics processing unit (GPU)-based implementation of the
LSODA simulation algorithm (see references). It requires an NVIDIA GPU
card with support for the CUDA framework version 7 or above. Further
details of cupSODA and software can be found on github:
https://github.com/aresio/cupSODA
The simplest way to install cupSODA is to use a pre-compiled version,
which can be downloaded from here:
https://github.com/aresio/cupSODA/releases
Parameters
----------
model : pysb.Model
Model to integrate.
tspan : vector-like, optional
Time values at which the integrations are sampled. The first and last
values define the time range.
initials : list-like, optional
Initial species concentrations for all simulations. Dimensions are
N_SIMS x number of species.
param_values : list-like, optional
Parameters for all simulations. Dimensions are N_SIMS x number of
parameters.
verbose : bool or int, optional
Verbosity level, see :class:`pysb.simulator.base.Simulator` for
further details.
**kwargs: dict, optional
Extra keyword arguments, including:
* ``gpu``: Index of GPU to run on (default: 0)
        * ``vol``: System volume; required if the model is encoded in extrinsic
(number) units (default: None)
* ``obs_species_only``: Only output species contained in observables
(default: True)
* ``cleanup``: Delete all temporary files after the simulation is
finished. Includes both BioNetGen and cupSODA files. Useful for
debugging (default: True)
* ``prefix``: Prefix for the temporary directory containing cupSODA
input and output files (default: model name)
* ``base_dir``: Directory in which temporary directory with cupSODA
input and output files are placed (default: system directory
determined by `tempfile.mkdtemp`)
* ``integrator``: Name of the integrator to use; see
`default_integrator_options` (default: 'cupsoda')
* ``integrator_options``: A dictionary of keyword arguments to
supply to the integrator; see `default_integrator_options`.
Attributes
----------
model : pysb.Model
Model passed to the constructor.
tspan : numpy.ndarray
Time values passed to the constructor.
initials : numpy.ndarray
Initial species concentrations for all simulations. Dimensions are
number of simulations x number of species.
param_values : numpy.ndarray
Parameters for all simulations. Dimensions are number of simulations
x number of parameters.
verbose: bool or int
Verbosity setting. See the base class
:class:`pysb.simulator.base.Simulator` for further details.
gpu : int or list
Index of GPU being run on, or a list of integers to use multiple GPUs.
        Simulations will be split equally among the GPUs.
outdir : str
Directory where cupSODA output files are placed. Input files are
also placed here.
opts: dict
Dictionary of options for the integrator, which can include the
following:
* vol (float or None): System volume
* n_blocks (int or None): Number of GPU blocks used by the simulator
* atol (float): Absolute integrator tolerance
* rtol (float): Relative integrator tolerance
* chunksize (int or None): The maximum number of simulations to run
per GPU at one time. Set this option if your GPU is running out of
memory.
* memory_usage ('global', 'shared', or 'sharedconstant'): The type of
GPU memory to use
* max_steps (int): The maximum number of internal integrator iterations
(equivalent to LSODA's mxstep)
integrator : str
Name of the integrator in use (only "cupsoda" is supported).
Notes
-----
1. If `vol` is defined, species amounts and rate constants are assumed
to be in number units and are automatically converted to concentration
units before generating the cupSODA input files. The species
concentrations returned by cupSODA are converted back to number units
during loading.
2. If `obs_species_only` is True, only the species contained within
observables are output by cupSODA. All other concentrations are set
to 'nan'.
References
----------
1. Harris, L.A., Nobile, M.S., Pino, J.C., Lubbock, A.L.R., Besozzi, D.,
Mauri, G., Cazzaniga, P., and Lopez, C.F. 2017. GPU-powered model
analysis with PySB/cupSODA. Bioinformatics 33, pp.3492-3494.
2. Nobile M.S., Cazzaniga P., Besozzi D., Mauri G., 2014. GPU-accelerated
simulations of mass-action kinetics models with cupSODA, Journal of
Supercomputing, 69(1), pp.17-24.
3. Petzold, L., 1983. Automatic selection of methods for solving stiff and
nonstiff systems of ordinary differential equations. SIAM journal on
scientific and statistical computing, 4(1), pp.136-148.
"""
_supports = {'multi_initials': True, 'multi_param_values': True}
_memory_options = {'global': '0', 'shared': '1', 'sharedconstant': '2'}
default_integrator_options = {
# some sane default options for a few well-known integrators
'cupsoda': {
'max_steps': 20000, # max # of internal iterations (LSODA's MXSTEP)
'atol': 1e-8, # absolute tolerance
'rtol': 1e-8, # relative tolerance
'chunksize': None, # Max number of simulations per GPU per run
'n_blocks': None, # number of GPU blocks
'memory_usage': 'sharedconstant'}} # see _memory_options dict
_integrator_options_allowed = {'max_steps', 'atol', 'rtol', 'n_blocks',
'memory_usage', 'vol', 'chunksize'}
def __init__(self, model, tspan=None, initials=None, param_values=None,
verbose=False, **kwargs):
super(CupSodaSimulator, self).__init__(model, tspan=tspan,
initials=initials,
param_values=param_values,
verbose=verbose, **kwargs)
self.gpu = kwargs.pop('gpu', (0, ))
if not isinstance(self.gpu, Iterable):
self.gpu = [self.gpu]
self._obs_species_only = kwargs.pop('obs_species_only', True)
self._cleanup = kwargs.pop('cleanup', True)
self._prefix = kwargs.pop('prefix', self._model.name)
# Sanitize the directory - cupsoda doesn't handle spaces etc. well
self._prefix = re.sub('[^0-9a-zA-Z]', '_', self._prefix)
self._base_dir = kwargs.pop('base_dir', None)
self.integrator = kwargs.pop('integrator', 'cupsoda')
integrator_options = kwargs.pop('integrator_options', {})
if kwargs:
raise ValueError('Unknown keyword argument(s): {}'.format(
', '.join(kwargs.keys())
))
unknown_integrator_options = set(integrator_options.keys()).difference(
self._integrator_options_allowed
)
if unknown_integrator_options:
raise ValueError(
'Unknown integrator_options: {}. Allowed options: {}'.format(
', '.join(unknown_integrator_options),
', '.join(self._integrator_options_allowed)
)
)
# generate the equations for the model
pysb.bng.generate_equations(self._model, self._cleanup, self.verbose)
# build integrator options list from our defaults and any kwargs
# passed to this function
options = {}
if self.default_integrator_options.get(self.integrator):
options.update(self.default_integrator_options[
self.integrator]) # default options
else:
raise SimulatorException(
"Integrator type '" + self.integrator + "' not recognized.")
options.update(integrator_options) # overwrite
# defaults
self.opts = options
self._out_species = None
# private variables (to reduce the number of function calls)
self._len_rxns = len(self._model.reactions)
self._len_species = len(self._model.species)
self._len_params = len(self._model.parameters)
self._model_parameters_rules = self._model.parameters_rules()
# Set cupsoda verbosity level
logger_level = self._logger.logger.getEffectiveLevel()
if logger_level <= EXTENDED_DEBUG:
self._cupsoda_verbose = 2
elif logger_level <= logging.DEBUG:
self._cupsoda_verbose = 1
else:
self._cupsoda_verbose = 0
# regex for extracting cupSODA reported running time
self._running_time_regex = re.compile(r'Running time:\s+(\d+\.\d+)')
def _run_chunk(self, gpus, outdir, chunk_idx, cmtx, sims, trajectories,
tout):
_indirs = {}
_outdirs = {}
p = {}
# Path to cupSODA executable
bin_path = get_path('cupsoda')
# Start simulations
for gpu in gpus:
_indirs[gpu] = os.path.join(outdir, "INPUT_GPU{}_{}".format(
gpu, chunk_idx))
os.mkdir(_indirs[gpu])
_outdirs[gpu] = os.path.join(outdir, "OUTPUT_GPU{}_{}".format(
gpu, chunk_idx))
# Create cupSODA input files
self._create_input_files(_indirs[gpu], sims[gpu], cmtx)
# Build command
# ./cupSODA input_model_folder blocks output_folder simulation_
# file_prefix gpu_number fitness_calculation memory_use dump
command = [bin_path, _indirs[gpu], str(self.n_blocks),
_outdirs[gpu], self._prefix, str(gpu),
'0', self._memory_usage, str(self._cupsoda_verbose)]
self._logger.info("Running cupSODA: " + ' '.join(command))
# Run simulation and return trajectories
p[gpu] = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Read results
for gpu in gpus:
(p_out, p_err) = p[gpu].communicate()
p_out = p_out.decode('utf-8')
p_err = p_err.decode('utf-8')
logger_level = self._logger.logger.getEffectiveLevel()
if logger_level <= logging.INFO:
run_time_match = self._running_time_regex.search(p_out)
if run_time_match:
self._logger.info('cupSODA GPU {} chunk {} reported '
'time: {} seconds'.format(
gpu,
chunk_idx,
run_time_match.group(1)))
self._logger.debug('cupSODA GPU {} chunk {} stdout:\n{}'.format(
gpu, chunk_idx, p_out))
if p_err:
self._logger.error('cupSODA GPU {} chunk {} '
'stderr:\n{}'.format(
gpu, chunk_idx, p_err))
if p[gpu].returncode:
raise SimulatorException(
"cupSODA GPU {} chunk {} exception:\n{}\n{}".format(
gpu, chunk_idx, p_out.rstrip("at line"), p_err.rstrip()
)
)
tout_run, trajectories_run = self._load_trajectories(
_outdirs[gpu], sims[gpu])
if trajectories is None:
tout = tout_run
trajectories = trajectories_run
else:
tout = np.concatenate((tout, tout_run))
trajectories = np.concatenate(
(trajectories, trajectories_run))
return tout, trajectories
def run(self, tspan=None, initials=None, param_values=None):
"""Perform a set of integrations.
Returns a :class:`.SimulationResult` object.
Parameters
----------
tspan : list-like, optional
Time values at which the integrations are sampled. The first and
last values define the time range.
initials : list-like, optional
Initial species concentrations for all simulations. Dimensions are
            number of simulations x number of species.
param_values : list-like, optional
Parameters for all simulations. Dimensions are number of
simulations x number of parameters.
Returns
-------
A :class:`SimulationResult` object
Notes
-----
1. An exception is thrown if `tspan` is not defined in either
        `__init__` or `run`.
        2. If neither `initials` nor `param_values` is defined in either
        `__init__` or `run`, a single simulation is run with the initial
concentrations and parameter values defined in the model.
"""
super(CupSodaSimulator, self).run(tspan=tspan, initials=initials,
param_values=param_values,
_run_kwargs=[])
# Create directories for cupSODA input and output files
_outdirs = {}
_indirs = {}
start_time = time.time()
cmtx = self._get_cmatrix()
outdir = tempfile.mkdtemp(prefix=self._prefix + '_',
dir=self._base_dir)
self._logger.debug("Output directory is %s" % outdir)
# Set up chunking (enforce max # sims per GPU per run)
n_sims = len(self.param_values)
chunksize_gpu = self.opts.get('chunksize', None)
if chunksize_gpu is None:
chunksize_gpu = n_sims
chunksize_total = chunksize_gpu * len(self.gpu)
tout = None
trajectories = None
chunks = np.array_split(range(n_sims),
np.ceil(n_sims / chunksize_total))
try:
for chunk_idx, chunk in enumerate(chunks):
self._logger.debug('cupSODA chunk {} of {}'.format(
(chunk_idx + 1), len(chunks)))
# Split chunk equally between GPUs
sims = dict(zip(self.gpu, np.array_split(chunk,
len(self.gpu))))
tout, trajectories = self._run_chunk(
self.gpu, outdir, chunk_idx, cmtx, sims,
trajectories, tout)
finally:
if self._cleanup:
shutil.rmtree(outdir)
end_time = time.time()
self._logger.info("cupSODA + I/O time: {} seconds".format(
end_time - start_time))
return SimulationResult(self, tout, trajectories)
@property
def _memory_usage(self):
try:
return self._memory_options[self.opts['memory_usage']]
except KeyError:
raise Exception('memory_usage must be one of %s',
self._memory_options.keys())
@property
def vol(self):
vol = self.opts.get('vol', None)
return vol
@vol.setter
def vol(self, volume):
self.opts['vol'] = volume
@property
def n_blocks(self):
n_blocks = self.opts.get('n_blocks')
if n_blocks is None:
default_threads_per_block = 32
bytes_per_float = 4
memory_per_thread = (self._len_species + 1) * bytes_per_float
if cuda is None:
threads_per_block = default_threads_per_block
else:
cuda.init()
device = cuda.Device(self.gpu[0])
attrs = device.get_attributes()
shared_memory_per_block = attrs[
cuda.device_attribute.MAX_SHARED_MEMORY_PER_BLOCK]
upper_limit_threads_per_block = attrs[
cuda.device_attribute.MAX_THREADS_PER_BLOCK]
max_threads_per_block = min(
shared_memory_per_block / memory_per_thread,
upper_limit_threads_per_block)
threads_per_block = min(max_threads_per_block,
default_threads_per_block)
n_blocks = int(
np.ceil(1. * len(self.param_values) / threads_per_block))
self._logger.debug('n_blocks set to {} (used pycuda: {})'.format(
n_blocks, cuda is not None
))
self.n_blocks = n_blocks
return n_blocks
@n_blocks.setter
def n_blocks(self, n_blocks):
if not isinstance(n_blocks, int):
raise ValueError("n_blocks must be an integer")
if n_blocks <= 0:
raise ValueError("n_blocks must be greater than 0")
self.opts['n_blocks'] = n_blocks
def _create_input_files(self, directory, sims, cmtx):
# atol_vector
with open(os.path.join(directory, "atol_vector"), 'w') as atol_vector:
for i in range(self._len_species):
atol_vector.write(str(self.opts.get('atol')))
if i < self._len_species - 1:
atol_vector.write("\n")
# c_matrix
with open(os.path.join(directory, "c_matrix"), 'w') as c_matrix:
for i in sims:
line = ""
for j in range(self._len_rxns):
if j > 0:
line += "\t"
line += str(cmtx[i][j])
c_matrix.write(line)
if i != sims[-1]:
c_matrix.write("\n")
# cs_vector
with open(os.path.join(directory, "cs_vector"), 'w') as cs_vector:
self._out_species = range(self._len_species) # species to output
if self._obs_species_only:
self._out_species = [False for sp in self._model.species]
for obs in self._model.observables:
for i in obs.species:
self._out_species[i] = True
self._out_species = [i for i in range(self._len_species) if
self._out_species[i] is True]
for i in range(len(self._out_species)):
if i > 0:
cs_vector.write("\n")
cs_vector.write(str(self._out_species[i]))
# left_side
with open(os.path.join(directory, "left_side"), 'w') as left_side:
for i in range(self._len_rxns):
line = ""
for j in range(self._len_species):
if j > 0:
line += "\t"
stoich = 0
for k in self._model.reactions[i]['reactants']:
if j == k:
stoich += 1
line += str(stoich)
if i < self._len_rxns - 1:
left_side.write(line + "\n")
else:
left_side.write(line)
# max_steps
with open(os.path.join(directory, "max_steps"), 'w') as mxsteps:
mxsteps.write(str(self.opts['max_steps']))
# model_kind
with open(os.path.join(directory, "modelkind"), 'w') as model_kind:
# always set modelkind to 'deterministic'
model_kind.write("deterministic")
# MX_0
with open(os.path.join(directory, "MX_0"), 'w') as MX_0:
mx0 = self.initials
# if a volume has been defined, rescale populations
# by N_A*vol to get concentration
# (NOTE: act on a copy of self.initials, not
# the original, which we don't want to modify)
if self.vol:
mx0 = mx0.copy()
mx0 /= (N_A * self.vol)
for i in sims:
line = ""
for j in range(self._len_species):
if j > 0:
line += "\t"
line += str(mx0[i][j])
MX_0.write(line)
if i != sims[-1]:
MX_0.write("\n")
# right_side
with open(os.path.join(directory, "right_side"), 'w') as right_side:
for i in range(self._len_rxns):
line = ""
for j in range(self._len_species):
if j > 0:
line += "\t"
stochiometry = 0
for k in self._model.reactions[i]['products']:
if j == k:
stochiometry += 1
line += str(stochiometry)
if i < self._len_rxns - 1:
right_side.write(line + "\n")
else:
right_side.write(line)
# rtol
with open(os.path.join(directory, "rtol"), 'w') as rtol:
rtol.write(str(self.opts.get('rtol')))
# t_vector
with open(os.path.join(directory, "t_vector"), 'w') as t_vector:
for t in self.tspan:
t_vector.write(str(float(t)) + "\n")
# time_max
with open(os.path.join(directory, "time_max"), 'w') as time_max:
time_max.write(str(float(self.tspan[-1])))
def _get_cmatrix(self):
if self.model.tags:
raise ValueError('cupSODA does not currently support local '
'functions')
self._logger.debug("Constructing the c_matrix:")
c_matrix = np.zeros((len(self.param_values), self._len_rxns))
par_names = [p.name for p in self._model_parameters_rules]
rate_mask = np.array([p in self._model_parameters_rules for p in
self._model.parameters])
rate_args = []
par_vals = self.param_values[:, rate_mask]
rate_order = []
for rxn in self._model.reactions:
rate_args.append([arg for arg in rxn['rate'].atoms(sympy.Symbol) if
not arg.name.startswith('__s')])
reactants = len(rxn['reactants'])
rate_order.append(reactants)
output = 0.01 * len(par_vals)
output = int(output) if output > 1 else 1
for i in range(len(par_vals)):
if i % output == 0:
self._logger.debug(str(int(round(100. * i / len(par_vals)))) +
"%")
for j in range(self._len_rxns):
rate = 1.0
for r in rate_args[j]:
if isinstance(r, pysb.Parameter):
rate *= par_vals[i][par_names.index(r.name)]
elif isinstance(r, pysb.Expression):
raise ValueError('cupSODA does not currently support '
'models with Expressions')
else:
rate *= r
# volume correction
if self.vol:
rate *= (N_A * self.vol) ** (rate_order[j] - 1)
c_matrix[i][j] = rate
self._logger.debug("100%")
return c_matrix
def _load_trajectories(self, directory, sims):
"""Read simulation results from output files.
Returns `tout` and `trajectories` arrays.
"""
files = [filename for filename in os.listdir(directory) if
re.match(self._prefix, filename)]
if len(files) == 0:
raise SimulatorException(
"Cannot find any output files to load data from.")
if len(files) != len(sims):
raise SimulatorException(
"Number of output files (%d) does not match number "
"of requested simulations (%d)." % (
len(files), len(sims)))
n_sims = len(files)
trajectories = [None] * n_sims
tout = [None] * n_sims
traj_n = np.ones((len(self.tspan), self._len_species)) * float('nan')
tout_n = np.ones(len(self.tspan)) * float('nan')
# load the data
indir_prefix = os.path.join(directory, self._prefix)
for idx, n in enumerate(sims):
trajectories[idx] = traj_n.copy()
tout[idx] = tout_n.copy()
filename = indir_prefix + "_" + str(idx)
if not os.path.isfile(filename):
raise Exception("Cannot find input file " + filename)
# determine optimal loading method
if idx == 0:
(data, use_pandas) = self._test_pandas(filename)
# load data
else:
if use_pandas:
data = self._load_with_pandas(filename)
else:
data = self._load_with_openfile(filename)
# store data
tout[idx] = data[:, 0]
trajectories[idx][:, self._out_species] = data[:, 1:]
# volume correction
if self.vol:
trajectories[idx][:, self._out_species] *= (N_A * self.vol)
return np.array(tout), np.array(trajectories)
def _test_pandas(self, filename):
""" calculates the fastest method to load in data
Parameters
----------
filename : str
filename to laod in
Returns
-------
np.array, bool
"""
# using open(filename,...)
start = time.time()
data = self._load_with_openfile(filename)
end = time.time()
load_time_openfile = end - start
# using pandas
if pd:
start = time.time()
self._load_with_pandas(filename)
end = time.time()
load_time_pandas = end - start
if load_time_pandas < load_time_openfile:
return data, True
return data, False
@staticmethod
def _load_with_pandas(filename):
data = pd.read_csv(filename, sep='\t', skiprows=None,
header=None).to_numpy()
return data
@staticmethod
def _load_with_openfile(filename):
with open(filename, 'r') as f:
data = [line.rstrip('\n').split() for line in f]
data = np.array(data, dtype=np.float, copy=False)
return data
def run_cupsoda(model, tspan, initials=None, param_values=None,
integrator='cupsoda', cleanup=True, verbose=False, **kwargs):
"""Wrapper method for running cupSODA simulations.
Parameters
----------
See ``CupSodaSimulator`` constructor.
Returns
-------
SimulationResult.all : list of record arrays
List of trajectory sets. The first dimension contains species,
observables and expressions (in that order)
"""
sim = CupSodaSimulator(model, tspan=tspan, integrator=integrator,
cleanup=cleanup, verbose=verbose, **kwargs)
simres = sim.run(initials=initials, param_values=param_values)
return simres.all
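# Illustrative usage sketch (not part of the original module). It assumes a
# CUDA-capable GPU, a cupSODA binary discoverable via pysb.pathfinder, and the
# Robertson example model that ships with PySB; adjust the model, tspan and
# parameter batch for your own setup.
if __name__ == '__main__':
    from pysb.examples.robertson import model as example_model
    example_tspan = np.linspace(0, 100, 101)
    # Broadcast the model's nominal parameter values into a small batch of
    # (identical) parameter sets, just to exercise the batched interface.
    example_params = np.repeat(
        [[p.value for p in example_model.parameters]], 5, axis=0)
    sim = CupSodaSimulator(example_model, tspan=example_tspan,
                           param_values=example_params, verbose=True,
                           integrator_options={'atol': 1e-6, 'rtol': 1e-6})
    res = sim.run()
    print(res.observables)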
| bsd-2-clause |
rustychris/stompy | examples/curvilinear_grids.py | 1 | 1769 | import numpy as np
import matplotlib.pyplot as plt
from stompy.grid import front, unstructured_grid
from stompy.plot import plot_wkb
from stompy.spatial import linestring_utils
from stompy import filters
from shapely import geometry
##
# define the domain
s=np.linspace(0,6000,200) # along-channel coordinate
amp=700 # amplitude of meanders
lamb=4000 # wave-length of meanders
width=500 # mean channel width
noise_w=50 # amplitude of noise to add to the channel banks
noise_l=1500 # length-scale of noise
centerline=np.c_[ s, amp*np.cos(2*np.pi*s/lamb)]
pline=geometry.LineString(centerline)
channel=pline.buffer(width/2)
ring=np.array(channel.exterior)
ring_norm=linestring_utils.left_normals(ring)
noise=(np.random.random(len(ring_norm))-0.5)
winsize=int( noise_l/( channel.exterior.length/len(ring_norm) ) )
noise[:winsize]=0 # so the ends still match up
noise[-winsize:]=0
noise_lp=filters.lowpass_fir(noise,winsize)
noise_lp *= noise_w/np.sqrt(np.mean(noise_lp**2))
# domain boundary including the random noise
ring_noise=ring+noise_lp[:,None]*ring_norm
# Create the curvilinear section
thalweg=centerline[50:110]
plt.figure(1).clf()
plt.plot(centerline[:,0],
centerline[:,1],
'k-',zorder=2)
plt.axis('equal')
plot_wkb.plot_wkb(channel,zorder=-2)
plt.plot(ring_noise[:,0],
ring_noise[:,1],
'm-')
plt.plot(thalweg[:,0],thalweg[:,1],'r--',lw=3)
##
# First, just the curvilinear section:
g=unstructured_grid.UnstructuredGrid(max_sides=4)
thalweg_resamp=linestring_utils.resample_linearring(thalweg,50,closed_ring=0)
g.add_rectilinear_on_line(thalweg_resamp,
profile=lambda x,s,perp: np.linspace(-200,200,20),
add_streamwise=False)
g.plot_edges(zorder=5,color='y')
| mit |
smenon8/AnimalWildlifeEstimator | script/ClassifierCapsuleClass.py | 1 | 2310 | from sklearn.metrics import classification_report,accuracy_score,f1_score,precision_score,recall_score,roc_auc_score,mean_absolute_error, mean_squared_error,zero_one_loss,roc_curve
import warnings
import sys
import pandas as pd
from BaseCapsuleClass import BaseCapsule
class ClassifierCapsule(BaseCapsule):
def __init__(self,clfObj,methodName,splitPercent,train_x,train_y,test_x,test_y):
BaseCapsule.__init__(self,clfObj,methodName,splitPercent,train_x,train_y,test_x,test_y)
self.predProbabs = None
self.predScores = None
self.accScore = None
self.f1Score = None
self.precision = None
self.recall = None
self.auc = None
self.abserr = None
self.sqerr = None
self.zerooneloss = None
self.roccurve = None
self.warningMsg = None
def evalClassifierPerf(self):
self.accScore = accuracy_score(self.test_y,self.preds)
self.precision = precision_score(self.test_y,self.preds)
self.recall = recall_score(self.test_y,self.preds)
self.auc = roc_auc_score(self.test_y,self.predProbabs) # intakes predicition probabilities
self.roccurve = roc_curve(self.test_y,self.predProbabs)
self.abserr = mean_absolute_error(self.test_y,self.preds)
self.sqerr = mean_squared_error(self.test_y,self.preds)
self.zerooneloss = zero_one_loss(self.test_y,self.preds)
with warnings.catch_warnings(record=True) as w:
self.f1Score = f1_score(self.test_y,self.preds)
if len(w) >= 1:
self.warningMsg = "Warning: F-score ill-defined because of no valid predictions (F-score set to 0)"
print(self.warningMsg)
return -1
else:
return 0
def runClf(self,computeMetrics=True):
BaseCapsule.run(self)
self.predProbabs = self.clfObj.predict_proba(self.test_x)[:,1]
if computeMetrics:
return self.evalClassifierPerf()
else:
return 0
def __str__(self):
keys = ['splitPercent', 'accScore', 'abserr', 'zerooneloss','f1Score', 'precision', 'auc', 'sqerr', 'methodName', 'accScore', 'recall']
printableDict = {key : self.__dict__[key] for key in keys}
return str(printableDict) | bsd-3-clause |
justincassidy/scikit-learn | examples/svm/plot_iris.py | 225 | 3252 | """
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different linear SVM classifiers on a 2D projection of the iris
dataset. We only consider the first 2 features of this dataset:
- Sepal length
- Sepal width
This example shows how to plot the decision surface for four SVM classifiers
with different kernels.
The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly
different decision boundaries. This can be a consequence of the following
differences:
- ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the
regular hinge loss.
- ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass
reduction while ``SVC`` uses the One-vs-One multiclass reduction.
Both linear models have linear decision boundaries (intersecting hyperplanes)
while the non-linear kernel models (polynomial or Gaussian RBF) have more
flexible non-linear decision boundaries with shapes that depend on the kind of
kernel and its parameters.
.. NOTE:: while plotting the decision function of classifiers for toy 2D
datasets can help get an intuitive understanding of their respective
expressive power, be aware that those intuitions don't always generalize to
more realistic high-dimensional problems.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
C = 1.0 # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)
lin_svc = svm.LinearSVC(C=C).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['SVC with linear kernel',
'LinearSVC (linear kernel)',
'SVC with RBF kernel',
'SVC with polynomial (degree 3) kernel']
for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title(titles[i])
plt.show()
| bsd-3-clause |
kuchenrolle/redditquery | src/redditquery/reddit.py | 1 | 6478 | #!/usr/bin/python3
import json
import os
import sys
import spacy
from pandas import period_range
from multiprocessing import Pool
from urllib.request import FancyURLopener
from redditquery.utils import recursive_walk, check_directory
urlretrieve = FancyURLopener().retrieve
# data will be downloaded from http://files.pushshift.io/reddit/comments/
# data structure is document in https://github.com/reddit/reddit/wiki/JSON
class RedditDownloader():
""" Downloads and Decompresses reddit comment archive files.
Parameters
----------
start : str
First month to be processed as YYYY/MM
end : str
Last month to be processed as YYYY/MM
directory : str
Directory to store data in
report_progress : Boolean
Display progress report to stderr
keep_compressed : Boolean
Keep compressed archive files
"""
def __init__(self, start, end, directory, report_progress, keep_compressed):
self.directory = check_directory(directory)
self.report_progress = report_progress
self.keep_compressed = keep_compressed
self.months = period_range(start, end, freq = "M")
def decompress(self, compressed_path, decompressed_path):
"""Decompress bz2 file (compressed_path) incrementally.
Parameters
----------
compressed_path : str or path object
file to be decompressed
decompressed_path : str or path object
file to be decompressed into
"""
from bz2 import BZ2Decompressor
with open(decompressed_path, 'wb') as decompressed, open(compressed_path, 'rb') as compressed:
decompressor = BZ2Decompressor()
total_size = os.path.getsize(compressed_path)
size_so_far = 0
if self.report_progress:
sys.stderr.write("\n")
for data in iter(lambda : compressed.read(100 * 1024), b''):
decompressed.write(decompressor.decompress(data))
if self.report_progress:
size_so_far += 102400
percentage = min(int(size_so_far * 100 / total_size), 100)
sys.stderr.write("\rDecompression: {}%".format(percentage))
sys.stderr.flush()
if not self.keep_compressed:
os.remove(compressed_path)
@staticmethod
def download_progress(count, block_size, total_size):
"""Hook to update download progress."""
percentage = min(int(100 * count * block_size / total_size),100)
sys.stderr.write("\rDownload: {}%".format(percentage))
sys.stderr.flush()
def download_month(self, month):
"""Download data for given month.
Parameters
----------
month : str or date object
            Month to be downloaded, str(month) must result in YYYY-MM
"""
file_url = "http://files.pushshift.io/reddit/comments/RC_{}.bz2".format(month)
file_path = os.path.join(self.directory, "RC_{}.bz2".format(month))
if self.report_progress:
urlretrieve(file_url, file_path, reporthook = RedditDownloader.download_progress)
else:
urlretrieve(file_url, file_path)
def download_all(self):
"""Downloads data for all months in self.months"""
for month in self.months:
if self.report_progress:
sys.stderr.write("\n")
self.download_month(month)
def decompress_month(self, month):
"""Decompress archive file for given month.
Parameters
----------
month : str or date object
            Month to be decompressed, str(month) must result in YYYY-MM
"""
compressed_path = os.path.join(self.directory, "RC_{}.bz2".format(month))
decompressed_path = os.path.join(self.directory, "RC_{}.json".format(month))
self.decompress(compressed_path = compressed_path, decompressed_path = decompressed_path)
def decompress_all(self):
"""Decompress files for all months."""
for month in self.months:
self.decompress_month(month)
def process_month(self, month):
"""Download file for specific month and decompress.
Parameters
----------
month : str or date object
            Month to be processed, str(month) must result in YYYY-MM
"""
self.download_month(month)
self.decompress_month(month)
def process_all(self):
"""Download and decompress files for all months."""
for month in self.months:
self.process_month(month)
def process_all_parallel(self, num_cores):
"""Download and decompress files for all months in parallel
Parameters
----------
num_cores : int
Number of cores to use
"""
if num_cores == 1:
self.process_all()
else:
self.report_progress = False
with Pool(num_cores) as pool:
for _ in pool.imap_unordered(self.process_month, self.months):
pass
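# Illustrative sketch (not part of the original module): one way to wire the
# downloader and the document generator together. The month and the "data"
# directory are placeholder values chosen only for this example.
def example_pipeline(directory="data", month="2015-01", num_cores=1):
    """Download one month of comments, then print the first tokenized one."""
    downloader = RedditDownloader(start=month, end=month, directory=directory,
                                  report_progress=True, keep_compressed=False)
    downloader.process_all_parallel(num_cores)
    for comment_id, tokens, text in DocumentGenerator(directory, fulltext=True,
                                                      lemmatize=True):
        print(comment_id, tokens)
        break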
def DocumentGenerator(directory, fulltext, lemmatize):
"""
    Takes a directory with reddit comment archive files (JSON) and, for each
    comment, yields a tuple of the comment id, the list of tokens, and the
    full comment text (an empty string unless `fulltext` is True).
Parameters
----------
directory : str or path object
Directory with comment files
fulltext : Boolean
return full comment as well
lemmatize : Boolean
lemmatize tokens in comments
"""
files = recursive_walk(directory)
nlp = spacy.load("en")
for month in files:
if not month.endswith("json"):
continue
month = open(month, "r")
for comment in month:
comment = json.loads(comment)
text = comment["body"]
comment_id = comment["id"]
tokens = nlp(text)
if lemmatize:
tokens = [token.lemma_.strip().lower() for token in tokens if not token.pos_.startswith(u"PU")] # filter punctuation
else:
tokens = [token.string.strip().lower() for token in tokens if not token.pos_.startswith(u"PU")]
if not fulltext:
text = ""
yield comment_id, tokens, text | mit |
M4573R/BuildingMachineLearningSystemsWithPython | ch08/norm.py | 23 | 2242 | import numpy as np
class NormalizePositive(object):
def __init__(self, axis=0):
self.axis = axis
def fit(self, features, y=None):
# count features that are greater than zero in axis `self.axis`:
if self.axis == 1:
features = features.T
binary = (features > 0)
count = binary.sum(axis=0)
# to avoid division by zero, set zero counts to one:
count[count == 0] = 1.
self.mean = features.sum(axis=0)/count
# Compute variance by average squared difference to the mean, but only
# consider differences where binary is True (i.e., where there was a
# true rating):
diff = (features - self.mean) * binary
diff **= 2
# regularize the estimate of std by adding 0.1
self.std = np.sqrt(0.1 + diff.sum(axis=0)/count)
return self
def transform(self, features):
if self.axis == 1:
features = features.T
binary = (features > 0)
features = features - self.mean
features /= self.std
features *= binary
if self.axis == 1:
features = features.T
return features
def inverse_transform(self, features, copy=True):
if copy:
features = features.copy()
if self.axis == 1:
features = features.T
features *= self.std
features += self.mean
if self.axis == 1:
features = features.T
return features
def fit_transform(self, features):
return self.fit(features).transform(features)
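# Small illustrative check (not part of the original file): on a toy ratings
# matrix only the non-zero entries drive the per-column mean/std estimates,
# and inverse_transform maps normalized values back to the original scale
# (zero entries come back as the column means).
def demo_normalize_positive():
    ratings = np.array([[5., 0., 3.],
                        [4., 2., 0.],
                        [0., 1., 4.]])
    norm = NormalizePositive(axis=0)
    normalized = norm.fit_transform(ratings)
    restored = norm.inverse_transform(normalized)
    print("normalized:\n", normalized)
    print("restored:\n", restored)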
def predict(train):
norm = NormalizePositive()
train = norm.fit_transform(train)
return norm.inverse_transform(train * 0.)
def main(transpose_inputs=False):
from load_ml100k import get_train_test
from sklearn import metrics
train,test = get_train_test(random_state=12)
if transpose_inputs:
train = train.T
test = test.T
predicted = predict(train)
r2 = metrics.r2_score(test[test > 0], predicted[test > 0])
print('R2 score ({} normalization): {:.1%}'.format(
('movie' if transpose_inputs else 'user'),
r2))
if __name__ == '__main__':
main()
main(transpose_inputs=True)
| mit |
pratapvardhan/scikit-learn | examples/neighbors/plot_classification.py | 287 | 1790 | """
================================
Nearest Neighbors Classification
================================
Sample usage of Nearest Neighbors classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for weights in ['uniform', 'distance']:
# we create an instance of Neighbours Classifier and fit the data.
clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
clf.fit(X, y)
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max] x [y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title("3-Class classification (k = %i, weights = '%s')"
% (n_neighbors, weights))
plt.show()
| bsd-3-clause |
chrsrds/scikit-learn | examples/manifold/plot_lle_digits.py | 6 | 9383 | """
=============================================================================
Manifold learning on handwritten digits: Locally Linear Embedding, Isomap...
=============================================================================
An illustration of various embeddings on the digits dataset.
The RandomTreesEmbedding, from the :mod:`sklearn.ensemble` module, is not
technically a manifold embedding method, as it learns a high-dimensional
representation on which we apply a dimensionality reduction method.
However, it is often useful to cast a dataset into a representation in
which the classes are linearly-separable.
t-SNE will be initialized with the embedding that is generated by PCA in
this example, which is not the default setting. It ensures global stability
of the embedding, i.e., the embedding does not depend on random
initialization.
Linear Discriminant Analysis, from the :mod:`sklearn.discriminant_analysis`
module, and Neighborhood Components Analysis, from the :mod:`sklearn.neighbors`
module, are supervised dimensionality reduction methods, i.e. they make use of
the provided labels, contrary to other methods.
"""
# Authors: Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2011
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import (manifold, datasets, decomposition, ensemble,
discriminant_analysis, random_projection, neighbors)
print(__doc__)
digits = datasets.load_digits(n_class=6)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
n_neighbors = 30
# ----------------------------------------------------------------------
# Scale and visualize the embedding vectors
def plot_embedding(X, title=None):
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
plt.figure()
ax = plt.subplot(111)
for i in range(X.shape[0]):
plt.text(X[i, 0], X[i, 1], str(y[i]),
color=plt.cm.Set1(y[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
if hasattr(offsetbox, 'AnnotationBbox'):
# only print thumbnails with matplotlib > 1.0
shown_images = np.array([[1., 1.]]) # just something big
for i in range(X.shape[0]):
dist = np.sum((X[i] - shown_images) ** 2, 1)
if np.min(dist) < 4e-3:
# don't show points that are too close
continue
shown_images = np.r_[shown_images, [X[i]]]
imagebox = offsetbox.AnnotationBbox(
offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
X[i])
ax.add_artist(imagebox)
plt.xticks([]), plt.yticks([])
if title is not None:
plt.title(title)
# ----------------------------------------------------------------------
# Plot images of the digits
n_img_per_row = 20
img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
for i in range(n_img_per_row):
ix = 10 * i + 1
for j in range(n_img_per_row):
iy = 10 * j + 1
img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))
plt.imshow(img, cmap=plt.cm.binary)
plt.xticks([])
plt.yticks([])
plt.title('A selection from the 64-dimensional digits dataset')
# ----------------------------------------------------------------------
# Random 2D projection using a random unitary matrix
print("Computing random projection")
rp = random_projection.SparseRandomProjection(n_components=2, random_state=42)
X_projected = rp.fit_transform(X)
plot_embedding(X_projected, "Random Projection of the digits")
# ----------------------------------------------------------------------
# Projection on to the first 2 principal components
print("Computing PCA projection")
t0 = time()
X_pca = decomposition.TruncatedSVD(n_components=2).fit_transform(X)
plot_embedding(X_pca,
"Principal Components projection of the digits (time %.2fs)" %
(time() - t0))
# ----------------------------------------------------------------------
# Projection on to the first 2 linear discriminant components
print("Computing Linear Discriminant Analysis projection")
X2 = X.copy()
X2.flat[::X.shape[1] + 1] += 0.01 # Make X invertible
t0 = time()
X_lda = discriminant_analysis.LinearDiscriminantAnalysis(n_components=2
).fit_transform(X2, y)
plot_embedding(X_lda,
"Linear Discriminant projection of the digits (time %.2fs)" %
(time() - t0))
# ----------------------------------------------------------------------
# Isomap projection of the digits dataset
print("Computing Isomap projection")
t0 = time()
X_iso = manifold.Isomap(n_neighbors, n_components=2).fit_transform(X)
print("Done.")
plot_embedding(X_iso,
"Isomap projection of the digits (time %.2fs)" %
(time() - t0))
# ----------------------------------------------------------------------
# Locally linear embedding of the digits dataset
print("Computing LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='standard')
t0 = time()
X_lle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_lle,
"Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
# ----------------------------------------------------------------------
# Modified Locally linear embedding of the digits dataset
print("Computing modified LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='modified')
t0 = time()
X_mlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_mlle,
"Modified Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
# ----------------------------------------------------------------------
# HLLE embedding of the digits dataset
print("Computing Hessian LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='hessian')
t0 = time()
X_hlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_hlle,
"Hessian Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
# ----------------------------------------------------------------------
# LTSA embedding of the digits dataset
print("Computing LTSA embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='ltsa')
t0 = time()
X_ltsa = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_ltsa,
"Local Tangent Space Alignment of the digits (time %.2fs)" %
(time() - t0))
# ----------------------------------------------------------------------
# MDS embedding of the digits dataset
print("Computing MDS embedding")
clf = manifold.MDS(n_components=2, n_init=1, max_iter=100)
t0 = time()
X_mds = clf.fit_transform(X)
print("Done. Stress: %f" % clf.stress_)
plot_embedding(X_mds,
"MDS embedding of the digits (time %.2fs)" %
(time() - t0))
# ----------------------------------------------------------------------
# Random Trees embedding of the digits dataset
print("Computing Totally Random Trees embedding")
hasher = ensemble.RandomTreesEmbedding(n_estimators=200, random_state=0,
max_depth=5)
t0 = time()
X_transformed = hasher.fit_transform(X)
pca = decomposition.TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
plot_embedding(X_reduced,
"Random forest embedding of the digits (time %.2fs)" %
(time() - t0))
# ----------------------------------------------------------------------
# Spectral embedding of the digits dataset
print("Computing Spectral embedding")
embedder = manifold.SpectralEmbedding(n_components=2, random_state=0,
eigen_solver="arpack")
t0 = time()
X_se = embedder.fit_transform(X)
plot_embedding(X_se,
"Spectral embedding of the digits (time %.2fs)" %
(time() - t0))
# ----------------------------------------------------------------------
# t-SNE embedding of the digits dataset
print("Computing t-SNE embedding")
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
t0 = time()
X_tsne = tsne.fit_transform(X)
plot_embedding(X_tsne,
"t-SNE embedding of the digits (time %.2fs)" %
(time() - t0))
# ----------------------------------------------------------------------
# NCA projection of the digits dataset
print("Computing NCA projection")
nca = neighbors.NeighborhoodComponentsAnalysis(init='random',
n_components=2, random_state=0)
t0 = time()
X_nca = nca.fit_transform(X, y)
plot_embedding(X_nca,
"NCA embedding of the digits (time %.2fs)" %
(time() - t0))
plt.show()
| bsd-3-clause |
mblondel/scikit-learn | examples/decomposition/plot_faces_decomposition.py | 204 | 4452 | """
============================
Faces dataset decompositions
============================
This example applies to :ref:`olivetti_faces` different unsupervised
matrix decomposition (dimension reduction) methods from the module
:py:mod:`sklearn.decomposition` (see the documentation chapter
:ref:`decompositions`).
"""
print(__doc__)
# Authors: Vlad Niculae, Alexandre Gramfort
# License: BSD 3 clause
import logging
from time import time
from numpy.random import RandomState
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.cluster import MiniBatchKMeans
from sklearn import decomposition
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
n_row, n_col = 2, 3
n_components = n_row * n_col
image_shape = (64, 64)
rng = RandomState(0)
###############################################################################
# Load faces data
dataset = fetch_olivetti_faces(shuffle=True, random_state=rng)
faces = dataset.data
n_samples, n_features = faces.shape
# global centering
faces_centered = faces - faces.mean(axis=0)
# local centering
faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)
print("Dataset consists of %d faces" % n_samples)
###############################################################################
def plot_gallery(title, images, n_col=n_col, n_row=n_row):
plt.figure(figsize=(2. * n_col, 2.26 * n_row))
plt.suptitle(title, size=16)
for i, comp in enumerate(images):
plt.subplot(n_row, n_col, i + 1)
vmax = max(comp.max(), -comp.min())
plt.imshow(comp.reshape(image_shape), cmap=plt.cm.gray,
interpolation='nearest',
vmin=-vmax, vmax=vmax)
plt.xticks(())
plt.yticks(())
plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)
###############################################################################
# List of the different estimators, whether to center and transpose the
# problem, and whether the transformer uses the clustering API.
estimators = [
('Eigenfaces - RandomizedPCA',
decomposition.RandomizedPCA(n_components=n_components, whiten=True),
True),
('Non-negative components - NMF',
decomposition.NMF(n_components=n_components, init='nndsvda', beta=5.0,
tol=5e-3, sparseness='components'),
False),
('Independent components - FastICA',
decomposition.FastICA(n_components=n_components, whiten=True),
True),
('Sparse comp. - MiniBatchSparsePCA',
decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8,
n_iter=100, batch_size=3,
random_state=rng),
True),
('MiniBatchDictionaryLearning',
decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
n_iter=50, batch_size=3,
random_state=rng),
True),
('Cluster centers - MiniBatchKMeans',
MiniBatchKMeans(n_clusters=n_components, tol=1e-3, batch_size=20,
max_iter=50, random_state=rng),
True),
('Factor Analysis components - FA',
decomposition.FactorAnalysis(n_components=n_components, max_iter=2),
True),
]
###############################################################################
# Plot a sample of the input data
plot_gallery("First centered Olivetti faces", faces_centered[:n_components])
###############################################################################
# Do the estimation and plot it
for name, estimator, center in estimators:
print("Extracting the top %d %s..." % (n_components, name))
t0 = time()
data = faces
if center:
data = faces_centered
estimator.fit(data)
train_time = (time() - t0)
print("done in %0.3fs" % train_time)
if hasattr(estimator, 'cluster_centers_'):
components_ = estimator.cluster_centers_
else:
components_ = estimator.components_
if hasattr(estimator, 'noise_variance_'):
plot_gallery("Pixelwise variance",
estimator.noise_variance_.reshape(1, -1), n_col=1,
n_row=1)
plot_gallery('%s - Train time %.1fs' % (name, train_time),
components_[:n_components])
plt.show()
| bsd-3-clause |
FaustinCarter/scraps | scraps/resonator.py | 1 | 30634 | import numpy as np
import lmfit as lf
import glob
import scipy.signal as sps
import pandas as pd
class Resonator(object):
r"""Fit an S21 measurement of a hanger (or notch) type resonator.
Parameters
----------
name : string
The resonator name. Does not have to be unique, but each physical
resonator in the experiment should have a unique name to avoid
confusion when using some of the other tools in scraps.
temp : float
The temperature in (K) that the S21 measurement was taken at.
pwr : float
The power (in dBm) at which the resonator was measured.
freq : array-like[nDataPoints]
The frequency points at which the S21 scan was measured.
I : array-like[nDataPoints]
The in-phase (or real part) of the complex S21 measurement. Units
are typically volts, and I should be specified in linear units (as
opposed to dB).
Q : array-like[nDataPoints]
The out-of-phase (or imaginary part) of the complex S21 measurement.
        Units are typically volts, and Q should be specified in linear units
(as opposed to dB).
sigmaI : array-like[nDataPoints] (optional)
        An array of uncertainty values for each data point in `I`. Default
is ``None``.
sigmaQ : array-like[nDataPoints] (optional)
        An array of uncertainty values for each data point in `Q`. Default
is ``None``.
The following attributes are automatically calculated and added during
initialization.
Attributes
----------
name : string
The resonator name passed at initialization.
temp : float
The temperature passed at initialization.
pwr : float
The power passed at initialization.
freq : array-like[nDataPoints]
The frequency points passed at initialization.
I : array-like[nDataPoints]
The I data points passed at initialization.
Q : array-like[nDataPoints]
The Q data points passed at initialization.
sigmaI : array-like[nDataPoints]
The sigmaI values passed at initialization.
sigmaQ : array-like[nDataPoints]
The sigmaQ values passed at initialization.
S21 : array-like[nDataPoints]
The complex transmission ``S21 = I + 1j*Q``.
phase : array-like[nDataPoints]
The raw phase ``phase = np.arctan2(Q, I)``.
uphase : array-like[nDataPoints]
The unwrapped phase is equivalent to the phase, but with jumps of 2 Pi
removed.
mag : array-like[nDataPoints]
The magnitude ``mag = np.abs(S21)`` or, equivalently ``mag =
np.sqrt(I**2 + Q**2)``.
hasFit : bool
Indicates whether or not ``Resonator.do_lmfit`` method has been called.
lmfit_result : ``lmfit.Result`` object
The result object created by ``lmfit`` containing all the fit
        information. Some of the fit information is further extracted for
convenience in the following Attributes. For an exhaustive list of the
attributes of lmfit_result see the docs for ``lmfit``. The most useful
attribute of this object is ``lmfit_result.params``, which contains the
best-fit parameter values.
residualI : array-like[nDataPoints]
        The residual of the fit model against the `I` data, weighted by the
uncertainties.
residualQ : array-like[nDataPoints]
        The residual of the fit model against the `Q` data, weighted by the
uncertainties.
resultI : array-like[nDataPoints]
The best ``lmfit`` fit result to the fit model for `I`.
resultQ : array-like[nDataPoints]
The best ``lmfit`` fit result to the fit model for `Q`.
resultMag : array-like[nDataPoints]
``resultMag = np.abs(resultI + 1j*resultQ)``
resultPhase : array-like[nDataPoints]
``resultPhase = np.arctan2(resultQ/resultI)``
emcee_result : ``lmfit.Result`` object
This object is nearly identical to the `lmfit_result` object, but also
        contains the maximum-likelihood values for the *varying* parameters of
the fit model as well as the `chains` returned by ``emcee``. The most
important attribute is probably ``emcee_result.flatchain``, which can be
passed directly to ``pygtc`` or ``corner`` to make a really nice
GTC/Triangle/Corner plot. For an exhaustive list of the attributes of
emcee_result see the docs for ``lmfit``, specifically the section
involving the ``lmfit`` implementation of ``emcee``.
mle_vals : list of float
        The maximum-likelihood estimates of the *varying* parameters in the
fit model as calculated by ``emcee``. Unpacked here for convenience from
``emcee_result.params``.
mle_labels: list of string
The parameter names of the values in `mle_vals`. Provided here for easy
passing to ``pygtc`` or ``corner``.
magBaseLine : array-like[nDataPoints]
The best initial guess of the baseline of the magnitude. Calculated by
        fitting a quadratic polynomial to the beginning and end of the magnitude
vs frequency curve.
phaseBaseLine: array-like[nDataPoints]
The best initial guess of the baseline of the phase. Calculated by
fitting a line to the beginning and end of the phase vs frequency curve.
This is equivalent to calculating the electrical delay in the
measurement lines.
params : ``lmfit.Parameters`` object
The initial parameter guesses for fitting the `S21` data. See ``lmfit``
documentation for a complete overview. To get the parameter names, call
``params.keys()``. Default is ``None``. Initialize params by calling
``Resonator.load_params``. Delete params with
``Resonator.torch_params``.
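    Example
    -------
    A minimal usage sketch (illustrative only; ``my_params_fn`` and
    ``my_fit_fn`` are placeholder functions you must supply, following the
    signatures described in ``load_params`` and ``do_lmfit``, and ``freq``,
    ``I``, and ``Q`` are existing data arrays)::
        res = Resonator('R1', temp=0.1, pwr=-20, freq=freq, I=I, Q=Q)
        res.load_params(my_params_fn)
        res.do_lmfit(my_fit_fn)
        print(res.lmfit_result['default']['values'])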
"""
#Do some initialization
def __init__(self, name, temp, pwr, freq, I, Q, sigmaI = None, sigmaQ = None):
r"""Initializes a resonator object by calculating magnitude, phase, and
a bunch of fit parameters for a hanger (or notch) type S21 measurement.
"""
self.name = name
self.temp = temp
self.pwr = pwr
self.freq = np.asarray(freq)
self.I = np.asarray(I)
self.Q = np.asarray(Q)
self.sigmaI = np.asarray(sigmaI) if sigmaI is not None else None
self.sigmaQ = np.asarray(sigmaQ) if sigmaQ is not None else None
self.S21 = I + 1j*Q
self.phase = np.arctan2(Q,I) #use arctan2 because it is quadrant-aware
self.uphase = np.unwrap(self.phase) #Unwrap the 2pi phase jumps
self.mag = np.abs(self.S21) #Units are volts.
self.logmag = 20*np.log10(self.mag) #Units are dB (20 because V->Pwr)
#Find the frequency at magnitude minimum (this can, and should, be
#overwritten by a custom params function)
self.fmin = self.freq[np.argmin(self.mag)]
#Whether or not params has been initialized
self.params = None
self.hasParams = False
#These won't exist until the lmfit method is called
self.lmfit_result = None
#These are scheduled for deprecation. They will eventually live in the lmfit_result dictionary
self.hasFit = False
self.residualI = None
self.residualQ = None
self.resultI = None
self.resultQ = None
self.resultMag = None
self.resultPhase = None
#These won't exist until the emcee method is called
self.emcee_result = None
#These are scheduled for deprecation. They will eventually live in the lmfit_result dictionary
self.hasChain = False
self.mle_vals = None
self.mle_labels = None
def to_disk(self):
"""To be implemented: dumps resonator to disk as various file types. Default will be netcdf4"""
pass
def from_disk(self):
"""To be implemented: load resonator object from disk."""
pass
def to_json(self):
"""To be implemented: serialize resonator as a JSON string"""
pass
def from_json(self):
"""To be implemented: create rsonator from JSON string"""
pass
#TODO: Implement the following for handling pickling:
#def __getstate__(self):
# pass
#def __setstate__(self):
# pass
def load_params(self, paramsFn, **kwargs):
"""Load up a lmfit Parameters object for a custom fit function.
Parameters
----------
paramsFn : method
            The paramsFn method should return an ``lmfit.Parameters`` object.
            This object will be passed to the fit method when ``do_lmfit`` or
            ``do_emcee`` is called.
kwargs : dict
A dictionary of keyword arguments to pass to paramsFn.
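        Example
        -------
        A minimal paramsFn sketch (illustrative; the parameter names here are
        arbitrary placeholders and must match whatever your fit function
        expects)::
            import lmfit as lf
            def my_params_fn(res, **kwargs):
                params = lf.Parameters()
                params.add('f0', value=res.fmin)
                params.add('qi', value=1e5, min=0)
                return params
            res.load_params(my_params_fn)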
"""
params = paramsFn(self, **kwargs)
self.params = params
self.hasParams = True
def torch_params(self):
"""Reset params attribute to ``None``."""
self.params = None
self.hasParams = False
def do_lmfit(self, fitFn, label='default', fit_type='IQ', **kwargs):
r"""Run lmfit on an existing resonator object and update the results.
Parameters
----------
fitFn : function
fitFn must have the signature fitFn(params, res, residual, **kwargs).
If residual == True, fitFn must return a 1D list-like object of
residuals with form [I residual, Q residual] where [A, B] means
concatenate. Otherwise it must return the model data in the same form.
label: string
A label to use as a key when storing results from the fit to the
lmfit_results dict.
fit_type: string
Indicates the type of fit to be run. For some types of fits, certain
quantities will automatically be calculated and added to the resonator
object. For instance, 'IQ' will cause the magnitude, phase, I, and Q
as well as associated residuals to be calculated.
kwargs : optional keywords
Use this to override any of the lmfit parameter initial guesses or
            toggle whether the parameter varies. Example: ``qi=1e6`` is equivalent
to calling ``Resonator.params['qi'].value = 1e6``. Example:
``qi_vary=False`` will set ``Resonator.params['qi'].vary = False``.
Any parameter name can be used in this way.
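        Example
        -------
        A minimal fitFn sketch (illustrative only; ``model_I`` and ``model_Q``
        are stand-ins for whatever S21 model you are actually fitting)::
            def my_fit_fn(params, res, residual=True, **kwargs):
                modelI = model_I(params, res.freq)
                modelQ = model_Q(params, res.freq)
                model = np.concatenate((modelI, modelQ))
                if residual:
                    return model - np.concatenate((res.I, res.Q))
                return model
            res.do_lmfit(my_fit_fn)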
"""
assert self.hasParams == True, "Must load params before running a fit."
#Update any of the default Parameter guesses
if kwargs is not None:
for key, val in kwargs.items():
#Allow for turning on and off parameter variation
if '_vary' in key:
key = key.split('_')[0]
if key in self.params.keys():
if (val is True) or (val is False):
self.params[key].vary = val
elif key in self.params.keys():
self.params[key].value = val
else:
raise ValueError("Unknown key: "+key)
# #Make complex vectors of the form cData = [reData, imData]
# cmplxData = np.concatenate((self.I, self.Q), axis=0)
# if (self.sigmaI is not None) and (self.sigmaQ is not None):
# cmplxSigma = np.concatenate((self.sigmaI, self.sigmaQ), axis=0)
# else:
# cmplxSigma = None
# #Create a lmfit minimizer object
# minObj = lf.Minimizer(fitFn, self.params, fcn_args=(self.freq, cmplxData, cmplxSigma))
#Create a lmfit minimizer object
minObj = lf.Minimizer(fitFn, self.params, fcn_args=(self, True))
        #Call the lmfit minimizer method and minimize the residual
        lmfit_result = minObj.minimize(method='leastsq')
if self.lmfit_result is None:
self.lmfit_result = {}
self.lmfit_result[label] = {}
self.lmfit_result[label]['fit_type'] = fit_type
self.lmfit_result[label]['result'] = lmfit_result
self.lmfit_result[label]['values'] = np.asarray([val.value for key, val in lmfit_result.params.items() if val.vary is True])
self.lmfit_result[label]['labels'] = [key for key, val in lmfit_result.params.items() if val.vary is True]
#NOTE: These are likely to be deprecated
if label == 'default':
self.lmfit_vals = self.lmfit_result[label]['values']
self.lmfit_labels = self.lmfit_result[label]['labels']
#Set the hasFit flag NOTE:(scheduled for deprecation)
self.hasFit = True
#NOTE: This whole block may be deprecated
if (fit_type == 'IQ') and (label == 'default'):
#Add the data back to the final minimized residual to get the final fit
#Also calculate all relevant curves
cmplxResult = fitFn(self.lmfit_result[label]['result'].params, self, residual=False)
cmplxResidual = self.lmfit_result[label]['result'].residual
#Split the complex data back up into real and imaginary parts
residualI, residualQ = np.split(cmplxResidual, 2)
resultI, resultQ = np.split(cmplxResult, 2)
resultMag = np.abs(resultI + 1j*resultQ)
resultPhase = np.arctan2(resultQ,resultI)
#Add some results back to the resonator object
self.residualI = residualI
self.residualQ = residualQ
self.resultI = resultI
self.resultQ = resultQ
self.resultMag = resultMag
self.resultPhase = resultPhase
def torch_lmfit(self, label='default'):
r"""Reset all the lmfit attributes to ``None`` and set ``hasFit = False``.
Parameters
----------
label : string (optional)
Choose which fit to kill off.
Return
------
deleted_fit : dict or None
Return the fit that was deleted or None
"""
deleted_fit = None
if self.lmfit_result is not None:
if label in self.lmfit_result.keys():
deleted_fit = self.lmfit_result.pop(label)
if label == 'default':
self.lmfit_vals = None
self.lmfit_labels = None
if (deleted_fit['fit_type'] == 'IQ') and label == 'default':
self.residualI = None
self.residualQ = None
self.resultI = None
self.resultQ = None
self.resultMag = None
self.resultPhase = None
if len(self.lmfit_result.keys()) == 0:
self.lmfit_result = None
self.hasFit = False
return deleted_fit
def do_emcee(self, fitFn, label='default', **kwargs):
r"""Run the Monte-Carlo Markov Chain routine to generate samples for
each parameter given a model.
Parameters
----------
fitFn : function
fitFn must have the signature fitFn(params, res, residual, **kwargs).
If residual == True, fitFn must return a 1D list-like object of
residuals with form [I residual, Q residual] where [A, B] means
concatenate. Otherwise it must return the model data in the same form.
label : string (optional)
A label to assign to the fit results. This will be the dict key they
are stored under in the emcee_results dict. Also, if label matches a
label in lmfit_results, then that params object will be used to seed
the emcee fit.
kwargs : optional keyword arguments
These are passed through to the ``lmfit.Minimizer.emcee`` method.
See the ``lmfit`` documentation for more information.
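        Example
        -------
        Illustrative call (``my_fit_fn`` is a placeholder fit function; the
        keyword arguments shown are simply passed through to
        ``lmfit.Minimizer.emcee``)::
            res.do_emcee(my_fit_fn, nwalkers=50, steps=1000, burn=200)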
"""
#Should do the following (have not implemented any of this yet):
#Pack MLE values into their own params object by adding back in non-varying Parameters
#Should consider the ability to filter results for better parameter estimations
#Probably should make a nice easy output to the corner Package
#Smart way to add in error parameter as nuisance without breaking auto-guessing
#minimizerObj.emcee already updates parameters object to result
#This means can call res.emcee_result.params to get results
#Create a lmfit minimizer object
        #Seed emcee with an existing best-fit result if one matches this label,
        #otherwise fall back to the initial parameter guesses in self.params
        if self.hasFit and (self.lmfit_result is not None) and (label in self.lmfit_result.keys()):
            emcee_params = self.lmfit_result[label]['result'].params
        else:
            assert self.hasParams == True, "Must load params before running emcee."
            emcee_params = self.params
minObj = lf.Minimizer(fitFn, emcee_params, fcn_args=(self, True))
#Run the emcee and add the result in
emcee_result = minObj.emcee(**kwargs)
if self.emcee_result is None:
self.emcee_result = {}
self.emcee_result[label] = {}
self.emcee_result[label]['result'] = emcee_result
#Get the emcee 50th percentile data and uncertainties at 16th and 84th percentiles
emcee_vals = np.asarray([np.percentile(emcee_result.flatchain[key], 50) for key in emcee_result.flatchain.keys()])
err_plus = np.asarray([np.percentile(emcee_result.flatchain[key], 84) for key in emcee_result.flatchain.keys()])
err_minus = np.asarray([np.percentile(emcee_result.flatchain[key], 16) for key in emcee_result.flatchain.keys()])
#Pack these values into the fit storage dict
self.emcee_result[label]['values'] = emcee_vals
        #Make a list of tuples that are (+err, -err) for each parameter
self.emcee_result[label]['emcee_sigmas'] = list(zip(err_plus-emcee_vals, emcee_vals-err_minus))
        #It is also useful to have easy access to the maximum-likelihood estimates
self.emcee_result[label]['mle_vals'] = emcee_result.flatchain.iloc[np.argmax(emcee_result.lnprob)]
#This is useful because only varying parameters have mle vals
self.emcee_result[label]['mle_labels'] = self.emcee_result[label]['mle_vals'].keys()
if label == 'default':
self.emcee_vals = self.emcee_result[label]['values']
            #Make a list of tuples that are (+err, -err) for each parameter
self.emcee_sigmas = self.emcee_result[label]['emcee_sigmas']
            #It is also useful to have easy access to the maximum-likelihood estimates
self.mle_vals = self.emcee_result[label]['mle_vals']
#This is useful because only varying parameters have mle vals
self.mle_labels = self.emcee_result[label]['mle_labels']
#This is also nice to have explicitly for passing to triangle-plotting routines
self.chain = emcee_result.flatchain.copy()
self.hasChain = True
def burn_flatchain(self, num_samples=0, label='default'):
r"""Burns off num_samples samples from each of the chains and then reflattens. Recalculates all
statistical quantities associated with the emcee run and saves them under the original
label, but with the suffix '_burn' appended to the various keys. Does not modify original chain."""
flatchain_with_burn = pd.DataFrame()
chains = self.emcee_result[label]['result'].chain
for ix, chain in enumerate(chains.T):
flatchain_with_burn[self.emcee_result[label]['mle_labels'][ix]] = chain[num_samples:].flat
#Get the emcee 50th percentile data and uncertainties at 16th and 84th percentiles
emcee_vals = np.asarray([np.percentile(flatchain_with_burn[key], 50) for key in flatchain_with_burn.keys()])
err_plus = np.asarray([np.percentile(flatchain_with_burn[key], 84) for key in flatchain_with_burn.keys()])
err_minus = np.asarray([np.percentile(flatchain_with_burn[key], 16) for key in flatchain_with_burn.keys()])
#Pack these values into the fit storage dict with suffix _burn
self.emcee_result[label]['values_burn'] = emcee_vals
        #Make a list of tuples that are (+err, -err) for each parameter
self.emcee_result[label]['emcee_sigmas_burn'] = list(zip(err_plus-emcee_vals, emcee_vals-err_minus))
#TODO: Implement this!
        #It is also useful to have easy access to the maximum-likelihood estimates
#self.emcee_result[label]['mle_vals_burn'] = flatchain_with_burn.iloc[np.argmax(emcee_result.lnprob)]
#Add the burned flatchain in its own key
self.emcee_result[label]['flatchain_burn'] = flatchain_with_burn
def torch_emcee(self, label='default'):
r"""Set the emcee-related attributes to ``None`` and ``hasChain = False``.
Parameters
----------
label : string (optional)
Which fit to torch
Return
------
deleted_fit : dict
The fit that is deleted is returned, or None."""
deleted_fit = None
if self.emcee_result is not None:
if label in self.emcee_result.keys():
deleted_fit = self.emcee_result.pop(label)
if label == 'default':
self.emcee_vals = None
self.emcee_sigmas = None
self.mle_vals = None
self.mle_labels = None
self.chain = None
if len(self.emcee_result.keys()) == 0:
self.hasChain = False
self.emcee_result = None
return deleted_fit
#This creates a resonator object from a data dictionary. Optionally performs a fit, and
#adds the fit data back in to the resonator object
def makeResFromData(dataDict, paramsFn = None, fitFn = None, fitFn_kwargs=None, paramsFn_kwargs=None):
"""Create a Resonator object from a data dictionary.
Parameters
----------
dataDict : dict
Must have the following keys: 'I', 'Q', 'temp', 'pwr', 'freq', 'name'.
Optional keys are: 'sigmaI', 'sigmaQ'
paramsFn : function (optional)
A function that initializes and returns an lmfit parameters object for
passing to fitFn.
fitFn : function (optional)
If a fit function is passed, an lmfit minimization will be done
automatically.
fitFn_kwargs : dict (optional)
A dict of keyword arguments passed to fitFn.
paramsFn_kwargs: dict (optional)
A dict of keyword arguments passed to paramsFn.
Returns
-------
res : ``Resonator`` object or ``None``
A Resonator object or ``None`` if there is an error loading the data.
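    Example
    -------
    A minimal sketch (illustrative; assumes ``freq``, ``I``, and ``Q`` are
    existing arrays of equal length)::
        dataDict = {'name': 'R1', 'temp': 0.1, 'pwr': -20,
                    'freq': freq, 'I': I, 'Q': Q}
        res = makeResFromData(dataDict)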
"""
if fitFn is not None:
assert paramsFn is not None, "Cannot pass a fitFn without also passing a paramsFn"
#Check dataDict for validity
expectedKeys = ['name', 'temp', 'pwr', 'freq', 'I', 'Q']
assert all(key in dataDict.keys() for key in expectedKeys), "Your dataDict is missing one or more keys"
resName = dataDict['name']
temp = dataDict['temp']
pwr = dataDict['pwr']
freqData = dataDict['freq']
IData = dataDict['I']
QData = dataDict['Q']
#Process the optional keys
if 'sigmaI' in dataDict.keys():
sigmaI = dataDict['sigmaI']
else:
sigmaI = None
if 'sigmaQ' in dataDict.keys():
sigmaQ = dataDict['sigmaQ']
else:
sigmaQ = None
#create Resonator object
res = Resonator(resName, temp, pwr, freqData, IData, QData, sigmaI, sigmaQ)
#Process the fit parameters
if paramsFn is not None:
if paramsFn_kwargs is not None:
res.load_params(paramsFn, **paramsFn_kwargs)
else:
res.load_params(paramsFn)
#Run a fit on the resonator if a fit function is specified
if fitFn is not None:
if fitFn_kwargs is not None:
res.do_lmfit(fitFn, **fitFn_kwargs)
else:
res.do_lmfit(fitFn)
#Return resonator object
return res
def makeResList(fileFunc, dataPath, resName, **fileFunc_kwargs):
"""Create a list of resonator objects from a directory of dataDict
Parameters
----------
fileFunc : function
A function that converts a single data file into a dictionary. The
resulting dictionary must have the following keys: 'I', 'Q', 'temp',
        'pwr', 'freq', 'name', and may have the following optional keys:
'sigmaI', 'sigmaQ'
dataPath : string
Path to the directory containing the data files that will be processed
by fileFunc.
resName : string
The name of your resonator. This can be anything, but it is useful to
use the same name for every data file that comes from the same physical
resonator.
fileFunc_kwargs : dict
Keyword arguments to pass through to the fileFunc
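    Example
    -------
    A minimal fileFunc sketch (illustrative; the text format and the
    ``parse_metadata`` helper are placeholders for however your data files
    are actually written)::
        def my_file_func(path):
            freq, I, Q = np.loadtxt(path, unpack=True)
            name, temp, pwr = parse_metadata(path)
            return {'name': name, 'temp': temp, 'pwr': pwr,
                    'freq': freq, 'I': I, 'Q': Q}
        resList = makeResList(my_file_func, './data/', 'RES1')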
"""
#Find the files that match the resonator you care about
fileList = glob.glob(dataPath + resName + '_*')
#loop through files and process all the data
fileDataDicts = []
for f in fileList:
fileDataDicts.append(fileFunc(f, **fileFunc_kwargs))
#Create resonator objects from the data
    #makeResFromData returns a Resonator object built from each dataDict
resList = [makeResFromData(fileDataDict) for fileDataDict in fileDataDicts]
return resList
#Index a list of resonator objects easily
def indexResList(resList, temp=None, pwr=None, **kwargs):
"""Index resList by temp and pwr.
Parameters
----------
resList : list-like
resList is a list of ``scraps.Resonator`` objects
temp : numeric
The temperature of a single Resonator object.
pwr : int
The power of a single Resonator object
itemp : boolean (optional)
Switch to determine whether lookup uses temp or itemp (rounded value of
temp). Default is ``False``.
Returns
-------
index : int or list
Index is the index of the Resonator in resList or a list of indices of
all matches if only pwr or only temp is specified.
Notes
-----
indexResList does not check for duplicates and will return the first match.
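    Example
    -------
    Illustrative usage (assumes ``resList`` was built with ``makeResList``)::
        ix = indexResList(resList, temp=0.1, pwr=-20)
        res = resList[ix]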
"""
itemp = kwargs.pop('itemp', False)
assert itemp in [True, False], "'itemp' must be boolean."
assert (pwr is not None) or (temp is not None), "Must specify at least either a temp or a pwr."
if (pwr is not None) and (temp is not None):
for index, res in enumerate(resList):
if itemp is True:
if res.itemp == temp and res.pwr == pwr:
return index
else:
if np.isclose(res.temp, temp) and res.pwr == pwr:
return index
elif (pwr is None):
index = []
for ix, res in enumerate(resList):
if itemp is True:
if res.itemp == temp:
index.append(ix)
else:
                if np.isclose(res.temp, temp):
                    index.append(ix)
        return index
elif (temp is None):
index = []
for ix, res in enumerate(resList):
if res.pwr == pwr:
index.append(ix)
return index
return None
def print_resList(resList):
"""Print all the temperatures and powers in a table-like form"""
#Get all possible powers
pwrs = np.unique([res.pwr for res in resList])
#This will hold a list of temps at each power
tlists = []
max_len = 0
#Populate the lists of temps for each power
for p in pwrs:
tlist = [res.temp for res in resList if res.pwr == p]
tlist.sort()
tlists.append(tlist)
if len(tlist) > max_len:
max_len = len(tlist)
for ix, tlist in enumerate(tlists):
pad = max_len - len(tlist)
tlist = tlist + pad*['NaN']
tlists[ix] = tlist
block = zip(*tlists)
print(repr(list(pwrs)).replace(',', ',\t'))
for b in block:
print(repr(b).replace(',', ',\t'))
def block_check_resList(resList, sdev=0.005, prune=False, verbose=True):
"""Helper tool for preparing a resList with missing data for resSweep"""
#Get all possible powers
pwrs = np.unique([res.pwr for res in resList])
#This will hold a list of temps at each power
tlists = []
#Populate the lists of temps for each power
for p in pwrs:
tlist = [res.temp for res in resList if res.pwr == p]
tlist.sort()
tlists.append(tlist)
#Calculate the lengths and find the shortest one
lens = [len(tl) for tl in tlists]
shortest = min(lens)
if all(el == shortest for el in lens) and verbose:
print('All lists have same length.')
else:
print('Lengths for each set of powers: ',list(zip(pwrs,lens)))
#Zip the lists into tuples and take the standard deviation
#of each tuple. All the elements in each tuple should be
#nominally the same, so the stdev should be small unless
#one of the elements doesn't match. Return the first
#instance of the stdev being too high
block = list(zip(*tlists))
bad_ix = np.argmax([np.std(x) > sdev for x in block])
#If the first row is returned, everything could be ok. Check first row.
if bad_ix == 0:
if np.std(block[0]) < sdev:
bad_ix = -1
if verbose:
print("Bad index: ", bad_ix)
if bad_ix >= 0:
if verbose:
for i in np.arange(-2,3):
if (bad_ix+i < len(block)) and (bad_ix+i >= 0):
print(repr(block[bad_ix+i]).replace(',', ',\t'))
block_ixs = []
for block_ix, block_temp in enumerate(block[bad_ix+i]):
block_ixs.append(indexResList(resList, block_temp, pwrs[block_ix]))
print(repr(block_ixs).replace(',', ',\t'))
#The longer list is where the extra file is most likely
#so return the temp, power, and resList index of the
#suspect.
for i, x in enumerate(block[bad_ix]):
if np.abs(x-np.mean(block[bad_ix])) > np.std(block[bad_ix]):
tl = tlists[i]
t = tl[bad_ix]
p = pwrs[i]
res_ix = indexResList(resList, t, p)
if verbose:
print('T=',t, 'P=',p, 'Res index=',res_ix)
if prune:
resList.pop(res_ix)
| mit |
arank/mxnet | example/kaggle-ndsb1/gen_img_list.py | 15 | 6226 | from __future__ import print_function
import csv
import os
import sys
import random
import numpy as np
import argparse
parser = argparse.ArgumentParser(description='generate train/test image list files from input directory. If training it will also split into tr and va sets.')
parser.add_argument('--image-folder', type=str, default="data/train/",
help='the input data directory')
parser.add_argument('--out-folder', type=str, default="data/",
help='the output folder')
parser.add_argument('--out-file', type=str, default="train.lst",
help='the output lst file')
parser.add_argument('--train', action='store_true',
help='if we are generating training list and hence we have to loop over subdirectories')
## These options are only used if we are doing training lst
parser.add_argument('--percent-val', type=float, default=0.25,
help='the percentage of training list to use as validation')
parser.add_argument('--stratified', action='store_true',
help='if True it will split train lst into tr and va sets using stratified sampling')
args = parser.parse_args()
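# Example invocations (illustrative paths; adjust to your local layout):
#   python gen_img_list.py --image-folder data/train/ --out-folder data/ --out-file train.lst --train --stratified
#   python gen_img_list.py --image-folder data/test/ --out-folder data/ --out-file test.lst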
random.seed(888)
fo_name=os.path.join(args.out_folder+args.out_file)
fo = csv.writer(open(fo_name, "w"), delimiter='\t', lineterminator='\n')
if args.train:
tr_fo_name=os.path.join(args.out_folder+"tr.lst")
va_fo_name=os.path.join(args.out_folder+"va.lst")
tr_fo = csv.writer(open(tr_fo_name, "w"), delimiter='\t', lineterminator='\n')
va_fo = csv.writer(open(va_fo_name, "w"), delimiter='\t', lineterminator='\n')
#check sampleSubmission.csv from kaggle website to view submission format
head = "acantharia_protist_big_center,acantharia_protist_halo,acantharia_protist,amphipods,appendicularian_fritillaridae,appendicularian_s_shape,appendicularian_slight_curve,appendicularian_straight,artifacts_edge,artifacts,chaetognath_non_sagitta,chaetognath_other,chaetognath_sagitta,chordate_type1,copepod_calanoid_eggs,copepod_calanoid_eucalanus,copepod_calanoid_flatheads,copepod_calanoid_frillyAntennae,copepod_calanoid_large_side_antennatucked,copepod_calanoid_large,copepod_calanoid_octomoms,copepod_calanoid_small_longantennae,copepod_calanoid,copepod_cyclopoid_copilia,copepod_cyclopoid_oithona_eggs,copepod_cyclopoid_oithona,copepod_other,crustacean_other,ctenophore_cestid,ctenophore_cydippid_no_tentacles,ctenophore_cydippid_tentacles,ctenophore_lobate,decapods,detritus_blob,detritus_filamentous,detritus_other,diatom_chain_string,diatom_chain_tube,echinoderm_larva_pluteus_brittlestar,echinoderm_larva_pluteus_early,echinoderm_larva_pluteus_typeC,echinoderm_larva_pluteus_urchin,echinoderm_larva_seastar_bipinnaria,echinoderm_larva_seastar_brachiolaria,echinoderm_seacucumber_auricularia_larva,echinopluteus,ephyra,euphausiids_young,euphausiids,fecal_pellet,fish_larvae_deep_body,fish_larvae_leptocephali,fish_larvae_medium_body,fish_larvae_myctophids,fish_larvae_thin_body,fish_larvae_very_thin_body,heteropod,hydromedusae_aglaura,hydromedusae_bell_and_tentacles,hydromedusae_h15,hydromedusae_haliscera_small_sideview,hydromedusae_haliscera,hydromedusae_liriope,hydromedusae_narco_dark,hydromedusae_narco_young,hydromedusae_narcomedusae,hydromedusae_other,hydromedusae_partial_dark,hydromedusae_shapeA_sideview_small,hydromedusae_shapeA,hydromedusae_shapeB,hydromedusae_sideview_big,hydromedusae_solmaris,hydromedusae_solmundella,hydromedusae_typeD_bell_and_tentacles,hydromedusae_typeD,hydromedusae_typeE,hydromedusae_typeF,invertebrate_larvae_other_A,invertebrate_larvae_other_B,jellies_tentacles,polychaete,protist_dark_center,protist_fuzzy_olive,protist_noctiluca,protist_other,protist_star,pteropod_butterfly,pteropod_theco_dev_seq,pteropod_triangle,radiolarian_chain,radiolarian_colony,shrimp_caridean,shrimp_sergestidae,shrimp_zoea,shrimp-like_other,siphonophore_calycophoran_abylidae,siphonophore_calycophoran_rocketship_adult,siphonophore_calycophoran_rocketship_young,siphonophore_calycophoran_sphaeronectes_stem,siphonophore_calycophoran_sphaeronectes_young,siphonophore_calycophoran_sphaeronectes,siphonophore_other_parts,siphonophore_partial,siphonophore_physonect_young,siphonophore_physonect,stomatopod,tornaria_acorn_worm_larvae,trichodesmium_bowtie,trichodesmium_multiple,trichodesmium_puff,trichodesmium_tuft,trochophore_larvae,tunicate_doliolid_nurse,tunicate_doliolid,tunicate_partial,tunicate_salp_chains,tunicate_salp,unknown_blobs_and_smudges,unknown_sticks,unknown_unclassified".split(',')
# make image list
img_lst = []
cnt = 0
if args.train:
for i in xrange(len(head)):
path = args.image_folder + head[i]
lst = os.listdir(args.image_folder + head[i])
for img in lst:
img_lst.append((cnt, i, path + '/' + img))
cnt += 1
else:
lst = os.listdir(args.image_folder)
for img in lst:
img_lst.append((cnt, 0, args.image_folder + img))
cnt += 1
# shuffle
random.shuffle(img_lst)
#write
for item in img_lst:
fo.writerow(item)
## If training, split into train and validation lists (tr.lst and va.lst)
## Optional stratified sampling
if args.train:
img_lst=np.array(img_lst)
if args.stratified:
from sklearn.cross_validation import StratifiedShuffleSplit
## Stratified sampling to generate train and validation sets
labels_train=img_lst[:,1]
# unique_train, counts_train = np.unique(labels_train, return_counts=True) # To have a look at the frecuency distribution
sss = StratifiedShuffleSplit(labels_train, 1, test_size=args.percent_val, random_state=0)
for tr_idx, va_idx in sss:
print("Train subset has ", len(tr_idx), " cases. Validation subset has ", len(va_idx), "cases")
else:
(nRows, nCols) = img_lst.shape
splitat=int(round(nRows*(1-args.percent_val),0))
tr_idx=range(0,splitat)
va_idx=range(splitat,nRows)
print("Train subset has ", len(tr_idx), " cases. Validation subset has ", len(va_idx), "cases")
tr_lst=img_lst[tr_idx,:].tolist()
va_lst=img_lst[va_idx,:].tolist()
for item in tr_lst:
tr_fo.writerow(item)
for item in va_lst:
va_fo.writerow(item)
| apache-2.0 |
willgrass/pandas | pandas/core/common.py | 1 | 2472 | from cStringIO import StringIO
from numpy.lib.format import read_array, write_array
import numpy as np
import pandas.lib.tseries as tseries
def isnull(input):
'''
Replacement for numpy.isnan / -numpy.isfinite which is suitable
for use on object arrays.
Parameters
----------
    input : ndarray or object value
Returns
-------
boolean ndarray or boolean
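    Example
    -------
    Illustrative behavior on a float array::
        isnull(np.array([1.0, np.nan]))  # -> array([False, True])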
'''
if isinstance(input, np.ndarray):
if input.dtype.kind in ('O', 'S'):
# Working around NumPy ticket 1542
result = input.copy().astype(bool)
result[:] = tseries.isnullobj(input)
else:
result = -np.isfinite(input)
else:
result = tseries.checknull(input)
return result
def notnull(input):
'''
Replacement for numpy.isfinite / -numpy.isnan which is suitable
for use on object arrays.
Parameters
----------
    input : ndarray or object value
Returns
-------
boolean ndarray or boolean
'''
if isinstance(input, np.ndarray):
return -isnull(input)
else:
return not tseries.checknull(input)
def _pickle_array(arr):
arr = arr.view(np.ndarray)
buf = StringIO()
write_array(buf, arr)
return buf.getvalue()
def _unpickle_array(bytes):
arr = read_array(StringIO(bytes))
return arr
def _to_str(x):
# str(x) fails when x contains unicode
return '%s' % x
def _pfixed(s, space, nanRep=None, float_format=None):
if isinstance(s, float):
if nanRep is not None and isnull(s):
if np.isnan(s):
return nanRep.ljust(space)
else:
return str(s).ljust(space)
if float_format:
formatted = float_format(s)
else:
formatted = '%.4g' % s
return formatted.ljust(space)
else:
return _to_str(s)[:space].ljust(space)
def get_indexer(source, target, fill_method):
if fill_method:
fill_method = fill_method.upper()
indexer, mask = tseries.getFillVec(source, target, source.indexMap,
target.indexMap, fill_method)
return indexer, mask
def null_out_axis(arr, mask, axis):
if axis == 0:
arr[mask] = np.NaN
else:
indexer = [slice(None)] * arr.ndim
indexer[axis] = mask
arr[tuple(indexer)] = np.NaN
| bsd-3-clause |
computergeek125/writ3562w_itmrp_survey | itmrp-analyzer.py | 1 | 2948 | import argparse
import importlib
import json
import matplotlib
from matplotlib import rcParams as mp_rc
import sys
import nars as Nars
import pdplot as p
import qualtrics_api.Qv3 as Qv3
import qualtrics_api.Qv3_helpers as QH
import run_graphs
import settings
import util as u
parser = argparse.ArgumentParser()
parser.add_argument("-R", "--results", help="Sets the results file without re-downloading the results from Qualtrics", default=None)
args = parser.parse_args()
try:
__IPYTHON__
sys.stderr.write("Type '%matplotlib' (without the quotes) to initialize IPython\n")
except NameError:
sys.stderr.write("Warning: This script was designed for IPython. Running without IPython may yield unexpected results.\n")
def qualtrics_init(reuse=False):
global q
global args
global survey
global survey_data
global N
q = Qv3.Qualtrics_v3(settings.qualtrics_datacenter,settings.qualtrics_api_key)
sys.stdout.write("Loading survey from Qualtrics...")
sys.stdout.write("done!\n")
if args.results:
reuse = True
else:
survey_file = q.response_export(settings.qualtrics_survey,"json")
sys.stdout.write("Opening {0}\n".format(survey_file))
if reuse:
survey_file = args.results
sys.stdout.write("Reusing results from {0}\n".format(survey_file))
with open(survey_file) as data_file:
survey_data = json.load(data_file)
N = len(survey_data['responses'])
survey = q.survey_get(settings.qualtrics_survey)
sys.stdout.write("Imported {0} responses\n".format(N))
sys.stdout.write("Survey Name: {0}\n".format(survey['name']))
def local_init():
global nars
global qh
matplotlib.style.use('ggplot')
mp_rc.update({'figure.autolayout': True})
u.reload_window()
nars = Nars.Nars(survey, survey_data)
qh = QH.QHelpers(q, survey_data)
def init(reuse=False, noQ=False):
if not noQ:
qualtrics_init(reuse=reuse)
local_init()
def nars_calc():
return nars.nars(settings.nars_s1), nars.nars(settings.nars_s2), nars.nars(settings.nars_s3, inverted=True)
def nars_calc2():
return nars.nars(settings.nars_s1), nars.nars(settings.nars_s2_amd), nars.nars(settings.nars_s3_amd, inverted=True)
def nars_mrp_calc():
return nars.nars(settings.nars_itmrp_s1), nars.nars(settings.nars_itmrp_s2), nars.nars(settings.nars_itmrp_s3, inverted=True)
#TODO: Text analysis (report) Grab text with selectable metadata, filtering null answers
def rg(graph=None, **kwargs):
run_graphs.run_graphs(graph=graph, qh=qh, nars=nars, nars_calc=nars_calc, nars_mrp_calc=nars_mrp_calc, **kwargs)
def reload(noQ=False):
sys.stdout.write("Reloading local files with importlib...\n")
importlib.reload(Nars)
importlib.reload(p)
importlib.reload(Qv3)
importlib.reload(QH)
importlib.reload(run_graphs)
importlib.reload(settings)
importlib.reload(u)
init(reuse=True, noQ=noQ)
init() | mit |
moonboots/tensorflow | tensorflow/examples/tutorials/word2vec/word2vec_basic.py | 3 | 8770 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import print_function
import collections
import math
import os
import random
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'
def maybe_download(filename, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
if not os.path.exists(filename):
filename, _ = urllib.request.urlretrieve(url + filename, filename)
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
filename = maybe_download('text8.zip', 31344016)
# Read the data into a list of words.
def read_data(filename):
  """Extract the first file enclosed in a zip file as a list of words."""
  with zipfile.ZipFile(filename) as f:
    data = f.read(f.namelist()[0]).split()
  return data
words = read_data(filename)
print('Data size', len(words))
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000
def build_dataset(words):
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0 # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reverse_dictionary
data, count, dictionary, reverse_dictionary = build_dataset(words)
del words # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10])
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span)
for _ in range(span):
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
for i in range(batch_size // num_skips):
target = skip_window # target label at the center of the buffer
targets_to_avoid = [ skip_window ]
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[target]
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
return batch, labels
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
print(batch[i], '->', labels[i, 0])
print(reverse_dictionary[batch[i]], '->', reverse_dictionary[labels[i, 0]])
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64 # Number of negative examples to sample.
graph = tf.Graph()
with graph.as_default():
# Input data.
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Ops and variables pinned to the CPU because of missing GPU implementation
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
nce_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
loss = tf.reduce_mean(
tf.nn.nce_loss(nce_weights, nce_biases, embed, train_labels,
num_sampled, vocabulary_size))
# Construct the SGD optimizer using a learning rate of 1.0.
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
# Step 5: Begin training.
num_steps = 100001
with tf.Session(graph=graph) as session:
# We must initialize all variables before we use them.
tf.initialize_all_variables().run()
print("Initialized")
average_loss = 0
for step in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(
batch_size, num_skips, skip_window)
feed_dict = {train_inputs : batch_inputs, train_labels : batch_labels}
# We perform one update step by evaluating the optimizer op (including it
    # in the list of returned values for session.run())
_, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += loss_val
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print("Average loss at step ", step, ": ", average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k+1]
log_str = "Nearest to %s:" % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = "%s %s," % (log_str, close_word)
print(log_str)
final_embeddings = normalized_embeddings.eval()
# Step 6: Visualize the embeddings.
def plot_with_labels(low_dim_embs, labels, filename='tsne.png'):
assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings"
plt.figure(figsize=(18, 18)) #in inches
for i, label in enumerate(labels):
x, y = low_dim_embs[i,:]
plt.scatter(x, y)
plt.annotate(label,
xy=(x, y),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.savefig(filename)
try:
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only,:])
labels = [reverse_dictionary[i] for i in xrange(plot_only)]
plot_with_labels(low_dim_embs, labels)
except ImportError:
print("Please install sklearn and matplotlib to visualize embeddings.")
| apache-2.0 |
TheGentlemanOctopus/thegentlemanoctopus | octopus_code/core/octopus/rpcClients/detect_peaks.py | 5 | 6615 | """Detect peaks in data based on their amplitude and other features."""
from __future__ import division, print_function
import numpy as np
__author__ = "Marcos Duarte, https://github.com/demotu/BMC"
__version__ = "1.0.4"
__license__ = "MIT"
def detect_peaks(x, mph=None, mpd=1, threshold=0, edge='rising',
kpsh=False, valley=False, show=False, ax=None):
"""Detect peaks in data based on their amplitude and other features.
Parameters
----------
x : 1D array_like
data.
mph : {None, number}, optional (default = None)
detect peaks that are greater than minimum peak height.
mpd : positive integer, optional (default = 1)
detect peaks that are at least separated by minimum peak distance (in
number of data).
threshold : positive number, optional (default = 0)
detect peaks (valleys) that are greater (smaller) than `threshold`
in relation to their immediate neighbors.
edge : {None, 'rising', 'falling', 'both'}, optional (default = 'rising')
for a flat peak, keep only the rising edge ('rising'), only the
falling edge ('falling'), both edges ('both'), or don't detect a
flat peak (None).
kpsh : bool, optional (default = False)
keep peaks with same height even if they are closer than `mpd`.
valley : bool, optional (default = False)
if True (1), detect valleys (local minima) instead of peaks.
show : bool, optional (default = False)
if True (1), plot data in matplotlib figure.
ax : a matplotlib.axes.Axes instance, optional (default = None).
Returns
-------
ind : 1D array_like
        indices of the peaks in `x`.
Notes
-----
The detection of valleys instead of peaks is performed internally by simply
negating the data: `ind_valleys = detect_peaks(-x)`
The function can handle NaN's
See this IPython Notebook [1]_.
References
----------
.. [1] http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/DetectPeaks.ipynb
Examples
--------
>>> from detect_peaks import detect_peaks
>>> x = np.random.randn(100)
>>> x[60:81] = np.nan
>>> # detect all peaks and plot data
>>> ind = detect_peaks(x, show=True)
>>> print(ind)
>>> x = np.sin(2*np.pi*5*np.linspace(0, 1, 200)) + np.random.randn(200)/5
>>> # set minimum peak height = 0 and minimum peak distance = 20
>>> detect_peaks(x, mph=0, mpd=20, show=True)
>>> x = [0, 1, 0, 2, 0, 3, 0, 2, 0, 1, 0]
>>> # set minimum peak distance = 2
>>> detect_peaks(x, mpd=2, show=True)
>>> x = np.sin(2*np.pi*5*np.linspace(0, 1, 200)) + np.random.randn(200)/5
>>> # detection of valleys instead of peaks
>>> detect_peaks(x, mph=0, mpd=20, valley=True, show=True)
>>> x = [0, 1, 1, 0, 1, 1, 0]
>>> # detect both edges
>>> detect_peaks(x, edge='both', show=True)
>>> x = [-2, 1, -2, 2, 1, 1, 3, 0]
>>> # set threshold = 2
>>> detect_peaks(x, threshold = 2, show=True)
"""
x = np.atleast_1d(x).astype('float64')
if x.size < 3:
return np.array([], dtype=int)
if valley:
x = -x
# find indexes of all peaks
dx = x[1:] - x[:-1]
# handle NaN's
indnan = np.where(np.isnan(x))[0]
if indnan.size:
x[indnan] = np.inf
dx[np.where(np.isnan(dx))[0]] = np.inf
ine, ire, ife = np.array([[], [], []], dtype=int)
if not edge:
ine = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) > 0))[0]
else:
if edge.lower() in ['rising', 'both']:
ire = np.where((np.hstack((dx, 0)) <= 0) & (np.hstack((0, dx)) > 0))[0]
if edge.lower() in ['falling', 'both']:
ife = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) >= 0))[0]
ind = np.unique(np.hstack((ine, ire, ife)))
# handle NaN's
if ind.size and indnan.size:
# NaN's and values close to NaN's cannot be peaks
ind = ind[np.in1d(ind, np.unique(np.hstack((indnan, indnan-1, indnan+1))), invert=True)]
# first and last values of x cannot be peaks
if ind.size and ind[0] == 0:
ind = ind[1:]
if ind.size and ind[-1] == x.size-1:
ind = ind[:-1]
# remove peaks < minimum peak height
if ind.size and mph is not None:
ind = ind[x[ind] >= mph]
# remove peaks - neighbors < threshold
if ind.size and threshold > 0:
dx = np.min(np.vstack([x[ind]-x[ind-1], x[ind]-x[ind+1]]), axis=0)
ind = np.delete(ind, np.where(dx < threshold)[0])
# detect small peaks closer than minimum peak distance
if ind.size and mpd > 1:
ind = ind[np.argsort(x[ind])][::-1] # sort ind by peak height
idel = np.zeros(ind.size, dtype=bool)
for i in range(ind.size):
if not idel[i]:
# keep peaks with the same height if kpsh is True
idel = idel | (ind >= ind[i] - mpd) & (ind <= ind[i] + mpd) \
& (x[ind[i]] > x[ind] if kpsh else True)
idel[i] = 0 # Keep current peak
# remove the small peaks and sort back the indexes by their occurrence
ind = np.sort(ind[~idel])
if show:
if indnan.size:
x[indnan] = np.nan
if valley:
x = -x
_plot(x, mph, mpd, threshold, edge, valley, ax, ind)
return ind
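# Hedged usage sketch (not part of the original module): exercise detect_peaks
# on a clean 5 Hz sine, keeping only peaks above zero that are at least 20
# samples apart. `_demo_detect_peaks` is a name added here for illustration only.
def _demo_detect_peaks():
    x = np.sin(2 * np.pi * 5 * np.linspace(0, 1, 200))
    ind = detect_peaks(x, mph=0, mpd=20)
    # a 5 Hz sine sampled over one second has five maxima
    assert len(ind) == 5
    return ind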
def _plot(x, mph, mpd, threshold, edge, valley, ax, ind):
"""Plot results of the detect_peaks function, see its help."""
try:
import matplotlib.pyplot as plt
except ImportError:
print('matplotlib is not available.')
else:
if ax is None:
_, ax = plt.subplots(1, 1, figsize=(8, 4))
ax.plot(x, 'b', lw=1)
if ind.size:
label = 'valley' if valley else 'peak'
label = label + 's' if ind.size > 1 else label
ax.plot(ind, x[ind], '+', mfc=None, mec='r', mew=2, ms=8,
label='%d %s' % (ind.size, label))
ax.legend(loc='best', framealpha=.5, numpoints=1)
ax.set_xlim(-.02*x.size, x.size*1.02-1)
ymin, ymax = x[np.isfinite(x)].min(), x[np.isfinite(x)].max()
yrange = ymax - ymin if ymax > ymin else 1
ax.set_ylim(ymin - 0.1*yrange, ymax + 0.1*yrange)
ax.set_xlabel('Data #', fontsize=14)
ax.set_ylabel('Amplitude', fontsize=14)
mode = 'Valley detection' if valley else 'Peak detection'
ax.set_title("%s (mph=%s, mpd=%d, threshold=%s, edge='%s')"
% (mode, str(mph), mpd, str(threshold), edge))
# plt.grid()
plt.show() | gpl-3.0 |
ssaeger/scikit-learn | sklearn/feature_extraction/image.py | 263 | 17600 | """
The :mod:`sklearn.feature_extraction.image` submodule gathers utilities to
extract features from images.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Olivier Grisel
# Vlad Niculae
# License: BSD 3 clause
from itertools import product
import numbers
import numpy as np
from scipy import sparse
from numpy.lib.stride_tricks import as_strided
from ..utils import check_array, check_random_state
from ..utils.fixes import astype
from ..base import BaseEstimator
__all__ = ['PatchExtractor',
'extract_patches_2d',
'grid_to_graph',
'img_to_graph',
'reconstruct_from_patches_2d']
###############################################################################
# From an image to a graph
def _make_edges_3d(n_x, n_y, n_z=1):
"""Returns a list of edges for a 3D image.
Parameters
===========
n_x: integer
The size of the grid in the x direction.
n_y: integer
The size of the grid in the y direction.
n_z: integer, optional
The size of the grid in the z direction, defaults to 1
"""
vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
edges_deep = np.vstack((vertices[:, :, :-1].ravel(),
vertices[:, :, 1:].ravel()))
edges_right = np.vstack((vertices[:, :-1].ravel(),
vertices[:, 1:].ravel()))
edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
edges = np.hstack((edges_deep, edges_right, edges_down))
return edges
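# Hedged sketch (not part of the sklearn source): for a 2 x 2 grid with n_z=1
# the four vertices are linked by four edges (two "right", two "down", none
# along z), so the returned array has shape (2, 4). `_demo_make_edges_3d` is a
# name added here for illustration only.
def _demo_make_edges_3d():
    edges = _make_edges_3d(2, 2, n_z=1)
    assert edges.shape == (2, 4)
    return edges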
def _compute_gradient_3d(edges, img):
n_x, n_y, n_z = img.shape
gradient = np.abs(img[edges[0] // (n_y * n_z),
(edges[0] % (n_y * n_z)) // n_z,
(edges[0] % (n_y * n_z)) % n_z] -
img[edges[1] // (n_y * n_z),
(edges[1] % (n_y * n_z)) // n_z,
(edges[1] % (n_y * n_z)) % n_z])
return gradient
# XXX: Why mask the image after computing the weights?
def _mask_edges_weights(mask, edges, weights=None):
"""Apply a mask to edges (weighted or not)"""
inds = np.arange(mask.size)
inds = inds[mask.ravel()]
ind_mask = np.logical_and(np.in1d(edges[0], inds),
np.in1d(edges[1], inds))
edges = edges[:, ind_mask]
if weights is not None:
weights = weights[ind_mask]
if len(edges.ravel()):
maxval = edges.max()
else:
maxval = 0
order = np.searchsorted(np.unique(edges.ravel()), np.arange(maxval + 1))
edges = order[edges]
if weights is None:
return edges
else:
return edges, weights
def _to_graph(n_x, n_y, n_z, mask=None, img=None,
return_as=sparse.coo_matrix, dtype=None):
"""Auxiliary function for img_to_graph and grid_to_graph
"""
edges = _make_edges_3d(n_x, n_y, n_z)
if dtype is None:
if img is None:
dtype = np.int
else:
dtype = img.dtype
if img is not None:
img = np.atleast_3d(img)
weights = _compute_gradient_3d(edges, img)
if mask is not None:
edges, weights = _mask_edges_weights(mask, edges, weights)
diag = img.squeeze()[mask]
else:
diag = img.ravel()
n_voxels = diag.size
else:
if mask is not None:
mask = astype(mask, dtype=np.bool, copy=False)
mask = np.asarray(mask, dtype=np.bool)
edges = _mask_edges_weights(mask, edges)
n_voxels = np.sum(mask)
else:
n_voxels = n_x * n_y * n_z
weights = np.ones(edges.shape[1], dtype=dtype)
diag = np.ones(n_voxels, dtype=dtype)
diag_idx = np.arange(n_voxels)
i_idx = np.hstack((edges[0], edges[1]))
j_idx = np.hstack((edges[1], edges[0]))
graph = sparse.coo_matrix((np.hstack((weights, weights, diag)),
(np.hstack((i_idx, diag_idx)),
np.hstack((j_idx, diag_idx)))),
(n_voxels, n_voxels),
dtype=dtype)
if return_as is np.ndarray:
return graph.toarray()
return return_as(graph)
def img_to_graph(img, mask=None, return_as=sparse.coo_matrix, dtype=None):
"""Graph of the pixel-to-pixel gradient connections
Edges are weighted with the gradient values.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
img : ndarray, 2D or 3D
2D or 3D image
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : None or dtype, optional
The data of the returned sparse matrix. By default it is the
dtype of img
Notes
-----
For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
img = np.atleast_3d(img)
n_x, n_y, n_z = img.shape
return _to_graph(n_x, n_y, n_z, mask, img, return_as, dtype)
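# Hedged usage sketch (not part of the sklearn source): the gradient graph of
# a tiny 2 x 2 image is a 4 x 4 sparse adjacency matrix in COO format.
# `_demo_img_to_graph` is a name added here for illustration only.
def _demo_img_to_graph():
    img = np.array([[0., 1.], [2., 3.]])
    graph = img_to_graph(img)
    assert graph.shape == (4, 4)
    return graph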
def grid_to_graph(n_x, n_y, n_z=1, mask=None, return_as=sparse.coo_matrix,
dtype=np.int):
"""Graph of the pixel-to-pixel connections
Edges exist if 2 voxels are connected.
Parameters
----------
n_x : int
Dimension in x axis
n_y : int
Dimension in y axis
n_z : int, optional, default 1
Dimension in z axis
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : dtype, optional, default int
The data of the returned sparse matrix. By default it is int
Notes
-----
For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as,
dtype=dtype)
###############################################################################
# From an image to a set of small image patches
def _compute_n_patches(i_h, i_w, p_h, p_w, max_patches=None):
"""Compute the number of patches that will be extracted in an image.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
i_h : int
The image height
i_w : int
        The image width
p_h : int
The height of a patch
p_w : int
The width of a patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
"""
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
all_patches = n_h * n_w
if max_patches:
if (isinstance(max_patches, (numbers.Integral))
and max_patches < all_patches):
return max_patches
elif (isinstance(max_patches, (numbers.Real))
and 0 < max_patches < 1):
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
def extract_patches(arr, patch_shape=8, extraction_step=1):
"""Extracts patches of any n-dimensional array in place using strides.
Given an n-dimensional array it will return a 2n-dimensional array with
the first n dimensions indexing patch position and the last n indexing
the patch content. This operation is immediate (O(1)). A reshape
performed on the first n dimensions will cause numpy to copy data, leading
to a list of extracted patches.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
arr : ndarray
n-dimensional array of which patches are to be extracted
patch_shape : integer or tuple of length arr.ndim
Indicates the shape of the patches to be extracted. If an
integer is given, the shape will be a hypercube of
sidelength given by its value.
extraction_step : integer or tuple of length arr.ndim
Indicates step size at which extraction shall be performed.
If integer is given, then the step is uniform in all dimensions.
Returns
-------
patches : strided ndarray
2n-dimensional array indexing patches on first n dimensions and
containing patches on the last n dimensions. These dimensions
are fake, but this way no data is copied. A simple reshape invokes
a copying operation to obtain a list of patches:
result.reshape([-1] + list(patch_shape))
"""
arr_ndim = arr.ndim
if isinstance(patch_shape, numbers.Number):
patch_shape = tuple([patch_shape] * arr_ndim)
if isinstance(extraction_step, numbers.Number):
extraction_step = tuple([extraction_step] * arr_ndim)
patch_strides = arr.strides
slices = [slice(None, None, st) for st in extraction_step]
indexing_strides = arr[slices].strides
patch_indices_shape = ((np.array(arr.shape) - np.array(patch_shape)) //
np.array(extraction_step)) + 1
shape = tuple(list(patch_indices_shape) + list(patch_shape))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(arr, shape=shape, strides=strides)
return patches
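# Hedged usage sketch (not part of the sklearn source): strided extraction of
# 3 x 3 patches from a 6 x 6 array yields a (4, 4, 3, 3) view; reshaping the
# leading axes copies the data into a flat list of 16 patches.
# `_demo_extract_patches` is a name added here for illustration only.
def _demo_extract_patches():
    arr = np.arange(36).reshape(6, 6)
    patches = extract_patches(arr, patch_shape=3, extraction_step=1)
    assert patches.shape == (4, 4, 3, 3)
    return patches.reshape(-1, 3, 3)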
def extract_patches_2d(image, patch_size, max_patches=None, random_state=None):
"""Reshape a 2D image into a collection of patches
The resulting patches are allocated in a dedicated array.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
image : array, shape = (image_height, image_width) or
(image_height, image_width, n_channels)
The original image data. For color images, the last dimension specifies
the channel: a RGB image would have `n_channels=3`.
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
random_state : int or RandomState
Pseudo number generator state used for random sampling to use if
`max_patches` is not None.
Returns
-------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the image, where `n_patches`
is either `max_patches` or the total number of patches that can be
extracted.
Examples
--------
>>> from sklearn.feature_extraction import image
>>> one_image = np.arange(16).reshape((4, 4))
>>> one_image
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> patches = image.extract_patches_2d(one_image, (2, 2))
>>> print(patches.shape)
(9, 2, 2)
>>> patches[0]
array([[0, 1],
[4, 5]])
>>> patches[1]
array([[1, 2],
[5, 6]])
>>> patches[8]
array([[10, 11],
[14, 15]])
"""
i_h, i_w = image.shape[:2]
p_h, p_w = patch_size
if p_h > i_h:
raise ValueError("Height of the patch should be less than the height"
" of the image.")
if p_w > i_w:
raise ValueError("Width of the patch should be less than the width"
" of the image.")
image = check_array(image, allow_nd=True)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
extracted_patches = extract_patches(image,
patch_shape=(p_h, p_w, n_colors),
extraction_step=1)
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches)
if max_patches:
rng = check_random_state(random_state)
i_s = rng.randint(i_h - p_h + 1, size=n_patches)
j_s = rng.randint(i_w - p_w + 1, size=n_patches)
patches = extracted_patches[i_s, j_s, 0]
else:
patches = extracted_patches
patches = patches.reshape(-1, p_h, p_w, n_colors)
# remove the color dimension if useless
if patches.shape[-1] == 1:
return patches.reshape((n_patches, p_h, p_w))
else:
return patches
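# Hedged sketch (not part of the sklearn source): with `max_patches` set, a
# random subset of the 9 possible 2 x 2 patches of a 4 x 4 image is sampled.
# `_demo_extract_patches_2d` is a name added here for illustration only.
def _demo_extract_patches_2d():
    one_image = np.arange(16).reshape((4, 4))
    patches = extract_patches_2d(one_image, (2, 2), max_patches=5,
                                 random_state=0)
    assert patches.shape == (5, 2, 2)
    return patches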
def reconstruct_from_patches_2d(patches, image_size):
"""Reconstruct the image from all of its patches.
Patches are assumed to overlap and the image is constructed by filling in
the patches from left to right, top to bottom, averaging the overlapping
regions.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The complete set of patches. If the patches contain colour information,
channels are indexed along the last dimension: RGB patches would
have `n_channels=3`.
image_size : tuple of ints (image_height, image_width) or
(image_height, image_width, n_channels)
the size of the image that will be reconstructed
Returns
-------
image : array, shape = image_size
the reconstructed image
"""
i_h, i_w = image_size[:2]
p_h, p_w = patches.shape[1:3]
img = np.zeros(image_size)
# compute the dimensions of the patches array
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
for p, (i, j) in zip(patches, product(range(n_h), range(n_w))):
img[i:i + p_h, j:j + p_w] += p
for i in range(i_h):
for j in range(i_w):
# divide by the amount of overlap
# XXX: is this the most efficient way? memory-wise yes, cpu wise?
img[i, j] /= float(min(i + 1, p_h, i_h - i) *
min(j + 1, p_w, i_w - j))
return img
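# Hedged sketch (not part of the sklearn source): extracting every overlapping
# patch and averaging them back reproduces the original image up to floating
# point error. `_demo_reconstruct_round_trip` is a name added for illustration.
def _demo_reconstruct_round_trip():
    image = np.arange(16, dtype=np.float64).reshape((4, 4))
    patches = extract_patches_2d(image, (2, 2))
    rebuilt = reconstruct_from_patches_2d(patches, (4, 4))
    assert np.allclose(rebuilt, image)
    return rebuilt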
class PatchExtractor(BaseEstimator):
"""Extracts patches from a collection of images
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches per image to extract. If max_patches is a
float in (0, 1), it is taken to mean a proportion of the total number
of patches.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
"""
def __init__(self, patch_size=None, max_patches=None, random_state=None):
self.patch_size = patch_size
self.max_patches = max_patches
self.random_state = random_state
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
def transform(self, X):
"""Transforms the image samples in X into a matrix of patch data.
Parameters
----------
X : array, shape = (n_samples, image_height, image_width) or
(n_samples, image_height, image_width, n_channels)
Array of images from which to extract patches. For color images,
the last dimension specifies the channel: a RGB image would have
`n_channels=3`.
Returns
-------
patches: array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the images, where
`n_patches` is either `n_samples * max_patches` or the total
number of patches that can be extracted.
"""
self.random_state = check_random_state(self.random_state)
n_images, i_h, i_w = X.shape[:3]
X = np.reshape(X, (n_images, i_h, i_w, -1))
n_channels = X.shape[-1]
if self.patch_size is None:
patch_size = i_h // 10, i_w // 10
else:
patch_size = self.patch_size
# compute the dimensions of the patches array
p_h, p_w = patch_size
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, self.max_patches)
patches_shape = (n_images * n_patches,) + patch_size
if n_channels > 1:
patches_shape += (n_channels,)
# extract the patches
patches = np.empty(patches_shape)
for ii, image in enumerate(X):
patches[ii * n_patches:(ii + 1) * n_patches] = extract_patches_2d(
image, patch_size, self.max_patches, self.random_state)
return patches
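# Hedged usage sketch (not part of the sklearn source): PatchExtractor works
# on a stack of images; two 4 x 4 images with 2 x 2 patches give 2 * 9 = 18
# patches. `_demo_patch_extractor` is a name added here for illustration only.
def _demo_patch_extractor():
    X = np.arange(32, dtype=np.float64).reshape(2, 4, 4)
    extractor = PatchExtractor(patch_size=(2, 2))
    patches = extractor.transform(X)
    assert patches.shape == (18, 2, 2)
    return patches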
| bsd-3-clause |
JPFrancoia/scikit-learn | sklearn/mixture/tests/test_gmm.py | 11 | 20915 | # Important note for the deprecation cleaning of 0.20 :
# All the functions and classes of this file have been deprecated in 0.18.
# When you remove this file please remove the related files
# - 'sklearn/mixture/dpgmm.py'
# - 'sklearn/mixture/gmm.py'
# - 'sklearn/mixture/test_dpgmm.py'
import unittest
import copy
import sys
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises)
from scipy import stats
from sklearn import mixture
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn.utils.testing import (assert_true, assert_greater,
assert_raise_message, assert_warns_message,
ignore_warnings)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.externals.six.moves import cStringIO as StringIO
rng = np.random.RandomState(0)
def test_sample_gaussian():
# Test sample generation from mixture.sample_gaussian where covariance
# is diagonal, spherical and full
n_features, n_samples = 2, 300
axis = 1
mu = rng.randint(10) * rng.rand(n_features)
cv = (rng.rand(n_features) + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='diag', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(samples.var(axis), cv, atol=1.5))
# the same for spherical covariances
cv = (rng.rand() + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='spherical', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.5))
assert_true(np.allclose(
samples.var(axis), np.repeat(cv, n_features), atol=1.5))
# and for full covariances
A = rng.randn(n_features, n_features)
cv = np.dot(A.T, A) + np.eye(n_features)
samples = mixture.sample_gaussian(
mu, cv, covariance_type='full', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(np.cov(samples), cv, atol=2.5))
# Numerical stability check: in SciPy 0.12.0 at least, eigh may return
# tiny negative values in its second return value.
from sklearn.mixture import sample_gaussian
x = sample_gaussian([0, 0], [[4, 3], [1, .1]],
covariance_type='full', random_state=42)
assert_true(np.isfinite(x).all())
def _naive_lmvnpdf_diag(X, mu, cv):
# slow and naive implementation of lmvnpdf
ref = np.empty((len(X), len(mu)))
stds = np.sqrt(cv)
for i, (m, std) in enumerate(zip(mu, stds)):
ref[:, i] = np.log(stats.norm.pdf(X, m, std)).sum(axis=1)
return ref
def test_lmvnpdf_diag():
# test a slow and naive implementation of lmvnpdf and
# compare it to the vectorized version (mixture.lmvnpdf) to test
# for correctness
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
ref = _naive_lmvnpdf_diag(X, mu, cv)
lpr = assert_warns_message(DeprecationWarning, "The function"
" log_multivariate_normal_density is "
"deprecated in 0.18 and will be removed in 0.20.",
mixture.log_multivariate_normal_density,
X, mu, cv, 'diag')
assert_array_almost_equal(lpr, ref)
def test_lmvnpdf_spherical():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
spherecv = rng.rand(n_components, 1) ** 2 + 1
X = rng.randint(10) * rng.rand(n_samples, n_features)
cv = np.tile(spherecv, (n_features, 1))
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = assert_warns_message(DeprecationWarning, "The function"
" log_multivariate_normal_density is "
"deprecated in 0.18 and will be removed in 0.20.",
mixture.log_multivariate_normal_density,
X, mu, spherecv, 'spherical')
assert_array_almost_equal(lpr, reference)
def test_lmvnpdf_full():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
fullcv = np.array([np.diag(x) for x in cv])
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = assert_warns_message(DeprecationWarning, "The function"
" log_multivariate_normal_density is "
"deprecated in 0.18 and will be removed in 0.20.",
mixture.log_multivariate_normal_density,
X, mu, fullcv, 'full')
assert_array_almost_equal(lpr, reference)
def test_lvmpdf_full_cv_non_positive_definite():
n_features, n_samples = 2, 10
rng = np.random.RandomState(0)
X = rng.randint(10) * rng.rand(n_samples, n_features)
mu = np.mean(X, 0)
cv = np.array([[[-1, 0], [0, 1]]])
expected_message = "'covars' must be symmetric, positive-definite"
assert_raise_message(ValueError, expected_message,
mixture.log_multivariate_normal_density,
X, mu, cv, 'full')
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_GMM_attributes():
n_components, n_features = 10, 4
covariance_type = 'diag'
g = mixture.GMM(n_components, covariance_type, random_state=rng)
weights = rng.rand(n_components)
weights = weights / weights.sum()
means = rng.randint(-20, 20, (n_components, n_features))
assert_true(g.n_components == n_components)
assert_true(g.covariance_type == covariance_type)
g.weights_ = weights
assert_array_almost_equal(g.weights_, weights)
g.means_ = means
assert_array_almost_equal(g.means_, means)
covars = (0.1 + 2 * rng.rand(n_components, n_features)) ** 2
g.covars_ = covars
assert_array_almost_equal(g.covars_, covars)
assert_raises(ValueError, g._set_covars, [])
assert_raises(ValueError, g._set_covars,
np.zeros((n_components - 2, n_features)))
assert_raises(ValueError, mixture.GMM, n_components=20,
covariance_type='badcovariance_type')
class GMMTester():
do_test_eval = True
def _setUp(self):
self.n_components = 10
self.n_features = 4
self.weights = rng.rand(self.n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.randint(-20, 20, (self.n_components, self.n_features))
self.threshold = -0.5
self.I = np.eye(self.n_features)
self.covars = {
'spherical': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'tied': (make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I),
'diag': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'full': np.array([make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I for x in range(self.n_components)])}
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_eval(self):
if not self.do_test_eval:
return # DPGMM does not support setting the means and
# covariances before fitting There is no way of fixing this
# due to the variational parameters being more expressive than
# covariance matrices
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = self.covars[self.covariance_type]
g.weights_ = self.weights
gaussidx = np.repeat(np.arange(self.n_components), 5)
n_samples = len(gaussidx)
X = rng.randn(n_samples, self.n_features) + g.means_[gaussidx]
with ignore_warnings(category=DeprecationWarning):
ll, responsibilities = g.score_samples(X)
self.assertEqual(len(ll), n_samples)
self.assertEqual(responsibilities.shape,
(n_samples, self.n_components))
assert_array_almost_equal(responsibilities.sum(axis=1),
np.ones(n_samples))
assert_array_equal(responsibilities.argmax(axis=1), gaussidx)
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_sample(self, n=100):
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
g.weights_ = self.weights
with ignore_warnings(category=DeprecationWarning):
samples = g.sample(n)
self.assertEqual(samples.shape, (n, self.n_features))
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_train(self, params='wmc'):
g = mixture.GMM(n_components=self.n_components,
covariance_type=self.covariance_type)
with ignore_warnings(category=DeprecationWarning):
g.weights_ = self.weights
g.means_ = self.means
g.covars_ = 20 * self.covars[self.covariance_type]
# Create a training set by sampling from the predefined distribution.
with ignore_warnings(category=DeprecationWarning):
X = g.sample(n_samples=100)
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-1,
n_iter=1, init_params=params)
g.fit(X)
# Do one training iteration at a time so we can keep track of
# the log likelihood to make sure that it increases after each
# iteration.
trainll = []
with ignore_warnings(category=DeprecationWarning):
for _ in range(5):
g.params = params
g.init_params = ''
g.fit(X)
trainll.append(self.score(g, X))
g.n_iter = 10
g.init_params = ''
g.params = params
g.fit(X) # finish fitting
# Note that the log likelihood will sometimes decrease by a
# very small amount after it has more or less converged due to
# the addition of min_covar to the covariance (to prevent
# underflow). This is why the threshold is set to -0.5
# instead of 0.
with ignore_warnings(category=DeprecationWarning):
delta_min = np.diff(trainll).min()
self.assertTrue(
delta_min > self.threshold,
"The min nll increase is %f which is lower than the admissible"
" threshold of %f, for model %s. The likelihoods are %s."
% (delta_min, self.threshold, self.covariance_type, trainll))
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_train_degenerate(self, params='wmc'):
# Train on degenerate data with 0 in some dimensions
# Create a training set by sampling from the predefined
# distribution.
X = rng.randn(100, self.n_features)
X.T[1:] = 0
g = self.model(n_components=2,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-3, n_iter=5,
init_params=params)
with ignore_warnings(category=DeprecationWarning):
g.fit(X)
trainll = g.score(X)
self.assertTrue(np.sum(np.abs(trainll / 100 / X.shape[1])) < 5)
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_train_1d(self, params='wmc'):
# Train on 1-D data
# Create a training set by sampling from the predefined
# distribution.
X = rng.randn(100, 1)
# X.T[1:] = 0
g = self.model(n_components=2,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-7, n_iter=5,
init_params=params)
with ignore_warnings(category=DeprecationWarning):
g.fit(X)
trainll = g.score(X)
if isinstance(g, mixture.dpgmm._DPGMMBase):
self.assertTrue(np.sum(np.abs(trainll / 100)) < 5)
else:
self.assertTrue(np.sum(np.abs(trainll / 100)) < 2)
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def score(self, g, X):
with ignore_warnings(category=DeprecationWarning):
return g.score(X).sum()
class TestGMMWithSphericalCovars(unittest.TestCase, GMMTester):
covariance_type = 'spherical'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithDiagonalCovars(unittest.TestCase, GMMTester):
covariance_type = 'diag'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithTiedCovars(unittest.TestCase, GMMTester):
covariance_type = 'tied'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithFullCovars(unittest.TestCase, GMMTester):
covariance_type = 'full'
model = mixture.GMM
setUp = GMMTester._setUp
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_multiple_init():
    # Test that multiple inits do not perform much worse than a single one
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, covariance_type='spherical',
random_state=rng, min_covar=1e-7, n_iter=5)
with ignore_warnings(category=DeprecationWarning):
train1 = g.fit(X).score(X).sum()
g.n_init = 5
train2 = g.fit(X).score(X).sum()
assert_true(train2 >= train1 - 1.e-2)
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_n_parameters():
n_samples, n_dim, n_components = 7, 5, 2
X = rng.randn(n_samples, n_dim)
n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
for cv_type in ['full', 'tied', 'diag', 'spherical']:
with ignore_warnings(category=DeprecationWarning):
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_true(g._n_parameters() == n_params[cv_type])
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_1d_1component():
# Test all of the covariance_types return the same BIC score for
# 1-dimensional, 1 component fits.
n_samples, n_dim, n_components = 100, 1, 1
X = rng.randn(n_samples, n_dim)
g_full = mixture.GMM(n_components=n_components, covariance_type='full',
random_state=rng, min_covar=1e-7, n_iter=1)
with ignore_warnings(category=DeprecationWarning):
g_full.fit(X)
g_full_bic = g_full.bic(X)
for cv_type in ['tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_array_almost_equal(g.bic(X), g_full_bic)
def assert_fit_predict_correct(model, X):
model2 = copy.deepcopy(model)
predictions_1 = model.fit(X).predict(X)
predictions_2 = model2.fit_predict(X)
assert adjusted_rand_score(predictions_1, predictions_2) == 1.0
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_fit_predict():
"""
test that gmm.fit_predict is equivalent to gmm.fit + gmm.predict
"""
lrng = np.random.RandomState(101)
n_samples, n_dim, n_comps = 100, 2, 2
mu = np.array([[8, 8]])
component_0 = lrng.randn(n_samples, n_dim)
component_1 = lrng.randn(n_samples, n_dim) + mu
X = np.vstack((component_0, component_1))
for m_constructor in (mixture.GMM, mixture.VBGMM, mixture.DPGMM):
model = m_constructor(n_components=n_comps, covariance_type='full',
min_covar=1e-7, n_iter=5,
random_state=np.random.RandomState(0))
assert_fit_predict_correct(model, X)
model = mixture.GMM(n_components=n_comps, n_iter=0)
z = model.fit_predict(X)
assert np.all(z == 0), "Quick Initialization Failed!"
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_aic():
# Test the aic and bic criteria
n_samples, n_dim, n_components = 50, 3, 2
X = rng.randn(n_samples, n_dim)
SGH = 0.5 * (X.var() + np.log(2 * np.pi)) # standard gaussian entropy
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7)
g.fit(X)
aic = 2 * n_samples * SGH * n_dim + 2 * g._n_parameters()
bic = (2 * n_samples * SGH * n_dim +
np.log(n_samples) * g._n_parameters())
bound = n_dim * 3. / np.sqrt(n_samples)
assert_true(np.abs(g.aic(X) - aic) / n_samples < bound)
assert_true(np.abs(g.bic(X) - bic) / n_samples < bound)
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def check_positive_definite_covars(covariance_type):
r"""Test that covariance matrices do not become non positive definite
Due to the accumulation of round-off errors, the computation of the
covariance matrices during the learning phase could lead to non-positive
definite covariance matrices. Namely the use of the formula:
.. math:: C = (\sum_i w_i x_i x_i^T) - \mu \mu^T
instead of:
.. math:: C = \sum_i w_i (x_i - \mu)(x_i - \mu)^T
    while mathematically equivalent, was observed to trigger a ``LinAlgError``
    exception when computing a ``GMM`` with full covariance matrices and fixed mean.
This function ensures that some later optimization will not introduce the
problem again.
"""
rng = np.random.RandomState(1)
    # we build a dataset with two 2d components. The components are unbalanced
# (respective weights 0.9 and 0.1)
X = rng.randn(100, 2)
X[-10:] += (3, 3) # Shift the 10 last points
gmm = mixture.GMM(2, params="wc", covariance_type=covariance_type,
min_covar=1e-3)
# This is a non-regression test for issue #2640. The following call used
# to trigger:
# numpy.linalg.linalg.LinAlgError: 2-th leading minor not positive definite
gmm.fit(X)
if covariance_type == "diag" or covariance_type == "spherical":
assert_greater(gmm.covars_.min(), 0)
else:
if covariance_type == "tied":
covs = [gmm.covars_]
else:
covs = gmm.covars_
for c in covs:
assert_greater(np.linalg.det(c), 0)
def test_positive_definite_covars():
# Check positive definiteness for all covariance types
for covariance_type in ["full", "tied", "diag", "spherical"]:
yield check_positive_definite_covars, covariance_type
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_verbose_first_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_verbose_second_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
| bsd-3-clause |
HolgerPeters/scikit-learn | examples/model_selection/plot_learning_curve.py | 76 | 4509 | """
========================
Plotting Learning Curves
========================
On the left side the learning curve of a naive Bayes classifier is shown for
the digits dataset. Note that the training score and the cross-validation score
are both not very good at the end. However, this shape of curve is very often
found for more complex datasets: the training score is very high at the
beginning and decreases, and the cross-validation score is very low at the
beginning and increases. On the right side we see the learning curve of an SVM
with RBF kernel. We can see clearly that the training score is still around
the maximum and the validation score could be increased with more training
samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.model_selection import learning_curve
from sklearn.model_selection import ShuffleSplit
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
"""
Generate a simple plot of the test and training learning curve.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
title : string
Title for the chart.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
ylim : tuple, shape (ymin, ymax), optional
Defines minimum and maximum yvalues plotted.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
        :class:`StratifiedKFold` is used. If the estimator is not a classifier
or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validators that can be used here.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
"""
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
plt.legend(loc="best")
return plt
digits = load_digits()
X, y = digits.data, digits.target
title = "Learning Curves (Naive Bayes)"
# Cross validation with 100 iterations to get smoother mean test and train
# score curves, each time with 20% data randomly selected as a validation set.
cv = ShuffleSplit(n_splits=100, test_size=0.2, random_state=0)
estimator = GaussianNB()
plot_learning_curve(estimator, title, X, y, ylim=(0.7, 1.01), cv=cv, n_jobs=4)
title = "Learning Curves (SVM, RBF kernel, $\gamma=0.001$)"
# SVC is more expensive so we do a lower number of CV iterations:
cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=0)
estimator = SVC(gamma=0.001)
plot_learning_curve(estimator, title, X, y, (0.7, 1.01), cv=cv, n_jobs=4)
plt.show()
| bsd-3-clause |
cbmoore/statsmodels | statsmodels/tsa/base/datetools.py | 27 | 10629 | from statsmodels.compat.python import (lrange, lzip, lmap, string_types, callable,
asstr, reduce, zip, map)
import re
import datetime
from pandas import Period
from pandas.tseries.frequencies import to_offset
from pandas import datetools as pandas_datetools
import numpy as np
#NOTE: All of these frequencies assume end of period (except wrt time)
class _freq_to_pandas_class(object):
# being lazy, don't want to replace dictionary below
def __getitem__(self, key):
return to_offset(key)
_freq_to_pandas = _freq_to_pandas_class()
def _is_datetime_index(dates):
if isinstance(dates[0], (datetime.datetime, Period)):
return True # TimeStamp is a datetime subclass
else:
return False
def _index_date(date, dates):
"""
Gets the index number of a date in a date index.
Works in-sample and will return one past the end of the dates since
prediction can start one out.
Currently used to validate prediction start dates.
    If the dates are not of a fixed frequency and date is not in the
existing dates, then a ValueError is raised.
"""
if isinstance(date, string_types):
date = date_parser(date)
try:
if hasattr(dates, 'indexMap'): # 0.7.x
return dates.indexMap[date]
else:
date = dates.get_loc(date)
try: # pandas 0.8.0 returns a boolean array
len(date)
return np.where(date)[0].item()
except TypeError: # expected behavior
return date
except KeyError as err:
freq = _infer_freq(dates)
if freq is None:
#TODO: try to intelligently roll forward onto a date in the
# index. Waiting to drop pandas 0.7.x support so this is
# cleaner to do.
raise ValueError("There is no frequency for these dates and "
"date %s is not in dates index. Try giving a "
"date that is in the dates index or use "
"an integer" % date)
# we can start prediction at the end of endog
if _idx_from_dates(dates[-1], date, freq) == 1:
return len(dates)
raise ValueError("date %s not in date index. Try giving a "
"date that is in the dates index or use an integer"
% date)
def _date_from_idx(d1, idx, freq):
"""
Returns the date from an index beyond the end of a date series.
d1 is the datetime of the last date in the series. idx is the
index distance of how far the next date should be from d1. Ie., 1 gives
the next date from d1 at freq.
Notes
-----
This does not do any rounding to make sure that d1 is actually on the
offset. For now, this needs to be taken care of before you get here.
"""
return d1 + idx * _freq_to_pandas[freq]
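# Hedged sketch (not part of the statsmodels source): stepping two month-end
# periods beyond the last date of a monthly series. `_demo_date_from_idx` is a
# name added for illustration only and assumes the pandas offsets imported above.
def _demo_date_from_idx():
    last = datetime.datetime(2000, 1, 31)
    nxt = _date_from_idx(last, 2, 'M')
    # Jan 31 is already on the month-end offset, so two steps land on Mar 31
    assert nxt == datetime.datetime(2000, 3, 31)
    return nxt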
def _idx_from_dates(d1, d2, freq):
"""
Returns an index offset from datetimes d1 and d2. d1 is expected to be the
last date in a date series and d2 is the out of sample date.
Notes
-----
Rounds down the index if the end date is before the next date at freq.
    Does not check the start date to see whether it is on the offset but
assumes that it is.
"""
from pandas import DatetimeIndex
return len(DatetimeIndex(start=d1, end=d2,
freq = _freq_to_pandas[freq])) - 1
_quarter_to_day = {
"1" : (3, 31),
"2" : (6, 30),
"3" : (9, 30),
"4" : (12, 31),
"I" : (3, 31),
"II" : (6, 30),
"III" : (9, 30),
"IV" : (12, 31)
}
_mdays = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
_months_with_days = lzip(lrange(1,13), _mdays)
_month_to_day = dict(zip(map(str,lrange(1,13)), _months_with_days))
_month_to_day.update(dict(zip(["I", "II", "III", "IV", "V", "VI",
"VII", "VIII", "IX", "X", "XI", "XII"],
_months_with_days)))
# regex patterns
_y_pattern = '^\d?\d?\d?\d$'
_q_pattern = '''
^ # beginning of string
\d?\d?\d?\d # match any number 1-9999, includes leading zeros
(:?q) # use q or a : as a separator
([1-4]|(I{1,3}V?)) # match 1-4 or I-IV roman numerals
$ # end of string
'''
_m_pattern = '''
^ # beginning of string
\d?\d?\d?\d # match any number 1-9999, includes leading zeros
(:?m) # use m or a : as a separator
(([1-9][0-2]?)|(I?XI{0,2}|I?VI{0,3}|I{1,3})) # match 1-12 or
# I-XII roman numerals
$ # end of string
'''
#NOTE: see also ts.extras.isleapyear, which accepts a sequence
def _is_leap(year):
year = int(year)
return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def date_parser(timestr, parserinfo=None, **kwargs):
"""
Uses dateutil.parser.parse, but also handles monthly dates of the form
1999m4, 1999:m4, 1999:mIV, 1999mIV and the same for quarterly data
with q instead of m. It is not case sensitive. The default for annual
data is the end of the year, which also differs from dateutil.
"""
flags = re.IGNORECASE | re.VERBOSE
if re.search(_q_pattern, timestr, flags):
y,q = timestr.replace(":","").lower().split('q')
month, day = _quarter_to_day[q.upper()]
year = int(y)
elif re.search(_m_pattern, timestr, flags):
y,m = timestr.replace(":","").lower().split('m')
month, day = _month_to_day[m.upper()]
year = int(y)
if _is_leap(y) and month == 2:
day += 1
elif re.search(_y_pattern, timestr, flags):
month, day = 12, 31
year = int(timestr)
else:
if (hasattr(pandas_datetools, 'parser') and
not callable(pandas_datetools.parser)):
# exists in 0.8.0 pandas, but it's the class not the module
return pandas_datetools.parser.parse(timestr, parserinfo,
**kwargs)
else: # 0.8.1 pandas version didn't import this into namespace
from dateutil import parser
return parser.parse(timestr, parserinfo, **kwargs)
return datetime.datetime(year, month, day)
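# Hedged usage sketch (not part of the statsmodels source): quarterly, monthly
# and annual abbreviations all resolve to end-of-period dates, with February
# adjusted in leap years. `_demo_date_parser` is a name added for illustration.
def _demo_date_parser():
    assert date_parser('1999q2') == datetime.datetime(1999, 6, 30)
    assert date_parser('1999m2') == datetime.datetime(1999, 2, 28)
    assert date_parser('2000m2') == datetime.datetime(2000, 2, 29)  # leap year
    return date_parser('1999')  # annual data -> December 31st, 1999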
def date_range_str(start, end=None, length=None):
"""
Returns a list of abbreviated date strings.
Parameters
----------
start : str
The first abbreviated date, for instance, '1965q1' or '1965m1'
end : str, optional
The last abbreviated date if length is None.
length : int, optional
        The length of the returned array if end is None.
Returns
-------
date_range : list
List of strings
"""
flags = re.IGNORECASE | re.VERBOSE
#_check_range_inputs(end, length, freq)
start = start.lower()
if re.search(_m_pattern, start, flags):
annual_freq = 12
split = 'm'
elif re.search(_q_pattern, start, flags):
annual_freq = 4
split = 'q'
elif re.search(_y_pattern, start, flags):
annual_freq = 1
start += 'a1' # hack
if end:
end += 'a1'
split = 'a'
else:
raise ValueError("Date %s not understood" % start)
yr1, offset1 = lmap(int, start.replace(":","").split(split))
if end is not None:
end = end.lower()
yr2, offset2 = lmap(int, end.replace(":","").split(split))
length = (yr2 - yr1) * annual_freq + offset2
elif length:
yr2 = yr1 + length // annual_freq
offset2 = length % annual_freq + (offset1 - 1)
years = np.repeat(lrange(yr1+1, yr2), annual_freq).tolist()
years = np.r_[[str(yr1)]*(annual_freq+1-offset1), years] # tack on first year
years = np.r_[years, [str(yr2)]*offset2] # tack on last year
if split != 'a':
offset = np.tile(np.arange(1, annual_freq+1), yr2-yr1-1)
offset = np.r_[np.arange(offset1, annual_freq+1).astype('a2'), offset]
offset = np.r_[offset, np.arange(1,offset2+1).astype('a2')]
date_arr_range = [''.join([i, split, asstr(j)]) for i,j in
zip(years, offset)]
else:
date_arr_range = years.tolist()
return date_arr_range
def dates_from_str(dates):
"""
Turns a sequence of date strings and returns a list of datetime.
Parameters
----------
dates : array-like
A sequence of abbreviated dates as string. For instance,
'1996m1' or '1996Q1'. The datetime dates are at the end of the
period.
Returns
-------
date_list : array
A list of datetime types.
"""
return lmap(date_parser, dates)
def dates_from_range(start, end=None, length=None):
"""
Turns a sequence of date strings and returns a list of datetime.
Parameters
----------
start : str
The first abbreviated date, for instance, '1965q1' or '1965m1'
end : str, optional
The last abbreviated date if length is None.
length : int, optional
        The length of the returned array if end is None.
Examples
--------
>>> import statsmodels.api as sm
    >>> dates = sm.tsa.datetools.dates_from_range('1960m1', length=nobs)
Returns
-------
date_list : array
A list of datetime types.
"""
dates = date_range_str(start, end, length)
return dates_from_str(dates)
def _add_datetimes(dates):
return reduce(lambda x, y: y+x, dates)
def _infer_freq(dates):
maybe_freqstr = getattr(dates, 'freqstr', None)
if maybe_freqstr is not None:
return maybe_freqstr
try:
from pandas.tseries.api import infer_freq
freq = infer_freq(dates)
return freq
except ImportError:
pass
timedelta = datetime.timedelta
nobs = min(len(dates), 6)
if nobs == 1:
raise ValueError("Cannot infer frequency from one date")
if hasattr(dates, 'values'):
dates = dates.values # can't do a diff on a DateIndex
diff = np.diff(dates[:nobs])
delta = _add_datetimes(diff)
nobs -= 1 # after diff
if delta == timedelta(nobs): #greedily assume 'D'
return 'D'
elif delta == timedelta(nobs + 2):
return 'B'
elif delta == timedelta(7*nobs):
return 'W'
elif delta >= timedelta(28*nobs) and delta <= timedelta(31*nobs):
return 'M'
elif delta >= timedelta(90*nobs) and delta <= timedelta(92*nobs):
return 'Q'
elif delta >= timedelta(365 * nobs) and delta <= timedelta(366 * nobs):
return 'A'
else:
return
| bsd-3-clause |
Jozhogg/iris | docs/iris/example_code/General/polynomial_fit.py | 7 | 1443 | """
Fitting a polynomial
====================
This example demonstrates computing a polynomial fit to 1D data from an Iris
cube, adding the fit to the cube's metadata, and plotting both the 1D data and
the fit.
"""
import matplotlib.pyplot as plt
import numpy as np
import iris
import iris.quickplot as qplt
def main():
fname = iris.sample_data_path('A1B_north_america.nc')
cube = iris.load_cube(fname)
# Extract a single time series at a latitude and longitude point.
location = next(cube.slices(['time']))
# Calculate a polynomial fit to the data at this time series.
x_points = location.coord('time').points
y_points = location.data
degree = 2
p = np.polyfit(x_points, y_points, degree)
y_fitted = np.polyval(p, x_points)
# Add the polynomial fit values to the time series to take
# full advantage of Iris plotting functionality.
long_name = 'degree_{}_polynomial_fit_of_{}'.format(degree, cube.name())
fit = iris.coords.AuxCoord(y_fitted, long_name=long_name,
units=location.units)
location.add_aux_coord(fit, 0)
qplt.plot(location.coord('time'), location, label='data')
qplt.plot(location.coord('time'),
location.coord(long_name),
'g-', label='polynomial fit')
plt.legend(loc='best')
plt.title('Trend of US air temperature over time')
qplt.show()
if __name__ == '__main__':
main()
| lgpl-3.0 |
PythonCharmers/bokeh | bokeh/sampledata/daylight.py | 45 | 2522 | """Daylight hours from http://www.sunrisesunset.com """
from __future__ import absolute_import
import re
import datetime
import requests
from six.moves import xrange
from os.path import join, abspath, dirname
import pandas as pd
url = "http://sunrisesunset.com/calendar.asp"
r0 = re.compile("<[^>]+>|&nbsp;|[\r\n\t]")
r1 = re.compile(r"(\d+)(DST Begins|DST Ends)?Sunrise: (\d+):(\d\d)Sunset: (\d+):(\d\d)")
def fetch_daylight_hours(lat, lon, tz, dst, year):
"""Fetch daylight hours from sunrisesunset.com for a given location.
Parameters
----------
lat : float
Location's latitude.
lon : float
Location's longitude.
tz : int or float
Time zone offset from UTC. Use floats for half-hour time zones.
dst : int
Daylight saving type, e.g. 0 -> none, 1 -> North America, 2 -> Europe.
See sunrisesunset.com/custom.asp for other possible values.
year : int
Year (1901..2099).
"""
daylight = []
summer = 0 if lat >= 0 else 1
for month in xrange(1, 12+1):
args = dict(url=url, lat=lat, lon=lon, tz=tz, dst=dst, year=year, month=month)
response = requests.get("%(url)s?comb_city_info=_;%(lon)s;%(lat)s;%(tz)s;%(dst)s&month=%(month)s&year=%(year)s&time_type=1&wadj=1" % args)
entries = r1.findall(r0.sub("", response.text))
for day, note, sunrise_hour, sunrise_minute, sunset_hour, sunset_minute in entries:
if note == "DST Begins":
summer = 1
elif note == "DST Ends":
summer = 0
date = datetime.date(year, month, int(day))
sunrise = datetime.time(int(sunrise_hour), int(sunrise_minute))
sunset = datetime.time(int(sunset_hour), int(sunset_minute))
daylight.append([date, sunrise, sunset, summer])
return pd.DataFrame(daylight, columns=["Date", "Sunrise", "Sunset", "Summer"])
# daylight_warsaw_2013 = fetch_daylight_hours(52.2297, -21.0122, 1, 2, 2013)
# daylight_warsaw_2013.to_csv("bokeh/sampledata/daylight_warsaw_2013.csv", index=False)
def load_daylight_hours(file):
path = join(dirname(abspath(__file__)), file)
df = pd.read_csv(path, parse_dates=["Date", "Sunrise", "Sunset"])
df["Date"] = df.Date.map(lambda x: x.date())
df["Sunrise"] = df.Sunrise.map(lambda x: x.time())
df["Sunset"] = df.Sunset.map(lambda x: x.time())
return df
daylight_warsaw_2013 = load_daylight_hours("daylight_warsaw_2013.csv")
| bsd-3-clause |
pravsripad/jumeg | jumeg/decompose/ocarta.py | 3 | 78394 | # Authors: Lukas Breuer <[email protected]>
"""
----------------------------------------------------------------------
--- jumeg.decompose.ocarta -------------------------------------------
----------------------------------------------------------------------
author : Lukas Breuer
email : [email protected]
last update: 14.06.2016
version : 1.2
----------------------------------------------------------------------
Based on following publications:
----------------------------------------------------------------------
L. Breuer, J. Dammers, T.P.L. Roberts, and N.J. Shah, 'Ocular and
Cardiac Artifact Rejection for Real-Time Analysis in MEG',
Journal of Neuroscience Methods, Jun. 2014
(doi:10.1016/j.jneumeth.2014.06.016)
L. Breuer, J. Dammers, T.P.L. Roberts, and N.J. Shah, 'A Constrained
ICA Approach for Real-Time Cardiac Artifact Rejection in
Magnetoencephalography', IEEE Transactions on Biomedical Engineering,
Feb. 2014 (doi:10.1109/TBME.2013.2280143).
----------------------------------------------------------------------
How to use the OCARTA?
----------------------------------------------------------------------
from jumeg.decompose import ocarta
ocarta_obj = ocarta.JuMEG_ocarta()
ocarta_obj.fit(fn_raw)
--> for further comments we refer directly to the functions
----------------------------------------------------------------------
"""
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# import necessary modules
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
import random
import numpy as np
try:
from sklearn.utils.extmath import fast_dot
except ImportError:
fast_dot = np.dot
#######################################################
# #
# some general functions #
# #
#######################################################
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# function to fit the sigmoidal function to the cdf of
# a signal
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _fit_sigmoidal_to_cdf(ref_signal):
"""
Fits the sigmoidal function to the cumulative density
function (cdf) of the input data by estimating the
parameter a0 and a0 according to:
1.0
----------------------
1.0 + a0 * exp(-a1 * x)
"""
# import necessary modules
from scipy.optimize import curve_fit
from jumeg import jumeg_math as pre_math
# rescale signal to the range [0, 1]
ref_signal = pre_math.rescale(ref_signal, 0, 1)
# estimate cdf
num_bins = int(np.round(np.sqrt(ref_signal.shape[0])))
x = np.linspace(0, 1, num_bins)
counts, _ = np.histogram(ref_signal, bins=num_bins, density=True)
cdf = np.cumsum(counts)
# normalize cdf
cdf /= cdf[cdf.shape[0]-1]
# fit sigmoidal function to normalized cdf
opt_para, cov_para = curve_fit(pre_math.sigm_func, x, cdf)
if cov_para[0, 0] > 100:
opt_para[0] /= np.sqrt(cov_para[0, 0])
if cov_para[1, 1] > 100:
opt_para[1] /= np.sqrt(cov_para[1, 1])
# return optimal cost_function parameter
return opt_para
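# Hedged usage sketch (not part of the original module): fit the sigmoid to the
# cdf of a standard-normal sample; the returned pair (a0, a1) parametrises
# 1.0 / (1.0 + a0 * exp(-a1 * x)) on the rescaled [0, 1] range. The demo relies
# on the jumeg helpers imported inside the function above; `_demo_fit_sigmoid`
# is a name added here for illustration only.
def _demo_fit_sigmoid():
    ref_signal = np.random.randn(10000)
    a0, a1 = _fit_sigmoidal_to_cdf(ref_signal)
    return a0, a1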
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# function to generate epochs around a given event
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def epochs(data, idx_event, sfreq, tpre, tpost):
# get indices of the time window around the event
idx_pre_event = int(tpre * sfreq)
idx_post_event = int(tpost * sfreq)
# define some parameter
nsamp = idx_post_event - idx_pre_event + 1
if len(data.shape) == 2:
nchan, ntsl = data.shape
else:
nchan = 1
ntsl = len(data)
data = data.reshape(nchan, ntsl)
# check if time windows are in the data range
if hasattr(idx_event, "__len__"):
idx_event = idx_event[((idx_event+idx_pre_event) > 0) & ((idx_event+idx_post_event) < ntsl)]
nevents = idx_event.shape[0]
bool_array = True
else:
nevents = 1
bool_array = False
if nevents == 0:
return -1
# create array for new epochs
epoch_data = np.zeros((nevents, nchan, nsamp), dtype=np.float64)
if bool_array is False:
epoch_data[0, :, :] = data[:, int(idx_event+idx_pre_event):int(idx_event+idx_post_event+1)]
else:
for i in range(nevents):
epoch_data[i, :, :] = data[:, int(idx_event[i]+idx_pre_event):int(idx_event[i]+idx_post_event+1)]
# return epoch data
return epoch_data
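# Hedged usage sketch (not part of the original module): cut three epochs of
# 11 samples (5 before and 5 after each event sample, tpre is negative) out of
# a 2-channel recording sampled at 10 Hz. `_demo_epochs` is a name added here
# for illustration only.
def _demo_epochs():
    data = np.random.randn(2, 100)
    idx_event = np.array([20, 50, 80])
    ep = epochs(data, idx_event, sfreq=10.0, tpre=-0.5, tpost=0.5)
    assert ep.shape == (3, 2, 11)
    return ep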
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# OCARTA constrained ICA implementation
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def ocarta_constrained_ICA(data, initial_weights=None, lrate=None, block=None, wchange=1e-16,
annealdeg=60., annealstep=0.9, maxsteps=200, ca_idx=None,
ca_cost_func=[1., 1.], oa_idx=None, oa_cost_func=[1., 1.],
sphering=None, oa_template=[], fixed_random_state=None):
"""
Run the OCARTA constrained ICA decomposition on raw data
Parameters
----------
data : data array [nchan, ntsl] for decomposition
initial_weights : initialize weights matrix
default: None --> identity matrix is used
lrate : initial learning rate (for most applications 1e-3 is
a good start)
        --> smaller learning rates slow down the convergence; the value
            merely indicates the relative size of the change in weights
            default: lrate = 0.01 / log(nchan**2)
    block : the block size used to randomly extract (in time) a chunk
            of data
            default: block = floor(sqrt(ntsl / 3.0))
    wchange : iteration stops when the weight change is smaller than
            this number
            default: wchange = 1e-16
    annealdeg : if the angle delta is larger than annealdeg (in degrees)
            the learning rate will be reduced
default: annealdeg = 60
annealstep : the learning rate will be reduced by this factor:
lrate *= annealstep
default: annealstep = 0.9
maxsteps : maximum number of iterations to be done
default: maxsteps = 200
ca_idx: array
indices of the columns of the weight matrix where 'ca_cost_func'
should be used as cost-function
ca_cost_func : array with 2 elements a0 and a1
cost-function for cardiac activity:
c(x) = 1.0 / (1.0 + a0 * exp(a1 * x))
Note: Is only used if keyword 'ca_idx' is set
default: [1., 1.] --> sigmoidal function is used
oa_idx: array
indices of the columns of the weight matrix where 'oa_cost_func'
should be used as cost-function
oa_cost_func : array with 2 elements a0 and a1
cost-function for ocular activity:
c(x) = 1.0 / (1.0 + a0 * exp(a1 * x))
Note: Is only used if keyword 'oa_idx' is set
default: [1., 1.] --> sigmoidal function is used
sphering : sphering matrix used to whiten the data.
oa_template : spatial template of ocular activity. If set one column
of the demixing matrix is updated according to the template.
            default: oa_template=[]
Returns
-------
weights : un-mixing matrix
activations : underlying sources
"""
# import necessary modules
from scipy.linalg import pinv
from scipy.stats.stats import pearsonr
from jumeg import jumeg_math as pre_math
from math import copysign as sgn
import math
# define some default parameter
default_max_weight = 1e8
default_restart_fac = 0.9
default_blowup = 1e4
default_blowup_fac = 0.5
default_nsmall_angle = 20
degconst = 180.0 / np.pi
# check data shape
ntsl, npc = data.shape
# normalize data
# --> to prevent an overflow in exp() estimation
norm_factor = np.max(abs(data))
data /= norm_factor
if (npc < 2) or (ntsl < npc):
raise ValueError('Data size too small!')
npc_square = npc ** 2
# check input parameter
# heuristic default - may need adjustment for
# large or tiny data sets
    if lrate is None:
        lrate = 0.01/math.log(npc ** 2.0)
    if block is None:
        block = int(math.floor(math.sqrt(ntsl/3.0)))
# collect parameter
    nblock = ntsl // block  # integer division keeps 'lastt' an int (needed for range() under Python 3)
lastt = (nblock - 1) * block + 1
# initialize training
if np.any(initial_weights):
        # use the unitary version of the input matrix
from scipy.linalg import sqrtm
weights = np.dot(sqrtm(np.linalg.inv(np.dot(initial_weights,
initial_weights.conj().transpose()))), initial_weights)
else:
# initialize weights as identity matrix
weights = np.identity(npc, dtype=np.float64)
BI = block * np.identity(npc, dtype=np.float64)
bias = np.zeros((npc, 1), dtype=np.float64)
onesrow = np.ones((1, block), dtype=np.float64)
startweights = weights.copy()
oldweights = startweights.copy()
istep = 0
count_small_angle = 0
wts_blowup = False
# ..................................
# trainings loop
# ..................................
while istep < maxsteps:
# ..................................
        # shuffle the data at each step
# ..................................
if fixed_random_state:
random.seed(istep) # --> permutation is fixed but differs at each step
else:
random.seed(None)
permute = list(range(ntsl))
random.shuffle(permute)
# ..................................
# ICA training block
# loop across block samples
# ..................................
for t in range(0, lastt, block):
u_ = fast_dot(data[permute[t:t + block], :], weights) + fast_dot(bias, onesrow).T
# ..................................
# logistic ICA weights updates
# ..................................
y = pre_math.sigm_func(u_)
if ca_idx is not None:
y[:, ca_idx] = pre_math.sigm_func(u_[:, ca_idx], ca_cost_func[0], ca_cost_func[1])
if oa_idx is not None:
y[:, oa_idx] = pre_math.sigm_func(u_[:, oa_idx], oa_cost_func[0], oa_cost_func[1])
weights += lrate * fast_dot(weights, BI + fast_dot(u_.T, (1.0 - 2.0 * y)))
bias += (lrate * np.sum((1.0 - 2.0 * y), axis=0, dtype=np.float64)).reshape(npc, 1)
# check change limit
max_weight_val = np.max(np.abs(weights))
if max_weight_val > default_max_weight:
wts_blowup = True
if wts_blowup:
break
# ..................................
# update weights for ocular activity
# .................................
if ((istep % 20) == 0) and not np.all(oa_template == 0):
# ..................................
# generate spatial maps
# ..................................
spatial_maps = fast_dot(sphering.T, pinv(weights.T)).T
# ..................................
# estimate correlation between
# template and spatial maps
# ..................................
spatial_corr = np.zeros(npc)
for imap in range(npc):
spatial_corr[imap] = pearsonr(spatial_maps[imap], oa_template)[0]
# ..................................
# update column of weights which
# is most similar to ocular activity
# ..................................
imax = np.argmax(np.abs(spatial_corr))
c = np.abs(spatial_corr[imax])
oa_min = np.min(spatial_maps[imax])
oa_max = np.max(spatial_maps[imax])
spatial_maps[imax] = c * spatial_maps[imax] + (1. - c) * \
pre_math.rescale(sgn(1., spatial_corr[imax]) * oa_template, oa_min, oa_max)
# ..................................
# back-transform spatial maps
# ..................................
weights = pinv(fast_dot(sphering, spatial_maps.T)).T
# ..................................
# here we continue after the for
# loop over the ICA training blocks
# if weights in bounds:
# ..................................
if not wts_blowup:
oldwtchange = weights - oldweights
istep += 1
angledelta = 0.0
delta = oldwtchange.reshape(1, npc_square)
            change = np.sum(delta * delta)
if istep > 1:
angledelta = math.acos(np.sum(delta * olddelta)/math.sqrt(change * oldchange)) * degconst
# ..................................
# anneal learning rate
# ..................................
oldweights = weights.copy()
if angledelta > annealdeg:
lrate *= annealstep # anneal learning rate
                olddelta = delta  # accumulate angle delta until annealdeg is reached
oldchange = change
count_small_angle = 0
else:
if istep == 1: # on first step only
olddelta = delta # initialize
oldchange = change
count_small_angle += 1
if (count_small_angle > default_nsmall_angle):
istep = maxsteps
# ..................................
# apply stopping rule
# ..................................
if (istep > 2) and (change < wchange):
istep = maxsteps
elif change > default_blowup:
lrate *= default_blowup_fac
# ..................................
# restart if weights blow up
# (for lowering lrate)
# ..................................
else:
istep = 0 # start again
wts_blowup = 0 # re-initialize variables
lrate *= default_restart_fac # with lower learning rate
weights = startweights.copy()
oldweights = startweights.copy()
olddelta = np.zeros((1, npc_square), dtype=np.float64)
bias = np.zeros((npc, 1), dtype=np.float64)
# ..................................
# prepare return values
# ..................................
    data *= norm_factor  # undo the normalization applied at the beginning of this routine
    weights = weights.T  # to conform with the col/row convention outside this routine
activations = fast_dot(weights, data.T)
# return results
return weights, activations
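# ----------------------------------------------------------------------
# Hedged usage sketch (illustration only; requires jumeg to be importable
# because the routine above imports jumeg.jumeg_math internally): runs the
# constrained ICA on a synthetic 3-source mixture without any cardiac or
# ocular constraint. Passing oa_template=np.zeros(3) keeps the ocular
# template update switched off, so no sphering matrix is needed. All
# numbers are made up.
# ----------------------------------------------------------------------
def _demo_constrained_ica_usage():
    rng = np.random.RandomState(42)
    sources = rng.laplace(size=(2000, 3))     # super-Gaussian sources
    mixing = rng.randn(3, 3)
    observed = np.dot(sources, mixing.T)      # shape (ntsl, npc)
    weights, activations = ocarta_constrained_ICA(observed.copy(), maxsteps=50,
                                                  fixed_random_state=True,
                                                  oa_template=np.zeros(3))
    # weights: un-mixing matrix (npc x npc); activations: (npc, ntsl)
    return weights.shape, activations.shape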
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# function to identify ICs belonging to cardiac
# artifacts
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def identify_cardiac_activity(activations, idx_R_peak, sfreq=1017.25, ecg_flow=8,
ecg_fhigh=16, order=4, tpre=-0.3, tpost=0.7,
thresh_kui_ca=0.4):
"""
Function to identify independent components (ICs) belonging
to cardiac activity. The identification is based on cross-trial-
    phase-statistics (CTPS) as introduced by Dammers et al. (2008).
Parameters
----------
activations : data array [nchan, ntsl] of underlying sources
(ICs) as achieved by ICA
idx_R_peak : array containing the indices of the R-peaks
sfreq : sampling frequency
default: sfreq=1017.25
order: filter order
default: 4
ecg_flow : float
low cut-off frequency in Hz
ecg_fhigh : float
high cut-off frequency in Hz
tpre : time before R-peak (to create Epochs) in seconds
default: tpre=-0.3
tpost : time after R-peak (to create Epochs) in seconds
default: tpost=0.7
thresh_kui_ca : float
threshold for the normalized kuiper statistic to identify
ICs belonging to cardiac activity. Must be in the range
between 0. and 1.
Returns
-------
idx_ca : array of indices of ICs belonging to cardiac
activity
"""
# import necessary modules
from mne.preprocessing.ctps_ import ctps
from jumeg.filter import jumeg_filter
# first filter ICs to the main frequency
# range of cardiac activity
act_filtered = activations.copy()
jfi_bw_bp = jumeg_filter(filter_method='bw', filter_type='bp', fcut1=ecg_flow,
fcut2=ecg_fhigh, sampling_frequency=sfreq, order=order)
jfi_bw_bp.apply_filter(act_filtered)
# create epochs around the R-peak
activations_epochs = epochs(act_filtered, idx_R_peak, sfreq, tpre, tpost)
# estimate CTPS
_, pk_dynamics, _ = ctps(activations_epochs, is_raw=True)
del _
pk_values = np.max(pk_dynamics, axis=1)
idx_ca = np.where(pk_values >= thresh_kui_ca)[0]
    # check that at least one and at most
    # five ICs belong to CA
if len(idx_ca) == 0:
idx_ca = [np.argmax(pk_values)]
elif len(idx_ca) > 5:
idx_ca = np.argsort(pk_values)[-5:]
# return indices
return np.array(idx_ca)
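# ----------------------------------------------------------------------
# Hedged usage sketch (commented out, illustration only): typical inputs
# are the ICA activations returned by ocarta_constrained_ICA and the
# R-peak indices found with mne.preprocessing.find_ecg_events. The names
# below are placeholders, not objects defined in this module.
#
#   activations = ...    # shape (n_components, n_samples)
#   idx_R_peak = ...     # sample indices of the R-peaks
#   idx_ca = identify_cardiac_activity(activations, idx_R_peak,
#                                      sfreq=1017.25, thresh_kui_ca=0.3)
# ----------------------------------------------------------------------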
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# function to identify ICs belonging to ocular
# artifacts
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def identify_ocular_activity(activations, eog_signals, spatial_maps,
oa_template, sfreq=1017.25, order=4,
eog_flow=1, eog_fhigh=10, thresh_corr_oa=0.8):
"""
Function to identify independent components (ICs) belonging
to ocular activity. The identification is based on correlation
analysis between the ICs and the EOG signal
Parameters
----------
activations : data array [nchan, ntsl] of underlying sources
(ICs) as achieved by ICA
eog_signals : data vector containing EOG-signals
spatial_maps : maps representing the spatial orientation
of the ICs (when performing temporal ICA the spatial
information is stored in the columns of the mixing-matrix)
oa_template : spatial template of ocular activity
sfreq : sampling frequency
default: sfreq=1017.25
order: filter order
default: 4
eog_flow : float
low cut-off frequency in Hz
eog_fhigh : float
high cut-off frequency in Hz
thresh_corr_oa : float
threshold for the correlation statistic to identify ICs
        belonging to ocular activity. Should be in the range
between 0. and 1.
Returns
-------
idx_oa : array of indices of ICs belonging to ocular
activity
"""
# import necessary modules
from jumeg.filter import jumeg_filter
from scipy.stats.stats import pearsonr
fi_bp_bw = jumeg_filter(filter_method='bw', filter_type='bp', fcut1=eog_flow,
fcut2=eog_fhigh, sampling_frequency=sfreq, order=order)
# first filter ICs to the main frequency
# range of ocular activity
act_filtered = activations.copy()
fi_bp_bw.apply_filter(act_filtered)
eog_filtered = eog_signals.copy()
fi_bp_bw.apply_filter(eog_filtered)
    # estimate Pearson correlation
nchan, _ = activations.shape
temp_corr = np.zeros(nchan)
spatial_corr = np.zeros(nchan)
for i in range(nchan):
temp_corr[i] = np.abs(pearsonr(act_filtered[i], eog_filtered)[0])
spatial_corr[i] = np.abs(pearsonr(spatial_maps[i], oa_template)[0])
# check where the correlation is above threshold
if np.all(oa_template == 0):
idx_oa = np.arange(nchan)[temp_corr > (thresh_corr_oa*0.5)]
else:
idx_oa = np.arange(nchan)[(temp_corr+spatial_corr) > thresh_corr_oa]
    # check that at least one and at most
    # five ICs belong to OA
if len(idx_oa) == 0:
if np.all(oa_template == 0):
idx_oa = [np.argmax(temp_corr)]
else:
idx_oa = [np.argmax((temp_corr + spatial_corr))]
elif len(idx_oa) > 5:
if np.all(oa_template == 0):
idx_oa = np.argsort(temp_corr)[-5:]
else:
idx_oa = np.argsort((temp_corr + spatial_corr))[-5:]
# return results
return idx_oa
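# ----------------------------------------------------------------------
# Hedged usage sketch (commented out, illustration only): the spatial maps
# are usually obtained by back-projecting the pseudo-inverse of the
# un-mixing matrix (see JuMEG_ocarta._update_cleaning_information below),
# and eog_signals is the EOG trace over the same samples as the
# activations. The names below are placeholders only.
#
#   idx_oa = identify_ocular_activity(activations, eog_signals,
#                                     spatial_maps, oa_template,
#                                     sfreq=1017.25, thresh_corr_oa=0.8)
# ----------------------------------------------------------------------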
########################################################
# #
# JuMEG_ocarta class #
# #
########################################################
class JuMEG_ocarta(object):
def __init__(self, name_ecg='ECG 001', ecg_freq=[8, 16],
thresh_ecg=0.3, name_eog='EOG 002', eog_freq=[1, 10],
seg_length=30.0, shift_length=10.0,
percentile_eog=80, npc=None, explVar=0.95, lrate=None,
maxsteps=100, flow=1.0, fhigh=20.0,
dim_reduction='explVar'):
"""
        Create an OCARTA object (the raw data file itself is passed later to fit()).
Optional parameters
-------------------
name_ecg : string
Name of the ECG channel.
default: name_ecg='ECG 001'
        ecg_freq : two-element int | float array
            [low, high] cut-off frequencies in Hz for the ECG signal used to identify R-peaks
            default: ecg_freq=[8, 16]
name_eog : string
Name of the EOG channel.
default: name_eog='EOG 002'
        eog_freq : two-element int | float array
            [low, high] cut-off frequencies in Hz for the EOG signal used to identify eye-blinks
            default: eog_freq=[1, 10]
seg_length : int | float
length of the data segments to be processed (in s).
default: seg_length=30.0
shift_length : int | float
length of the shift from one to another data segment (in s).
default: shift_length=10.0
npc : int
The number of PCA components used after ICA recomposition. The ensuing
attribute allows to balance noise reduction against potential loss of
features due to dimensionality reduction.
explVar : float | None
Must be between 0 and 1. If float, the number of components selected
matches the number of components with a cumulative explained variance
of 'explVar'
default: explVar=0.95
lrate : initial learning rate (for most applications 1e-3 is a good start)
            --> smaller learning rates slow down the convergence; the value merely
                indicates the relative size of the change in weights
                default: lrate=None
        maxsteps : maximum number of iterations to be done
            default: maxsteps=100
flow: if set data to estimate the optimal de-mixing matrix are filtered
prior to estimation. Note, data cleaning is applied to unfiltered
input data
fhigh: if set data to estimate the optimal de-mixing matrix are filtered
prior to estimation. Note, data cleaning is applied to unfiltered
input data
Returns
-------
ocarta_obj : instance of jumeg.decompose.ocarta.JuMEG_ocarta
"""
self._block = 0
self._ecg_freq = ecg_freq
self._eog_freq = eog_freq
self._eog_signals_tkeo = None
self._explVar = explVar
self._idx_eye_blink = None
self._idx_R_peak = None
self._lrate = lrate
self._maxsteps = maxsteps
self._name_ecg = name_ecg
self._name_eog = name_eog
self._npc = npc
self._ntsl = 0
self._opt_cost_func_cardiac = [1.0, 1.0]
self._opt_cost_func_ocular = [1.0, 1.0]
self._pca = None
self._percentile_eog = percentile_eog
self._picks = None
self._seg_length = seg_length
self._shift_length = shift_length
self._system = None
self._template_OA = None
self._thresh_ca = thresh_ecg
self._thresh_eog = 0.0
self._performance_ca = 0.0
self._performance_oa = 0.0
self._freq_corr_ca = 0.0
self._freq_corr_oa = 0.0
self._flow = flow
        self._fhigh = fhigh
self._dim_reduction = dim_reduction
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# set/get name of the ECG-channel
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _set_name_ecg(self, name_ecg):
self._name_ecg = name_ecg
def _get_name_ecg(self):
return self._name_ecg
name_ecg = property(_get_name_ecg, _set_name_ecg)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
    # set/get dimension reduction method
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _set_dim_reduction(self, dim_reduction):
if dim_reduction in ['', 'AIC', 'BIC', 'GAP', 'MDL', 'MIBS', 'explVar']:
self._dim_reduction = dim_reduction
else:
print("Dimension reduction method must be one of the following:")
print("AIC, BIC, GAP, MDL, MIBS or explVar")
            print("Program stops")
import pdb
pdb.set_trace()
def _get_dim_reduction(self):
return self._dim_reduction
dim_reduction = property(_get_dim_reduction, _set_dim_reduction)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# set/get optimal frequencies to identify heart beats
# NOTE: Array with two elements expected
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _set_ecg_freq(self, ecg_freq):
if len(ecg_freq) == 2:
self._ecg_freq = ecg_freq
else:
            print('NOTE: A two-element array is expected!')
def _get_ecg_freq(self):
return self._ecg_freq
ecg_freq = property(_get_ecg_freq, _set_ecg_freq)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# set/get optimal threshold to identify cardiac activity
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _set_thresh_ecg(self, thresh_ecg):
if abs(thresh_ecg) < 1.0:
self._thresh_ca = abs(thresh_ecg)
else:
print('NOTE: Threshold to identify cardiac activity must be between 0 and 1!')
def _get_thresh_ecg(self):
return self._thresh_ca
thresh_ecg = property(_get_thresh_ecg, _set_thresh_ecg)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# set/get indices of R-peaks
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _set_idx_R_peak(self, idx_R_peak):
self._idx_R_peak = idx_R_peak
def _get_idx_R_peak(self):
return self._idx_R_peak
idx_R_peak = property(_get_idx_R_peak, _set_idx_R_peak)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# set/get name of the EOG-channel
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _set_name_eog(self, name_eog):
self._name_eog = name_eog
def _get_name_eog(self):
return self._name_eog
name_eog = property(_get_name_eog, _set_name_eog)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# set/get optimal frequencies to identify eye blinks
# NOTE: Array with two elements expected
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _set_eog_freq(self, eog_freq):
if len(eog_freq) == 2:
self._eog_freq = eog_freq
else:
            print('NOTE: A two-element array is expected!')
def _get_eog_freq(self):
return self._eog_freq
eog_freq = property(_get_eog_freq, _set_eog_freq)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# set/get indices of eye-blinks
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _set_idx_eye_blink(self, idx_eye_blink):
self._idx_eye_blink = idx_eye_blink
def _get_idx_eye_blink(self):
return self._idx_eye_blink
idx_eye_blink = property(_get_idx_eye_blink, _set_idx_eye_blink)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
    # set/get optimal cost-function for cardiac activity
# NOTE: Array with two elements expected
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _set_opt_cost_func_cardiac(self, cost_func):
self._opt_cost_func_cardiac = cost_func
def _get_opt_cost_func_cardiac(self):
return self._opt_cost_func_cardiac
opt_cost_func_cardiac = property(_get_opt_cost_func_cardiac, _set_opt_cost_func_cardiac)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
    # set/get optimal cost-function for ocular activity
# NOTE: Array with two elements expected
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _set_opt_cost_func_ocular(self, cost_func):
self._opt_cost_func_ocular = cost_func
def _get_opt_cost_func_ocular(self):
return self._opt_cost_func_ocular
opt_cost_func_ocular = property(_get_opt_cost_func_ocular, _set_opt_cost_func_ocular)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# set/get length of the processed data segments (in s)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _set_seg_length(self, seg_length):
self._seg_length = abs(seg_length)
def _get_seg_length(self):
return self._seg_length
seg_length = property(_get_seg_length, _set_seg_length)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# set/get length of the data shift between two data
# segments (in s)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _set_shift_length(self, shift_length):
self._shift_length = abs(shift_length)
def _get_shift_length(self):
return self._shift_length
shift_length = property(_get_shift_length, _set_shift_length)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# set/get explained variance for the number of components
# used in the ICA
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _set_explVar(self, explVar):
self._explVar = abs(explVar)
def _get_explVar(self):
return self._explVar
explVar = property(_get_explVar, _set_explVar)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# set/get the number of components used in the ICA
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _set_npc(self, npc):
self._npc = abs(npc)
def _get_npc(self):
return self._npc
npc = property(_get_npc, _set_npc)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# set/get learning rate in the ICA implementation
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _set_lrate(self, lrate):
self._lrate = abs(lrate)
def _get_lrate(self):
return self._lrate
lrate = property(_get_lrate, _set_lrate)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# set/get number of maximal steps performed in ICA
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _set_maxsteps(self, maxsteps):
self._maxsteps = abs(maxsteps)
def _get_maxsteps(self):
return self._maxsteps
maxsteps = property(_get_maxsteps, _set_maxsteps)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# set/get performance value related to cardiac activity
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _set_perf_rej_ca(self, perf_val):
self._performance_ca = abs(perf_val)
def _get_perf_rej_ca(self):
return self._performance_ca
performance_ca = property(_get_perf_rej_ca, _set_perf_rej_ca)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# set/get performance value related to ocular activity
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _set_perf_rej_oa(self, perf_val):
self._performance_oa = abs(perf_val)
def _get_perf_rej_oa(self):
return self._performance_oa
performance_oa = property(_get_perf_rej_oa, _set_perf_rej_oa)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# set/get frequency correlation related to cardiac activity
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _set_freq_corr_ca(self, freq_corr):
self._freq_corr_ca = abs(freq_corr)
def _get_freq_corr_ca(self):
return self._freq_corr_ca
freq_corr_ca = property(_get_freq_corr_ca, _set_freq_corr_ca)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# set/get frequency correlation related to ocular activity
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _set_freq_corr_oa(self, freq_corr):
self._freq_corr_oa = abs(freq_corr)
def _get_freq_corr_oa(self):
return self._freq_corr_oa
freq_corr_oa = property(_get_freq_corr_oa, _set_freq_corr_oa)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# set/get low frequency range if data should be filtered
# prior to the estimation of the demixing matrix
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _set_flow(self, flow):
self._flow = abs(flow)
def _get_flow(self):
return self._flow
flow = property(_get_flow, _set_flow)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# set/get upper frequency range if data should be
# filtered prior to the estimation of the demixing matrix
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _set_fhigh(self, fhigh):
self._fhigh = abs(fhigh)
def _get_fhigh(self):
return self._fhigh
fhigh = property(_get_fhigh, _set_fhigh)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# spatial template of ocular activity
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _get_template_oa(self, picks_all):
"""
This function returns the optimal template for ocular
activity dependent on the used MEG system.
"""
if self._system == 'magnesWH3600':
oa_template = np.zeros(248)
idx = [41, 64, 65, 66, 91, 92, 93, 94, 95, 114, 115, 116, 123, 124, 125,
126, 127, 146, 147, 148, 152, 153, 154, 155, 173, 174, 175, 176,
177, 178, 192, 193, 194, 210, 211, 212, 226, 227, 228, 246, 247]
oa_template[idx] = [-0.21419708, -0.22414582, -0.23823837, -0.22548739,
-0.20605918, -0.27002638, -0.28440455, -0.28815480,
-0.24950478, 0.22117308, 0.29407277, 0.22017770,
-0.27574748, -0.41399348, -0.38132934, -0.35345995,
-0.26804101, 0.31008617, 0.41633716, 0.41061879,
-0.63642773, -0.50244379, -0.39267986, -0.20910069,
0.45186911, 0.65563883, 0.75937563, -0.73426719,
-0.51053563, -0.40412956, 0.56405808, 0.76393096,
1.26573280, 0.20691632, -0.52849269, -0.33448858,
0.51931741, 0.86917479, -0.26111224, 0.25098986,
0.44863074]
elif self._system == 'CTF-275':
oa_template = np.array([-0.058565141, -0.11690785, -0.17268385, -0.15426008, -0.20032253,
-0.15363393, -0.12323404, -0.10946847, -0.16916947, -0.14746442,
-0.15358254, -0.14400186, -0.15525403, -0.15283391, -0.13544806,
-0.17018204, -0.063472347, -0.10674760, -0.10030443, -0.11342886,
-0.13479470, -0.052915536, -0.024286532, -0.055881446, 0.0037911439,
-0.032562383, -0.14573821, -0.29425978, -0.0045026940, -0.031647166,
-0.10888827, -0.045307071, -0.13936511, -0.046178482, -0.084780686,
-0.076642890, -0.036790318, -0.075410101, -0.044708814, -0.084798443,
-0.11400239, -0.16520238, -0.014120410, -0.081479993, -0.097965143,
-0.11635242, -0.14776817, -0.17646771, -0.080756626, -0.11254949,
-0.087876982, -0.14841610, -0.17512911, -0.20432370, -0.070218149,
-0.058648725, -0.13394765, -0.045302358, -0.10417176, -0.15566306,
-0.11492872, -0.10548316, -0.095742287, -0.13736693, -0.092999466,
-0.10288697, -0.11555681, -0.11282008, -0.082011793, -0.049344792,
-0.088065540, -0.11053412, -0.12065042, -0.025757443, -0.027820728,
-0.082922248, -0.12122259, -0.15043460, -0.052105187, -0.15553202,
-0.14986676, -0.014437410, -0.090186754, -0.15645345, -0.16031683,
-0.13582460, -0.034788139, -0.13993048, -0.16867599, -0.15442359,
-0.11393539, -0.074824826, -0.11928964, -0.13316035, -0.14855343,
-0.15660267, -0.10442158, -0.11282534, -0.17358998, -0.13321466,
-0.10717522, -0.086176787, -0.075780353, -0.14099021, -0.28022000,
-0.26693972, -0.21092154, -0.17802375, -0.13204559, -0.12027664,
-0.076974510, -0.45429123, -0.41849051, -0.32964312, -0.25982543,
-0.18627639, -0.14125467, -0.11137423, -0.53589574, -0.46382467,
-0.36122694, -0.27124481, -0.20924367, -0.15347565, -0.099263216,
-0.52728865, -0.42379039, -0.36164611, -0.28821427, -0.22000020,
-0.14784679, -0.11590759, 0.036824802, 0.093934452, 0.13097195,
0.14522522, 0.15277589, 0.070567862, 0.058642875, 0.088307732,
0.12242332, 0.14752465, 0.12698872, 0.081547945, 0.11954144,
0.083645453, 0.096368518, 0.066791858, 0.011411852, 0.065904644,
0.060074836, 0.048916143, 0.017195015, -0.017013312, -0.0071025117,
-0.0093241514, -0.031171524, -0.010059101, 0.074217858, 0.21455144,
-0.035040070, -0.0091646982, 0.050761747, -0.012930817, 0.058960765,
0.0063172897, 0.025850518, 0.017197767, -0.020378035, 0.0044334725,
0.017243069, 0.057735566, 0.068522080, 0.10762666, -0.061766704,
0.017947565, 0.079977442, 0.059938679, 0.097308417, 0.11610799,
0.0054828443, 0.066051916, 0.067836441, 0.11593674, 0.12678335,
0.13789155, 0.012435442, 0.013607388, 0.080161115, -0.036834136,
-0.010289298, 0.035043452, 0.061348170, 0.071413054, 0.071413054,
0.071413054, 0.081477938, 0.025778993, -0.029919951, 0.10495685,
0.15127930, -0.014177644, 0.043475680, 0.11972285, 0.17038701,
0.080144106, 0.13886613, 0.19256639, -0.0040417525, 0.058780805,
-0.0059654108, 0.043501240, 0.10268145, 0.012838752, 0.019365734,
0.070999708, 0.066554060, 0.098630593, -0.041697964, 0.055967335,
0.083834500, 0.071740581, 0.066069011, -0.049221401, -0.040997277,
0.0056458618, 0.050528772, 0.083315954, 0.064787693, 0.071272221,
0.11462440, 0.085937449, 0.068063294, 0.078936183, 0.061066792,
0.10164505, 0.22551399, 0.20088610, 0.15750752, 0.15745568,
0.13426065, 0.13086236, 0.42475419, 0.35426926, 0.26677939,
0.23072707, 0.16998415, 0.17016685, 0.50725829, 0.37056822,
0.29026340, 0.23929801, 0.19027917, 0.18509452, 0.14636934,
0.46976649, 0.37464059, 0.30673212, 0.22792418, 0.19673625,
0.20176800, 0.20786696, -0.021859729, -0.027438053, -0.058549057,
-0.054302882, -0.0097157384, -0.0098055885, -0.017562975, -0.059990033,
-0.10632609, 0.020643219, -0.048138548])
elif self._system == 'ElektaNeuromagTriux':
oa_template = np.array([0.18360799, 0.12003697, 0.33445287, -0.27803913, -0.068841192,
0.38209576, -0.17740718, 0.16923261, 0.33698536, 0.14444730,
0.28414915, 0.21649465, -0.23332505, 0.021221704, 0.23283946,
-0.16586170, -0.029340197, 0.15159994, -0.11861228, 0.063994609,
0.15887337, -0.15331291, 0.12103925, 0.21762525, -0.26022441,
-0.29051216, 0.23624229, -0.20911411, -0.13089867, 0.15844157,
-0.14554117, -0.12354527, 0.083576864, -0.28942896, -0.10863199,
0.26069866, -0.13382335, -0.020152835, 0.10108698, -0.13221163,
0.0042310797, 0.054602311, -0.11179135, 0.051934803, 0.063177254,
-0.093829138, 0.053365325, 0.12545024, -0.14798746, -0.33213444,
0.18566677, -0.062983559, -0.31510336, 0.12082395, -0.048603552,
-0.25811763, 0.088032829, -0.13875872, -0.25371598, 0.12950875,
-0.00068137906, -0.21972821, 0.058637269, 0.018040675, -0.17439945,
-0.016842386, 0.011023214, -0.13851954, 0.0064568693, 0.00087816034,
-0.17815832, 0.035305152, -0.10482940, 0.033799893, -0.00073875417,
0.11312366, -0.0064186697, -0.040750148, 0.019746752, 0.083932856,
-0.043249978, 0.011361737, 0.088216613, 0.0050663023, 0.015717159,
-0.30934606, 0.040938890, 0.020970890, -0.25145939, 0.020623727,
0.078630036, -0.29707181, -0.049092018, 0.13215664, -0.30131723,
-0.12101881, 0.14769097, -0.23362375, -0.10673614, 0.080561570,
-0.25059843, -0.053442328, 0.025712179, -0.20809924, -0.0041900317,
0.045628096, -0.22151296, -0.064993409, 0.032620655, -0.18441844,
-0.061350852, -0.0043718732, -0.14552628, -0.037528696, 0.14178086,
0.016916950, -0.061763999, 0.15629734, 0.024629873, -0.10211258,
0.10376096, 0.053401006, -0.094262869, 0.11486065, 0.022095798,
-0.059982449, 0.20893838, -0.23494617, -0.19395047, 0.22377159,
-0.054523217, -0.24033766, 0.19479757, -0.10694107, -0.15641026,
0.17976663, -0.094075995, -0.10325845, 0.15671319, 0.016030663,
-0.15307202, 0.17259257, 0.079347885, -0.22070749, 0.13871766,
0.13303529, -0.18200036, 0.11318009, 0.075325625, -0.12847975,
0.22519082, -0.0026578764, -0.33413725, -0.14958983, 0.13876642,
-0.31017721, -0.10880966, 0.25502318, -0.25154015, 0.15544350,
0.18711886, -0.31257406, -0.076500332, 0.22446558, 0.26722754,
-0.050660953, 0.18436889, 0.17396986, 0.036027727, 0.20300253,
0.090146574, 0.082440245, 0.24578699, 0.13840596, -0.071482571,
0.15100916, 0.18566209, -0.073750761, 0.10136248, 0.14856450,
-0.031046211, 0.068987417, 0.12696809, -0.035587460, 0.11512855,
0.15619548, 0.021727986, 0.14983967, 0.063651880, -0.023533432,
0.17243586, 0.13961274, -0.018560930, 0.12728923, 0.10843198,
0.018077515, 0.094269730, 0.042793299, -0.061635196, 0.055970987,
0.11938486, -0.095553922, 0.025694485, 0.060390569, 0.019585127,
0.076071456, 0.020436739, -0.022882829, 0.045396907, 0.082927479,
-0.011168266, 0.049173714, 0.083202144, 0.019587681, 0.095796808,
0.047050082, -0.016594952, 0.12060474, 0.043040342, -0.010968210,
0.094254002, 0.11582725, -0.0033878286, 0.065452487, 0.030402745,
-0.0010179377, 0.082236103, -0.043251259, -0.0036983206, 0.087834116,
-0.044584616, 0.0024826310, 0.070374248, 0.019219473, 0.029849494,
0.096728388, -0.013784682, 0.0020963223, 0.11318502, -0.027328685,
0.0012622290, 0.086936357, -0.078408848, 0.0078774207, 0.075611206,
-0.0080653859, 0.10391830, -0.0021302612, -0.060074793, 0.071262115,
0.026229429, -0.081020928, 0.041278111, 0.068204081, -0.066598833,
0.0085404961, 0.078485480, -0.041530870, 0.011619860, 0.090003247,
-0.076780998, 0.035278074, 0.12705908, -0.11769492, 0.034106793,
0.12100020, -0.099653483, 0.011808040, 0.11109468, -0.072550723,
0.070069110, 0.080182691, -0.10876908, 0.089920955, 0.11840345,
-0.16562674, 0.062388752, 0.13242117, -0.15432277, 0.027970059,
0.092424300, -0.089983873, 0.048860316, 0.15898658, -0.14973049,
0.051211366, 0.15877839, -0.19457758, -0.019922747, 0.17720550,
-0.14981668, -0.010227319, 0.11611742, -0.12898792, 0.10517578,
0.13878154, -0.26682595, -0.064715030, 0.13192554, -0.20017487,
-0.034091207, 0.17313771, -0.17714283, 0.068179001, 0.13961502,
-0.20904324])
else:
# ToDo: implement also other systems
oa_template = np.zeros(picks_all[-1:][0] + 1)
return oa_template
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# create a topoplot from the template of ocular activity
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def topoplot_oa(self, info, show=False, fn_img=None):
"""
Creates a topoplot from the template of ocular
activity.
"""
# import necessary modules
import matplotlib.pyplot as plt
from mne import pick_types
from mne.viz import plot_topomap
from mne.channels.layout import _find_topomap_coords
if self._system == 'ElektaNeuromagTriux':
for ch_type in ['mag', 'planar1', 'planar2']:
picks = pick_types(info, meg=ch_type, eeg=False,
eog=False, stim=False, exclude='bads')
pos = _find_topomap_coords(info, picks)
plt.ioff()
fig = plt.figure('topoplot ocular activity', figsize=(12, 12))
plot_topomap(self._template_OA[picks], pos, res=200,
contours=0, show=False)
plt.ion()
# save results
if fn_img:
fig.savefig(fn_img + '_' + ch_type + '.png', format='png')
else:
pos = _find_topomap_coords(info, self._picks)
plt.ioff()
fig = plt.figure('topoplot ocular activity', figsize=(12, 12))
plot_topomap(self._template_OA[self._picks], pos, res=200, contours=0,
show=False)
plt.ion()
# if desired show the image
if show:
fig.show()
# save results
if fn_img:
fig.savefig(fn_img + '.png', format='png')
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# calculate optimal cost-function for cardiac activity
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def calc_opt_cost_func_cardiac(self, meg_raw):
"""
Function to estimate the optimal parameter for a sigmoidal
based cost-function for cardiac activity. The optimization
is based on the ECG-signals which are recorded in synchrony
with the MEG-signals.
"""
# check if ECG channel exist in data
if self.name_ecg in meg_raw.ch_names:
# import necessary modules
from mne.preprocessing import find_ecg_events
from mne import Epochs, set_log_level
# set logger level to WARNING
set_log_level('WARNING')
# define some parameter
event_id_ecg = 999
# first identify R-peaks in ECG signal
idx_R_peak, _, _ = find_ecg_events(meg_raw, ch_name=self.name_ecg,
event_id=event_id_ecg, l_freq=self.ecg_freq[0],
h_freq=self.ecg_freq[1], verbose=None)
self._set_idx_R_peak(idx_R_peak - meg_raw.first_samp)
# generate epochs around R-peaks and average signal
picks = [meg_raw.info['ch_names'].index(self.name_ecg)]
ecg_epochs = Epochs(meg_raw, events=idx_R_peak, event_id=event_id_ecg,
tmin=-0.3, tmax=0.3, baseline=None, picks=picks,
verbose=None, proj=False)
ecg_signal = np.abs(ecg_epochs.average(picks=[0]).data.flatten())
# estimate optimal cost-function
cost_func = _fit_sigmoidal_to_cdf(ecg_signal)
if cost_func[1] > 20:
cost_func[1] = 20.0
self._set_opt_cost_func_cardiac(cost_func)
# if no ECG channel is found use sigmoidal function as cost-function
else:
print(">>>> NOTE: No ECG channel found!")
            print(">>>> Sigmoidal function used as cost-function for cardiac activity!")
self._set_opt_cost_func_cardiac([1.0, 1.0])
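    # Hedged note (illustration only): the two fitted parameters [a0, a1]
    # stored by this method are later handed to ocarta_constrained_ICA()
    # as 'ca_cost_func', i.e. the cardiac components are trained with the
    # subject-specific sigmoid instead of the plain logistic non-linearity.
    # Rough sketch of the flow (placeholder names):
    #
    #   oca.calc_opt_cost_func_cardiac(meg_raw)
    #   a0, a1 = oca.opt_cost_func_cardiac
    #   weights, acts = ocarta_constrained_ICA(pca_data, ca_idx=idx_ca,
    #                                          ca_cost_func=[a0, a1],
    #                                          sphering=sphering,
    #                                          oa_template=oa_template)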
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# calculate optimal cost-function for ocular activity
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def calc_opt_cost_func_ocular(self, meg_raw):
"""
Function to estimate the optimal parameter for a sigmoidal
based cost-function for ocular activity. The optimization
is based on the EOG-signals which are recorded in synchrony
with the MEG-signals.
"""
# check if EOG channel exist in data
if self.name_eog in meg_raw.ch_names:
# import necessary modules
from jumeg.jumeg_math import calc_tkeo
from mne.preprocessing import find_eog_events
from mne import Epochs, set_log_level
from scipy.stats import scoreatpercentile as percentile
# set logger level to WARNING
set_log_level('WARNING')
# define some parameter
event_id_eog = 998
            # first identify eye blinks in the EOG signal
idx_eye_blink = find_eog_events(meg_raw, ch_name=self.name_eog,
event_id=event_id_eog, l_freq=self.eog_freq[0],
h_freq=self.eog_freq[1], verbose=None)
self._set_idx_eye_blink(idx_eye_blink - meg_raw.first_samp)
# generate epochs around eye blinks and average signal
picks = [meg_raw.info['ch_names'].index(self.name_eog)]
eog_epochs = Epochs(meg_raw, events=idx_eye_blink, event_id=event_id_eog,
tmin=-0.3, tmax=0.3, baseline=None, picks=picks,
verbose=None, proj=False)
eog_epochs.verbose = None
eog_signal = np.abs(eog_epochs.average(picks=[0]).data.flatten())
# estimate optimal cost-function
cost_func = _fit_sigmoidal_to_cdf(eog_signal)
self._set_opt_cost_func_ocular(cost_func)
# perform tkeo-transformation to EOG-signals
self._eog_signals_tkeo = np.abs(calc_tkeo(meg_raw[picks][0]))
# estimate threshold for ocular activity
self._thresh_eog = percentile(self._eog_signals_tkeo, self._percentile_eog)
# if no EOG channel is found use sigmoidal function as cost-function
else:
print(">>>> NOTE: No EOG channel found!")
            print(">>>> Sigmoidal function used as cost-function for ocular activity!")
self._set_opt_cost_func_ocular([1.0, 1.0])
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# interface to estimate the whitening matrix as well as
# the current weight matrix W_(i) based on the previous
# weight matrix W_(i-1).
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _update_weight_matrix(self, data, initial_weights=None,
ca_idx=None, oa_idx=None, annealstep=0.6):
"""
Interface to estimate the whitening matrix as well as the current
weight matrix W_(i) based on the previous weight matrix W_(i-1).
"""
# import necessary modules
from .ica import whitening
# estimate PCA structure
if self._pca is None:
pca_data, pca = whitening(data.T, dim_reduction=self.dim_reduction,
npc=self.npc, explainedVar=self.explVar)
self._pca = pca
self.npc = len(pca_data[0])
else:
# perform centering and whitening
dmean = data.mean(axis=-1)
stddev = np.std(data, axis=-1)
dnorm = (data - dmean[:, np.newaxis])/stddev[:, np.newaxis]
# generate principal components
if self.npc is None:
if initial_weights is None:
self.npc = len(dnorm)
else:
self.npc = initial_weights.shape[0]
pca_data = fast_dot(dnorm.T, self._pca.components_[:self.npc].T)
# update mean and standard-deviation in PCA object
self._pca.mean_ = dmean
self._pca.stddev_ = stddev
# estimate weight matrix
sphering = self._pca.components_[:self.npc].copy()
weights, activations = ocarta_constrained_ICA(pca_data, initial_weights=initial_weights,
maxsteps=self.maxsteps, lrate=self.lrate, ca_idx=ca_idx,
ca_cost_func=self.opt_cost_func_cardiac, oa_idx=oa_idx,
oa_cost_func=self.opt_cost_func_ocular, sphering=sphering,
oa_template=self._template_OA[self._picks],
annealstep=annealstep)
# return results
return activations, weights
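    # Hedged shape sketch (illustration only, made-up numbers): with 248
    # MEG channels and, say, 30 retained components
    #   data        : (248, n_samples)   sensor space (picked channels)
    #   pca_data    : (n_samples, 30)    whitened PCA space
    #   weights     : (30, 30)           un-mixing matrix
    #   activations : (30, n_samples)    independent components
    # On the first call the PCA is fitted; on later calls only the mean and
    # standard deviation are updated and the stored components are reused.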
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# interface for updating cleaning information, i.e.
# estimating the un-mixing matrix and identify ICs
# related to cardiac and ocular artifacts
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _update_cleaning_information(self, meg_raw, idx_start, idx_end,
initial_weights=None, ca_idx=None, oa_idx=None,
annealstep=0.6):
"""
Interface for updating cleaning information, i.e.
estimating the un-mixing matrix and identifying
independent components (ICs) related to ocular or
cardiac artifacts.
"""
# import necessary modules
from scipy.linalg import pinv
# (1) estimate optimal weight matrix
act, weights = self._update_weight_matrix(meg_raw._data[self._picks, idx_start:idx_end],
initial_weights=initial_weights,
ca_idx=ca_idx, oa_idx=oa_idx,
annealstep=annealstep)
# (2) identification of artifact ICs
# ------------------------------------------------------
# (a) for cardiac activity:
# get indices of the ICs belonging to cardiac activity
# --> using CTPS
idx_R_peak = self._get_idx_R_peak().copy()[:, 0]
idx_R_peak = idx_R_peak[idx_R_peak > idx_start]
idx_R_peak = idx_R_peak[idx_R_peak < idx_end] - idx_start
if len(idx_R_peak) < 3:
import pdb
pdb.set_trace()
idx_ca = identify_cardiac_activity(act.copy(), idx_R_peak, thresh_kui_ca=self._get_thresh_ecg(),
sfreq=meg_raw.info['sfreq'])
# (b) for ocular activity
# get indices of the ICs belonging to ocular activity
# --> using correlation with EOG signals
if self._get_name_eog() in meg_raw.ch_names:
self._set_npc(weights.shape[0])
spatial_maps = fast_dot(self._pca.components_[:self._get_npc()].T, pinv(weights)).T
# make sure that ICs already identified as being related
# to cardiac activity are not identified again
idx_ok = np.arange(self._get_npc())
idx_ok = np.setdiff1d(idx_ok, idx_ca)
eog_signals = meg_raw._data[meg_raw.info['ch_names'].index(self._get_name_eog()), idx_start:idx_end]
idx_oa = identify_ocular_activity(act[idx_ok], eog_signals, spatial_maps[idx_ok],
self._template_OA[self._picks], sfreq=meg_raw.info['sfreq'])
idx_oa = idx_ok[idx_oa]
else:
idx_oa = []
# return results
return weights, idx_ca, idx_oa
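    # Hedged note (illustration only): cardiac components are identified
    # first (CTPS on epochs around the R-peaks falling inside the current
    # segment); ocular components are then searched only among the
    # remaining components, so no component is flagged for both artifact
    # types. Sketch of a typical call (placeholder names):
    #
    #   weights, idx_ca, idx_oa = self._update_cleaning_information(
    #       meg_filt, idx_start, idx_end, initial_weights=old_weights.T)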
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# perform initial training to get starting values
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _initial_training(self, meg_raw, idx_start=None, idx_end=None):
"""
Interface for estimating OCARTA on trainings data set
in order to get optimal initial parameter for proper
OCARTA estimation
"""
# import necessary modules
from jumeg import jumeg_math as pre_math
from math import copysign as sgn
from mne import pick_types
from scipy.linalg import pinv
from scipy.stats.stats import pearsonr
# estimate optimal cost-functions for cardiac
# and ocular activity
self.calc_opt_cost_func_cardiac(meg_raw)
self.calc_opt_cost_func_ocular(meg_raw)
# get optimal spatial template for ocular activity
if not np.any(self._template_OA):
picks_all = pick_types(meg_raw.info, meg=True, eeg=False,
eog=False, stim=False, exclude='bads')
self._template_OA = self._get_template_oa(picks_all)
# get indices of trainings data set
# --> keep in mind that at least one eye-blink must occur
        if (idx_start is None) or (idx_end is None):
            # centre the training window on the first eye blink (if any);
            # without eye-blink events simply start at the beginning
            if np.any(self.idx_eye_blink):
                idx_start = int(self._get_idx_eye_blink()[0, 0] - 0.5 * self._block)
                if idx_start < 0:
                    idx_start = 0
            else:
                idx_start = 0
idx_end = idx_start + self._block
if idx_end > self._ntsl:
idx_start = self._ntsl - self._block
idx_end = self._ntsl
# perform ICA on trainings data set
self._maxsteps *= 3
weights, idx_ca, idx_oa = self._update_cleaning_information(meg_raw, idx_start, idx_end, annealstep=0.9)
self._maxsteps /= 3
# update template of ocular activity
# (to have it individual for each subject)
if len(idx_oa) > 0:
oa_min = np.min(self._template_OA)
oa_max = np.max(self._template_OA)
oa_template = self._template_OA[self._picks].copy()
spatial_maps = fast_dot(self._pca.components_[:self.npc].T, pinv(weights)).T
if oa_min == oa_max:
oa_min = np.min(spatial_maps[idx_oa[0]])
oa_max = np.max(spatial_maps[idx_oa[0]])
# loop over all components related to ocular activity
for ioa in range(len(idx_oa)):
orientation = sgn(1., pearsonr(spatial_maps[idx_oa[ioa]], self._template_OA[self._picks])[0])
oa_template += pre_math.rescale(orientation * spatial_maps[idx_oa[ioa]], oa_min, oa_max)
self._template_OA[self._picks] = oa_template
# return results
return weights, idx_ca, idx_oa
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# estimate performance values
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def performance(self, meg_raw, meg_clean):
# import necessary modules
from jumeg.jumeg_math import calc_performance, calc_frequency_correlation
from mne import Epochs
from mne.preprocessing import find_ecg_events, find_eog_events
# from jumeg.jumeg_utils import get_peak_ecg
perf_ar = np.zeros(2)
freq_corr_ar = np.zeros(2)
# ECG, EOG: loop over all artifact events
for idx_ar in range(0, 2):
# for cardiac artifacts
if (idx_ar == 0) and self._get_name_ecg() in meg_raw.ch_names:
event_id = 999
idx_event, _, _ = find_ecg_events(meg_raw, event_id,
ch_name=self._get_name_ecg(),
verbose=False)
# for ocular artifacts
elif self._get_name_eog() in meg_raw.ch_names:
event_id = 998
idx_event = find_eog_events(meg_raw, event_id,
ch_name=self._get_name_eog(),
verbose=False)
else:
event_id = 0
if event_id:
# generate epochs
raw_epochs = Epochs(meg_raw, idx_event, event_id, -0.4, 0.4,
picks=self._picks, baseline=(None, None), proj=False,
verbose=False)
cleaned_epochs = Epochs(meg_clean, idx_event, event_id, -0.4, 0.4,
picks=self._picks, baseline=(None, None), proj=False,
verbose=False)
raw_epochs_avg = raw_epochs.average()
cleaned_epochs_avg = cleaned_epochs.average()
# estimate performance and frequency correlation
perf_ar[idx_ar] = calc_performance(raw_epochs_avg, cleaned_epochs_avg)
freq_corr_ar[idx_ar] = calc_frequency_correlation(raw_epochs_avg, cleaned_epochs_avg)
return perf_ar, freq_corr_ar
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# clean data using OCARTA
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def fit(self, fn_raw, meg_raw=None, denoising=None,
flow=None, fhigh=None, plot_template_OA=False, verbose=True,
name_ecg=None, ecg_freq=None, thresh_ecg=None,
name_eog=None, eog_freq=None, seg_length=None, shift_length=None,
npc=None, explVar=None, lrate=None, maxsteps=None,
fn_perf_img=None, dim_reduction=None):
"""
Function to fit OCARTA to input raw data file.
Parameters
----------
fn_raw : filename of the input data. Note, data should be
filtered prior to ICA application.
Optional parameters
-------------------
meg_raw : instance of mne.io.Raw. If set 'fn_raw' is ignored and
the data stored in meg_raw are processed
default: meg_raw=None
denoising : If set data are denoised, i.e. when reconstructing the
cleaned data set only the components explaining 'denoising'
percentage of variance are taken. Must be between 0 and 1.
default: denoising=None
flow: if set data to estimate the optimal de-mixing matrix are filtered
prior to estimation. Note, data cleaning is applied to unfiltered
input data
default: flow=1
fhigh: if set data to estimate the optimal de-mixing matrix are filtered
prior to estimation. Note, data cleaning is applied to unfiltered
input data
default: fhigh=20
plot_template_OA: If set a topoplot of the template for ocular activity
is generated
default: plot_template_OA=False
verbose : bool, str, int, or None
If not None, override default verbose level
(see mne.verbose).
default: verbose=True
for meaning of other optional parameter see JuMEG_ocarta.__init__, where
the ocarta object is generated.
Returns
-------
meg_clean : instance of mne.io.Raw. Cleaned version of the input data
fn_out : filename of the cleaned data. It is constructed from the
input filename by adding the extension ',ocarta-raw.fif'
"""
# import necessary modules
from jumeg.jumeg_plot import plot_performance_artifact_rejection as plt_perf
from jumeg.jumeg_utils import get_sytem_type
from mne import pick_types, set_log_level
from mne.io import Raw
from scipy.linalg import pinv
# set log level to 'WARNING'
set_log_level('WARNING')
# read raw data in
        if meg_raw is None:
meg_raw = Raw(fn_raw, preload=True, verbose=False)
else:
fn_raw = meg_raw.filenames[0]
# check input parameter
if name_ecg:
self.name_ecg = name_ecg
if ecg_freq:
self.ecg_freq = ecg_freq
if thresh_ecg:
self.thresh_ecg = thresh_ecg
if name_eog:
self.name_eog = name_eog
if eog_freq:
self.eog_freq = eog_freq
if seg_length:
self.seg_length = seg_length
if shift_length:
self.shift_length = shift_length
if explVar:
self.explVar = explVar
if npc:
self.npc = npc
if lrate:
self.lrate = lrate
if maxsteps:
self.maxsteps = maxsteps
if flow:
self.flow = flow
if fhigh:
self.fhigh = fhigh
if dim_reduction:
self.dim_reduction = dim_reduction
# extract parameter from input data
self._system = get_sytem_type(meg_raw.info)
self._ntsl = int(meg_raw._data.shape[1])
self._block = int(self._seg_length * meg_raw.info['sfreq'])
# make sure that everything is initialized well
self._eog_signals_tkeo = None
self._idx_eye_blink = None
self._idx_R_peak = None
self._pca = None
self._template_OA = None
self._thresh_eog = 0.0
self._performance_ca = 0.0
self._performance_oa = 0.0
self._freq_corr_ca = 0.0
self._freq_corr_oa = 0.0
meg_clean = meg_raw.copy()
meg_filt = meg_raw.copy()
# check if data should be filtered prior to estimate
# the optimal demixing parameter
if self.flow or self.fhigh:
# import filter module
from jumeg.filter import jumeg_filter
# define filter type
if not self.flow:
filter_type = 'lp'
self.flow = self.fhigh
filter_info = " --> filter parameter : filter type=low pass %d Hz" % self.flow
elif not self.fhigh:
filter_type = 'hp'
filter_info = " --> filter parameter : filter type=high pass %d Hz" % self.flow
else:
filter_type = 'bp'
filter_info = " --> filter parameter : filter type=band pass %d-%d Hz" % (self.flow, self.fhigh)
fi_mne_notch = jumeg_filter(fcut1=self.flow, fcut2=self.fhigh,
filter_method= "ws",
filter_type=filter_type,
remove_dcoffset=False,
sampling_frequency=meg_raw.info['sfreq'])
fi_mne_notch.apply_filter(meg_filt._data, picks=self._picks)
# -----------------------------------------
# check if we have Elekta data
# --> if yes OCARTA has to be performed
# twice, once for magnetometer and
# once for gradiometer
# -----------------------------------------
if self._system == 'ElektaNeuromagTriux':
ch_types = ['mag', 'grad']
if verbose:
                print(">>>> NOTE: as the input data contain gradiometer and magnetometer channels")
print(">>>> OCARTA has to be performed twice!")
else:
ch_types = [True]
# loop over all channel types
for ch_type in ch_types:
self._picks = pick_types(meg_raw.info, meg=ch_type, eeg=False,
eog=False, stim=False, exclude='bads')
self._pca = None
# perform initial training
weights, idx_ca, idx_oa = self._initial_training(meg_filt)
# get some parameter
nchan = self._picks.shape[0]
shift = int(self.shift_length * meg_filt.info['sfreq'])
nsteps = np.floor((self._ntsl - self._block)/shift) + 1
laststep = int(shift * nsteps)
# print out some information
if verbose:
print(">>>> calculating OCARTA")
print(" --> number of channels : %d" % nchan)
print(" --> number of timeslices : %d" % self._ntsl)
print(" --> dimension reduction method: %s" % self._dim_reduction)
if self._dim_reduction == 'explVar':
print(" --> explained variance : %g" % self.explVar)
print(" --> number of components : %d" % weights.shape[0])
print(" --> block size (in s) : %d" % self.seg_length)
print(" --> number of blocks : %d" % nsteps)
print(" --> block shift (in s) : %d" % self.shift_length)
print(" --> maxsteps training : %d" % (3 * self.maxsteps))
print(" --> maxsteps cleaning : %d" % self.maxsteps)
print(" --> costfunction CA : a0=%g, a1=%g" % (self.opt_cost_func_cardiac[0], self.opt_cost_func_cardiac[1]))
print(" --> costfunction OA : a0=%g, a1=%g" % (self.opt_cost_func_ocular[0], self.opt_cost_func_ocular[1]))
print("")
if self.flow or self.fhigh:
                    print(">>>> NOTE: Optimal cleaning parameters are estimated from filtered data!")
print(" However, cleaning is performed on unfiltered input data!")
print(filter_info)
print("")
# check if denoising is desired
sphering = self._pca.components_.copy()
if denoising:
full_var = np.sum(self._pca.explained_variance_)
exp_var_ratio = self._pca.explained_variance_ / full_var
npc_denoising = np.sum(exp_var_ratio.cumsum() <= denoising) + 1
if npc_denoising < self.npc:
npc_denoising = self.npc
sphering[npc_denoising:, :] = 0.
# now loop over all segments
for istep, t in enumerate(range(0, laststep, shift)):
# print out some information
if verbose:
print(">>>> Step %d of %d..." % (istep+1, nsteps))
# --------------------------------------
# Estimating un-mixing matrix and
# identify ICs related to artifacts
# --------------------------------------
idx_end = t+self._block # get index of last element
if (idx_end+shift+1) > self._ntsl:
idx_end = self._ntsl
weights, idx_ca, idx_oa = self._update_cleaning_information(meg_filt, t, idx_end,
initial_weights=weights.T,
ca_idx=idx_ca, oa_idx=idx_oa)
print("CA: %s, OA: %s" % (np.array_str(idx_ca), np.array_str(idx_oa)))
# get cleaning matrices
iweights = pinv(weights)
iweights[:, idx_ca] = 0. # remove columns related to CA
if len(idx_oa) > 0:
iweights[:, idx_oa] = 0. # remove columns related to OA
# transform data to ICA space
dnorm = (meg_raw._data[self._picks, t:idx_end] - self._pca.mean_[:, np.newaxis]) / self._pca.stddev_[:, np.newaxis]
pc = fast_dot(dnorm.T, sphering.T)
activations = fast_dot(weights, pc[:, :self.npc].T) # transform to ICA-space
# backtransform data
pc[:, :self.npc] = fast_dot(iweights, activations).T # back-transform to PCA-space
meg_clean._data[self._picks, t:idx_end] = fast_dot(pc, sphering).T * self._pca.stddev_[:, np.newaxis] + \
self._pca.mean_[:, np.newaxis] # back-transform to sensor-space
# write out some additional information
if verbose:
print("")
print(">>>> cleaning done!")
print(">>>> generate and save result files/images.")
# generate filenames for output files/images
basename = fn_raw[:-8]
if not fn_perf_img:
fn_perf_img = basename + ',ocarta-performance'
fn_topo = fn_perf_img[:fn_perf_img.rfind(',')] + ',ocarta_topoplot_oa'
fn_out = basename + ',ocarta-raw.fif'
# save cleaned data
meg_clean.save(fn_out, overwrite=True, verbose=False)
# generate topoplot image
if plot_template_OA and not np.all(self._template_OA == 0):
self.topoplot_oa(meg_raw.info, fn_img=fn_topo)
# generate performance image
plt_perf(meg_raw, None, fn_perf_img, meg_clean=meg_clean,
name_ecg=self.name_ecg, name_eog=self.name_eog)
# estimate performance values/frequency correlation
perf_ar, freq_corr_ar = self.performance(meg_raw, meg_clean)
self.performance_ca = perf_ar[0]
self.performance_oa = perf_ar[1]
self.freq_corr_ca = freq_corr_ar[0]
self.freq_corr_oa = freq_corr_ar[1]
return meg_clean, fn_out
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# to simplify the call of the JuMEG_ocarta() help
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
ocarta = JuMEG_ocarta()
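# ----------------------------------------------------------------------
# Hedged usage sketch (commented out; the file name below is hypothetical):
# a slightly more explicit variant of the example from the module
# docstring, showing some of the optional parameters of fit().
#
#   from jumeg.decompose import ocarta
#   oca = ocarta.JuMEG_ocarta(name_ecg='ECG 001', name_eog='EOG 002')
#   meg_clean, fn_out = oca.fit('/path/to/subject,bp1-45Hz-raw.fif',
#                               explVar=0.95, seg_length=30.0,
#                               shift_length=10.0, plot_template_OA=False)
# ----------------------------------------------------------------------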
| bsd-3-clause |
wdzhou/mantid | scripts/HFIR_4Circle_Reduction/mplgraphicsview.py | 3 | 54538 | #pylint: disable=invalid-name,too-many-public-methods,too-many-arguments,non-parent-init-called,R0902,too-many-branches,C0302
from __future__ import (absolute_import, division, print_function)
from six.moves import range
import os
import numpy as np
from PyQt4 import QtGui
from PyQt4.QtCore import pyqtSignal
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar2
from matplotlib.figure import Figure
import matplotlib.image
MplLineStyles = ['-', '--', '-.', ':', 'None', ' ', '']
MplLineMarkers = [
". (point )",
"* (star )",
"x (x )",
"o (circle )",
"s (square )",
"D (diamond )",
", (pixel )",
"v (triangle_down )",
"^ (triangle_up )",
"< (triangle_left )",
"> (triangle_right)",
"1 (tri_down )",
"2 (tri_up )",
"3 (tri_left )",
"4 (tri_right )",
"8 (octagon )",
"p (pentagon )",
"h (hexagon1 )",
"H (hexagon2 )",
"+ (plus )",
"d (thin_diamond )",
"| (vline )",
"_ (hline )",
"None (nothing )"]
# Note: in colors, "white" is removed
MplBasicColors = [
"black",
"red",
"blue",
"green",
"cyan",
"magenta",
"yellow"]
class IndicatorManager(object):
""" Manager for all indicator lines
Indicator's Type =
- 0: horizontal. moving along Y-direction. [x_min, x_max], [y, y];
- 1: vertical. moving along X-direction. [x, x], [y_min, y_max];
- 2: 2-way. moving in any direction. [x_min, x_max], [y, y], [x, x], [y_min, y_max].
"""
def __init__(self):
"""
:return:
"""
# Auto color index
self._colorIndex = 0
# Auto line ID
self._autoLineID = 1
self._lineManager = dict()
self._canvasLineKeyDict = dict()
self._indicatorTypeDict = dict() # value: 0 (horizontal), 1 (vertical), 2 (2-way)
return
def add_2way_indicator(self, x, x_min, x_max, y, y_min, y_max, color):
"""
:param x:
:param x_min:
:param x_max:
:param y:
:param y_min:
:param y_max:
:param color:
:return:
"""
# Set up indicator ID
this_id = str(self._autoLineID)
self._autoLineID += 1
# Set up vectors
vec_x_horizontal = np.array([x_min, x_max])
vec_y_horizontal = np.array([y, y])
vec_x_vertical = np.array([x, x])
vec_y_vertical = np.array([y_min, y_max])
#
self._lineManager[this_id] = [vec_x_horizontal, vec_y_horizontal, vec_x_vertical, vec_y_vertical, color]
self._indicatorTypeDict[this_id] = 2
return this_id
def add_horizontal_indicator(self, y, x_min, x_max, color):
"""
Add a horizontal indicator moving vertically
:param y:
:param x_min:
:param x_max:
:param color:
:return:
"""
# Get ID
this_id = self._autoLineID
self._autoLineID += 1
#
vec_x = np.array([x_min, x_max])
vec_y = np.array([y, y])
#
self._lineManager[this_id] = [vec_x, vec_y, color]
self._indicatorTypeDict[this_id] = 0
return this_id
def add_vertical_indicator(self, x, y_min, y_max, color):
"""
Add a vertical indicator to data structure moving horizontally
:return: indicator ID as an integer
"""
# Get ID
this_id = self._autoLineID
self._autoLineID += 1
# form vec x and vec y
vec_x = np.array([x, x])
vec_y = np.array([y_min, y_max])
#
self._lineManager[this_id] = [vec_x, vec_y, color]
self._indicatorTypeDict[this_id] = 1
return this_id
def delete(self, indicator_id):
"""
Delete indicator
"""
del self._lineManager[indicator_id]
del self._canvasLineKeyDict[indicator_id]
del self._indicatorTypeDict[indicator_id]
return
def get_canvas_line_index(self, indicator_id):
"""
Get a line's ID (on canvas) from an indicator ID
:param indicator_id:
:return:
"""
assert isinstance(indicator_id, int)
if indicator_id not in self._canvasLineKeyDict:
raise RuntimeError('Indicator ID %s cannot be found. Current keys are %s.' % (
indicator_id, str(sorted(self._canvasLineKeyDict.keys()))
))
return self._canvasLineKeyDict[indicator_id]
def get_line_type(self, my_id):
"""
:param my_id:
:return:
"""
return self._indicatorTypeDict[my_id]
def get_2way_data(self, line_id):
"""
:param line_id:
:return:
"""
assert line_id in self._indicatorTypeDict, 'Indicator ID %s is not registered.' % str(line_id)
assert self._indicatorTypeDict[line_id] == 2, 'Indicator %s is not a 2-way indicator.' % str(line_id)
vec_set = [self._lineManager[line_id][0:2], self._lineManager[line_id][2:4]]
return vec_set
def get_data(self, line_id):
"""
Get line's vector x and vector y
:param line_id:
:return: 2-tuple of numpy arrays
"""
return self._lineManager[line_id][0], self._lineManager[line_id][1]
def get_indicator_key(self, x, y):
""" Get indicator's key with position
:return:
"""
if x is None and y is None:
raise RuntimeError('It is not allowed to have both X and Y are none to get indicator key.')
ret_key = None
for line_key in self._lineManager.keys():
if x is not None and y is not None:
# 2 way
raise NotImplementedError('ASAP')
elif x is not None and self._indicatorTypeDict[line_key] == 1:
# vertical indicator moving along X
if abs(self._lineManager[line_key][0][0] - x) < 1.0E-2:
return line_key
elif y is not None and self._indicatorTypeDict[line_key] == 0:
# horizontal indicator moving along Y
if abs(self._lineManager[line_key][1][0] - y) < 1.0E-2:
return line_key
# END-FOR
return ret_key
@staticmethod
def get_line_style(line_id=None):
"""
:param line_id:
:return:
"""
if line_id is not None:
style = '--'
else:
style = '--'
return style
def get_live_indicator_ids(self):
"""
:return:
"""
return sorted(self._lineManager.keys())
@staticmethod
def get_marker():
"""
Get the marker a line
:return:
"""
return '.'
def get_next_color(self):
"""
Get next color by auto color index
:return: string as color
"""
next_color = MplBasicColors[self._colorIndex]
# Advance and possibly reset color scheme
self._colorIndex += 1
if self._colorIndex == len(MplBasicColors):
self._colorIndex = 0
return next_color
def set_canvas_line_index(self, my_id, canvas_line_index):
"""
:param my_id:
:param canvas_line_index:
:return:
"""
self._canvasLineKeyDict[my_id] = canvas_line_index
return
def set_position(self, my_id, pos_x, pos_y):
""" Set the indicator to a new position
:param line_id:
:param pos_x:
:param pos_y:
:return:
"""
if self._indicatorTypeDict[my_id] == 0:
# horizontal
self._lineManager[my_id][1][0] = pos_y
self._lineManager[my_id][1][1] = pos_y
elif self._indicatorTypeDict[my_id] == 1:
# vertical
self._lineManager[my_id][0][0] = pos_x
self._lineManager[my_id][0][1] = pos_x
elif self._indicatorTypeDict[my_id] == 2:
# 2-way
self._lineManager[my_id][0] = pos_x
self._lineManager[my_id][1] = pos_y
else:
raise RuntimeError('Unsupported indicator of type %d' % self._indicatorTypeDict[my_id])
self._lineManager[my_id][2] = 'black'
return
def shift(self, my_id, dx, dy):
"""
:param my_id:
:param dx:
:param dy:
:return:
"""
if self._indicatorTypeDict[my_id] == 0:
# horizontal
self._lineManager[my_id][1] += dy
elif self._indicatorTypeDict[my_id] == 1:
# vertical
self._lineManager[my_id][0] += dx
elif self._indicatorTypeDict[my_id] == 2:
# 2-way
self._lineManager[my_id][2] += dx
self._lineManager[my_id][1] += dy
else:
raise RuntimeError('Unsupported indicator of type %d' % self._indicatorTypeDict[my_id])
return
def update_indicators_range(self, x_range, y_range):
"""
Update indicator's range
:param x_range:
:param y_range:
:return:
"""
for i_id in self._lineManager.keys():
# stretch each indicator along the axis it spans, depending on its direction
if self._indicatorTypeDict[i_id] == 1:
# vertical indicator: spans the full (new) Y-range
self._lineManager[i_id][1][0] = y_range[0]
self._lineManager[i_id][1][-1] = y_range[1]
else:
# horizontal indicator (and the horizontal part of a 2-way): spans the full X-range
self._lineManager[i_id][0][0] = x_range[0]
self._lineManager[i_id][0][-1] = x_range[1]
return
class MplGraphicsView(QtGui.QWidget):
""" A combined graphics view including matplotlib canvas and
a navigation tool bar
Note: Merged with HFIR_Powder_Reduction.MplFigureCAnvas
"""
def __init__(self, parent):
""" Initialization
"""
# Initialize parent
QtGui.QWidget.__init__(self, parent)
# set up canvas
self._myCanvas = Qt4MplCanvas(self)
self._myToolBar = MyNavigationToolbar(self, self._myCanvas)
# state of operation
self._isZoomed = False
# X and Y limit with home button
self._homeXYLimit = None
# set up layout
self._vBox = QtGui.QVBoxLayout(self)
self._vBox.addWidget(self._myCanvas)
self._vBox.addWidget(self._myToolBar)
# auto line's maker+color list
self._myLineMarkerColorList = []
self._myLineMarkerColorIndex = 0
self.setAutoLineMarkerColorCombo()
# records for all the lines that are plot on the canvas
self._my1DPlotDict = dict()
# Declaration of class variables
self._indicatorKey = None
# Indicator manager
self._myIndicatorsManager = IndicatorManager()
# some statistic recorder for convenient operation
self._statDict = dict()
self._statRightPlotDict = dict()
return
def add_arrow(self, start_x, start_y, stop_x, stop_y):
"""
:param start_x:
:param start_y:
:param stop_x:
:param stop_y:
:return:
"""
self._myCanvas.add_arrow(start_x, start_y, stop_x, stop_y)
return
def add_line_set(self, vec_set, color, marker, line_style, line_width):
""" Add a set of line and manage together
:param vec_set:
:param color:
:param marker:
:param line_style:
:param line_width:
:return:
"""
key_list = list()
for vec_x, vec_y in vec_set:
temp_key = self._myCanvas.add_plot_1d(vec_x, vec_y, color=color, marker=marker,
line_style=line_style, line_width=line_width)
assert isinstance(temp_key, int)
assert temp_key >= 0
key_list.append(temp_key)
return key_list
def add_plot_1d(self, vec_x, vec_y, y_err=None, color=None, label='', x_label=None, y_label=None,
marker=None, line_style=None, line_width=1, show_legend=True):
"""
Add a 1-D plot to canvas
:param vec_x:
:param vec_y:
:param y_err:
:param color:
:param label:
:param x_label:
:param y_label:
:param marker:
:param line_style:
:param line_width:
:param show_legend:
:return: line ID (key to the line)
"""
line_id = self._myCanvas.add_plot_1d(vec_x, vec_y, y_err, color, label, x_label, y_label, marker, line_style,
line_width, show_legend)
return line_id
def add_plot_1d_right(self, vec_x, vec_y, color=None, label='', marker=None, line_style=None, line_width=1):
"""
Add 1 line (1-d plot) to right axis
:param vec_x:
:param vec_y:
:param color:
:param label:
:param marker:
:param line_style:
:param line_width:
:return:
"""
line_key = self._myCanvas.add_1d_plot_right(vec_x, vec_y, label=label,
color=color, marker=marker,
linestyle=line_style, linewidth=line_width)
self._statRightPlotDict[line_key] = (min(vec_x), max(vec_x), min(vec_y), max(vec_y))
return line_key
def add_2way_indicator(self, x=None, y=None, color=None, master_line=None):
""" Add a 2-way indicator following an existing line?
:param x:
:param y:
:param color:
:return:
"""
if master_line is not None:
raise RuntimeError('Implement how to use master_line ASAP.')
x_min, x_max = self._myCanvas.getXLimit()
if x is None:
x = (x_min + x_max) * 0.5
else:
assert isinstance(x, float)
y_min, y_max = self._myCanvas.getYLimit()
if y is None:
y = (y_min + y_max) * 0.5
else:
assert isinstance(y, float)
if color is None:
color = self._myIndicatorsManager.get_next_color()
else:
assert isinstance(color, str)
my_id = self._myIndicatorsManager.add_2way_indicator(x, x_min, x_max,
y, y_min, y_max,
color)
vec_set = self._myIndicatorsManager.get_2way_data(my_id)
canvas_line_index = self.add_line_set(vec_set, color=color,
marker=self._myIndicatorsManager.get_marker(),
line_style=self._myIndicatorsManager.get_line_style(),
line_width=1)
self._myIndicatorsManager.set_canvas_line_index(my_id, canvas_line_index)
return my_id
def add_horizontal_indicator(self, y=None, color=None):
""" Add an indicator line
"""
# Default
if y is None:
y_min, y_max = self._myCanvas.getYLimit()
y = (y_min + y_max) * 0.5
else:
assert isinstance(y, float)
x_min, x_max = self._myCanvas.getXLimit()
# For color
if color is None:
color = self._myIndicatorsManager.get_next_color()
else:
assert isinstance(color, str)
# Form
my_id = self._myIndicatorsManager.add_horizontal_indicator(y, x_min, x_max, color)
vec_x, vec_y = self._myIndicatorsManager.get_data(my_id)
canvas_line_index = self._myCanvas.add_plot_1d(vec_x=vec_x, vec_y=vec_y,
color=color, marker=self._myIndicatorsManager.get_marker(),
line_style=self._myIndicatorsManager.get_line_style(),
line_width=1)
self._myIndicatorsManager.set_canvas_line_index(my_id, canvas_line_index)
return my_id
def add_vertical_indicator(self, x=None, color=None, style=None, line_width=1):
"""
Add a vertical indicator line
Guarantees: an indicator is plot and its ID is returned
:param x: None as the automatic mode using default from middle of canvas
:param color: None as the automatic mode using default
:param style:
:return: indicator ID
"""
# For indicator line's position
if x is None:
x_min, x_max = self._myCanvas.getXLimit()
x = (x_min + x_max) * 0.5
else:
assert isinstance(x, float)
y_min, y_max = self._myCanvas.getYLimit()
# For color
if color is None:
color = self._myIndicatorsManager.get_next_color()
else:
assert isinstance(color, str)
# style
if style is None:
style = self._myIndicatorsManager.get_line_style()
# Form
my_id = self._myIndicatorsManager.add_vertical_indicator(x, y_min, y_max, color)
vec_x, vec_y = self._myIndicatorsManager.get_data(my_id)
canvas_line_index = self._myCanvas.add_plot_1d(vec_x=vec_x, vec_y=vec_y,
color=color, marker=self._myIndicatorsManager.get_marker(),
line_style=self._myIndicatorsManager.get_line_style(),
line_width=1)
self._myIndicatorsManager.set_canvas_line_index(my_id, canvas_line_index)
return my_id
def add_plot_2d(self, array2d, x_min, x_max, y_min, y_max, hold_prev_image=True, y_tick_label=None):
"""
Add a 2D image to canvas
:param array2d: numpy 2D array
:param x_min:
:param x_max:
:param y_min:
:param y_max:
:param hold_prev_image:
:param y_tick_label:
:return:
"""
self._myCanvas.addPlot2D(array2d, x_min, x_max, y_min, y_max, hold_prev_image, y_tick_label)
return
def addImage(self, imagefilename):
""" Add an image by file
"""
# check
if os.path.exists(imagefilename) is False:
raise NotImplementedError("Image file %s does not exist." % (imagefilename))
self._myCanvas.addImage(imagefilename)
return
def canvas(self):
""" Get the canvas
:return:
"""
return self._myCanvas
def clear_all_lines(self):
"""
"""
self._myCanvas.clear_all_1d_plots()
self._statRightPlotDict.clear()
self._statDict.clear()
self._my1DPlotDict.clear()
# about zoom
self._isZoomed = False
self._homeXYLimit = None
return
def clear_canvas(self):
""" Clear canvas
"""
# clear all the records
self._statDict.clear()
self._my1DPlotDict.clear()
# about zoom
self._isZoomed = False
self._homeXYLimit = None
return self._myCanvas.clear_canvas()
def draw(self):
""" Draw to commit the change
"""
return self._myCanvas.draw()
def evt_toolbar_home(self):
"""
Parameters
----------
Returns
-------
"""
# turn off zoom mode
self._isZoomed = False
return
def evt_view_updated(self):
""" Event handling as canvas size updated
:return:
"""
# update the indicator
new_x_range = self.getXLimit()
new_y_range = self.getYLimit()
self._myIndicatorsManager.update_indicators_range(new_x_range, new_y_range)
for indicator_key in self._myIndicatorsManager.get_live_indicator_ids():
canvas_line_id = self._myIndicatorsManager.get_canvas_line_index(indicator_key)
data_x, data_y = self._myIndicatorsManager.get_data(indicator_key)
self.updateLine(canvas_line_id, data_x, data_y)
# END-FOR
return
def evt_zoom_released(self):
"""
event for zoom is release
Returns
-------
"""
# record home XY limit if it is never zoomed
if self._isZoomed is False:
self._homeXYLimit = list(self.getXLimit())
self._homeXYLimit.extend(list(self.getYLimit()))
# END-IF
# set the state of being zoomed
self._isZoomed = True
return
def getPlot(self):
"""
"""
return self._myCanvas.getPlot()
def getLastPlotIndexKey(self):
""" Get ...
"""
return self._myCanvas.getLastPlotIndexKey()
def getXLimit(self):
""" Get limit of Y-axis
:return: 2-tuple as xmin, xmax
"""
return self._myCanvas.getXLimit()
def getYLimit(self):
""" Get limit of Y-axis
"""
return self._myCanvas.getYLimit()
def get_y_min(self):
"""
Get the minimum Y value of the plots on canvas
:return:
"""
if len(self._statDict) == 0:
return 1E10
line_id_list = list(self._statDict.keys())
min_y = self._statDict[line_id_list[0]][2]
for i_plot in range(1, len(line_id_list)):
if self._statDict[line_id_list[i_plot]][2] < min_y:
min_y = self._statDict[line_id_list[i_plot]][2]
return min_y
def get_y_max(self):
"""
Get the maximum Y value of the plots on canvas
:return:
"""
if len(self._statDict) == 0:
return -1E10
line_id_list = list(self._statDict.keys())
max_y = self._statDict[line_id_list[0]][3]
for i_plot in range(1, len(line_id_list)):
if self._statDict[line_id_list[i_plot]][3] > max_y:
max_y = self._statDict[line_id_list[i_plot]][3]
return max_y
def move_indicator(self, line_id, dx, dy):
"""
Move the indicator line in horizontal
:param line_id:
:param dx:
:return:
"""
# Shift value
self._myIndicatorsManager.shift(line_id, dx=dx, dy=dy)
# apply to plot on canvas
if self._myIndicatorsManager.get_line_type(line_id) < 2:
# horizontal or vertical
canvas_line_index = self._myIndicatorsManager.get_canvas_line_index(line_id)
vec_x, vec_y = self._myIndicatorsManager.get_data(line_id)
self._myCanvas.updateLine(ikey=canvas_line_index, vecx=vec_x, vecy=vec_y)
else:
# 2-way
canvas_line_index_h, canvas_line_index_v = self._myIndicatorsManager.get_canvas_line_index(line_id)
h_vec_set, v_vec_set = self._myIndicatorsManager.get_2way_data(line_id)
self._myCanvas.updateLine(ikey=canvas_line_index_h, vecx=h_vec_set[0], vecy=h_vec_set[1])
self._myCanvas.updateLine(ikey=canvas_line_index_v, vecx=v_vec_set[0], vecy=v_vec_set[1])
return
def remove_indicator(self, indicator_key):
""" Remove indicator line
:param indicator_key:
:return:
"""
#
plot_id = self._myIndicatorsManager.get_canvas_line_index(indicator_key)
self._myCanvas.remove_plot_1d(plot_id)
self._myIndicatorsManager.delete(indicator_key)
return
def remove_line(self, line_id):
""" Remove a line
:param line_id:
:return:
"""
# remove line
self._myCanvas.remove_plot_1d(line_id)
# remove the records
if line_id in self._statDict:
del self._statDict[line_id]
del self._my1DPlotDict[line_id]
else:
del self._statRightPlotDict[line_id]
return
def set_indicator_position(self, line_id, pos_x, pos_y):
""" Set the indicator to new position
:param line_id:
:param pos_x:
:param pos_y:
:return:
"""
# Set value
self._myIndicatorsManager.set_position(line_id, pos_x, pos_y)
# apply to plot on canvas
if self._myIndicatorsManager.get_line_type(line_id) < 2:
# horizontal or vertical
canvas_line_index = self._myIndicatorsManager.get_canvas_line_index(line_id)
vec_x, vec_y = self._myIndicatorsManager.get_data(line_id)
self._myCanvas.updateLine(ikey=canvas_line_index, vecx=vec_x, vecy=vec_y)
else:
# 2-way
canvas_line_index_h, canvas_line_index_v = self._myIndicatorsManager.get_canvas_line_index(line_id)
h_vec_set, v_vec_set = self._myIndicatorsManager.get_2way_data(line_id)
self._myCanvas.updateLine(ikey=canvas_line_index_h, vecx=h_vec_set[0], vecy=h_vec_set[1])
self._myCanvas.updateLine(ikey=canvas_line_index_v, vecx=v_vec_set[0], vecy=v_vec_set[1])
return
def removePlot(self, ikey):
"""
"""
return self._myCanvas.remove_plot_1d(ikey)
def updateLine(self, ikey, vecx=None, vecy=None, linestyle=None, linecolor=None, marker=None, markercolor=None):
"""
update a line's set up
Parameters
----------
ikey
vecx
vecy
linestyle
linecolor
marker
markercolor
Returns
-------
"""
# check
assert isinstance(ikey, int), 'Line key must be an integer.'
assert ikey in self._my1DPlotDict, 'Line with ID %d is not on canvas. ' % ikey
return self._myCanvas.updateLine(ikey, vecx, vecy, linestyle, linecolor, marker, markercolor)
def update_indicator(self, i_key, color):
"""
Update indicator with new color
:param i_key:
:param vec_x:
:param vec_y:
:param color:
:return:
"""
if self._myIndicatorsManager.get_line_type(i_key) < 2:
# horizontal or vertical
canvas_line_index = self._myIndicatorsManager.get_canvas_line_index(i_key)
self._myCanvas.updateLine(ikey=canvas_line_index, vecx=None, vecy=None, linecolor=color)
else:
# 2-way
canvas_line_index_h, canvas_line_index_v = self._myIndicatorsManager.get_canvas_line_index(i_key)
# h_vec_set, v_vec_set = self._myIndicatorsManager.get_2way_data(i_key)
self._myCanvas.updateLine(ikey=canvas_line_index_h, vecx=None, vecy=None, linecolor=color)
self._myCanvas.updateLine(ikey=canvas_line_index_v, vecx=None, vecy=None, linecolor=color)
return
def get_canvas(self):
"""
get canvas
Returns:
"""
return self._myCanvas
def get_current_plots(self):
"""
Get the current plots on canvas
Returns
-------
list of 2-tuple: integer (plot ID) and string (label)
"""
tuple_list = list()
line_id_list = sorted(self._my1DPlotDict.keys())
for line_id in line_id_list:
tuple_list.append((line_id, self._my1DPlotDict[line_id]))
return tuple_list
def get_indicator_key(self, x, y):
""" Get the key of the indicator with given position
:param picker_pos:
:return:
"""
return self._myIndicatorsManager.get_indicator_key(x, y)
def get_indicator_position(self, indicator_key):
""" Get position (x or y) of the indicator
:param indicator_key
:return: a tuple. (0) horizontal (x, x); (1) vertical (y, y); (2) 2-way (x, y)
"""
# Get indicator's type
indicator_type = self._myIndicatorsManager.get_line_type(indicator_key)
if indicator_type < 2:
# horizontal or vertical indicator
x, y = self._myIndicatorsManager.get_data(indicator_key)
if indicator_type == 0:
# horizontal
return y[0], y[0]
elif indicator_type == 1:
# vertical
return x[0], x[0]
else:
# 2-way
raise RuntimeError('Implement 2-way as soon as possible!')
return
def getLineStyleList(self):
"""
"""
return MplLineStyles
def getLineMarkerList(self):
"""
"""
return MplLineMarkers
def getLineBasicColorList(self):
"""
"""
return MplBasicColors
def getDefaultColorMarkerComboList(self):
""" Get a list of line/marker color and marker style combination
as default to add more and more line to plot
"""
return self._myCanvas.getDefaultColorMarkerComboList()
def getNextLineMarkerColorCombo(self):
""" As auto line's marker and color combo list is used,
get the NEXT marker/color combo
"""
# get from list
marker, color = self._myLineMarkerColorList[self._myLineMarkerColorIndex]
# process marker if it has information
if marker.count(' (') > 0:
marker = marker.split(' (')[0]
# update the index
self._myLineMarkerColorIndex += 1
if self._myLineMarkerColorIndex == len(self._myLineMarkerColorList):
self._myLineMarkerColorIndex = 0
return marker, color
def reset_line_color_marker_index(self):
""" Reset the auto index for line's color and style
"""
self._myLineMarkerColorIndex = 0
return
def set_title(self, title, color='black'):
"""
set title to canvas
:param title:
:param color:
:return:
"""
self._myCanvas.set_title(title, color)
return
def setXYLimit(self, xmin=None, xmax=None, ymin=None, ymax=None):
""" Set X-Y limit automatically
"""
self._myCanvas.axes.set_xlim([xmin, xmax])
self._myCanvas.axes.set_ylim([ymin, ymax])
self._myCanvas.draw()
return
def setAutoLineMarkerColorCombo(self):
""" Set the default/auto line marker/color combination list
"""
self._myLineMarkerColorList = list()
for marker in MplLineMarkers:
for color in MplBasicColors:
self._myLineMarkerColorList.append((marker, color))
return
def setLineMarkerColorIndex(self, newindex):
"""
"""
self._myLineMarkerColorIndex = newindex
return
class Qt4MplCanvas(FigureCanvas):
""" A customized Qt widget for matplotlib figure.
It can be used to replace GraphicsView of QtGui
"""
def __init__(self, parent):
""" Initialization
"""
# from mpl_toolkits.axes_grid1 import host_subplot
# import mpl_toolkits.axisartist as AA
# import matplotlib.pyplot as plt
# Instantiating matplotlib Figure
self.fig = Figure()
self.fig.patch.set_facecolor('white')
if True:
self.axes = self.fig.add_subplot(111) # return: matplotlib.axes.AxesSubplot
self.fig.subplots_adjust(bottom=0.15)
self.axes2 = None
else:
self.axes = self.fig.add_host_subplot(111)
# Initialize parent class and set parent
FigureCanvas.__init__(self, self.fig)
self.setParent(parent)
# Set size policy to be able to expanding and resizable with frame
FigureCanvas.setSizePolicy(self, QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
# Variables to manage all lines/subplot
self._lineDict = {}
self._lineIndex = 0
# legend and color bar
self._colorBar = None
self._isLegendOn = False
self._legendFontSize = 8
return
@property
def is_legend_on(self):
"""
check whether the legend is shown or hide
Returns:
boolean
"""
return self._isLegendOn
def add_arrow(self, start_x, start_y, stop_x, stop_y):
"""
Add an arrow from (start_x, start_y) to (stop_x, stop_y), analogous to axes.arrow(0, 0, 0.5, 0.5, head_width=0.05, head_length=0.1, fc='k', ec='k')
:return:
"""
head_width = 0.05
head_length = 0.1
fc = 'k'
ec = 'k'
self.axes.arrow(start_x, start_y, stop_x - start_x, stop_y - start_y, head_width=head_width, head_length=head_length, fc=fc, ec=ec)
return
def add_plot_1d(self, vec_x, vec_y, y_err=None, color=None, label="", x_label=None, y_label=None,
marker=None, line_style=None, line_width=1, show_legend=True):
"""
:param vec_x: numpy array X
:param vec_y: numpy array Y
:param y_err:
:param color:
:param label:
:param x_label:
:param y_label:
:param marker:
:param line_style:
:param line_width:
:param show_legend:
:return: new key
"""
# Check input
if isinstance(vec_x, np.ndarray) is False or isinstance(vec_y, np.ndarray) is False:
raise NotImplementedError('Input vec_x {0} or vec_y {1} for addPlot() must be numpy.array, but they are '
'{2} and {3}.'.format(vec_x, vec_y, type(vec_x), type(vec_y)))
plot_error = y_err is not None
if plot_error is True:
if isinstance(y_err, np.ndarray) is False:
raise NotImplementedError('Input y_err must be either None or numpy.array.')
if len(vec_x) != len(vec_y):
raise NotImplementedError('Input vec_x and vec_y must have same size.')
if plot_error is True and len(y_err) != len(vec_x):
raise NotImplementedError('Input vec_x, vec_y and y_error must have same size.')
# Hold previous data
self.axes.hold(True)
# set x-axis and y-axis label
if x_label is not None:
self.axes.set_xlabel(x_label, fontsize=16)
if y_label is not None:
self.axes.set_ylabel(y_label, fontsize=16)
# process inputs and defaults
if color is None:
color = (0, 1, 0, 1)
if marker is None:
marker = 'None'
if line_style is None:
line_style = '-'
# color must be RGBA (4-tuple)
if plot_error is False:
# return: list of matplotlib.lines.Line2D object
r = self.axes.plot(vec_x, vec_y, color=color, marker=marker, markersize=1, linestyle=line_style,
label=label, linewidth=line_width)
else:
r = self.axes.errorbar(vec_x, vec_y, yerr=y_err, color=color, marker=marker, linestyle=line_style,
label=label, linewidth=line_width)
self.axes.set_aspect('auto')
# set x-axis and y-axis label
if x_label is not None:
self.axes.set_xlabel(x_label, fontsize=20)
if y_label is not None:
self.axes.set_ylabel(y_label, fontsize=20)
# set/update legend
if show_legend:
self._setup_legend()
# Register
line_key = self._lineIndex
if plot_error:
msg = 'Return from plot is a {0}-tuple: {1} with plot error is {2}\n'.format(len(r), r, plot_error)
for i_r in range(len(r)):
msg += 'r[%d] = %s\n' % (i_r, str(r[i_r]))
raise NotImplementedError(msg)
else:
assert len(r) > 0, 'There must be at least 1 line object returned from the plot call'
self._lineDict[line_key] = r[0]
self._lineIndex += 1
for i_r in range(1, len(r)):
# remove the un-defined extra lines
self.axes.lines.remove(r[i_r])
# END-IF-ELSE
# Flush/commit
self.draw()
return line_key
def add_1d_plot_right(self, x, y, color=None, label="", x_label=None, ylabel=None, marker=None, linestyle=None,
linewidth=1):
""" Add a line (1-d plot) at right axis
"""
if self.axes2 is None:
self.axes2 = self.axes.twinx()
# Hold previous data
self.axes2.hold(True)
# Default
if color is None:
color = (0, 1, 0, 1)
if marker is None:
marker = 'o'
if linestyle is None:
linestyle = '-'
# Special default
if len(label) == 0:
label = 'right'
color = 'red'
# color must be RGBA (4-tuple)
r = self.axes2.plot(x, y, color=color, marker=marker, linestyle=linestyle,
label=label, linewidth=linewidth)
# return: list of matplotlib.lines.Line2D object
self.axes2.set_aspect('auto')
# set x-axis and y-axis label
if x_label is not None:
self.axes2.set_xlabel(x_label, fontsize=20)
if ylabel is not None:
self.axes2.set_ylabel(ylabel, fontsize=20)
# set/update legend
self._setup_legend()
# Register
line_key = -1
if len(r) == 1:
line_key = self._lineIndex
self._lineDict[line_key] = r[0]
self._lineIndex += 1
else:
print("Impoooooooooooooooosible!")
# Flush/commit
self.draw()
return line_key
def addPlot2D(self, array2d, xmin, xmax, ymin, ymax, holdprev, yticklabels=None):
""" Add a 2D plot
Arguments:
- yticklabels :: list of string for y ticks
"""
# Release the current image
self.axes.hold(holdprev)
# Do plot
# y ticks will be shown on line 1, 4, 23, 24 and 30
# yticks = [1, 4, 23, 24, 30]
# self.axes.set_yticks(yticks)
# show image
imgplot = self.axes.imshow(array2d, extent=[xmin, xmax, ymin, ymax], interpolation='none')
# TODO/ISSUE/55: how to make this part more powerful
# set y ticks as an option:
if yticklabels is not None:
# it will always label the first N ticks even image is zoomed in
print("--------> [FixMe]: The way to set up the Y-axis ticks is wrong!")
#self.axes.set_yticklabels(yticklabels)
# explicitly set aspect ratio of the image
self.axes.set_aspect('auto')
# Set color bar. plt.colorbar() does not work!
if self._colorBar is None:
# set color map type
imgplot.set_cmap('spectral')
self._colorBar = self.fig.colorbar(imgplot)
else:
self._colorBar.update_bruteforce(imgplot)
# Flush...
self._flush()
return
def add_contour_plot(self, vec_x, vec_y, matrix_z):
"""
:param vec_x:
:param vec_y:
:param matrix_z:
:return:
"""
# create mesh grid
grid_x, grid_y = np.meshgrid(vec_x, vec_y)
# check size
assert grid_x.shape == matrix_z.shape, 'Size of X (%d) and Y (%d) must match size of Z (%s).' \
'' % (len(vec_x), len(vec_y), matrix_z.shape)
# Release the current image
self.axes.hold(False)
# Do plot
contour_plot = self.axes.contourf(grid_x, grid_y, matrix_z, 100)
labels = [item.get_text() for item in self.axes.get_yticklabels()]
print('[DB...BAT] Number of Y labels = ', len(labels), ', Number of Y = ', len(vec_y))
# TODO/ISSUE/55: how to make this part more powerful
if len(labels) == 2*len(vec_y) - 1:
new_labels = [''] * len(labels)
for i in range(len(vec_y)):
new_labels[i*2] = '%d' % int(vec_y[i])
self.axes.set_yticklabels(new_labels)
# explicitly set aspect ratio of the image
self.axes.set_aspect('auto')
# Set color bar. plt.colorbar() does not work!
if self._colorBar is None:
# set color map type
contour_plot.set_cmap('spectral')
self._colorBar = self.fig.colorbar(contour_plot)
else:
self._colorBar.update_bruteforce(contour_plot)
# Flush...
self._flush()
def addImage(self, imagefilename):
""" Add an image by file
"""
#import matplotlib.image as mpimg
# set aspect to auto mode
self.axes.set_aspect('auto')
img = matplotlib.image.imread(str(imagefilename))
# lum_img = img[:,:,0]
# FUTURE : refactor for image size, interpolation and origin
imgplot = self.axes.imshow(img, extent=[0, 1000, 800, 0], interpolation='none', origin='lower')
# Set color bar. plt.colorbar() does not work!
if self._colorBar is None:
# set color map type
imgplot.set_cmap('spectral')
self._colorBar = self.fig.colorbar(imgplot)
else:
self._colorBar.update_bruteforce(imgplot)
self._flush()
return
def clear_all_1d_plots(self):
""" Remove all lines from the canvas
"""
for ikey in list(self._lineDict.keys()):
plot = self._lineDict[ikey]
if plot is None:
continue
if isinstance(plot, tuple) is False:
try:
self.axes.lines.remove(plot)
except ValueError as e:
print("[Error] Plot %s is not in axes.lines which has %d lines. Error mesage: %s" % (
str(plot), len(self.axes.lines), str(e)))
del self._lineDict[ikey]
else:
# error bar
plot[0].remove()
for line in plot[1]:
line.remove()
for line in plot[2]:
line.remove()
del self._lineDict[ikey]
# ENDIF(plot)
# ENDFOR
self._setup_legend()
self.draw()
return
def clear_canvas(self):
""" Clear data including lines and image from canvas
"""
# clear the image for next operation
self.axes.hold(False)
# Clear all lines
self.clear_all_1d_plots()
# clear image
self.axes.cla()
# Try to clear the color bar
if len(self.fig.axes) > 1:
self.fig.delaxes(self.fig.axes[1])
self._colorBar = None
# This clears the space claimed by color bar but destroys sub_plot too.
self.fig.clear()
# Re-create subplot
self.axes = self.fig.add_subplot(111)
self.fig.subplots_adjust(bottom=0.15)
# flush/commit
self._flush()
return
def decrease_legend_font_size(self):
"""
reset the legend with the new font size
Returns:
"""
# minimum legend font size is 2! return if it already uses the smallest font size.
if self._legendFontSize <= 2:
return
self._legendFontSize -= 1
self._setup_legend(font_size=self._legendFontSize)
self.draw()
return
def getLastPlotIndexKey(self):
""" Get the index/key of the last added line
"""
return self._lineIndex-1
def getPlot(self):
""" reture figure's axes to expose the matplotlib figure to PyQt client
"""
return self.axes
def getXLimit(self):
""" Get limit of Y-axis
"""
return self.axes.get_xlim()
def getYLimit(self):
""" Get limit of Y-axis
"""
return self.axes.get_ylim()
def hide_legend(self):
"""
hide the legend if it is not None
Returns:
"""
if self.axes.legend() is not None:
# set visible to be False and re-draw
self.axes.legend().set_visible(False)
self.draw()
self._isLegendOn = False
return
def increase_legend_font_size(self):
"""
reset the legend with the new font size
Returns:
"""
self._legendFontSize += 1
self._setup_legend(font_size=self._legendFontSize)
self.draw()
return
def setXYLimit(self, xmin, xmax, ymin, ymax):
"""
"""
# for X
xlims = self.axes.get_xlim()
xlims = list(xlims)
if xmin is not None:
xlims[0] = xmin
if xmax is not None:
xlims[1] = xmax
self.axes.set_xlim(xlims)
# for Y
ylims = self.axes.get_ylim()
ylims = list(ylims)
if ymin is not None:
ylims[0] = ymin
if ymax is not None:
ylims[1] = ymax
self.axes.set_ylim(ylims)
# try draw
self.draw()
return
def set_title(self, title, color, location='center'):
"""
set title to the figure (canvas) with default location at center
:param title:
:param color:
:param location
:return:
"""
# check input
assert isinstance(title, str), 'Title {0} must be a string but not a {1}.'.format(title, type(title))
assert isinstance(color, str) and len(color) > 0, 'Color {0} must be a non-empty string but not a {1}.' \
''.format(color, type(color))
assert isinstance(location, str) and len(location) > 0, 'Location {0} must be a non-empty string but not a {1}.' \
''.format(location, type(location))
# set title and re-draw to apply
self.axes.set_title(title, loc=location, color=color)
self.draw()
return
def remove_plot_1d(self, plot_key):
""" Remove the line with its index as key
:param plot_key:
:return:
"""
# Get all lines in list
lines = self.axes.lines
assert isinstance(lines, list), 'Lines must be list'
if plot_key in self._lineDict:
try:
self.axes.lines.remove(self._lineDict[plot_key])
except ValueError as r_error:
error_message = 'Unable to remove to 1D line %s (ID=%d) due to %s.' % (str(self._lineDict[plot_key]),
plot_key, str(r_error))
raise RuntimeError(error_message)
# remove the plot key from dictionary
del self._lineDict[plot_key]
else:
raise RuntimeError('Line with ID %s is not recorded.' % plot_key)
self._setup_legend(location='best', font_size=self._legendFontSize)
# Draw
self.draw()
return
def show_legend(self):
"""
show the legend if the legend is not None
Returns:
"""
if self.axes.legend() is not None:
# set visible to be True and re-draw
# self.axes.legend().set_visible(True)
self._setup_legend(font_size=self._legendFontSize)
self.draw()
# set flag on
self._isLegendOn = True
return
def updateLine(self, ikey, vecx=None, vecy=None, linestyle=None, linecolor=None, marker=None, markercolor=None):
"""
Update a plot line or a series plot line
Args:
ikey:
vecx:
vecy:
linestyle:
linecolor:
marker:
markercolor:
Returns:
"""
line = self._lineDict[ikey]
if line is None:
print('[ERROR] Line (key = %d) is None. Unable to update' % ikey)
return
if vecx is not None and vecy is not None:
line.set_xdata(vecx)
line.set_ydata(vecy)
if linecolor is not None:
line.set_color(linecolor)
if linestyle is not None:
line.set_linestyle(linestyle)
if marker is not None:
line.set_marker(marker)
if markercolor is not None:
line.set_markerfacecolor(markercolor)
oldlabel = line.get_label()
line.set_label(oldlabel)
self._setup_legend()
# commit
self.draw()
return
def get_data(self, line_id):
"""
Get vecX and vecY from line object in matplotlib
:param line_id:
:return: 2-tuple as vector X and vector Y
"""
# check
if line_id not in self._lineDict:
raise KeyError('Line ID %s does not exist.' % str(line_id))
# get line
line = self._lineDict[line_id]
if line is None:
raise RuntimeError('Line ID %s has been removed.' % line_id)
return line.get_xdata(), line.get_ydata()
def getLineStyleList(self):
"""
"""
return MplLineStyles
def getLineMarkerList(self):
"""
"""
return MplLineMarkers
def getLineBasicColorList(self):
"""
"""
return MplBasicColors
def getDefaultColorMarkerComboList(self):
""" Get a list of line/marker color and marker style combination
as default to add more and more line to plot
"""
combo_list = list()
num_markers = len(MplLineMarkers)
num_colors = len(MplBasicColors)
for i in range(num_markers):
marker = MplLineMarkers[i]
for j in range(num_colors):
color = MplBasicColors[j]
combo_list.append((marker, color))
# ENDFOR (j)
# ENDFOR(i)
return combo_list
def _flush(self):
""" A dirty hack to flush the image
"""
w, h = self.get_width_height()
self.resize(w+1, h)
self.resize(w, h)
return
def _setup_legend(self, location='best', font_size=10):
"""
Set up legend
self.axes.legend(): handler is a Line2D object; the label maps to the line object
Args:
location:
font_size:
Returns:
"""
allowed_location_list = [
"best",
"upper right",
"upper left",
"lower left",
"lower right",
"right",
"center left",
"center right",
"lower center",
"upper center",
"center"]
# Check legend location valid or not
if location not in allowed_location_list:
location = 'best'
handles, labels = self.axes.get_legend_handles_labels()
self.axes.legend(handles, labels, loc=location, fontsize=font_size)
self._isLegendOn = True
return
# END-OF-CLASS (MplGraphicsView)
class MyNavigationToolbar(NavigationToolbar2):
""" A customized navigation tool bar attached to canvas
Note:
* home, left, right: will not disable zoom/pan mode
* zoom and pan: will turn on/off both's mode
Other methods
* drag_pan(self, event): event handling method for dragging canvas in pan-mode
"""
NAVIGATION_MODE_NONE = 0
NAVIGATION_MODE_PAN = 1
NAVIGATION_MODE_ZOOM = 2
# This defines a signal called 'home_button_pressed', emitted (without
# arguments) when the home button is pressed
home_button_pressed = pyqtSignal()
# This defines a signal called 'canvas_zoom_released'
canvas_zoom_released = pyqtSignal()
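# Both signals are connected in __init__ to handlers on the parent view
# (evt_toolbar_home and evt_zoom_released), which lets MplGraphicsView keep
# track of whether the canvas is currently zoomed.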
def __init__(self, parent, canvas):
""" Initialization
built-in methods
- drag_zoom(self, event): triggered during holding the mouse and moving
"""
NavigationToolbar2.__init__(self, canvas, canvas)
# parent
self._myParent = parent
# tool bar mode
self._myMode = MyNavigationToolbar.NAVIGATION_MODE_NONE
# connect the events to parent
self.home_button_pressed.connect(self._myParent.evt_toolbar_home)
self.canvas_zoom_released.connect(self._myParent.evt_zoom_released)
return
@property
def is_zoom_mode(self):
"""
check whether the tool bar is in zoom mode
Returns
-------
"""
return self._myMode == MyNavigationToolbar.NAVIGATION_MODE_ZOOM
def get_mode(self):
"""
:return: integer as none/pan/zoom mode
"""
return self._myMode
# Overriding base's methods
def draw(self):
"""
Canvas is drawn called by pan(), zoom()
:return:
"""
NavigationToolbar2.draw(self)
self._myParent.evt_view_updated()
return
def home(self, *args):
"""
Parameters
----------
args
Returns
-------
"""
# call super's home() method
NavigationToolbar2.home(self, args)
# send a signal to parent class for further operation
self.home_button_pressed.emit()
return
def pan(self, *args):
"""
:param args:
:return:
"""
NavigationToolbar2.pan(self, args)
if self._myMode == MyNavigationToolbar.NAVIGATION_MODE_PAN:
# out of pan mode
self._myMode = MyNavigationToolbar.NAVIGATION_MODE_NONE
else:
# into pan mode
self._myMode = MyNavigationToolbar.NAVIGATION_MODE_PAN
print('PANNED')
return
def zoom(self, *args):
"""
Turn on/off zoom (zoom button)
:param args:
:return:
"""
NavigationToolbar2.zoom(self, args)
if self._myMode == MyNavigationToolbar.NAVIGATION_MODE_ZOOM:
# out of zoom mode
self._myMode = MyNavigationToolbar.NAVIGATION_MODE_NONE
else:
# into zoom mode
self._myMode = MyNavigationToolbar.NAVIGATION_MODE_ZOOM
return
def release_zoom(self, event):
"""
override zoom released method
Parameters
----------
event
Returns
-------
"""
self.canvas_zoom_released.emit()
NavigationToolbar2.release_zoom(self, event)
return
def _update_view(self):
"""
view update called by home(), back() and forward()
:return:
"""
NavigationToolbar2._update_view(self)
self._myParent.evt_view_updated()
return
| gpl-3.0 |
pompiduskus/scikit-learn | benchmarks/bench_random_projections.py | 397 | 8900 | """
===========================
Random projection benchmark
===========================
Benchmarks for random projections.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import collections
import numpy as np
import scipy.sparse as sp
from sklearn import clone
from sklearn.externals.six.moves import xrange
from sklearn.random_projection import (SparseRandomProjection,
GaussianRandomProjection,
johnson_lindenstrauss_min_dim)
def type_auto_or_float(val):
if val == "auto":
return "auto"
else:
return float(val)
def type_auto_or_int(val):
if val == "auto":
return "auto"
else:
return int(val)
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_scikit_transformer(X, transfomer):
gc.collect()
clf = clone(transfomer)
# start time
t_start = datetime.now()
clf.fit(X)
delta = (datetime.now() - t_start)
# stop time
time_to_fit = compute_time(t_start, delta)
# start time
t_start = datetime.now()
clf.transform(X)
delta = (datetime.now() - t_start)
# stop time
time_to_transform = compute_time(t_start, delta)
return time_to_fit, time_to_transform
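# Illustrative call (not executed here): the transformer is cloned, fitted and
# applied once, and wall-clock seconds for each phase are returned, e.g.
#     t_fit, t_transform = bench_scikit_transformer(
#         np.random.rand(500, 10000), SparseRandomProjection(n_components=100))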
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros,
random_state=None):
rng = np.random.RandomState(random_state)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
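# Example (illustrative only): 500 samples, 10**4 features, ~0.1% non-zero entries
#     X_dense, X_csr = make_sparse_random_data(500, 10 ** 4, 5000, random_state=0)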
def print_row(clf_type, time_fit, time_transform):
print("%s | %s | %s" % (clf_type.ljust(30),
("%.4fs" % time_fit).center(12),
("%.4fs" % time_transform).center(12)))
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-features",
dest="n_features", default=10 ** 4, type=int,
help="Number of features in the benchmarks")
op.add_option("--n-components",
dest="n_components", default="auto",
help="Size of the random subspace."
" ('auto' or int > 0)")
op.add_option("--ratio-nonzeros",
dest="ratio_nonzeros", default=10 ** -3, type=float,
help="Number of features in the benchmarks")
op.add_option("--n-samples",
dest="n_samples", default=500, type=int,
help="Number of samples in the benchmarks")
op.add_option("--random-seed",
dest="random_seed", default=13, type=int,
help="Seed used by the random number generators.")
op.add_option("--density",
dest="density", default=1 / 3,
help="Density used by the sparse random projection."
" ('auto' or float (0.0, 1.0]")
op.add_option("--eps",
dest="eps", default=0.5, type=float,
help="See the documentation of the underlying transformers.")
op.add_option("--transformers",
dest="selected_transformers",
default='GaussianRandomProjection,SparseRandomProjection',
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. Available: "
"GaussianRandomProjection,SparseRandomProjection")
op.add_option("--dense",
dest="dense",
default=False,
action="store_true",
help="Set input space as a dense matrix.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
opts.n_components = type_auto_or_int(opts.n_components)
opts.density = type_auto_or_float(opts.density)
selected_transformers = opts.selected_transformers.split(',')
###########################################################################
# Generate dataset
###########################################################################
n_nonzeros = int(opts.ratio_nonzeros * opts.n_features)
print('Dataset statistics')
print("===========================")
print('n_samples \t= %s' % opts.n_samples)
print('n_features \t= %s' % opts.n_features)
if opts.n_components == "auto":
print('n_components \t= %s (auto)' %
johnson_lindenstrauss_min_dim(n_samples=opts.n_samples,
eps=opts.eps))
else:
print('n_components \t= %s' % opts.n_components)
print('n_elements \t= %s' % (opts.n_features * opts.n_samples))
print('n_nonzeros \t= %s per feature' % n_nonzeros)
print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros)
print('')
###########################################################################
# Set transformer input
###########################################################################
transformers = {}
###########################################################################
# Set GaussianRandomProjection input
gaussian_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed
}
transformers["GaussianRandomProjection"] = \
GaussianRandomProjection(**gaussian_matrix_params)
###########################################################################
# Set SparseRandomProjection input
sparse_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed,
"density": opts.density,
"eps": opts.eps,
}
transformers["SparseRandomProjection"] = \
SparseRandomProjection(**sparse_matrix_params)
###########################################################################
# Perform benchmark
###########################################################################
time_fit = collections.defaultdict(list)
time_transform = collections.defaultdict(list)
print('Benchmarks')
print("===========================")
print("Generate dataset benchmarks... ", end="")
X_dense, X_sparse = make_sparse_random_data(opts.n_samples,
opts.n_features,
n_nonzeros,
random_state=opts.random_seed)
X = X_dense if opts.dense else X_sparse
print("done")
for name in selected_transformers:
print("Perform benchmarks for %s..." % name)
for iteration in xrange(opts.n_times):
print("\titer %s..." % iteration, end="")
time_to_fit, time_to_transform = bench_scikit_transformer(X_dense,
transformers[name])
time_fit[name].append(time_to_fit)
time_transform[name].append(time_to_transform)
print("done")
print("")
###########################################################################
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Transformer performance:")
print("===========================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
print("%s | %s | %s" % ("Transformer".ljust(30),
"fit".center(12),
"transform".center(12)))
print(31 * "-" + ("|" + "-" * 14) * 2)
for name in sorted(selected_transformers):
print_row(name,
np.mean(time_fit[name]),
np.mean(time_transform[name]))
print("")
print("")
| bsd-3-clause |
dcprojects/CoolProp | dev/TTSE/check_TTSE_v4.py | 3 | 12652 | import CoolProp.CoolProp as CP
import matplotlib
matplotlib.rc('font', family='serif', serif='Times New Roman')
#from matplotlib2tikz import save as tikz_save
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
import matplotlib.ticker
from matplotlib.patches import Ellipse
from matplotlib.transforms import ScaledTranslation
import numpy as np
import random
from numpy import linspace, meshgrid
from matplotlib.mlab import griddata
from matplotlib.gridspec import GridSpec
# Create the colourmap
#import numpy as np
#import matplotlib.pyplot as plt
import matplotlib._cm, matplotlib.cm
specs = matplotlib._cm.cubehelix(gamma=1.4,s=0.4,r=-0.8,h=2.0)
specs_r = matplotlib.cm._reverse_cmap_spec(specs)
matplotlib.cm.register_cmap(name="jorrithelix" , data=specs)
matplotlib.cm.register_cmap(name="jorrithelix"+"_r", data=specs_r)
def makeGrid(x, y, z, resX=200, resY=200):
"Convert 3 column data to matplotlib grid"
xi = linspace(min(x), max(x), resX)
yi = linspace(min(y), max(y), resY)
Z = griddata(x, y, z, xi, yi)
X, Y = meshgrid(xi, yi)
return X, Y, Z
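# Example (illustrative): grid scattered (h, log10(p), error) samples for a
# contour plot, e.g. X, Y, Z = makeGrid(HHH1, np.log10(PPP1), EEE1), as in the
# commented-out contourf block further below.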
def getErrors(p, h, out='D', Ref=''):
"Get the relative errors from table-based interpolation"
errorTTSE = 1e3
errorBICUBIC = 1e3
try:
# Using the EOS
CP.disable_TTSE_LUT(Ref)
EOS = CP.PropsSI(out,'P',p,'H',h,Ref)
# Using the TTSE method
CP.enable_TTSE_LUT(Ref)
CP.set_TTSE_mode(Ref,"TTSE")
TTSE = CP.PropsSI(out,'P',p,'H',h,Ref)
# Using the Bicubic method
CP.enable_TTSE_LUT(Ref)
CP.set_TTSE_mode(Ref,"BICUBIC")
BICUBIC = CP.PropsSI(out,'P',p,'H',h,Ref)
errorTTSE = abs(TTSE /EOS-1.0)*100.0
errorBICUBIC = abs(BICUBIC/EOS-1.0)*100.0
except ValueError as VE:
print VE
pass
return errorTTSE,errorBICUBIC
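# Illustrative use (this helper is not called by the script below; the numbers
# are placeholders, pressure in Pa and enthalpy in J/kg):
#     err_ttse, err_bicubic = getErrors(1.0e5, 4.0e5, out='D', Ref='Air')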
#['YlOrRd', 'PuBuGn', 'hot', 'cubehelix', 'gnuplot', 'gnuplot2']:
for colourmap in ['jorrithelix']:
for out in ['D']:
## landscape figure
#fig = plt.figure(figsize=(10,5))
#ax1 = fig.add_axes((0.08,0.1,0.32,0.83))
#ax2 = fig.add_axes((0.50,0.1,0.32,0.83))
#cbar_ax = fig.add_axes([0.80, 0.075, 0.05, 0.875])
# portrait figure
#fig = plt.figure(figsize=(5,8))
#ax1 = plt.subplot2grid((2,8), (0,0), colspan=7)
#ax2 = plt.subplot2grid((2,8), (1,0), colspan=7)
#cbar_ax = plt.subplot2grid((2,8), (0,7), colspan=1, rowspan=2)
#fig = plt.figure(figsize=(8,4))
#ax1 = plt.subplot2grid((1,7), (0,0), colspan=3)
#ax2 = plt.subplot2grid((1,7), (0,3), colspan=3)
#cbar_ax = plt.subplot2grid((1,7), (0,6), colspan=1, rowspan=1)
#plt.tight_layout()
fig = plt.figure(figsize=(8,4))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
#cbar_ax = plt.subplot2grid((1,7), (0,6), colspan=1, rowspan=1)
#plt.tight_layout()
#Ref = 'R245fa'
#Ref = 'Isopentane'
Ref = 'Air'
T = np.linspace(CP.PropsSI(Ref,'Tmin')+0.1,CP.PropsSI(Ref,'Tcrit')-0.01,300)
pV = CP.PropsSI('P','T',T,'Q',1,Ref)
hL = CP.PropsSI('H','T',T,'Q',0,Ref)
hV = CP.PropsSI('H','T',T,'Q',1,Ref)
hTP= np.append(hL,[hV[::-1]])
pTP= np.append(pV,[pV[::-1]])
HHH1, PPP1, EEE1 = [], [], []
HHH2, PPP2, EEE2 = [], [], []
cNorm = colors.LogNorm(vmin=1e-10, vmax=1e-1)
scalarMap = cmx.ScalarMappable(norm = cNorm, cmap = plt.get_cmap(colourmap))
# Setting the limits for enthalpy and pressure
p_min = CP.PropsSI(Ref,'ptriple')
p_max = 60e5
h_min = CP.PropsSI('H','T',CP.PropsSI(Ref,'Ttriple')+0.5,'Q',0,Ref)
h_max = CP.PropsSI('H','T',500+273.15,'P',p_max,Ref)
# Creating some isotherms for better illustration of the cycle
isoT = np.array([0,100,200,300,400])+273.15
isoP = np.logspace(np.log10(p_min),np.log10(p_max),base=10)
ones = np.ones(isoP.shape)
isoH = [ CP.PropsSI('H','T',T*ones,'P',isoP,Ref) for T in isoT ]
print "Lower left and upper right coordinates: ({0},{1}), ({2},{3})".format(h_min,p_min,h_max,p_max)
CP.set_TTSESinglePhase_LUT_range(Ref,h_min,h_max*1.05,p_min,p_max*1.05)
for a_useless_counter in range(40000):
h = random.uniform(h_min,h_max)
p = 10**random.uniform(np.log10(p_min),np.log10(p_max))
try:
# Using the EOS
CP.disable_TTSE_LUT(Ref)
rhoEOS = CP.PropsSI('D','P',p,'H',h,Ref)
TEOS = CP.PropsSI('T','P',p,'H',h,Ref)
if out =='C': cpEOS = CP.PropsSI('C','P',p,'H',h,Ref)
# Using the TTSE method
CP.enable_TTSE_LUT(Ref)
CP.set_TTSE_mode(Ref,"TTSE")
rhoTTSE = CP.PropsSI('D','P',p,'H',h,Ref)
TTTSE = CP.PropsSI('T','P',p,'H',h,Ref)
if out =='C': cpTTSE = CP.PropsSI('C','P',p,'H',h,Ref)
# Using the Bicubic method
CP.enable_TTSE_LUT(Ref)
CP.set_TTSE_mode(Ref,"BICUBIC")
rhoBICUBIC = CP.PropsSI('D','P',p,'H',h,Ref)
TBICUBIC = CP.PropsSI('T','P',p,'H',h,Ref)
if out =='C': cpBICUBIC = CP.PropsSI('C','P',p,'H',h,Ref)
if out == 'D':
errorTTSE = abs(rhoTTSE/rhoEOS-1)*100
errorBICUBIC = abs(rhoBICUBIC/rhoEOS-1)*100
elif out == 'T':
errorTTSE = abs(TTTSE/TEOS-1)*100
errorBICUBIC = abs(TBICUBIC/TEOS-1)*100
elif out == 'C':
errorTTSE = abs(cpTTSE/cpEOS-1)*100
errorBICUBIC = abs(cpBICUBIC/cpEOS-1)*100
HHH1.append(h)
PPP1.append(p)
EEE1.append(errorTTSE)
HHH2.append(h)
PPP2.append(p)
EEE2.append(errorBICUBIC)
except ValueError as VE:
#print VE
pass
HHH1 = np.array(HHH1)
PPP1 = np.array(PPP1)
SC1 = ax1.scatter(HHH1/1e3, PPP1/1e5, s=8, c=EEE1, edgecolors = 'none', cmap = plt.get_cmap(colourmap), norm = cNorm, rasterized=True)
#X, Y, Z = makeGrid(HHH1, np.log10(PPP1), EEE1)
#SC1 = matplotlib.pyplot.contourf(X, Y, Z,
# alpha=0.75,
# norm=cNorm,
# cmap=matplotlib.pyplot.get_cmap(colourmap))#,
# #rasterized=True)
HHH2 = np.array(HHH2)
PPP2 = np.array(PPP2)
SC2 = ax2.scatter(HHH2/1e3, PPP2/1e5, s=8, c=EEE2, edgecolors = 'none', cmap = plt.get_cmap(colourmap), norm = cNorm, rasterized=True)
if out == 'D':
ax1.set_title('rel. density error, TTSE')
ax2.set_title('rel. density error, bicubic')
elif out == 'T':
ax1.set_title('rel. temperature error, TTSE')
ax2.set_title('rel. temperature error, bicubic')
elif out == 'C':
ax1.set_title('rel. heat capacity error, TTSE')
ax2.set_title('rel. heat capacity error, bicubic')
for ax in [ax1, ax2]:
#h_min = np.ceil(h_min)
delta = 0.1
delta_min = 1.0+delta
delta_max = 1.0-delta
#ax.set_xlim(delta_min*h_min/1e3, delta_max*h_max/1e3)
#ax.set_ylim(delta_min*p_min/1e5, delta_max*p_max/1e5)
ax.set_xlim(-155, 800)
ax.set_ylim(0.025, 58)
ax.set_yscale('log')
#ticks = np.array([0.02,0.05,0.1,0.2,0.5,1,2,5,10,20,50])
ticks = np.array([0.05,0.1,0.2,0.5,1,2,5,10,20,50])
labels = [str(tick) for tick in ticks]
ax.set_yticks(ticks)
ax.set_yticklabels(labels)
ax.get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
#ticks = [150,250,350,450,550]
#labels = [str(tick) for tick in ticks]
#ax.set_xticks(ticks)
#ax.set_xticklabels(labels)
#ax.tick_params(axis='y',which='minor',left='off')
#ax.set_xlabel('Enthalpy [kJ \cdot kg^{-1}]')
ax.set_xlabel('Specific Enthalpy [kJ$\cdot$kg$\mathdefault{^{-1}\!}$]')
ax.set_ylabel('Pressure [bar]')
#ax.plot(hL/1e3,pV/1e5,'k',lw = 4)
#ax.plot(hV/1e3,pV/1e5,'k',lw = 4)
ax.plot(hTP/1e3,pTP/1e5,'k',lw = 3)
for i,T in enumerate(isoT):
ax.plot(isoH[i]/1e3,isoP/1e5,'k',lw = 1)
#CB = fig.colorbar(SC1)
#cbar_ax = fig.add_axes([0.80, 0.075, 0.05, 0.875])
#CB = fig.colorbar(SC1, cax=cbar_ax)
#CB = matplotlib.pyplot.colorbar(SC2)
#CB.solids.set_rasterized(True)
#ax2.yaxis.set_visible(False)
#[x0,y0,width,height]
#cbar_ax = fig.add_axes([0.95, 0.00, 0.05, 1.00])
#CB = fig.colorbar(SC2, ax=[ax1,ax2], cax=cbar_ax)
#CB.solids.set_rasterized(True)
#from mpl_toolkits.axes_grid1 import make_axes_locatable
#divider = make_axes_locatable(ax2)
#cbar_ax = divider.append_axes("right", "5%", pad="0%")
#CB = plt.colorbar(SC2, cax=cbar_ax)
#CB.solids.set_rasterized(True)
#CB = fig.colorbar(SC2)
#CB.solids.set_rasterized(True)
from mpl_toolkits.axes_grid1 import make_axes_locatable
divider = make_axes_locatable(ax2)
ax_cb = divider.new_horizontal(size="5%", pad=0.05)
#fig1 = ax.get_figure()
fig.add_axes(ax_cb)
CB = fig.colorbar(SC2, cax=ax_cb)
#aspect = 5./2.
#ax1.set_aspect(aspect)
#ax2.set_aspect(aspect)
CB.solids.set_rasterized(True)
if out == 'D':
CB.set_label(r'$\|\rho/\rho\mathdefault{_{EOS}-1\|\times 100}$ [%]')
elif out == 'T':
CB.set_label(r'$\|T/T\mathdefault{_{EOS}-1\|\times 100}$ [%]')
elif out == 'C':
CB.set_label(r'$\|c\mathdefault{_p}/c\mathdefault{_{p,EOS}-1\|\times 100}$ [%]')
# The plot is finished, now we add an ellipse
#circle=plt.Circle((5,5),.5,color='b',fill=False)
#A scale-free ellipse.
#xy - center of ellipse
#width - total length (diameter) of horizontal axis
#height - total length (diameter) of vertical axis
#angle - rotation in degrees (anti-clockwise)
p_op_min = 1e5
p_op_max = 3e5
h_op_min = CP.PropsSI('H','T',400+273.15,'P',p_op_max,Ref)
h_op_max = CP.PropsSI('H','T', 25+273.15,'P',p_op_max,Ref)
p_op_cen = (p_op_min + p_op_max) / 2.0
h_op_cen = (h_op_min + h_op_max) / 2.0
p_op_hei = p_op_max - p_op_min
h_op_wid = h_op_max - h_op_min
#for ax in [ax1, ax2]:
##x,y = 10,0
### use the axis scale tform to figure out how far to translate
##circ_offset = ScaledTranslation(x,y,ax.transScale)
### construct the composite tform
##circ_tform = circ_offset + ax.transLimits + ax.transAxes
#ellipse = Ellipse(xy=(h_op_cen,p_op_cen), width=h_op_wid, height=p_op_hei, angle=15, color='black')#, transform=circ_tform)
#ax.add_artist(ellipse)
# font_def = font_manager.FontProperties(family='Helvetica', style='normal',
# size=sizeOfFont, weight='normal', stretch='normal')
#
# for a in fig.axes:
# for label in [a.get_xticklabels(), a.get_yticklabels()]:
# label.set_fontproperties(ticks_font
#plt.savefig(out+'_'+colourmap+'_TTSE_BICUBIC.png', dpi = 300, transparent = True)
#plt.savefig(out+'_'+colourmap+'_TTSE_BICUBIC.eps')
# plt.savefig(out+'_'+colourmap+'_TTSE_BICUBIC.pdf')
plt.tight_layout()
plt.savefig('check_TTSE_'+colourmap+'.pdf' )
#tikz_save( 'check_TTSE.tikz')
#plt.savefig(out+'_'+colourmap+'_TTSE_BICUBIC.jpg', dpi = 1200)
plt.close() | mit |
molgor/spystats | spystats/sandbox/2-geostatistics.py | 1 | 2381 |
import GPflow as gf
import numpy as np
import scipy.spatial.distance as sp
from matplotlib import pyplot as plt
plt.style.use('ggplot')
# %matplotlib inline
N = 1000
phi = 0.05
sigma2 = 2
nugget = 1
X = np.random.rand(N,4)
# plt.plot(X[:,0], X[:,1], 'kx', mew=2)
# plt.show()
distance = sp.squareform(sp.pdist(X[:, 0:2]))
correlation = np.exp(- distance / phi)
covariance = correlation * sigma2
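# Exponential covariance model: Cov(d) = sigma2 * exp(-d / phi), with d the
# pairwise distance computed from the first two (spatial) columns of X; the
# nugget term below adds uncorrelated noise on top of the correlated field S.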
# plt.imshow(covariance)
# plt.show()
mu = 10 + 1.5 * X[:, 2] - 1 * X[:, 3]
mu = mu.reshape(N,1)
S = np.random.multivariate_normal(np.zeros(N), correlation) +\
np.random.normal(size = N) * nugget
S = S.reshape(N,1)
Y = mu + S
plt.scatter(X[:, 0], X[:, 1], c = S)
plt.show()
# ONLY GAUSSIAN PROCESS --------------------------------------------------------
# Defining the model
k = gf.kernels.Matern12(2, lengthscales=1, active_dims = [0,1] )
X1 = X[:, 0:2]
m = gf.gpr.GPR(X1, S, k)
m.likelihood.variance = 0.1
print(m)
# Estimation
m.optimize()
print(m)
# GAUSSIAN PROCESS WITH LINEAR TREND -------------------------------------------
# Defining the model
k = gf.kernels.Matern12(2, lengthscales=1, active_dims = [0,1] )
meanf = gf.mean_functions.Linear(np.ones((4,1)), np.ones(1))
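# Linear mean function m(X) = X @ A + b with A of shape (4, 1) (one weight per
# column of X, including the two spatial coordinates) and scalar offset b, so
# the GP models the residual around a fitted linear trend.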
m = gf.gpr.GPR(X, Y, k, meanf)
m.likelihood.variance = 0.1
print(m)
# Estimation
m.optimize()
print(m)
# ------------------------------------------------------------------------------
# Defining the model
k = gf.kernels.Matern12(2, lengthscales=1, active_dims = [0,1])
meanf = gf.mean_functions.LinearG(np.ones((2,1)), np.ones(1))
m = gf.gpr.GPR(X, Y, k, meanf)
m.likelihood.variance = 0.1
print(m)
# Estimation
m.optimize()
print(m)
# NOTE: the plotting helper `plot` is defined in the PREDICTION section below
# and is called there, once it exists.
# PREDICTION -------------------------------------------------------------------
def plot(m):
xx = np.linspace(-0.1, 1.1, 100)[:,None]
mean, var = m.predict_y(xx)
plt.figure(figsize=(12, 6))
plt.plot(X, Y, 'kx', mew=2)
plt.plot(xx, mean, 'b', lw=2)
plt.fill_between(xx[:,0], mean[:,0] - 2*np.sqrt(var[:,0]), mean[:,0] + 2*np.sqrt(var[:,0]), color='blue', alpha=0.2)
plt.xlim(-0.1, 1.1)
plot(m)
plt.show()
# MEAN FUNCTIONS ---------------------------------------------------------------
k = gf.kernels.Matern52(1, lengthscales=0.3)
meanf = gf.mean_functions.Constant(1)
m = gf.gpr.GPR(X, Y, k, meanf)
m.likelihood.variance = 0.01
print(m)
m.optimize()
plot(m)
plt.show()
print(m)
| bsd-2-clause |
StagPython/StagPy | setup.py | 1 | 1411 | import os
from setuptools import setup
with open('README.rst') as rdm:
README = rdm.read()
DEPENDENCIES = [
'loam>=0.3.1',
'f90nml>=1.2',
'setuptools_scm>=4.1',
]
HEAVY = [
'numpy>=1.19',
'scipy>=1.5',
'pandas>=1.1',
'h5py>=3.0',
'matplotlib>=3.3',
]
ON_RTD = os.environ.get('READTHEDOCS', None) == 'True'
# heavy dependencies are mocked out on Read the Docs
if not ON_RTD:
DEPENDENCIES.extend(HEAVY)
setup(
name='stagpy',
use_scm_version=True,
description='Tool for StagYY output files processing',
long_description=README,
url='https://github.com/StagPython/StagPy',
author='Martina Ulvrova, Adrien Morison, Stéphane Labrosse',
author_email='[email protected]',
license='Apache',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
python_requires='>=3.6',
packages=['stagpy'],
entry_points={
'console_scripts': ['stagpy = stagpy.__main__:main']
},
include_package_data=True,
install_requires=DEPENDENCIES,
)
| apache-2.0 |
aleksandr-bakanov/astropy | examples/coordinates/plot_galactocentric-frame.py | 2 | 7979 | # -*- coding: utf-8 -*-
"""
========================================================================
Transforming positions and velocities to and from a Galactocentric frame
========================================================================
This document shows a few examples of how to use and customize the
`~astropy.coordinates.Galactocentric` frame to transform Heliocentric sky
positions, distance, proper motions, and radial velocities to a Galactocentric,
Cartesian frame, and the same in reverse.
The main configurable parameters of the `~astropy.coordinates.Galactocentric`
frame control the position and velocity of the solar system barycenter within
the Galaxy. These are specified by setting the ICRS coordinates of the
Galactic center, the distance to the Galactic center (the sun-galactic center
line is always assumed to be the x-axis of the Galactocentric frame), and the
Cartesian 3-velocity of the sun in the Galactocentric frame. We'll first
demonstrate how to customize these values, then show how to set the solar motion
instead by inputting the proper motion of Sgr A*.
Note that, for brevity, we may refer to the solar system barycenter as just "the
sun" in the examples below.
*By: Adrian Price-Whelan*
*License: BSD*
"""
##############################################################################
# Set up numpy, matplotlib, and use a nicer set of plot parameters:
import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
##############################################################################
# Import the necessary astropy subpackages
import astropy.coordinates as coord
import astropy.units as u
##############################################################################
# Let's first define a barycentric coordinate and velocity in the ICRS frame.
# We'll use the data for the star HD 39881 from the `Simbad
# <simbad.harvard.edu/simbad/>`_ database:
c1 = coord.ICRS(ra=89.014303*u.degree, dec=13.924912*u.degree,
distance=(37.59*u.mas).to(u.pc, u.parallax()),
pm_ra_cosdec=372.72*u.mas/u.yr,
pm_dec=-483.69*u.mas/u.yr,
radial_velocity=0.37*u.km/u.s)
##############################################################################
# This is a high proper-motion star; suppose we'd like to transform its position
# and velocity to a Galactocentric frame to see if it has a large 3D velocity
# as well. To use the Astropy default solar position and motion parameters, we
# can simply do:
gc1 = c1.transform_to(coord.Galactocentric)
##############################################################################
# From here, we can access the components of the resulting
# `~astropy.coordinates.Galactocentric` instance to see the 3D Cartesian
# velocity components:
print(gc1.v_x, gc1.v_y, gc1.v_z)
##############################################################################
# The default parameters for the `~astropy.coordinates.Galactocentric` frame
# are detailed in the linked documentation, but we can modify the most commonly
# changed values using the keywords ``galcen_distance``, ``galcen_v_sun``, and
# ``z_sun`` which set the sun-Galactic center distance, the 3D velocity vector
# of the sun, and the height of the sun above the Galactic midplane,
# respectively. The velocity of the sun can be specified as an
# `~astropy.units.Quantity` object with velocity units and is interpreted as a
# Cartesian velocity, as in the example below. Note that, as with the positions,
# the Galactocentric frame is a right-handed system (i.e., the Sun is at negative
# x values) so ``v_x`` is opposite of the Galactocentric radial velocity:
v_sun = [11.1, 244, 7.25] * (u.km / u.s) # [vx, vy, vz]
gc_frame = coord.Galactocentric(
galcen_distance=8*u.kpc,
galcen_v_sun=v_sun,
z_sun=0*u.pc)
##############################################################################
# We can then transform to this frame instead, with our custom parameters:
gc2 = c1.transform_to(gc_frame)
print(gc2.v_x, gc2.v_y, gc2.v_z)
##############################################################################
# It's sometimes useful to specify the solar motion using the `proper motion
# of Sgr A* <https://arxiv.org/abs/astro-ph/0408107>`_ instead of Cartesian
# velocity components. With an assumed distance, we can convert proper motion
# components to Cartesian velocity components using `astropy.units`:
galcen_distance = 8*u.kpc
pm_gal_sgrA = [-6.379, -0.202] * u.mas/u.yr # from Reid & Brunthaler 2004
vy, vz = -(galcen_distance * pm_gal_sgrA).to(u.km/u.s, u.dimensionless_angles())
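# Rough sanity check (approximate values, not quoted from the reference): with
# the numbers above this yields v_y ~ 242 km/s and v_z ~ 7.7 km/s.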
##############################################################################
# We still have to assume a line-of-sight velocity for the Galactic center,
# which we will again take to be 11 km/s:
vx = 11.1 * u.km/u.s
v_sun2 = u.Quantity([vx, vy, vz]) # List of Quantity -> a single Quantity
gc_frame2 = coord.Galactocentric(galcen_distance=galcen_distance,
galcen_v_sun=v_sun2,
z_sun=0*u.pc)
gc3 = c1.transform_to(gc_frame2)
print(gc3.v_x, gc3.v_y, gc3.v_z)
##############################################################################
# The transformations also work in the opposite direction. This can be useful
# for transforming simulated or theoretical data to observable quantities. As
# an example, we'll generate 4 theoretical circular orbits at different
# Galactocentric radii with the same circular velocity, and transform them to
# Heliocentric coordinates:
ring_distances = np.arange(10, 25+1, 5) * u.kpc
circ_velocity = 220 * u.km/u.s
phi_grid = np.linspace(90, 270, 512) * u.degree # grid of azimuths
ring_rep = coord.CylindricalRepresentation(
rho=ring_distances[:,np.newaxis],
phi=phi_grid[np.newaxis],
z=np.zeros_like(ring_distances)[:,np.newaxis])
angular_velocity = (-circ_velocity / ring_distances).to(u.mas/u.yr,
u.dimensionless_angles())
ring_dif = coord.CylindricalDifferential(
d_rho=np.zeros(phi_grid.shape)[np.newaxis]*u.km/u.s,
d_phi=angular_velocity[:,np.newaxis],
d_z=np.zeros(phi_grid.shape)[np.newaxis]*u.km/u.s
)
ring_rep = ring_rep.with_differentials(ring_dif)
gc_rings = coord.Galactocentric(ring_rep)
##############################################################################
# First, let's visualize the geometry in Galactocentric coordinates. Here are
# the positions and velocities of the rings; note that in the velocity plot,
# the velocities of the 4 rings are identical and thus overlaid under the same
# curve:
fig,axes = plt.subplots(1, 2, figsize=(12,6))
# Positions
axes[0].plot(gc_rings.x.T, gc_rings.y.T, marker='None', linewidth=3)
axes[0].text(-8., 0, r'$\odot$', fontsize=20)
axes[0].set_xlim(-30, 30)
axes[0].set_ylim(-30, 30)
axes[0].set_xlabel('$x$ [kpc]')
axes[0].set_ylabel('$y$ [kpc]')
# Velocities
axes[1].plot(gc_rings.v_x.T, gc_rings.v_y.T, marker='None', linewidth=3)
axes[1].set_xlim(-250, 250)
axes[1].set_ylim(-250, 250)
axes[1].set_xlabel('$v_x$ [{0}]'.format((u.km/u.s).to_string("latex_inline")))
axes[1].set_ylabel('$v_y$ [{0}]'.format((u.km/u.s).to_string("latex_inline")))
fig.tight_layout()
##############################################################################
# Now we can transform to Galactic coordinates and visualize the rings in
# observable coordinates:
gal_rings = gc_rings.transform_to(coord.Galactic)
fig,ax = plt.subplots(1, 1, figsize=(8,6))
for i in range(len(ring_distances)):
ax.plot(gal_rings[i].l.degree, gal_rings[i].pm_l_cosb.value,
label=str(ring_distances[i]), marker='None', linewidth=3)
ax.set_xlim(360, 0)
ax.set_xlabel('$l$ [deg]')
ax.set_ylabel(r'$\mu_l \, \cos b$ [{0}]'.format((u.mas/u.yr).to_string('latex_inline')))
ax.legend()
| bsd-3-clause |
pazeshun/jsk_apc | demos/instance_occlsegm/instance_occlsegm_lib/datasets/apc/apc2016/jsk.py | 2 | 3764 | import glob
import os
import os.path as osp
import re
import chainer
import numpy as np
import PIL.Image
import skimage.io
try:
from sklearn.model_selection import train_test_split
except ImportError:
from sklearn.cross_validation import train_test_split
from .base import class_names_apc2016
import instance_occlsegm_lib.data
from instance_occlsegm_lib.datasets import config
class JskAPC2016Dataset(chainer.dataset.DatasetMixin):
class_names = class_names_apc2016
_root_dir = osp.join(config.ROOT_DIR, 'APC2016')
def __init__(self, split):
assert split in ['all', 'train', 'valid']
self.split = split
self._init_ids()
def _init_ids(self):
ids = []
# APC2016rbo
dataset_dir = osp.join(self._root_dir, 'APC2016rbo')
if not osp.exists(dataset_dir):
self.download()
for img_file in os.listdir(dataset_dir):
if not re.match(r'^.*_[0-9]*_bin_[a-l].jpg$', img_file):
continue
data_id = osp.splitext(img_file)[0]
ids.append(('rbo', data_id))
# APC2016seg
dataset_dir = osp.join(self._root_dir, 'annotated')
if not osp.exists(dataset_dir):
self.download()
for scene_dir in os.listdir(dataset_dir):
            if osp.isdir(osp.join(dataset_dir, scene_dir)):
ids.append(('seg', scene_dir))
ids_train, ids_valid = train_test_split(
ids, test_size=0.25, random_state=5)
self._ids = {'all': ids, 'train': ids_train, 'valid': ids_valid}
def __len__(self):
return len(self._ids[self.split])
def download(self):
instance_occlsegm_lib.data.download(
url='https://drive.google.com/uc?id=0B9P1L--7Wd2vSV9oLTd1U2I3TDg',
path=osp.join(self._root_dir, 'APC2016rbo.tgz'),
md5='efd7f1d5420636ee2b2827e7e0f5d1ac',
postprocess=instance_occlsegm_lib.data.extractall,
)
instance_occlsegm_lib.data.download(
url='https://drive.google.com/uc?id=0B9P1L--7Wd2vaExFU1AxWHlMdTg',
path=osp.join(self._root_dir, 'APC2016jsk.tgz'),
md5='8f1641f52fff90154533f84b9eb111a5',
postprocess=instance_occlsegm_lib.data.extractall,
)
def get_example(self, i):
data_type, data_id = self._ids[self.split][i]
if data_type == 'seg':
dataset_dir = osp.join(self._root_dir, 'annotated')
img_file = osp.join(dataset_dir, data_id, 'image.png')
label_file = osp.join(dataset_dir, data_id, 'label.png')
img = skimage.io.imread(img_file)
assert img.dtype == np.uint8
label = np.array(PIL.Image.open(label_file), dtype=np.int32)
label[label == 255] = -1
elif data_type == 'rbo':
dataset_dir = osp.join(self._root_dir, 'APC2016rbo')
img_file = osp.join(dataset_dir, data_id + '.jpg')
img = skimage.io.imread(img_file)
label = np.zeros(img.shape[:2], dtype=np.int32)
shelf_bin_mask_file = osp.join(dataset_dir, data_id + '.pbm')
shelf_bin_mask = skimage.io.imread(
shelf_bin_mask_file, as_gray=True
)
label[shelf_bin_mask < 127] = -1
mask_glob = osp.join(dataset_dir, data_id + '_*.pbm')
for mask_file in glob.glob(mask_glob):
mask_id = osp.splitext(osp.basename(mask_file))[0]
mask = skimage.io.imread(mask_file, as_gray=True)
label_name = mask_id[len(data_id + '_'):]
label_value = self.class_names.index(label_name)
label[mask > 127] = label_value
else:
raise ValueError
return img, label
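# Hedged usage sketch (shapes are assumptions based on the loaders above):
#     dataset = JskAPC2016Dataset(split='train')
#     img, label = dataset.get_example(0)  # img: HxWx3 uint8, label: HxW int32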
| bsd-3-clause |
sadahanu/DataScience_SideProject | Stack_Exchange/py2_text.py | 1 | 7689 | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 13 23:10:40 2016
@author: zhouyu
"""
#%%
import pandas as pd
import numpy as np
import os
import re
import nltk
from nltk.corpus import stopwords
from bs4 import BeautifulSoup
os.chdir('/Users/zhouyu/Documents/Zhou_Yu/DS/kaggle_challenge/text processing')
#%% step1: import data
import glob
alltrainfiles = glob.glob("*.csv")
raw_text =pd.concat((pd.read_csv(f,index_col = None, header =0) for f in alltrainfiles),ignore_index = True)
#raw_text = pd.read_csv("crypto.csv",index_col = None)
#%% step2: clean data, remove HTML, symbols and stopwords
def text_to_words(rawtext):
#split into individual words, remove HTML, only keep letters and number
# convert letters to lower case
reg_c = re.compile('[^a-zA-Z0-9_\\+\\-]')
words = [word for word in reg_c.split(rawtext.lower()) if word!='']
stops = set(stopwords.words("english"))
#take out stop words
meaningful_words = [w for w in words if not w in stops]
return(" ".join(meaningful_words))
def target_to_words(rawtext):
#only return the first target word
reg_c = re.compile('[^a-zA-Z0-9_\\+\\-]')
words = [word for word in reg_c.split(rawtext.lower()) if word!='']
stops = set(stopwords.words("english"))
#take out stop words
meaningful_words = [w for w in words if not w in stops]
return(meaningful_words[0])
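# Rough illustration (made-up input, assuming NLTK's English stopword list):
#   text_to_words("How to use RSA keys?")  -> "use rsa keys"
#   target_to_words("rsa public-key")      -> "rsa"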
#%%
cleaned_post = []
cleaned_target = []
sz = raw_text.shape[0]
for i in range(0,sz):
raw_post = raw_text['title'][i]+' '+raw_text['content'][i]
raw_post = BeautifulSoup(raw_post).get_text()
cleaned_post.append(text_to_words(raw_post))
cleaned_target.append(target_to_words(raw_text['tags'][i]))
if((i+1)%1000==0):
print "Cleanning %d of %d\n" % (i+1,sz)
#print cleaned_post[1]
#%% step3: creating features from a bag of words
from sklearn.feature_extraction.text import CountVectorizer
count_vect = CountVectorizer(analyzer = "word", \
tokenizer = None, \
preprocessor = None, \
stop_words = None, \
max_features = 5000)
X_train_counts = count_vect.fit_transform(cleaned_post)
#X_target_counts = count_vect.fit_transform(cleaned_target)
from sklearn.feature_extraction.text import TfidfTransformer
tf_transformer = TfidfTransformer(use_idf = False).fit(X_train_counts)
X_train_tf = tf_transformer.transform(X_train_counts)
#%% training a linear model
# METHOD 1: BUILD randomforestclassifier...
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_estimators = 10)
forest = rf.fit(X_train_tf, cleaned_target)
#%% examine the result produced by METHOD 1:
pred = rf.predict(X_train_tf)
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from collections import OrderedDict
import matplotlib.pyplot as plt
import itertools
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
cnf_matrix = confusion_matrix(cleaned_target,pred)
#target_names = set(cleaned_target)
#np.set_printoptions(precision = 2)
#plt.figure()
#plot_confusion_matrix(cnf_matrix,classes = target_names,normalize = True,title='Normalized confusion matrix')
#plt.show()
target_names = list(OrderedDict.fromkeys(cleaned_target))
print(classification_report(cleaned_target,pred,target_names = target_names))
#######
#%% Method 2: directly predicted as the highest frequency element
# find the highest tf-idf
#step1: select a random sample
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from collections import OrderedDict
sample = np.random.choice(87000,1000,replace = False)
tf_pred = []
tf_target = []
for i in range(0,1000):
r = sample[i];
tf_target.append(cleaned_target[r])
tf_post = X_train_tf.getrow(r).toarray()
tf_post_max = tf_post.argmax()
tf_pred.append(count_vect.get_feature_names()[tf_post_max])
tf_cnf_matrix = confusion_matrix(tf_target,tf_pred)
target_names = list(OrderedDict.fromkeys(tf_pred+tf_target))
print(classification_report(tf_target, tf_pred,target_names =target_names))
#%% evaluate test set
test = pd.read_csv('test/test.csv')
cleaned_test = []
test_sz = test.shape[0]
for i in range(0,test_sz):
test_post = test['title'][i]+' '+test['content'][i]
test_post = BeautifulSoup(test_post).get_text()
cleaned_test.append(text_to_words(test_post))
if((i+1)%1000==0):
print "Cleanning %d of %d\n" % (i+1,test_sz)
#%% use random forest
X_test_counts = count_vect.fit_transform(cleaned_test)
X_test_tf = tf_transformer.transform(X_test_counts)
result = forest.predict(X_test_counts)
# use max tf-idf
#%%
test_pred = []
for i in range(0,test_sz):
tf_test = X_test_tf.getrow(i).toarray()
# just return one tag
#tf_test_max = tf_test.argmax()
#test_pred.append(count_vect.get_feature_names()[tf_test_max])
ind = np.argpartition(tf_test,-4)[:,-4:]
pred_tags = [count_vect.get_feature_names()[j] for j in ind[0,:].tolist()]
test_pred.append( " ".join(pred_tags))
if((i+1)%1000==0):
print "Predicting %d of %d\n" % (i+1,test_sz)
result = test_pred
#%% prepare submission
submission = pd.read_csv('test/sample_submission.csv')
submission.iloc[:,1] = result
submission.to_csv('test/submission.csv',index = None)
#%% try to use NMF model can not be mapped to specific question...
n_features = 5000
n_topics = 10
n_samples = test_sz
n_top_words = 4
def get_top_words(model, feature_names, n_top_words):
res = []
for topic_idx, topic in enumerate(model.components_):
tags = " ".join([feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]])
res.append(tags)
return res
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import NMF
from time import time
tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2,
max_features=n_features,
stop_words='english')
tfidf = tfidf_vectorizer.fit_transform(cleaned_test)
# Fit the NMF model
print("Fitting the NMF model (Frobenius norm) with tf-idf features, "
"n_samples=%d and n_features=%d..."
% (n_samples, n_features))
t0 = time()
nmf = NMF(n_components=n_topics, random_state=1,
alpha=.1, l1_ratio=.5).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in NMF model (Frobenius norm):")
tfidf_feature_names = tfidf_vectorizer.get_feature_names()
#print_top_words(nmf, tfidf_feature_names, n_top_words)
result = get_top_words(nmf,tfidf_feature_names,n_top_words) | mit |
antgonza/qiita | qiita_db/metadata_template/base_metadata_template.py | 1 | 62503 | r"""
Metadata template objects (:mod:`qiita_db.metadata_template`)
=============================================================
..currentmodule:: qiita_db.metadata_template
This module provides the MetadataTemplate base class and the subclasses
SampleTemplate and PrepTemplate.
Classes
-------
..autosummary::
:toctree: generated/
BaseSample
Sample
PrepSample
MetadataTemplate
SampleTemplate
PrepTemplate
Methods
-------
..autosummary::
:toctree: generated/
"""
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from itertools import chain
from copy import deepcopy
from datetime import datetime
from json import loads, dumps
import pandas as pd
import numpy as np
from skbio.util import find_duplicates
import warnings
from qiita_core.exceptions import IncompetentQiitaDeveloperError
import qiita_db as qdb
from string import ascii_letters, digits
# this is the name of the sample where we store all columns for a sample/prep
# information
QIITA_COLUMN_NAME = 'qiita_sample_column_names'
def _helper_get_categories(table):
"""This is a helper function to avoid duplication of code"""
with qdb.sql_connection.TRN:
sql = """SELECT sample_values->>'columns'
FROM qiita.{0}
WHERE sample_id = '{1}'""".format(table, QIITA_COLUMN_NAME)
qdb.sql_connection.TRN.add(sql)
results = qdb.sql_connection.TRN.execute_fetchflatten()
if results:
results = sorted(loads(results[0]))
return results
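# Layout sketch (sample id and columns are illustrative, not from a real
# study): each dynamic info-file table holds one JSONB document per sample
# plus a single bookkeeping row keyed by QIITA_COLUMN_NAME, e.g.
#   sample_id                    sample_values
#   qiita_sample_column_names    {"columns": ["ph", "season_environment", ...]}
#   1.SKB8.640193                {"ph": "6.94", "season_environment": ...}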
class BaseSample(qdb.base.QiitaObject):
r"""Sample object that accesses the db to get the information of a sample
belonging to a PrepTemplate or a SampleTemplate.
Parameters
----------
sample_id : str
The sample id
md_template : MetadataTemplate
The metadata template obj to which the sample belongs to
Methods
-------
__eq__
__len__
__getitem__
__setitem__
__delitem__
__iter__
__contains__
exists
keys
values
items
get
See Also
--------
QiitaObject
Sample
PrepSample
"""
# Used to find the right SQL tables - should be defined on the subclasses
_table_prefix = None
_id_column = None
def _check_template_class(self, md_template):
r"""Checks that md_template is of the correct type
Parameters
----------
md_template : MetadataTemplate
The metadata template
Raises
------
IncompetentQiitaDeveloperError
            If it's called directly from the base class
If `md_template` doesn't have the correct type
"""
raise IncompetentQiitaDeveloperError()
def __init__(self, sample_id, md_template):
r"""Initializes the object
Parameters
----------
sample_id : str
The sample id
md_template : MetadataTemplate
The metadata template in which the sample is present
Raises
------
QiitaDBUnknownIDError
If `sample_id` does not correspond to any sample in md_template
"""
# Check that we are not instantiating the base class
self._check_subclass()
# Check that the md_template is of the correct type
self._check_template_class(md_template)
# Check if the sample id is present on the passed metadata template
# This test will check that the sample id is actually present on the db
if sample_id not in md_template:
raise qdb.exceptions.QiitaDBUnknownIDError(
sample_id, self.__class__.__name__)
# Assign private attributes
self._id = sample_id
self._md_template = md_template
self._dynamic_table = "%s%d" % (self._table_prefix,
self._md_template.id)
def __hash__(self):
r"""Defines the hash function so samples are hashable"""
return hash(self._id)
def __eq__(self, other):
r"""Self and other are equal based on type and ids"""
if not isinstance(other, type(self)):
return False
if other._id != self._id:
return False
if other._md_template != self._md_template:
return False
return True
@classmethod
def exists(cls, sample_id, md_template):
r"""Checks if already exists a MetadataTemplate for the provided object
Parameters
----------
sample_id : str
The sample id
md_template : MetadataTemplate
The metadata template to which the sample belongs to
Returns
-------
bool
True if already exists. False otherwise.
"""
with qdb.sql_connection.TRN:
cls._check_subclass()
sql = """SELECT EXISTS(
SELECT * FROM qiita.{0}
WHERE sample_id=%s AND {1}=%s
)""".format(cls._table, cls._id_column)
qdb.sql_connection.TRN.add(sql, [sample_id, md_template.id])
return qdb.sql_connection.TRN.execute_fetchlast()
def _get_categories(self):
r"""Returns all the available metadata categories for the sample
Returns
-------
set of str
The set of all available metadata categories
"""
return set(_helper_get_categories(self._dynamic_table))
def _to_dict(self):
r"""Returns the categories and their values in a dictionary
Returns
-------
dict of {str: str}
A dictionary of the form {category: value}
"""
with qdb.sql_connection.TRN:
sql = """SELECT sample_values
FROM qiita.{0}
WHERE sample_id=%s""".format(self._dynamic_table)
qdb.sql_connection.TRN.add(sql, [self._id])
result = qdb.sql_connection.TRN.execute_fetchindex()
return result[0]['sample_values']
def __len__(self):
r"""Returns the number of metadata categories
Returns
-------
int
The number of metadata categories
"""
# return the number of columns
return len(self._get_categories())
def __getitem__(self, key):
r"""Returns the value of the metadata category `key`
Parameters
----------
key : str
The metadata category
Returns
-------
obj
The value of the metadata category `key`
Raises
------
KeyError
If the metadata category `key` does not exists
See Also
--------
get
"""
with qdb.sql_connection.TRN:
key = key.lower()
if key not in self._get_categories():
# The key is not available for the sample, so raise a KeyError
raise KeyError(
"Metadata category %s does not exists for sample %s"
" in template %d" % (key, self._id, self._md_template.id))
sql = """SELECT sample_values->>'{0}' as {0}
FROM qiita.{1}
WHERE sample_id = %s""".format(
key, self._dynamic_table)
qdb.sql_connection.TRN.add(sql, [self._id])
return qdb.sql_connection.TRN.execute_fetchlast()
def setitem(self, column, value):
"""Sets `value` as value for the given `column`
Parameters
----------
column : str
The column to update
value : str
The value to set. This is expected to be a str on the assumption
that psycopg2 will cast as necessary when updating.
Raises
------
QiitaDBColumnError
If the column does not exist in the table
"""
# Check if the column exist in the table
if column not in self._get_categories():
raise qdb.exceptions.QiitaDBColumnError(
"Column %s does not exist in %s" %
(column, self._dynamic_table))
sql = """UPDATE qiita.{0}
SET sample_values = sample_values || %s
WHERE sample_id = %s""".format(self._dynamic_table)
qdb.sql_connection.perform_as_transaction(
sql, [dumps({column: value}), self.id])
def __setitem__(self, column, value):
r"""Sets the metadata value for the category `column`
Parameters
----------
column : str
The column to update
value : str
The value to set. This is expected to be a str on the assumption
that psycopg2 will cast as necessary when updating.
"""
with qdb.sql_connection.TRN:
self.setitem(column, value)
qdb.sql_connection.TRN.execute()
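    # Hedged usage sketch (template and ids are illustrative):
    #     sample = some_sample_template['1.SKB8.640193']
    #     sample['ph']                  # read one category
    #     sample['ph'] = '7.1'          # update it (executes immediately)
    #     sample.setitem('ph', '7.1')   # same, but queued in an open TRN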
def __delitem__(self, key):
r"""Removes the sample with sample id `key` from the database
Parameters
----------
key : str
The sample id
"""
raise qdb.exceptions.QiitaDBNotImplementedError()
def __iter__(self):
r"""Iterator over the metadata keys
Returns
-------
Iterator
Iterator over the sample ids
See Also
--------
keys
"""
return iter(self._get_categories())
def __contains__(self, key):
r"""Checks if the metadata category `key` is present
Parameters
----------
key : str
The sample id
Returns
-------
bool
True if the metadata category `key` is present, false otherwise
"""
return key.lower() in self._get_categories()
def keys(self):
r"""Iterator over the metadata categories
Returns
-------
Iterator
Iterator over the sample ids
See Also
--------
__iter__
"""
return self.__iter__()
def values(self):
r"""Iterator over the metadata values, in metadata category order
Returns
-------
Iterator
Iterator over metadata values
"""
d = self._to_dict()
return d.values()
def items(self):
r"""Iterator over (category, value) tuples
Returns
-------
Iterator
Iterator over (category, value) tuples
"""
d = self._to_dict()
return d.items()
def get(self, key):
r"""Returns the metadata value for category `key`, or None if the
category `key` is not present
Parameters
----------
key : str
The metadata category
Returns
-------
Obj or None
The value object for the category `key`, or None if it is not
present
See Also
--------
__getitem__
"""
try:
return self[key]
except KeyError:
return None
class MetadataTemplate(qdb.base.QiitaObject):
r"""Metadata map object that accesses the db to get the sample/prep
template information
Attributes
----------
id
Methods
-------
exists
__len__
__getitem__
__setitem__
__delitem__
__iter__
__contains__
keys
values
items
get
to_file
add_filepath
update
metadata_headers
delete_column
See Also
--------
QiitaObject
SampleTemplate
PrepTemplate
"""
# Used to find the right SQL tables - should be defined on the subclasses
_table_prefix = None
_id_column = None
_sample_cls = None
# forbidden_words not defined for base class. Please redefine for
# sub-classes.
_forbidden_words = {}
def _check_id(self, id_):
r"""Checks that the MetadataTemplate id_ exists on the database"""
with qdb.sql_connection.TRN:
sql = "SELECT EXISTS(SELECT * FROM qiita.{0} WHERE {1}=%s)".format(
self._table, self._id_column)
qdb.sql_connection.TRN.add(sql, [id_])
return qdb.sql_connection.TRN.execute_fetchlast()
@classmethod
def _table_name(cls, obj_id):
r"""Returns the dynamic table name
Parameters
----------
obj_id : int
The id of the metadata template
Returns
-------
str
The table name
Raises
------
IncompetentQiitaDeveloperError
If called from the base class directly
"""
if not cls._table_prefix:
raise IncompetentQiitaDeveloperError(
"_table_prefix should be defined in the subclasses")
return "%s%d" % (cls._table_prefix, obj_id)
@classmethod
def _clean_validate_template(cls, md_template, study_id,
current_columns=None):
"""Takes care of all validation and cleaning of metadata templates
Parameters
----------
md_template : DataFrame
The metadata template file contents indexed by sample ids
study_id : int
The study to which the metadata template belongs to.
current_columns : iterable of str, optional
The current list of metadata columns
Returns
-------
md_template : DataFrame
Cleaned deep-copy of the input md_template:
Removes 'qiita_study_id' and 'qiita_prep_id' columns,
if present.
Raises
------
QiitaDBColumnError
If the column names in md_template contains invalid characters,
forbidden words, or PostgreSQL-reserved words.
QiitaDBWarning
If there are missing columns required for some functionality
"""
cls._check_subclass()
invalid_ids = qdb.metadata_template.util.get_invalid_sample_names(
md_template.index)
if invalid_ids:
raise qdb.exceptions.QiitaDBColumnError(
"The following sample names in the template contain invalid "
"characters (only alphanumeric characters or periods are "
"allowed): %s." % ", ".join(invalid_ids))
if len(set(md_template.index)) != len(md_template.index):
raise qdb.exceptions.QiitaDBDuplicateSamplesError(
find_duplicates(md_template.index))
# We are going to modify the md_template. We create a copy so
# we don't modify the user one
md_template = md_template.copy(deep=True)
# In the database, all the column headers are lowercase
md_template.columns = [c.lower() for c in md_template.columns]
# drop these columns in the result
if 'qiita_study_id' in md_template.columns:
del md_template['qiita_study_id']
if 'qiita_prep_id' in md_template.columns:
del md_template['qiita_prep_id']
# validating pgsql reserved words not to be column headers
current_headers = set(md_template.columns.values)
# testing for specific column names that are not included in the other
# tests.
pgsql_reserved = cls._identify_pgsql_reserved_words_in_column_names(
current_headers)
invalid = cls._identify_column_names_with_invalid_characters(
current_headers)
forbidden = cls._identify_forbidden_words_in_column_names(
current_headers)
qiime2_reserved = cls._identify_qiime2_reserved_words_in_column_names(
current_headers)
error = []
if pgsql_reserved:
error.append(
"These column names are PgSQL reserved words, replace them: "
"~~ %s ~~." % ", ".join(pgsql_reserved))
if invalid:
error.append(
"These column names contain invalid chars, remove or replace "
"them: ~~ %s ~~." % ", ".join(invalid))
if forbidden:
error.append(
"These column names are not valid in this information file, "
"remove them: ~~ %s ~~." % ", ".join(forbidden))
if qiime2_reserved:
error.append(
"These columns are QIIME2 reserved words, replace them: "
" ~~ %s ~~." % ", ".join(pgsql_reserved))
if error:
raise qdb.exceptions.QiitaDBColumnError(
"%s\nYou need to modify them." % '\n'.join(error))
# Prefix the sample names with the study_id
qdb.metadata_template.util.prefix_sample_names_with_id(md_template,
study_id)
# Check that we don't have duplicate columns
if len(set(md_template.columns)) != len(md_template.columns):
raise qdb.exceptions.QiitaDBDuplicateHeaderError(
find_duplicates(md_template.columns))
return md_template
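    # Rough sketch of the cleaning above (names are illustrative): a row
    # indexed as 'Sample1' for study 1 typically comes back indexed as
    # '1.Sample1', with headers lower-cased and any qiita_study_id /
    # qiita_prep_id columns dropped.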
@classmethod
def _common_creation_steps(cls, md_template, obj_id):
r"""Executes the common creation steps
Parameters
----------
md_template : DataFrame
The metadata template file contents indexed by sample ids
obj_id : int
The id of the object being created
"""
with qdb.sql_connection.TRN:
cls._check_subclass()
# Get some useful information from the metadata template
sample_ids = md_template.index.tolist()
headers = sorted(md_template.keys().tolist())
if not headers:
raise ValueError("Your info file only has sample_name")
# Insert values on template_sample table
values = [[obj_id, s_id] for s_id in sample_ids]
sql = """INSERT INTO qiita.{0} ({1}, sample_id)
VALUES (%s, %s)""".format(cls._table, cls._id_column)
qdb.sql_connection.TRN.add(sql, values, many=True)
# Create table with custom columns
table_name = cls._table_name(obj_id)
sql = """CREATE TABLE qiita.{0} (
sample_id VARCHAR NOT NULL PRIMARY KEY,
sample_values JSONB NOT NULL)""".format(table_name)
qdb.sql_connection.TRN.add(sql)
values = dumps({"columns": md_template.columns.tolist()})
sql = """INSERT INTO qiita.{0} (sample_id, sample_values)
VALUES ('{1}', %s)""".format(
table_name, QIITA_COLUMN_NAME)
qdb.sql_connection.TRN.add(sql, [values])
values = [(k, df.to_json()) for k, df in md_template.iterrows()]
sql = """INSERT INTO qiita.{0} (sample_id, sample_values)
VALUES (%s, %s)""".format(table_name)
qdb.sql_connection.TRN.add(sql, values, many=True)
# Execute all the steps
qdb.sql_connection.TRN.execute()
@classmethod
def metadata_headers(cls):
"""Returns metadata headers available
Returns
-------
list
Alphabetical list of all metadata headers available
"""
with qdb.sql_connection.TRN:
sql = """SELECT DISTINCT table_name
FROM information_schema.columns
WHERE table_name LIKE '{0}%' AND
table_name != 'sample_template_filepath' AND
table_name != 'prep_template_filepath' AND
table_name != 'prep_template_sample' AND
table_name != 'prep_template_processing_job' AND
table_name != 'preparation_artifact' AND
table_name != 'prep_template'""".format(
cls._table_prefix)
qdb.sql_connection.TRN.add(sql)
tables = qdb.sql_connection.TRN.execute_fetchflatten()
sql = """SELECT sample_values->>'columns'
FROM qiita.%s WHERE sample_id = '{0}'""".format(
QIITA_COLUMN_NAME)
results = []
for t in tables:
qdb.sql_connection.TRN.add(sql % t)
vals = qdb.sql_connection.TRN.execute_fetchflatten()
if vals:
results.extend(loads(vals[0]))
return list(set(results))
def _common_delete_sample_steps(self, sample_names):
r"""Executes the common delete sample steps
Parameters
----------
sample_names : list of str
The sample names to be erased
Raises
------
QiitaDBUnknownIDError
If any of the `sample_names` don't exist
"""
keys = list(self.keys())
missing = [sn for sn in sample_names if sn not in keys]
if missing:
raise qdb.exceptions.QiitaDBUnknownIDError(
', '.join(missing), self._id)
with qdb.sql_connection.TRN:
# to simplify the sql strings, we are creating a base_sql, which
# will be used to create sql1 and sql2. sql1 will delete the
# sample_names from the main table ([sample | prep]_[id]), then
# sql2 will delete the sample_names from [study | prep]_sample
base_sql = 'DELETE FROM qiita.{0} WHERE sample_id=%s'
sql1 = base_sql.format(self._table_name(self._id))
sql2 = '{0} AND {1}=%s'.format(
base_sql.format(self._table), self._id_column)
for sn in sample_names:
qdb.sql_connection.TRN.add(sql1, [sn])
qdb.sql_connection.TRN.add(sql2, [sn, self.id])
qdb.sql_connection.TRN.execute()
# making sure we don't delete all the samples
qdb.sql_connection.TRN.add(
"SELECT COUNT(*) FROM qiita.{0}".format(
self._table_name(self._id)))
            # 1 as the JSON formatted tables have an extra "sample" where we
# store the column information
if qdb.sql_connection.TRN.execute_fetchlast() <= 1:
raise ValueError(
'You cannot delete all samples from an information file')
self.generate_files(samples=sample_names)
def delete_column(self, column_name):
"""Delete `column_name` from info file
Parameters
----------
column : str
The column name to be deleted
Raises
------
QiitaDBColumnError
If the `column_name` doesn't exist
QiitaDBOperationNotPermittedError
If a the info file can't be updated
If the column_name is selected as a specimen_id_column in the
study.
"""
if column_name not in self.categories:
raise qdb.exceptions.QiitaDBColumnError(
"'%s' not in info file %d" % (column_name, self._id))
if not self.can_be_updated(columns={column_name}):
raise qdb.exceptions.QiitaDBOperationNotPermittedError(
'%s cannot be deleted' % column_name)
# if a tube identifier column is selected disallow its deletion
specimen_id_column = qdb.study.Study(self.study_id).specimen_id_column
if specimen_id_column == column_name:
raise qdb.exceptions.QiitaDBOperationNotPermittedError(
'"%s" cannot be deleted, this column is currently selected'
' as the tube identifier (specimen_id_column)' %
column_name)
with qdb.sql_connection.TRN:
table_name = 'qiita.{0}{1}'.format(self._table_prefix, self._id)
# deleting from all samples; note that (-) in pgsql jsonb means
# delete that key and value
sql = """UPDATE {0}
SET sample_values = sample_values - %s
WHERE sample_id != %s""".format(table_name)
qdb.sql_connection.TRN.add(sql, [column_name, QIITA_COLUMN_NAME])
# deleting from QIITA_COLUMN_NAME
columns = self.categories
columns.remove(column_name)
values = '{"columns": %s}' % dumps(columns)
sql = """UPDATE {0}
SET sample_values = %s
WHERE sample_id = '{1}'""".format(
table_name, QIITA_COLUMN_NAME)
qdb.sql_connection.TRN.add(sql, [values])
qdb.sql_connection.TRN.execute()
self.generate_files()
def can_be_extended(self, new_samples, new_cols):
"""Whether the template can be updated or not
Parameters
----------
new_samples : list of str
The new samples to be added
new_cols : list of str
The new columns to be added
Returns
-------
bool
Whether the template can be extended or not
str
The error message in case that it can't be extended
Raises
------
QiitaDBNotImplementedError
This method should be implemented in the subclasses
"""
raise qdb.exceptions.QiitaDBNotImplementedError(
"The method 'can_be_extended' should be implemented in "
"the subclasses")
def can_be_updated(self, **kwargs):
"""Whether the template can be updated or not
Returns
-------
bool
Whether the template can be updated or not
Raises
------
QiitaDBNotImplementedError
This method should be implemented in the subclasses
"""
raise qdb.exceptions.QiitaDBNotImplementedError(
"The method 'can_be_updated' should be implemented in "
"the subclasses")
def _common_extend_steps(self, md_template):
r"""executes the common extend steps
Parameters
----------
md_template : DataFrame
The metadata template file contents indexed by sample ids
Returns
-------
list of str
The new samples being added
list of str
The new columns being added
"""
with qdb.sql_connection.TRN:
# Check if we are adding new samples
sample_ids = md_template.index.tolist()
curr_samples = set(self.keys())
existing_samples = curr_samples.intersection(sample_ids)
new_samples = set(sample_ids).difference(existing_samples)
# Check if we are adding new columns
headers = md_template.keys().tolist()
new_cols = set(headers).difference(self.categories)
if not new_cols and not new_samples:
return None, None
is_extendable, error_msg = self.can_be_extended(new_samples,
new_cols)
if not is_extendable:
raise qdb.exceptions.QiitaDBError(error_msg)
table_name = self._table_name(self._id)
if new_cols:
warnings.warn(
"The following columns have been added to the existing"
" template: %s" % ", ".join(sorted(new_cols)),
qdb.exceptions.QiitaDBWarning)
# If we are adding new columns, add them first (simplifies
# code). Sorting the new columns to enforce an order
new_cols = sorted(new_cols)
cols = self.categories
cols.extend(new_cols)
values = dumps({"columns": cols})
sql = """UPDATE qiita.{0}
SET sample_values = %s
WHERE sample_id = '{1}'""".format(
table_name, QIITA_COLUMN_NAME)
qdb.sql_connection.TRN.add(sql, [values])
if existing_samples:
# The values for the new columns are the only ones that get
# added to the database. None of the existing values will
# be modified (see update for that functionality). Remember
                    # that || is the jsonb operator to update/add a key/value
md_filtered = md_template[new_cols].loc[existing_samples]
for sid, df in md_filtered.iterrows():
values = dict(df.items())
sql = """UPDATE qiita.{0}
SET sample_values = sample_values || %s
WHERE sample_id = %s""".format(
self._table_name(self._id))
qdb.sql_connection.TRN.add(sql, [dumps(values), sid])
if new_samples:
warnings.warn(
"The following samples have been added to the existing"
" template: %s" % ", ".join(new_samples),
qdb.exceptions.QiitaDBWarning)
new_samples = sorted(new_samples)
# At this point we only want the information
# from the new samples
md_filtered = md_template.loc[new_samples]
# Insert new samples to the study sample table
values = [[self._id, s_id] for s_id in new_samples]
sql = """INSERT INTO qiita.{0} ({1}, sample_id)
VALUES (%s, %s)""".format(self._table,
self._id_column)
qdb.sql_connection.TRN.add(sql, values, many=True)
# inserting new samples to the info file
values = [(k, row.to_json())
for k, row in md_filtered.iterrows()]
sql = """INSERT INTO qiita.{0} (sample_id, sample_values)
VALUES (%s, %s)""".format(table_name)
qdb.sql_connection.TRN.add(sql, values, many=True)
# Execute all the steps
qdb.sql_connection.TRN.execute()
return new_samples, new_cols
@classmethod
def exists(cls, obj_id):
r"""Checks if already exists a MetadataTemplate for the provided object
Parameters
----------
obj_id : int
The id to test if it exists on the database
Returns
-------
bool
True if already exists. False otherwise.
"""
cls._check_subclass()
return qdb.util.exists_table(cls._table_name(obj_id))
def _get_sample_ids(self):
r"""Returns all the available samples for the metadata template
Returns
-------
set of str
The set of all available sample ids
"""
with qdb.sql_connection.TRN:
sql = "SELECT sample_id FROM qiita.{0} WHERE {1}=%s".format(
self._table, self._id_column)
qdb.sql_connection.TRN.add(sql, [self._id])
return set(qdb.sql_connection.TRN.execute_fetchflatten())
def __len__(self):
r"""Returns the number of samples in the metadata template
Returns
-------
int
The number of samples in the metadata template
"""
return len(self._get_sample_ids())
def __getitem__(self, key):
r"""Returns the metadata values for sample id `key`
Parameters
----------
key : str
The sample id
Returns
-------
Sample
The sample object for the sample id `key`
Raises
------
KeyError
If the sample id `key` is not present in the metadata template
See Also
--------
get
"""
with qdb.sql_connection.TRN:
if key in self:
return self._sample_cls(key, self)
else:
raise KeyError("Sample id %s does not exists in template %d"
% (key, self._id))
def __setitem__(self, key, value):
r"""Sets the metadata values for sample id `key`
Parameters
----------
key : str
The sample id
value : Sample
The sample obj holding the new sample values
"""
raise qdb.exceptions.QiitaDBNotImplementedError()
def __delitem__(self, key):
r"""Removes the sample with sample id `key` from the database
Parameters
----------
key : str
The sample id
"""
raise qdb.exceptions.QiitaDBNotImplementedError()
def __iter__(self):
r"""Iterator over the sample ids
Returns
-------
Iterator
Iterator over the sample ids
See Also
--------
keys
"""
return iter(self._get_sample_ids())
def __contains__(self, key):
r"""Checks if the sample id `key` is present in the metadata template
Parameters
----------
key : str
The sample id
Returns
-------
bool
True if the sample id `key` is in the metadata template, false
otherwise
"""
return key in self._get_sample_ids()
def keys(self):
r"""Iterator over the sorted sample ids
Returns
-------
Iterator
Iterator over the sample ids
See Also
--------
__iter__
"""
return self.__iter__()
def values(self):
r"""Iterator over the metadata values
Returns
-------
Iterator
Iterator over Sample obj
"""
with qdb.sql_connection.TRN:
return iter(self._sample_cls(sample_id, self)
for sample_id in self._get_sample_ids())
def items(self):
r"""Iterator over (sample_id, values) tuples, in sample id order
Returns
-------
Iterator
Iterator over (sample_ids, values) tuples
"""
with qdb.sql_connection.TRN:
return iter((sample_id, self._sample_cls(sample_id, self))
for sample_id in self._get_sample_ids())
def get(self, key):
r"""Returns the metadata values for sample id `key`, or None if the
sample id `key` is not present in the metadata map
Parameters
----------
key : str
The sample id
Returns
-------
Sample or None
The sample object for the sample id `key`, or None if it is not
present
See Also
--------
__getitem__
"""
try:
return self[key]
except KeyError:
return None
def _transform_to_dict(self, values):
r"""Transforms `values` to a dict keyed by sample id
Parameters
----------
values : object
            The object returned from an execute_fetchall call
Returns
-------
dict
"""
result = {}
for row in values:
# Transform the row to a dictionary
values_dict = dict(row)
# Get the sample id of this row
sid = values_dict['sample_id']
del values_dict['sample_id']
# Remove _id_column from this row (if present)
if self._id_column in values_dict:
del values_dict[self._id_column]
result[sid] = values_dict
return result
def generate_files(self):
r"""Generates all the files that contain data from this template
Raises
------
QiitaDBNotImplementedError
This method should be implemented by the subclasses
"""
raise qdb.exceptions.QiitaDBNotImplementedError(
"generate_files should be implemented in the subclass!")
def to_file(self, fp, samples=None):
r"""Writes the MetadataTemplate to the file `fp` in tab-delimited
format
Parameters
----------
fp : str
Path to the output file
samples : set, optional
If supplied, only the specified samples will be written to the
file
"""
with qdb.sql_connection.TRN:
df = self.to_dataframe()
if samples is not None:
df = df.loc[samples]
# Sorting the dataframe so multiple serializations of the metadata
# template are consistent.
df.sort_index(axis=0, inplace=True)
df.sort_index(axis=1, inplace=True)
# Store the template in a file
df.to_csv(fp, index_label='sample_name', na_rep="", sep='\t',
encoding='utf-8')
def _common_to_dataframe_steps(self, samples=None):
"""Perform the common to_dataframe steps
        Parameters
        ----------
        samples : list of str, optional
            A list of the sample names we actually want to retrieve
        Returns
        -------
        pandas DataFrame
            The metadata in the template, indexed on sample id
"""
with qdb.sql_connection.TRN:
# Retrieve all the information from the database
sql = """SELECT sample_id, sample_values
FROM qiita.{0}
WHERE sample_id != '{1}'""".format(
self._table_name(self._id), QIITA_COLUMN_NAME)
if samples is None:
qdb.sql_connection.TRN.add(sql)
else:
sql += ' AND sample_id IN %s'
qdb.sql_connection.TRN.add(sql, [tuple(samples)])
data = qdb.sql_connection.TRN.execute_fetchindex()
df = pd.DataFrame([d for _, d in data], index=[i for i, _ in data],
dtype=str)
df.index.name = 'sample_id'
df.where((pd.notnull(df)), None)
id_column_name = 'qiita_%sid' % (self._table_prefix)
if id_column_name == 'qiita_sample_id':
id_column_name = 'qiita_study_id'
df[id_column_name] = str(self.id)
return df
def add_filepath(self, filepath, fp_id=None):
r"""Populates the DB tables for storing the filepath and connects the
`self` objects with this filepath"""
with qdb.sql_connection.TRN:
fp_id = self._fp_id if fp_id is None else fp_id
try:
fpp_id = qdb.util.insert_filepaths(
[(filepath, fp_id)], None, "templates",
move_files=False)[0]
sql = """INSERT INTO qiita.{0} ({1}, filepath_id)
VALUES (%s, %s)""".format(self._filepath_table,
self._id_column)
qdb.sql_connection.TRN.add(sql, [self._id, fpp_id])
qdb.sql_connection.TRN.execute()
except Exception as e:
qdb.logger.LogEntry.create(
'Runtime', str(e), info={self.__class__.__name__: self.id})
raise e
def get_filepaths(self):
r"""Retrieves the list of (filepath_id, filepath)"""
with qdb.sql_connection.TRN:
return [(x['fp_id'], x['fp'])
for x in qdb.util.retrieve_filepaths(
self._filepath_table, self._id_column, self.id,
sort='descending')]
@property
def categories(self):
"""Identifies the metadata columns present in an info file
Returns
-------
cols : list
The category fields
"""
return _helper_get_categories(self._table_name(self._id))
def extend(self, md_template):
"""Adds the given template to the current one
Parameters
----------
md_template : DataFrame
The metadata template contents indexed by sample ids
"""
with qdb.sql_connection.TRN:
md_template = self._clean_validate_template(
md_template, self.study_id, current_columns=self.categories)
new_samples, new_columns = self._common_extend_steps(md_template)
if new_samples or new_columns:
self.validate(self.columns_restrictions)
self.generate_files(new_samples, new_columns)
def _update(self, md_template):
r"""Update values in the template
Parameters
----------
md_template : DataFrame
The metadata template file contents indexed by samples ids
Returns
-------
set of str
The samples that were updated
set of str
The columns that were updated
Raises
------
QiitaDBError
If md_template and db do not have the same sample ids
If md_template and db do not have the same column headers
If self.can_be_updated is not True
QiitaDBWarning
If there are no differences between the contents of the DB and the
passed md_template
"""
with qdb.sql_connection.TRN:
# Retrieving current metadata
current_map = self.to_dataframe()
# simple validations of sample ids and column names
samples_diff = set(md_template.index).difference(current_map.index)
if samples_diff:
raise qdb.exceptions.QiitaDBError(
'The new template differs from what is stored '
'in database by these samples names: %s'
% ', '.join(samples_diff))
if not set(current_map.columns).issuperset(md_template.columns):
columns_diff = set(md_template.columns).difference(
current_map.columns)
raise qdb.exceptions.QiitaDBError(
'Some of the columns in your template are not present in '
'the system. Use "extend" if you want to add more columns '
'to the template. Missing columns: %s'
% ', '.join(columns_diff))
# In order to speed up some computation, let's compare only the
# common columns and rows. current_map.columns and
# current_map.index are supersets of md_template.columns and
            # md_template.index, respectively, so this will not fail
current_map = current_map[
md_template.columns].loc[md_template.index]
# Get the values that we need to change
# diff_map is a DataFrame that hold boolean values. If a cell is
# True, means that the md_template is different from the
# current_map while False means that the cell has the same value
diff_map = current_map != md_template
# ne_stacked holds a MultiIndexed DataFrame in which the first
# level of indexing is the sample_name and the second one is the
# columns. We only have 1 column, which holds if that
# (sample, column) pair has been modified or not (i.e. cell)
ne_stacked = diff_map.stack()
# by using ne_stacked to index itself, we get only the columns
# that did change (see boolean indexing in pandas docs)
changed = ne_stacked[ne_stacked]
if changed.empty:
warnings.warn(
"There are no differences between the data stored in the "
"DB and the new data provided",
qdb.exceptions.QiitaDBWarning)
return None, None
changed.index.names = ['sample_name', 'column']
# the combination of np.where and boolean indexing produces
# a numpy array with only the values that actually changed
# between the current_map and md_template
changed_to = md_template.values[np.where(diff_map)]
# now we are going to take that map and create a new DataFrame
# which is going to have a double level index (sample_id /
# column_name) with a single column 'to'; this will looks something
# like:
# to
# sample_name column
# XX.Sample1 sample_type 6
# XX.Sample2 sample_type 5
# host_subject_id the only one
# XX.Sample3 sample_type 10
# physical_specimen_location new location
to_update = pd.DataFrame({'to': changed_to}, index=changed.index)
# reset_index will expand the multi-index and convert the example
# to:
# sample_name column to
# 0 XX.Sample1 sample_type 6
# 1 XX.Sample2 sample_type 5
# 2 XX.Sample2 host_subject_id the only one
# 3 XX.Sample3 sample_type 10
# 4 XX.Sample3 physical_specimen_location new location
to_update.reset_index(inplace=True)
new_columns = []
samples_updated = []
for sid, df in to_update.groupby('sample_name'):
samples_updated.append(sid)
# getting just columns: column and to, and then using column
# as index will generate this for XX.Sample2:
# to
# column
# sample_type 5
# host_subject_id the only one
df = df[['column', 'to']].set_index('column')
# finally to_dict in XX.Sample2:
# {'to': {'host_subject_id': 'the only one',
# 'sample_type': '5'}}
values = df.to_dict()['to']
new_columns.extend(values.keys())
sql = """UPDATE qiita.{0}
SET sample_values = sample_values || %s
WHERE sample_id = %s""".format(
self._table_name(self._id))
qdb.sql_connection.TRN.add(sql, [dumps(values), sid])
nc = list(set(new_columns).union(set(self.categories)))
table_name = self._table_name(self.id)
values = dumps({"columns": nc})
sql = """UPDATE qiita.{0}
SET sample_values = %s
WHERE sample_id = '{1}'""".format(
table_name, QIITA_COLUMN_NAME)
qdb.sql_connection.TRN.add(sql, [values])
qdb.sql_connection.TRN.execute()
return set(samples_updated), set(new_columns)
def update(self, md_template):
r"""Update values in the template
Parameters
----------
md_template : DataFrame
The metadata template file contents indexed by samples ids
Raises
------
QiitaDBError
If md_template and db do not have the same sample ids
If md_template and db do not have the same column headers
If self.can_be_updated is not True
QiitaDBWarning
If there are no differences between the contents of the DB and the
passed md_template
"""
with qdb.sql_connection.TRN:
# Clean and validate the metadata template given
new_map = self._clean_validate_template(
md_template, self.study_id, current_columns=self.categories)
samples, columns = self._update(new_map)
self.validate(self.columns_restrictions)
self.generate_files(samples, columns)
def extend_and_update(self, md_template):
"""Performs the update and extend operations at once
Parameters
----------
md_template : DataFrame
The metadata template contents indexed by sample ids
See Also
--------
update
extend
"""
with qdb.sql_connection.TRN:
md_template = self._clean_validate_template(
md_template, self.study_id, current_columns=self.categories)
new_samples, new_columns = self._common_extend_steps(md_template)
samples, columns = self._update(md_template)
if samples is None:
samples = new_samples
elif new_samples is not None:
samples.update(new_samples)
if columns is None:
columns = new_columns
elif new_columns is not None:
columns.update(new_columns)
self.validate(self.columns_restrictions)
self.generate_files(samples, columns)
def update_category(self, category, samples_and_values):
"""Update an existing column
Parameters
----------
category : str
The category to update
samples_and_values : dict
A mapping of {sample_id: value}
Raises
------
QiitaDBUnknownIDError
If a sample_id is included in values that is not in the template
QiitaDBColumnError
If the column does not exist in the table. This is implicit, and
can be thrown by the contained Samples.
"""
with qdb.sql_connection.TRN:
if not set(self.keys()).issuperset(samples_and_values):
missing = set(self.keys()) - set(samples_and_values)
table_name = self._table_name(self._id)
raise qdb.exceptions.QiitaDBUnknownIDError(missing, table_name)
for k, v in samples_and_values.items():
sample = self[k]
if isinstance(v, np.generic):
v = np.asscalar(v)
sample.setitem(category, v)
qdb.sql_connection.TRN.execute()
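    # Hedged usage sketch (ids and values are illustrative):
    #     st.update_category('ph', {'1.SKB8.640193': '7.1',
    #                               '1.SKB7.640196': '6.8'})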
def get_category(self, category):
"""Returns the values of all samples for the given category
Parameters
----------
category : str
Metadata category to get information for
Returns
-------
dict
Sample metadata for the category in the form {sample_id: value}
Raises
------
QiitaDBColumnError
If category is not part of the template
"""
with qdb.sql_connection.TRN:
if category not in self.categories:
raise qdb.exceptions.QiitaDBColumnError(category)
sql = """SELECT sample_id,
COALESCE(sample_values->>'{0}', 'None') AS {0}
FROM qiita.{1}
WHERE sample_id != '{2}'""".format(
category, self._table_name(self._id), QIITA_COLUMN_NAME)
qdb.sql_connection.TRN.add(sql)
return dict(qdb.sql_connection.TRN.execute_fetchindex())
def check_restrictions(self, restrictions):
"""Checks if the template fulfills the restrictions
Parameters
----------
restrictions : list of Restriction
The restrictions to test if the template fulfills
Returns
-------
set of str
The missing columns
"""
cols = {col for restriction in restrictions
for col in restriction.columns}
return cols.difference(self.categories)
def _get_accession_numbers(self, column):
"""Return the accession numbers stored in `column`
Parameters
----------
column : str
The column name where the accession number is stored
Returns
-------
dict of {str: str}
The accession numbers keyed by sample id
"""
with qdb.sql_connection.TRN:
sql = """SELECT sample_id, {0}
FROM qiita.{1}
WHERE {2}=%s""".format(column, self._table,
self._id_column)
qdb.sql_connection.TRN.add(sql, [self.id])
dbresult = qdb.sql_connection.TRN.execute_fetchindex()
result = {sample_id: accession
for sample_id, accession in dbresult}
return result
def _update_accession_numbers(self, column, values):
"""Update accession numbers stored in `column` with the ones in `values`
Parameters
----------
column : str
The column name where the accession number are stored
values : dict of {str: str}
The accession numbers keyed by sample id
Raises
------
QiitaDBError
If a sample in `values` already has an accession number
QiitaDBWarning
            If `values` is not updating any accession number
"""
with qdb.sql_connection.TRN:
sql = """SELECT sample_id, {0}
FROM qiita.{1}
WHERE {2}=%s
AND {0} IS NOT NULL""".format(column, self._table,
self._id_column)
qdb.sql_connection.TRN.add(sql, [self.id])
dbresult = qdb.sql_connection.TRN.execute_fetchindex()
db_vals = {sample_id: accession
for sample_id, accession in dbresult}
common_samples = set(db_vals) & set(values)
diff = [sample for sample in common_samples
if db_vals[sample] != values[sample]]
if diff:
raise qdb.exceptions.QiitaDBError(
"The following samples already have an accession number: "
"%s" % ', '.join(diff))
            # Remove the common samples from the values dictionary
values = deepcopy(values)
for sample in common_samples:
del values[sample]
if values:
sql_vals = ', '.join(["(%s, %s)"] * len(values))
sql = """UPDATE qiita.{0} AS t
SET {1}=c.{1}
FROM (VALUES {2}) AS c(sample_id, {1})
WHERE c.sample_id = t.sample_id
AND t.{3} = %s
""".format(self._table, column, sql_vals,
self._id_column)
sql_vals = list(chain.from_iterable(values.items()))
sql_vals.append(self.id)
qdb.sql_connection.TRN.add(sql, sql_vals)
qdb.sql_connection.TRN.execute()
else:
warnings.warn("No new accession numbers to update",
qdb.exceptions.QiitaDBWarning)
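    # Added note (not in the original source): the UPDATE ... FROM (VALUES ...)
    # statement above expects one flat parameter list of alternating
    # sample_id/accession pairs, followed by the template id. For hypothetical
    # data:
    #
    # >>> from itertools import chain
    # >>> values = {'1.SKB1.640202': 'ERS000001', '1.SKB2.640194': 'ERS000002'}
    # >>> sql_vals = list(chain.from_iterable(values.items()))
    # >>> sql_vals.append(1)                          # template id
    # >>> sql_vals                                    # doctest: +SKIP
    # ['1.SKB1.640202', 'ERS000001', '1.SKB2.640194', 'ERS000002', 1]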
def validate(self, restriction_dict):
""" Validate the values in the restricted fields in info files
Parameters
----------
restriction_dict : dict of {str: Restriction}
A dictionary with the restrictions that apply to the metadata
Raises
------
QiitaDBWarning
If the values aren't castable
"""
warning_msg = []
columns = self.categories
wrong_msg = 'Sample "%s", column "%s", wrong value "%s"'
for label, restriction in restriction_dict.items():
missing = set(restriction.columns).difference(columns)
if missing:
warning_msg.append(
"%s: %s" % (restriction.error_msg,
', '.join(sorted(missing))))
else:
valid_null = qdb.metadata_template.constants.EBI_NULL_VALUES
for column, datatype in restriction.columns.items():
# sorting by key (sample id) so we always check in the
# same order, helpful for testing
cats_by_column = self.get_category(column)
for sample in sorted(cats_by_column):
val = cats_by_column[sample]
# ignore if valid null value
if val in valid_null:
continue
# test values
if datatype == datetime:
val = str(val)
formats = [
# 4 digits year
'%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M',
'%Y-%m-%d %H', '%Y-%m-%d', '%Y-%m', '%Y'
]
date = None
for fmt in formats:
try:
date = datetime.strptime(val, fmt)
break
except ValueError:
pass
if date is None:
warning_msg.append(
wrong_msg % (sample, column, val))
else:
try:
datatype(val)
except (ValueError, TypeError):
warning_msg.append(
wrong_msg % (sample, column, val))
if warning_msg:
warnings.warn(
"Some functionality will be disabled due to missing "
"columns:\n\t%s.\nSee the Templates tutorial for a description"
" of these fields." % ";\n\t".join(warning_msg),
qdb.exceptions.QiitaDBWarning)
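    # Added sketch (not in the original source): the datetime check above keeps
    # trying progressively coarser strptime formats until one matches, so a
    # value such as '2015-09' is accepted as a year-month date.
    #
    # >>> from datetime import datetime
    # >>> formats = ['%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M',
    # ...            '%Y-%m-%d %H', '%Y-%m-%d', '%Y-%m', '%Y']
    # >>> def first_matching(val):
    # ...     for fmt in formats:
    # ...         try:
    # ...             datetime.strptime(val, fmt)
    # ...             return fmt
    # ...         except ValueError:
    # ...             pass
    # >>> first_matching('2015-09')
    # '%Y-%m'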
@classmethod
def _identify_forbidden_words_in_column_names(cls, column_names):
"""Return a list of forbidden words found in column_names.
Parameters
----------
column_names : iterable
Iterable containing the column names to check.
Returns
        -------
set of forbidden words present in the column_names iterable.
"""
return set(cls._forbidden_words) & set(column_names)
@classmethod
def _identify_pgsql_reserved_words_in_column_names(cls, column_names):
"""Return a list of PostgreSQL-reserved words found in column_names.
Parameters
----------
column_names : iterable
Iterable containing the column names to check.
Returns
        -------
set of reserved words present in the column_names iterable.
References
----------
.. [1] postgresql SQL-SYNTAX-IDENTIFIERS: https://goo.gl/EF0cUV.
"""
return (qdb.metadata_template.util.get_pgsql_reserved_words() &
set(column_names))
@classmethod
def _identify_column_names_with_invalid_characters(cls, column_names):
"""Return a list of invalid words found in column_names.
Parameters
----------
column_names : iterable
Iterable containing the column names to check.
Returns
        -------
set of words containing invalid (illegal) characters.
"""
valid_initial_char = ascii_letters
valid_rest = set(ascii_letters+digits+'_:|')
invalid = []
for s in column_names:
if s[0] not in valid_initial_char:
invalid.append(s)
elif set(s) - valid_rest:
invalid.append(s)
return set(invalid)
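    # Added example (the concrete class and column names are hypothetical):
    # a valid name must start with a letter and may only contain letters,
    # digits, '_', ':' and '|'.
    #
    # >>> SampleTemplate._identify_column_names_with_invalid_characters(
    # ...     ['sample_type', '2nd_column', 'ph (units)'])  # doctest: +SKIP
    # {'2nd_column', 'ph (units)'}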
@classmethod
def _identify_qiime2_reserved_words_in_column_names(cls, column_names):
"""Return a list of QIIME2-reserved words found in column_names.
Parameters
----------
column_names : iterable
Iterable containing the column names to check.
Returns
        -------
        set of QIIME2-reserved words present in the column_names iterable.
"""
return (qdb.metadata_template.util.get_qiime2_reserved_words() &
set(column_names))
@property
def restrictions(cls):
r"""Retrieves the restrictions based on the class._table
Returns
-------
dict
{restriction: values, ...}
"""
with qdb.sql_connection.TRN:
sql = """SELECT name, valid_values
FROM qiita.restrictions
WHERE table_name = %s"""
qdb.sql_connection.TRN.add(sql, [cls._table])
return dict(qdb.sql_connection.TRN.execute_fetchindex())
def validate_restrictions(self):
r"""Validates the restrictions
Returns
-------
success, boolean
If the validation was successful
message, string
Message if success is not True
"""
with qdb.sql_connection.TRN:
# [:-1] removing last _
name = '%s %d' % (self._table_prefix[:-1], self.id)
success = True
message = []
restrictions = self.restrictions
categories = self.categories
difference = sorted(set(restrictions.keys()) - set(categories))
if difference:
success = False
message.append(
'%s is missing columns "%s"' % (name, ', '.join(
difference)))
to_review = set(restrictions.keys()) & set(categories)
for key in to_review:
info_vals = set(self.get_category(key).values())
msg = []
for v in info_vals:
if v not in restrictions[key]:
msg.append(v)
if msg:
success = False
message.append(
'%s has invalid values: "%s", valid values are: '
'"%s"' % (name, ', '.join(msg),
', '.join(restrictions[key])))
return success, '\n'.join(message)
| bsd-3-clause |
jmmease/pandas | pandas/tests/indexing/test_ix.py | 7 | 12420 | """ test indexing with ix """
import pytest
from warnings import catch_warnings
import numpy as np
import pandas as pd
from pandas.core.dtypes.common import is_scalar
from pandas.compat import lrange
from pandas import Series, DataFrame, option_context, MultiIndex
from pandas.util import testing as tm
from pandas.errors import PerformanceWarning
class TestIX(object):
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_ix_loc_setitem_consistency(self):
# GH 5771
# loc with slice and series
s = Series(0, index=[4, 5, 6])
s.loc[4:5] += 1
expected = Series([1, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
# GH 5928
# chained indexing assignment
df = DataFrame({'a': [0, 1, 2]})
expected = df.copy()
with catch_warnings(record=True):
expected.ix[[0, 1, 2], 'a'] = -expected.ix[[0, 1, 2], 'a']
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]]
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': [0, 1, 2], 'b': [0, 1, 2]})
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]].astype(
'float64') + 0.5
expected = DataFrame({'a': [0.5, -0.5, -1.5], 'b': [0, 1, 2]})
tm.assert_frame_equal(df, expected)
# GH 8607
# ix setitem consistency
df = DataFrame({'timestamp': [1413840976, 1413842580, 1413760580],
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
expected = DataFrame({'timestamp': pd.to_datetime(
[1413840976, 1413842580, 1413760580], unit='s'),
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
df2 = df.copy()
df2['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
df2.loc[:, 'timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
with catch_warnings(record=True):
df2.ix[:, 2] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_consistency(self):
# GH 8613
# some edge cases where ix/loc should return the same
# this is not an exhaustive case
def compare(result, expected):
if is_scalar(expected):
assert result == expected
else:
assert expected.equals(result)
# failure cases for .loc, but these work for .ix
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'))
for key in [slice(1, 3), tuple([slice(0, 2), slice(0, 2)]),
tuple([slice(0, 2), df.columns[0:2]])]:
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makePeriodIndex,
tm.makeTimedeltaIndex]:
df.index = index(len(df.index))
with catch_warnings(record=True):
df.ix[key]
pytest.raises(TypeError, lambda: df.loc[key])
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'),
index=pd.date_range('2012-01-01', periods=5))
for key in ['2012-01-03',
'2012-01-31',
slice('2012-01-03', '2012-01-03'),
slice('2012-01-03', '2012-01-04'),
slice('2012-01-03', '2012-01-06', 2),
slice('2012-01-03', '2012-01-31'),
tuple([[True, True, True, False, True]]), ]:
# getitem
# if the expected raises, then compare the exceptions
try:
with catch_warnings(record=True):
expected = df.ix[key]
except KeyError:
pytest.raises(KeyError, lambda: df.loc[key])
continue
result = df.loc[key]
compare(result, expected)
# setitem
df1 = df.copy()
df2 = df.copy()
with catch_warnings(record=True):
df1.ix[key] = 10
df2.loc[key] = 10
compare(df2, df1)
# edge cases
s = Series([1, 2, 3, 4], index=list('abde'))
result1 = s['a':'c']
with catch_warnings(record=True):
result2 = s.ix['a':'c']
result3 = s.loc['a':'c']
tm.assert_series_equal(result1, result2)
tm.assert_series_equal(result1, result3)
        # these now work rather than raising KeyError
s = Series(range(5), [-2, -1, 1, 2, 3])
with catch_warnings(record=True):
result1 = s.ix[-10:3]
result2 = s.loc[-10:3]
tm.assert_series_equal(result1, result2)
with catch_warnings(record=True):
result1 = s.ix[0:3]
result2 = s.loc[0:3]
tm.assert_series_equal(result1, result2)
def test_ix_weird_slicing(self):
# http://stackoverflow.com/q/17056560/1240268
df = DataFrame({'one': [1, 2, 3, np.nan, np.nan],
'two': [1, 2, 3, 4, 5]})
df.loc[df['one'] > 1, 'two'] = -df['two']
expected = DataFrame({'one': {0: 1.0,
1: 2.0,
2: 3.0,
3: np.nan,
4: np.nan},
'two': {0: 1,
1: -2,
2: -3,
3: 4,
4: 5}})
tm.assert_frame_equal(df, expected)
def test_ix_general(self):
# ix general issues
# GH 2817
data = {'amount': {0: 700, 1: 600, 2: 222, 3: 333, 4: 444},
'col': {0: 3.5, 1: 3.5, 2: 4.0, 3: 4.0, 4: 4.0},
'year': {0: 2012, 1: 2011, 2: 2012, 3: 2012, 4: 2012}}
df = DataFrame(data).set_index(keys=['col', 'year'])
key = 4.0, 2012
# emits a PerformanceWarning, ok
with tm.assert_produces_warning(PerformanceWarning):
tm.assert_frame_equal(df.loc[key], df.iloc[2:])
# this is ok
df.sort_index(inplace=True)
res = df.loc[key]
# col has float dtype, result should be Float64Index
index = MultiIndex.from_arrays([[4.] * 3, [2012] * 3],
names=['col', 'year'])
expected = DataFrame({'amount': [222, 333, 444]}, index=index)
tm.assert_frame_equal(res, expected)
def test_ix_assign_column_mixed(self):
# GH #1142
df = DataFrame(tm.getSeriesData())
df['foo'] = 'bar'
orig = df.loc[:, 'B'].copy()
df.loc[:, 'B'] = df.loc[:, 'B'] + 1
tm.assert_series_equal(df.B, orig + 1)
# GH 3668, mixed frame with series value
df = DataFrame({'x': lrange(10), 'y': lrange(10, 20), 'z': 'bar'})
expected = df.copy()
for i in range(5):
indexer = i * 2
v = 1000 + i * 200
expected.loc[indexer, 'y'] = v
assert expected.loc[indexer, 'y'] == v
df.loc[df.x % 2 == 0, 'y'] = df.loc[df.x % 2 == 0, 'y'] * 100
tm.assert_frame_equal(df, expected)
# GH 4508, making sure consistency of assignments
df = DataFrame({'a': [1, 2, 3], 'b': [0, 1, 2]})
df.loc[[0, 2, ], 'b'] = [100, -100]
expected = DataFrame({'a': [1, 2, 3], 'b': [100, 1, -100]})
tm.assert_frame_equal(df, expected)
df = pd.DataFrame({'a': lrange(4)})
df['b'] = np.nan
df.loc[[1, 3], 'b'] = [100, -100]
expected = DataFrame({'a': [0, 1, 2, 3],
'b': [np.nan, 100, np.nan, -100]})
tm.assert_frame_equal(df, expected)
# ok, but chained assignments are dangerous
        # if we turn off chained assignment it will work
with option_context('chained_assignment', None):
df = pd.DataFrame({'a': lrange(4)})
df['b'] = np.nan
df['b'].loc[[1, 3]] = [100, -100]
tm.assert_frame_equal(df, expected)
def test_ix_get_set_consistency(self):
# GH 4544
# ix/loc get/set not consistent when
# a mixed int/string index
df = DataFrame(np.arange(16).reshape((4, 4)),
columns=['a', 'b', 8, 'c'],
index=['e', 7, 'f', 'g'])
with catch_warnings(record=True):
assert df.ix['e', 8] == 2
assert df.loc['e', 8] == 2
with catch_warnings(record=True):
df.ix['e', 8] = 42
assert df.ix['e', 8] == 42
assert df.loc['e', 8] == 42
df.loc['e', 8] = 45
with catch_warnings(record=True):
assert df.ix['e', 8] == 45
assert df.loc['e', 8] == 45
def test_ix_slicing_strings(self):
# see gh-3836
data = {'Classification':
['SA EQUITY CFD', 'bbb', 'SA EQUITY', 'SA SSF', 'aaa'],
'Random': [1, 2, 3, 4, 5],
'X': ['correct', 'wrong', 'correct', 'correct', 'wrong']}
df = DataFrame(data)
x = df[~df.Classification.isin(['SA EQUITY CFD', 'SA EQUITY', 'SA SSF'
])]
with catch_warnings(record=True):
df.ix[x.index, 'X'] = df['Classification']
expected = DataFrame({'Classification': {0: 'SA EQUITY CFD',
1: 'bbb',
2: 'SA EQUITY',
3: 'SA SSF',
4: 'aaa'},
'Random': {0: 1,
1: 2,
2: 3,
3: 4,
4: 5},
'X': {0: 'correct',
1: 'bbb',
2: 'correct',
3: 'correct',
4: 'aaa'}}) # bug was 4: 'bbb'
tm.assert_frame_equal(df, expected)
def test_ix_setitem_out_of_bounds_axis_0(self):
df = pd.DataFrame(
np.random.randn(2, 5), index=["row%s" % i for i in range(2)],
columns=["col%s" % i for i in range(5)])
with catch_warnings(record=True):
pytest.raises(ValueError, df.ix.__setitem__, (2, 0), 100)
def test_ix_setitem_out_of_bounds_axis_1(self):
df = pd.DataFrame(
np.random.randn(5, 2), index=["row%s" % i for i in range(5)],
columns=["col%s" % i for i in range(2)])
with catch_warnings(record=True):
pytest.raises(ValueError, df.ix.__setitem__, (0, 2), 100)
def test_ix_empty_list_indexer_is_ok(self):
with catch_warnings(record=True):
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(5, 2)
# vertical empty
tm.assert_frame_equal(df.ix[:, []], df.iloc[:, :0],
check_index_type=True,
check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.ix[[], :], df.iloc[:0, :],
check_index_type=True,
check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.ix[[]], df.iloc[:0, :],
check_index_type=True,
check_column_type=True)
def test_ix_duplicate_returns_series(self):
df = DataFrame(np.random.randn(3, 3), index=[0.1, 0.2, 0.2],
columns=list('abc'))
with catch_warnings(record=True):
r = df.ix[0.2, 'a']
e = df.loc[0.2, 'a']
tm.assert_series_equal(r, e)
| bsd-3-clause |
kaushik94/tardis | tardis/io/parsers/blondin_toymodel.py | 1 | 4805 | import re
import yaml
import numpy as np
import pandas as pd
from astropy import units as u
from tardis.util.base import parse_quantity
PATTERN_REMOVE_BRACKET = re.compile(r"\[.+\]")
T0_PATTERN = re.compile("tend = (.+)\n")
def read_blondin_toymodel(fname):
"""
    Read the Blondin toy-model format and return a dictionary and a
    DataFrame
Parameters
----------
fname: str
path or filename to blondin toymodel
Returns
-------
blondin_dict: dict
dictionary containing most of the meta data of the model
blondin_csv: pandas.DataFrame
DataFrame containing the csv part of the toymodel
"""
with open(fname, "r") as fh:
for line in fh:
if line.startswith("#idx"):
break
else:
raise ValueError(
"File {0} does not conform to Toy Model format as it does "
"not contain #idx"
)
columns = [
PATTERN_REMOVE_BRACKET.sub("", item) for item in line[1:].split()
]
raw_blondin_csv = pd.read_csv(
fname, delim_whitespace=True, comment="#", header=None, names=columns
)
raw_blondin_csv.set_index("idx", inplace=True)
blondin_csv = raw_blondin_csv.loc[
:,
[
"vel",
"dens",
"temp",
"X_56Ni0",
"X_Ti",
"X_Ca",
"X_S",
"X_Si",
"X_O",
"X_C",
],
]
rename_col_dict = {
"vel": "velocity",
"dens": "density",
"temp": "t_electron",
}
rename_col_dict.update({item: item[2:] for item in blondin_csv.columns[3:]})
rename_col_dict["X_56Ni0"] = "Ni56"
blondin_csv.rename(columns=rename_col_dict, inplace=True)
blondin_csv.iloc[:, 3:] = blondin_csv.iloc[:, 3:].divide(
blondin_csv.iloc[:, 3:].sum(axis=1), axis=0
)
# changing velocities to outer boundary
new_velocities = 0.5 * (
blondin_csv.velocity.iloc[:-1].values
+ blondin_csv.velocity.iloc[1:].values
)
new_velocities = np.hstack(
(new_velocities, [2 * new_velocities[-1] - new_velocities[-2]])
)
blondin_csv["velocity"] = new_velocities
with open(fname, "r") as fh:
t0_string = T0_PATTERN.findall(fh.read())[0]
t0 = parse_quantity(t0_string.replace("DAYS", "day"))
blondin_dict = {}
blondin_dict["model_density_time_0"] = str(t0)
blondin_dict["description"] = "Converted {0} to csvy format".format(fname)
blondin_dict["tardis_model_config_version"] = "v1.0"
blondin_dict_fields = [
dict(
name="velocity",
unit="km/s",
desc="velocities of shell outer bounderies.",
)
]
blondin_dict_fields.append(
dict(name="density", unit="g/cm^3", desc="mean density of shell.")
)
blondin_dict_fields.append(
dict(name="t_electron", unit="K", desc="electron temperature.")
)
for abund in blondin_csv.columns[3:]:
blondin_dict_fields.append(
dict(name=abund, desc="Fraction {0} abundance".format(abund))
)
blondin_dict["datatype"] = {"fields": blondin_dict_fields}
return blondin_dict, blondin_csv
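# Added note (not in the upstream module): the velocity handling above turns
# cell-centre velocities into outer-boundary velocities by taking midpoints
# and extrapolating the last edge, e.g. for a hypothetical grid:
#
# >>> import numpy as np
# >>> v = np.array([1000., 2000., 3000.])
# >>> mid = 0.5 * (v[:-1] + v[1:])
# >>> np.hstack((mid, [2 * mid[-1] - mid[-2]]))       # doctest: +SKIP
# array([1500., 2500., 3500.])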
def convert_blondin_toymodel(
in_fname, out_fname, v_inner, v_outer, conversion_t_electron_rad=None
):
"""
Parameters
----------
in_fname: str
input toymodel file
out_fname: str
output csvy file
conversion_t_electron_rad: float or None
multiplicative conversion factor from t_electron to t_rad.
if `None` t_rad is not calculated
v_inner: float or astropy.unit.Quantity
inner boundary velocity. If float will be interpreted as km/s
v_outer: float or astropy.unit.Quantity
outer boundary velocity. If float will be interpreted as km/s
"""
blondin_dict, blondin_csv = read_blondin_toymodel(in_fname)
blondin_dict["v_inner_boundary"] = str(u.Quantity(v_inner, u.km / u.s))
blondin_dict["v_outer_boundary"] = str(u.Quantity(v_outer, u.km / u.s))
if conversion_t_electron_rad is not None:
blondin_dict["datatype"]["fields"].append(
{
"desc": "converted radiation temperature "
"using multiplicative factor={0}".format(
conversion_t_electron_rad
),
"name": "t_rad",
"unit": "K",
}
)
blondin_csv["t_rad"] = (
conversion_t_electron_rad * blondin_csv.t_electron
)
csvy_file = "---\n{0}\n---\n{1}".format(
yaml.dump(blondin_dict, default_flow_style=False),
blondin_csv.to_csv(index=False),
)
with open(out_fname, "w") as fh:
fh.write(csvy_file)
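# Added usage sketch (file names and values below are assumptions, not taken
# from the TARDIS docs):
#
# >>> convert_blondin_toymodel('snia_toy.dat', 'snia_toy.csvy',
# ...                          v_inner=9000, v_outer=25000,
# ...                          conversion_t_electron_rad=0.9)  # doctest: +SKIP
#
# This writes a csvy file whose YAML header records the boundary velocities
# and whose table gains a derived t_rad column when a conversion factor is
# given.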
| bsd-3-clause |
anmolgarg/plotbox | plotbox/utils.py | 1 | 3486 | import numpy as np
import pandas as pd
import datetime
import dateutil.parser as dp
def get_config():
import plotly
cred = plotly.tools.get_credentials_file()
config = plotly.tools.get_config_file()
return cred, config
def hide_input_cells():
from IPython.display import HTML
return HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide();
} else {
$('div.input').show();
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
The raw code for this IPython notebook is by default hidden for easier reading.
To toggle on/off the raw code, click <a href="javascript:code_toggle()">here</a>.''')
def df_to_arrays(df, cols):
'''function to easily convert data types
Takes a df and a list of column names, returns a list of arrays in same order
'''
arrays = [df[c] for c in cols]
return arrays
def arrays_to_df(arrays, names=None):
'''function to easily convert data types
Takes a list of arrays, returns a pandas dataframe in same order
'''
df = pd.DataFrame(arrays).T
if names:
df.columns = names
return df
def mreplace(s, dic):
for i, j in dic.iteritems():
s = s.replace(i, j)
return s
def extract_date(date_input, to_pandas=True):
'''
Converts any date input into a *datetime* object
Parameters
----------
date_input : int(s), float(s), or string(s)
Returns
-------
date : *datetime* object(s)
'''
if isinstance(date_input, (list, pd.core.series.Series, np.ndarray)):
return [extract_date(d, to_pandas=to_pandas) for d in date_input]
if isinstance(date_input, (int, float)):
if pd.isnull(date_input):
return np.nan
        date = from_unixtime(date_input)
    else:
        date = date_input
    if not isinstance(date, datetime.datetime):
        date = dp.parse(str(date), fuzzy=True)
if to_pandas:
date = pd.to_datetime(date)
return date
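# Added sketch (values are illustrative only): extract_date normalises scalar
# or sequence input to (pandas) datetimes, delegating numeric timestamps to
# from_unixtime below.
#
# >>> extract_date('4 Jul 2015')               # doctest: +SKIP
# Timestamp('2015-07-04 00:00:00')
# >>> extract_date(1435708800)                 # unix seconds; doctest: +SKIP
# Timestamp('2015-07-01 00:00:00')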
def from_unixtime(date_input):
'''
Given a unix timestamp in seconds, milliseconds, microseconds, or nanoseconds from 1-Jan-1970:
returns a datetime.datetime object.
    If the timestamp is not convertible to float, the input is returned as given
Parameters
----------
date_input : int, float, or string
Returns
-------
date : *datetime* object
'''
    try:
        timestamp = float(date_input)
        # count the integer digits to guess the unit of the timestamp
        digits = len(str(int(abs(timestamp))))
        if digits <= 5:
            # convert excel serial date to datetime
            base = datetime.datetime(1900, 1, 1)
            delta = datetime.timedelta(days=timestamp)
            date = base + delta
        else:
            base = datetime.datetime(1970, 1, 1)
            if (digits > 5) and (digits <= 10):
                # already in seconds
                timestamp_s = timestamp
            elif (digits > 10) and (digits <= 13):
                # convert from milliseconds
                timestamp_s = timestamp * 1e-3
            elif (digits > 13) and (digits <= 16):
                # convert from microseconds
                timestamp_s = timestamp * 1e-6
            elif (digits > 16) and (digits <= 19):
                # convert from nanoseconds
                timestamp_s = timestamp * 1e-9
            delta = datetime.timedelta(seconds=timestamp_s)
            date = base + delta
    except (TypeError, ValueError, OverflowError):
        # not convertible to a numeric timestamp: return the input as given
        date = date_input
    return date
| mit |
qalhata/Python-Scripts-Repo-on-Data-Science | DataTidy_Analysis2.py | 1 | 3330 | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 10 10:02:49 2017
@author: Shabaka
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# ' Data Reshape with melt ''#
# Print the head of airquality
print(airquality.head())
# Melt airquality: airquality_melt
airquality_melt = pd.melt(airquality, id_vars=['Month', 'Day'])
# Print the head of airquality_melt
print(airquality_melt.head())
# ''''Customise melted Data - Change var name & Val'''#
# Print the head of airquality
print(airquality.head())
# Melt airquality: airquality_melt
airquality_melt = pd.melt(airquality, id_vars=['Month', 'Day'],
var_name='measurement', value_name='reading')
# Print the head of airquality_melt
print(airquality_melt.head())
# ''' Pivoting Data from melt '''''''''#
# Print the head of airquality_melt
print(airquality_melt.head())
# Pivot airquality_melt: airquality_pivot
airquality_pivot = airquality_melt.pivot_table(index=['Month', 'Day'], columns='measurement', values='reading')
# Print the head of airquality_pivot
print(airquality_pivot.head())
#''''''''''''''''Reset data frame index''''''''''''#
# Print the index of airquality_pivot
print(airquality_pivot.index)
# Reset the index of airquality_pivot: airquality_pivot
airquality_pivot = airquality_pivot.reset_index()
# Print the new index of airquality_pivot
print(airquality_pivot.index)
# Print the head of airquality_pivot
print(airquality_pivot.head())
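# Added, self-contained sketch: `airquality` is never defined in this script,
# so a tiny synthetic frame is used here to show the melt -> pivot round trip.
#
# >>> mini = pd.DataFrame({'Month': [5, 5], 'Day': [1, 2],
# ...                      'Ozone': [41, 36], 'Wind': [7.4, 8.0]})
# >>> melted = pd.melt(mini, id_vars=['Month', 'Day'],
# ...                  var_name='measurement', value_name='reading')
# >>> melted.pivot_table(index=['Month', 'Day'], columns='measurement',
# ...                    values='reading').reset_index()  # doctest: +SKIP
# measurement  Month  Day  Ozone  Wind
# 0                5    1   41.0   7.4
# 1                5    2   36.0   8.0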
# ''''''' Pivoting Duplicate Values ''''''''''#
# Pivot airquality_dup: airquality_pivot
airquality_pivot = airquality_dup.pivot_table(index=['Month', 'Day'],
columns='measurement',
values='reading', aggfunc=np.mean)
# Reset the index of airquality_pivot
airquality_pivot = airquality_pivot.reset_index()
# Print the head of airquality_pivot
print(airquality_pivot.head())
# Print the head of airquality
print(airquality.head())
# ''''''''' Split column info using str '''''#
# Melt tb: tb_melt
tb_melt = pd.melt(frame=tb, id_vars=['country', 'year'])
# Create the 'gender' column
tb_melt['gender'] = tb_melt.variable.str[0]
# Create the 'age_group' column
tb_melt['age_group'] = tb_melt.variable.str[1:]
# '''''' Split a column with .split() and .get()
# Melt ebola: ebola_melt
ebola_melt = pd.melt(ebola, id_vars=['Date', 'Day'], var_name='type_country', value_name='counts')
# Create the 'str_split' column
ebola_melt['str_split'] = ebola_melt.type_country.str.split('_')
# Create the 'type' column
ebola_melt['type'] = ebola_melt.str_split.str.get(0)
# Create the 'country' column
ebola_melt['country'] = ebola_melt.str_split.str.get(1)
# Print the head of ebola_melt
print(ebola_melt.head())
# ''''Combining Rows of Data ''''''''''''#
# Concatenate uber1, uber2, and uber3: row_concat
row_concat = pd.concat([uber1, uber2, uber3])
# Print the shape of row_concat
print(row_concat.shape)
# Print the head of row_concat
print(row_concat.head())
# '''''''''''' Combining Columns of Data '''''''''''#
# Concatenate ebola_melt and status_country column-wise: ebola_tidy
ebola_tidy = pd.concat([ebola_melt, status_country], axis=1)
# Print the shape of ebola_tidy
print(ebola_tidy.shape)
# Print the head of ebola_tidy
print(ebola_tidy.head())
| gpl-3.0 |
Udzu/pudzu | dataviz/eulanguages.py | 1 | 4330 | from pudzu.charts import *
from pudzu.sandbox.bamboo import *
import seaborn as sns
df = pd.read_csv("datasets/eulanguages.csv").set_index("country")
# Map 0
cmap = sns.cubehelix_palette(start=.5, rot=-.75, as_cmap=True)
def colorfn0(c):
if c in ['Sea', 'Borders']: return "white"
elif c not in df.index: return "#AAAAAA"
return RGBA(cmap(df.trilingual[c] / 100))
def labelfn0(c, w, h):
if c not in df.index: return None
return Image.from_text_bounded("{}%".format(df.trilingual[c]), (w, h), 24, papply(arial, bold=True), fg="black", padding=(0,0,0,2))
map0 = map_chart("maps/Europe.png", colorfn0, labelfn0)
gradient0 = Image.from_gradient(cmap, (40, 200), direction=(0,-1)).add_grid((1,5))
legend0 = generate_legend([gradient0], [["{}%".format(i*20) for i in reversed(range(6))]], header="Trilingualism", footer="percentage of the population that can converse in at least 2 foreign languages.", max_width=150)
chart0 = map0.place(legend0, align=(1,0), padding=10)
tchart0 = Image.from_column([Image.from_text("Prevalence of trilingualism".upper(), arial(48, bold=True)), chart0], bg="white", padding=5)
# Map 1
categories = ["English", "French", "Russian", "German", "Spanish"]
othercol = "#808080"
@artial(ignoring_exceptions, othercol)
def catcol(c): return VegaPalette10[categories.index(c.title())]
def colorfn1(c):
if c in ['Sea', 'Borders']: return "white"
elif c not in df.index: return "#AAAAAA"
return Diamond(8, catcol(df.language2[c]), catcol(df.language1[c])).pad(2, catcol(df.language1[c]))
map1 = map_chart("maps/Europe.png", colorfn1)
others = Image.from_column([Image.from_text("Other", arial(16)),
Image.from_text("\n".join([
"Czech: Slovakian",
"Ireland: Irish",
"Malta: Italian",
"Slovenia: Croatian",
"Slovakia: Czech"
]), arial(10), padding=(0,2))], xalign=0)
legend1 = generate_legend(VegaPalette10[:len(categories)] + [othercol],
categories + [others],
[(40,40)] * len(categories) + [(40,...)],
header="Languages", footer="Background colour shows most spoken foreign language; diamonds show second most.", max_width=150)
chart1 = map1.place(legend1, align=(1,0), padding=10)
tchart1 = Image.from_column([Image.from_text("Most spoken foreign languages".upper(), arial(48, bold=True)), chart1], bg="white", padding=5)
# Map 2
categories2 = [ "holiday", "films", "friends", "internet", "work" ]
descriptions2 = [ "on holiday", "films, TV, etc", "with friends", "on the internet", "at work" ]
def colorfn2(c):
if c in ['Sea', 'Borders']: return "white"
elif c not in df.index: return "#aaaaaa"
return VegaPalette10[categories2.index(df.use[c])]
map2 = map_chart("maps/Europe.png", colorfn2)
legend2 = generate_legend(VegaPalette10[:len(categories2)], descriptions2, header="Most common use")
chart2 = map2.place(legend2, align=(1,0), padding=10)
tchart2 = Image.from_column([Image.from_text("Most common foreign language use".upper(), arial(48, bold=True)), chart2], bg="white", padding=5)
# Map 3
categories3 = [ "work", "work abroad", "holiday" ]
descriptions3 = [ "work", "working abroad", "holidays" ]
def colorfn3(c):
if c in ['Sea', 'Borders']: return "white"
elif c not in df.index: return "#aaaaaa"
return VegaPalette10[categories3.index(df.advantage[c])]
map3 = map_chart("maps/Europe.png", colorfn3)
legend3 = generate_legend(VegaPalette10[:len(categories3)], descriptions3, header="Main advantage")
chart3 = map3.place(legend3, align=(1,0), padding=10)
tchart3 = Image.from_column([Image.from_text("Biggest perceived language advantage".upper(), arial(48, bold=True)), chart3], bg="white", padding=5)
# Put them all together
chart = Image.from_array([[tchart0, tchart1], [tchart2, tchart3]])
img = Image.from_column([Image.from_text("Parlez-Sie English, amigo?".upper(), arial(96, bold=True)),
Image.from_text("EU foreign language use according to Special Eurobarometer 386 (2012)", arial(60, italics=True)),
chart], padding=10, bg="white")
img.place(Image.from_text("/u/Udzu", font("arial", 16), fg="black", bg="white", padding=5).pad((1,1,0,0), "black"), align=1, padding=10, copy=False)
img = img.resize_fixed_aspect(scale=0.5)
img.save("output/eulanguages.png")
| mit |
winklerand/pandas | pandas/core/panel.py | 1 | 57627 | """
Contains data structures designed for manipulating panel (3-dimensional) data
"""
# pylint: disable=E1103,W0231,W0212,W0621
from __future__ import division
import numpy as np
import warnings
from pandas.core.dtypes.cast import (
infer_dtype_from_scalar,
cast_scalar_to_array,
maybe_cast_item)
from pandas.core.dtypes.common import (
is_integer, is_list_like,
is_string_like, is_scalar)
from pandas.core.dtypes.missing import notna
import pandas.core.ops as ops
import pandas.core.missing as missing
from pandas import compat
from pandas.compat import (map, zip, range, u, OrderedDict)
from pandas.compat.numpy import function as nv
from pandas.core.common import (_try_sort, _default_index, _all_not_none,
_any_not_none, _apply_if_callable)
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.index import (Index, MultiIndex, _ensure_index,
_get_objs_combined_axis)
from pandas.io.formats.printing import pprint_thing
from pandas.core.indexing import maybe_droplevels
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays,
create_block_manager_from_blocks)
from pandas.core.ops import _op_descriptions
from pandas.core.series import Series
from pandas.core.reshape.util import cartesian_product
from pandas.util._decorators import (deprecate, Appender)
from pandas.util._validators import validate_axis_style_args
_shared_doc_kwargs = dict(
axes='items, major_axis, minor_axis',
klass="Panel",
axes_single_arg="{0, 1, 2, 'items', 'major_axis', 'minor_axis'}",
optional_mapper='', optional_axis='', optional_labels='')
_shared_doc_kwargs['args_transpose'] = (
"three positional arguments: each one of\n{ax_single}".format(
ax_single=_shared_doc_kwargs['axes_single_arg']))
def _ensure_like_indices(time, panels):
"""
Makes sure that time and panels are conformable
"""
n_time = len(time)
n_panel = len(panels)
u_panels = np.unique(panels) # this sorts!
u_time = np.unique(time)
if len(u_time) == n_time:
time = np.tile(u_time, len(u_panels))
if len(u_panels) == n_panel:
panels = np.repeat(u_panels, len(u_time))
return time, panels
def panel_index(time, panels, names=None):
"""
Returns a multi-index suitable for a panel-like DataFrame
Parameters
----------
time : array-like
Time index, does not have to repeat
panels : array-like
Panel index, does not have to repeat
names : list, optional
List containing the names of the indices
Returns
-------
multi_index : MultiIndex
Time index is the first level, the panels are the second level.
Examples
--------
>>> years = range(1960,1963)
>>> panels = ['A', 'B', 'C']
>>> panel_idx = panel_index(years, panels)
>>> panel_idx
MultiIndex([(1960, 'A'), (1961, 'A'), (1962, 'A'), (1960, 'B'),
(1961, 'B'), (1962, 'B'), (1960, 'C'), (1961, 'C'),
(1962, 'C')], dtype=object)
or
>>> import numpy as np
>>> years = np.repeat(range(1960,1963), 3)
>>> panels = np.tile(['A', 'B', 'C'], 3)
>>> panel_idx = panel_index(years, panels)
>>> panel_idx
MultiIndex([(1960, 'A'), (1960, 'B'), (1960, 'C'), (1961, 'A'),
(1961, 'B'), (1961, 'C'), (1962, 'A'), (1962, 'B'),
(1962, 'C')], dtype=object)
"""
if names is None:
names = ['time', 'panel']
time, panels = _ensure_like_indices(time, panels)
return MultiIndex.from_arrays([time, panels], sortorder=None, names=names)
class Panel(NDFrame):
"""
Represents wide format panel data, stored as 3-dimensional array
Parameters
----------
data : ndarray (items x major x minor), or dict of DataFrames
items : Index or array-like
axis=0
major_axis : Index or array-like
axis=1
minor_axis : Index or array-like
axis=2
dtype : dtype, default None
Data type to force, otherwise infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
"""
@property
def _constructor(self):
return type(self)
_constructor_sliced = DataFrame
def __init__(self, data=None, items=None, major_axis=None, minor_axis=None,
copy=False, dtype=None):
# deprecation GH13563
warnings.warn("\nPanel is deprecated and will be removed in a "
"future version.\nThe recommended way to represent "
"these types of 3-dimensional data are with a "
"MultiIndex on a DataFrame, via the "
"Panel.to_frame() method\n"
"Alternatively, you can use the xarray package "
"http://xarray.pydata.org/en/stable/.\n"
"Pandas provides a `.to_xarray()` method to help "
"automate this conversion.\n",
DeprecationWarning, stacklevel=3)
self._init_data(data=data, items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=copy, dtype=dtype)
def _init_data(self, data, copy, dtype, **kwargs):
"""
Generate ND initialization; axes are passed
as required objects to __init__
"""
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
passed_axes = [kwargs.pop(a, None) for a in self._AXIS_ORDERS]
if kwargs:
raise TypeError('_init_data() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
axes = None
if isinstance(data, BlockManager):
if _any_not_none(*passed_axes):
axes = [x if x is not None else y
for x, y in zip(passed_axes, data.axes)]
mgr = data
elif isinstance(data, dict):
mgr = self._init_dict(data, passed_axes, dtype=dtype)
copy = False
dtype = None
elif isinstance(data, (np.ndarray, list)):
mgr = self._init_matrix(data, passed_axes, dtype=dtype, copy=copy)
copy = False
dtype = None
elif is_scalar(data) and _all_not_none(*passed_axes):
values = cast_scalar_to_array([len(x) for x in passed_axes],
data, dtype=dtype)
mgr = self._init_matrix(values, passed_axes, dtype=values.dtype,
copy=False)
copy = False
else: # pragma: no cover
raise ValueError('Panel constructor not properly called!')
NDFrame.__init__(self, mgr, axes=axes, copy=copy, dtype=dtype)
def _init_dict(self, data, axes, dtype=None):
haxis = axes.pop(self._info_axis_number)
# prefilter if haxis passed
if haxis is not None:
haxis = _ensure_index(haxis)
data = OrderedDict((k, v)
for k, v in compat.iteritems(data)
if k in haxis)
else:
ks = list(data.keys())
if not isinstance(data, OrderedDict):
ks = _try_sort(ks)
haxis = Index(ks)
for k, v in compat.iteritems(data):
if isinstance(v, dict):
data[k] = self._constructor_sliced(v)
# extract axis for remaining axes & create the slicemap
raxes = [self._extract_axis(self, data, axis=i) if a is None else a
for i, a in enumerate(axes)]
raxes_sm = self._extract_axes_for_slice(self, raxes)
# shallow copy
arrays = []
haxis_shape = [len(a) for a in raxes]
for h in haxis:
v = values = data.get(h)
if v is None:
values = np.empty(haxis_shape, dtype=dtype)
values.fill(np.nan)
elif isinstance(v, self._constructor_sliced):
d = raxes_sm.copy()
d['copy'] = False
v = v.reindex(**d)
if dtype is not None:
v = v.astype(dtype)
values = v.values
arrays.append(values)
return self._init_arrays(arrays, haxis, [haxis] + raxes)
def _init_arrays(self, arrays, arr_names, axes):
return create_block_manager_from_arrays(arrays, arr_names, axes)
@classmethod
def from_dict(cls, data, intersect=False, orient='items', dtype=None):
"""
Construct Panel from dict of DataFrame objects
Parameters
----------
data : dict
{field : DataFrame}
intersect : boolean
Intersect indexes of input DataFrames
orient : {'items', 'minor'}, default 'items'
The "orientation" of the data. If the keys of the passed dict
should be the items of the result panel, pass 'items'
(default). Otherwise if the columns of the values of the passed
DataFrame objects should be the items (which in the case of
mixed-dtype data you should do), instead pass 'minor'
dtype : dtype, default None
Data type to force, otherwise infer
Returns
-------
Panel
"""
from collections import defaultdict
orient = orient.lower()
if orient == 'minor':
new_data = defaultdict(OrderedDict)
for col, df in compat.iteritems(data):
for item, s in compat.iteritems(df):
new_data[item][col] = s
data = new_data
elif orient != 'items': # pragma: no cover
raise ValueError('Orientation must be one of {items, minor}.')
d = cls._homogenize_dict(cls, data, intersect=intersect, dtype=dtype)
ks = list(d['data'].keys())
if not isinstance(d['data'], OrderedDict):
ks = list(sorted(ks))
d[cls._info_axis_name] = Index(ks)
return cls(**d)
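    # Added sketch (not part of the upstream docstring): building a Panel from
    # a dict of equally shaped DataFrames.
    #
    # >>> import numpy as np
    # >>> import pandas as pd
    # >>> data = {'ItemA': pd.DataFrame(np.random.randn(4, 3)),
    # ...         'ItemB': pd.DataFrame(np.random.randn(4, 3))}
    # >>> wp = pd.Panel.from_dict(data, orient='items')  # doctest: +SKIP
    # >>> wp.shape
    # (2, 4, 3)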
def __getitem__(self, key):
key = _apply_if_callable(key, self)
if isinstance(self._info_axis, MultiIndex):
return self._getitem_multilevel(key)
if not (is_list_like(key) or isinstance(key, slice)):
return super(Panel, self).__getitem__(key)
return self.loc[key]
def _getitem_multilevel(self, key):
info = self._info_axis
loc = info.get_loc(key)
if isinstance(loc, (slice, np.ndarray)):
new_index = info[loc]
result_index = maybe_droplevels(new_index, key)
slices = [loc] + [slice(None) for x in range(self._AXIS_LEN - 1)]
new_values = self.values[slices]
d = self._construct_axes_dict(self._AXIS_ORDERS[1:])
d[self._info_axis_name] = result_index
result = self._constructor(new_values, **d)
return result
else:
return self._get_item_cache(key)
def _init_matrix(self, data, axes, dtype=None, copy=False):
values = self._prep_ndarray(self, data, copy=copy)
if dtype is not None:
try:
values = values.astype(dtype)
except Exception:
raise ValueError('failed to cast to '
'{datatype}'.format(datatype=dtype))
shape = values.shape
fixed_axes = []
for i, ax in enumerate(axes):
if ax is None:
ax = _default_index(shape[i])
else:
ax = _ensure_index(ax)
fixed_axes.append(ax)
return create_block_manager_from_blocks([values], fixed_axes)
# ----------------------------------------------------------------------
# Comparison methods
def _compare_constructor(self, other, func, try_cast=True):
if not self._indexed_same(other):
raise Exception('Can only compare identically-labeled '
'same type objects')
new_data = {}
for col in self._info_axis:
new_data[col] = func(self[col], other[col])
d = self._construct_axes_dict(copy=False)
return self._constructor(data=new_data, **d)
# ----------------------------------------------------------------------
# Magic methods
def __unicode__(self):
"""
Return a string representation for a particular Panel
Invoked by unicode(df) in py2 only.
Yields a Unicode String in both py2/py3.
"""
class_name = str(self.__class__)
dims = u('Dimensions: {dimensions}'.format(dimensions=' x '.join(
["{shape} ({axis})".format(shape=shape, axis=axis) for axis, shape
in zip(self._AXIS_ORDERS, self.shape)])))
def axis_pretty(a):
v = getattr(self, a)
if len(v) > 0:
return u('{ax} axis: {x} to {y}'.format(ax=a.capitalize(),
x=pprint_thing(v[0]),
y=pprint_thing(v[-1])))
else:
return u('{ax} axis: None'.format(ax=a.capitalize()))
output = '\n'.join(
[class_name, dims] + [axis_pretty(a) for a in self._AXIS_ORDERS])
return output
def _get_plane_axes_index(self, axis):
"""
Get my plane axes indexes: these are already
(as compared with higher level planes),
as we are returning a DataFrame axes indexes
"""
axis_name = self._get_axis_name(axis)
if axis_name == 'major_axis':
index = 'minor_axis'
columns = 'items'
        elif axis_name == 'minor_axis':
index = 'major_axis'
columns = 'items'
elif axis_name == 'items':
index = 'major_axis'
columns = 'minor_axis'
return index, columns
def _get_plane_axes(self, axis):
"""
Get my plane axes indexes: these are already
(as compared with higher level planes),
as we are returning a DataFrame axes
"""
return [self._get_axis(axi)
for axi in self._get_plane_axes_index(axis)]
fromDict = from_dict
def to_sparse(self, *args, **kwargs):
"""
NOT IMPLEMENTED: do not call this method, as sparsifying is not
supported for Panel objects and will raise an error.
Convert to SparsePanel
"""
raise NotImplementedError("sparsifying is not supported "
"for Panel objects")
def to_excel(self, path, na_rep='', engine=None, **kwargs):
"""
Write each DataFrame in Panel to a separate excel sheet
Parameters
----------
path : string or ExcelWriter object
File path or existing ExcelWriter
na_rep : string, default ''
Missing data representation
engine : string, default None
write engine to use - you can also set this via the options
``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
Other Parameters
----------------
float_format : string, default None
Format string for floating point numbers
cols : sequence, optional
Columns to write
header : boolean or list of string, default True
Write out column names. If a list of string is given it is
assumed to be aliases for the column names
index : boolean, default True
Write row names (index)
index_label : string or sequence, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : upper left cell row to dump data frame
startcol : upper left cell column to dump data frame
Notes
-----
Keyword arguments (and na_rep) are passed to the ``to_excel`` method
for each DataFrame written.
"""
from pandas.io.excel import ExcelWriter
if isinstance(path, compat.string_types):
writer = ExcelWriter(path, engine=engine)
else:
writer = path
kwargs['na_rep'] = na_rep
for item, df in self.iteritems():
name = str(item)
df.to_excel(writer, name, **kwargs)
writer.save()
def as_matrix(self):
self._consolidate_inplace()
return self._data.as_array()
# ----------------------------------------------------------------------
# Getting and setting elements
def get_value(self, *args, **kwargs):
"""
Quickly retrieve single value at (item, major, minor) location
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
item : item label (panel item)
major : major axis label (panel item row)
minor : minor axis label (panel item column)
takeable : interpret the passed labels as indexers, default False
Returns
-------
value : scalar value
"""
warnings.warn("get_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._get_value(*args, **kwargs)
def _get_value(self, *args, **kwargs):
nargs = len(args)
nreq = self._AXIS_LEN
# require an arg for each axis
if nargs != nreq:
raise TypeError('There must be an argument for each axis, you gave'
' {0} args, but {1} are required'.format(nargs,
nreq))
takeable = kwargs.pop('takeable', None)
if kwargs:
raise TypeError('get_value() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
if takeable is True:
lower = self._iget_item_cache(args[0])
else:
lower = self._get_item_cache(args[0])
return lower._get_value(*args[1:], takeable=takeable)
_get_value.__doc__ = get_value.__doc__
def set_value(self, *args, **kwargs):
"""
Quickly set single value at (item, major, minor) location
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
item : item label (panel item)
major : major axis label (panel item row)
minor : minor axis label (panel item column)
value : scalar
takeable : interpret the passed labels as indexers, default False
Returns
-------
panel : Panel
If label combo is contained, will be reference to calling Panel,
otherwise a new object
"""
warnings.warn("set_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._set_value(*args, **kwargs)
def _set_value(self, *args, **kwargs):
# require an arg for each axis and the value
nargs = len(args)
nreq = self._AXIS_LEN + 1
if nargs != nreq:
raise TypeError('There must be an argument for each axis plus the '
'value provided, you gave {0} args, but {1} are '
'required'.format(nargs, nreq))
takeable = kwargs.pop('takeable', None)
if kwargs:
raise TypeError('set_value() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
try:
if takeable is True:
lower = self._iget_item_cache(args[0])
else:
lower = self._get_item_cache(args[0])
lower._set_value(*args[1:], takeable=takeable)
return self
except KeyError:
axes = self._expand_axes(args)
d = self._construct_axes_dict_from(self, axes, copy=False)
result = self.reindex(**d)
args = list(args)
likely_dtype, args[-1] = infer_dtype_from_scalar(args[-1])
made_bigger = not np.array_equal(axes[0], self._info_axis)
# how to make this logic simpler?
if made_bigger:
maybe_cast_item(result, args[0], likely_dtype)
return result._set_value(*args)
_set_value.__doc__ = set_value.__doc__
def _box_item_values(self, key, values):
if self.ndim == values.ndim:
result = self._constructor(values)
# a dup selection will yield a full ndim
if result._get_axis(0).is_unique:
result = result[key]
return result
d = self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:])
return self._constructor_sliced(values, **d)
def __setitem__(self, key, value):
key = _apply_if_callable(key, self)
shape = tuple(self.shape)
if isinstance(value, self._constructor_sliced):
value = value.reindex(
**self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:]))
mat = value.values
elif isinstance(value, np.ndarray):
if value.shape != shape[1:]:
raise ValueError('shape of value must be {0}, shape of given '
'object was {1}'.format(
shape[1:], tuple(map(int, value.shape))))
mat = np.asarray(value)
elif is_scalar(value):
mat = cast_scalar_to_array(shape[1:], value)
else:
raise TypeError('Cannot set item of '
'type: {dtype!s}'.format(dtype=type(value)))
mat = mat.reshape(tuple([1]) + shape[1:])
NDFrame._set_item(self, key, mat)
def _unpickle_panel_compat(self, state): # pragma: no cover
"Unpickle the panel"
from pandas.io.pickle import _unpickle_array
_unpickle = _unpickle_array
vals, items, major, minor = state
items = _unpickle(items)
major = _unpickle(major)
minor = _unpickle(minor)
values = _unpickle(vals)
wp = Panel(values, items, major, minor)
self._data = wp._data
def conform(self, frame, axis='items'):
"""
Conform input DataFrame to align with chosen axis pair.
Parameters
----------
frame : DataFrame
axis : {'items', 'major', 'minor'}
Axis the input corresponds to. E.g., if axis='major', then
the frame's columns would be items, and the index would be
values of the minor axis
Returns
-------
DataFrame
"""
axes = self._get_plane_axes(axis)
return frame.reindex(**self._extract_axes_for_slice(self, axes))
def head(self, n=5):
raise NotImplementedError
def tail(self, n=5):
raise NotImplementedError
def round(self, decimals=0, *args, **kwargs):
"""
Round each value in Panel to a specified number of decimal places.
.. versionadded:: 0.18.0
Parameters
----------
decimals : int
Number of decimal places to round to (default: 0).
If decimals is negative, it specifies the number of
positions to the left of the decimal point.
Returns
-------
Panel object
See Also
--------
numpy.around
"""
nv.validate_round(args, kwargs)
if is_integer(decimals):
result = np.apply_along_axis(np.round, 0, self.values)
return self._wrap_result(result, axis=0)
raise TypeError("decimals must be an integer")
def _needs_reindex_multi(self, axes, method, level):
""" don't allow a multi reindex on Panel or above ndim """
return False
def align(self, other, **kwargs):
raise NotImplementedError
def dropna(self, axis=0, how='any', inplace=False):
"""
Drop 2D from panel, holding passed axis constant
Parameters
----------
axis : int, default 0
Axis to hold constant. E.g. axis=1 will drop major_axis entries
having a certain amount of NA data
how : {'all', 'any'}, default 'any'
'any': one or more values are NA in the DataFrame along the
axis. For 'all' they all must be.
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
dropped : Panel
"""
axis = self._get_axis_number(axis)
values = self.values
mask = notna(values)
for ax in reversed(sorted(set(range(self._AXIS_LEN)) - set([axis]))):
mask = mask.sum(ax)
per_slice = np.prod(values.shape[:axis] + values.shape[axis + 1:])
if how == 'all':
cond = mask > 0
else:
cond = mask == per_slice
new_ax = self._get_axis(axis)[cond]
result = self.reindex_axis(new_ax, axis=axis)
if inplace:
self._update_inplace(result)
else:
return result
def _combine(self, other, func, axis=0):
if isinstance(other, Panel):
return self._combine_panel(other, func)
elif isinstance(other, DataFrame):
return self._combine_frame(other, func, axis=axis)
elif is_scalar(other):
return self._combine_const(other, func)
else:
raise NotImplementedError(
"{otype!s} is not supported in combine operation with "
"{selftype!s}".format(otype=type(other), selftype=type(self)))
def _combine_const(self, other, func, try_cast=True):
with np.errstate(all='ignore'):
new_values = func(self.values, other)
d = self._construct_axes_dict()
return self._constructor(new_values, **d)
def _combine_frame(self, other, func, axis=0, try_cast=True):
index, columns = self._get_plane_axes(axis)
axis = self._get_axis_number(axis)
other = other.reindex(index=index, columns=columns)
with np.errstate(all='ignore'):
if axis == 0:
new_values = func(self.values, other.values)
elif axis == 1:
new_values = func(self.values.swapaxes(0, 1), other.values.T)
new_values = new_values.swapaxes(0, 1)
elif axis == 2:
new_values = func(self.values.swapaxes(0, 2), other.values)
new_values = new_values.swapaxes(0, 2)
return self._constructor(new_values, self.items, self.major_axis,
self.minor_axis)
def _combine_panel(self, other, func, try_cast=True):
items = self.items.union(other.items)
major = self.major_axis.union(other.major_axis)
minor = self.minor_axis.union(other.minor_axis)
# could check that everything's the same size, but forget it
this = self.reindex(items=items, major=major, minor=minor)
other = other.reindex(items=items, major=major, minor=minor)
with np.errstate(all='ignore'):
result_values = func(this.values, other.values)
return self._constructor(result_values, items, major, minor)
def major_xs(self, key):
"""
Return slice of panel along major axis
Parameters
----------
key : object
Major axis label
Returns
-------
y : DataFrame
index -> minor axis, columns -> items
Notes
-----
major_xs is only for getting, not setting values.
MultiIndex Slicers is a generic way to get/set values on any level or
levels and is a superset of major_xs functionality, see
:ref:`MultiIndex Slicers <advanced.mi_slicers>`
"""
return self.xs(key, axis=self._AXIS_LEN - 2)
def minor_xs(self, key):
"""
Return slice of panel along minor axis
Parameters
----------
key : object
Minor axis label
Returns
-------
y : DataFrame
index -> major axis, columns -> items
Notes
-----
minor_xs is only for getting, not setting values.
MultiIndex Slicers is a generic way to get/set values on any level or
levels and is a superset of minor_xs functionality, see
:ref:`MultiIndex Slicers <advanced.mi_slicers>`
"""
return self.xs(key, axis=self._AXIS_LEN - 1)
def xs(self, key, axis=1):
"""
Return slice of panel along selected axis
Parameters
----------
key : object
Label
        axis : {'items', 'major', 'minor'}, default 1/'major'
Returns
-------
y : ndim(self)-1
Notes
-----
xs is only for getting, not setting values.
MultiIndex Slicers is a generic way to get/set values on any level or
levels and is a superset of xs functionality, see
:ref:`MultiIndex Slicers <advanced.mi_slicers>`
"""
axis = self._get_axis_number(axis)
if axis == 0:
return self[key]
self._consolidate_inplace()
axis_number = self._get_axis_number(axis)
new_data = self._data.xs(key, axis=axis_number, copy=False)
result = self._construct_return_type(new_data)
copy = new_data.is_mixed_type
result._set_is_copy(self, copy=copy)
return result
_xs = xs
def _ixs(self, i, axis=0):
"""
i : int, slice, or sequence of integers
axis : int
"""
ax = self._get_axis(axis)
key = ax[i]
# xs cannot handle a non-scalar key, so just reindex here
# if we have a multi-index and a single tuple, then its a reduction
# (GH 7516)
if not (isinstance(ax, MultiIndex) and isinstance(key, tuple)):
if is_list_like(key):
indexer = {self._get_axis_name(axis): key}
return self.reindex(**indexer)
# a reduction
if axis == 0:
values = self._data.iget(i)
return self._box_item_values(key, values)
# xs by position
self._consolidate_inplace()
new_data = self._data.xs(i, axis=axis, copy=True, takeable=True)
return self._construct_return_type(new_data)
def groupby(self, function, axis='major'):
"""
Group data on given axis, returning GroupBy object
Parameters
----------
function : callable
Mapping function for chosen access
axis : {'major', 'minor', 'items'}, default 'major'
Returns
-------
grouped : PanelGroupBy
"""
from pandas.core.groupby import PanelGroupBy
axis = self._get_axis_number(axis)
return PanelGroupBy(self, function, axis=axis)
def to_frame(self, filter_observations=True):
"""
Transform wide format into long (stacked) format as DataFrame whose
columns are the Panel's items and whose index is a MultiIndex formed
of the Panel's major and minor axes.
Parameters
----------
filter_observations : boolean, default True
Drop (major, minor) pairs without a complete set of observations
across all the items
Returns
-------
y : DataFrame
"""
_, N, K = self.shape
if filter_observations:
# shaped like the return DataFrame
mask = notna(self.values).all(axis=0)
# size = mask.sum()
selector = mask.ravel()
else:
# size = N * K
selector = slice(None, None)
data = {}
for item in self.items:
data[item] = self[item].values.ravel()[selector]
def construct_multi_parts(idx, n_repeat, n_shuffle=1):
axis_idx = idx.to_hierarchical(n_repeat, n_shuffle)
labels = [x[selector] for x in axis_idx.labels]
levels = axis_idx.levels
names = axis_idx.names
return labels, levels, names
def construct_index_parts(idx, major=True):
levels = [idx]
if major:
labels = [np.arange(N).repeat(K)[selector]]
names = idx.name or 'major'
else:
labels = np.arange(K).reshape(1, K)[np.zeros(N, dtype=int)]
labels = [labels.ravel()[selector]]
names = idx.name or 'minor'
names = [names]
return labels, levels, names
if isinstance(self.major_axis, MultiIndex):
major_labels, major_levels, major_names = construct_multi_parts(
self.major_axis, n_repeat=K)
else:
major_labels, major_levels, major_names = construct_index_parts(
self.major_axis)
if isinstance(self.minor_axis, MultiIndex):
minor_labels, minor_levels, minor_names = construct_multi_parts(
self.minor_axis, n_repeat=N, n_shuffle=K)
else:
minor_labels, minor_levels, minor_names = construct_index_parts(
self.minor_axis, major=False)
levels = major_levels + minor_levels
labels = major_labels + minor_labels
names = major_names + minor_names
index = MultiIndex(levels=levels, labels=labels, names=names,
verify_integrity=False)
return DataFrame(data, index=index, columns=self.items)
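    # Hedged usage sketch (assumed example data): stack a Panel into a long
    # DataFrame indexed by (major, minor) with one column per item:
    #   wp = pd.Panel(np.random.randn(2, 5, 4), items=['A', 'B'])
    #   long_df = wp.to_frame()   # MultiIndex (major, minor) x items
    #   long_df.to_panel()        # round-trips back to a Panel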
to_long = deprecate('to_long', to_frame)
toLong = deprecate('toLong', to_frame)
def apply(self, func, axis='major', **kwargs):
"""
Applies function along axis (or axes) of the Panel
Parameters
----------
func : function
Function to apply to each combination of 'other' axes
e.g. if axis = 'items', the combination of major_axis/minor_axis
will each be passed as a Series; if axis = ('items', 'major'),
DataFrames of items & major axis will be passed
axis : {'items', 'minor', 'major'}, or {0, 1, 2}, or a tuple with two
axes
Additional keyword arguments will be passed as keywords to the function
Examples
--------
Returns a Panel with the square root of each element
>>> p = pd.Panel(np.random.rand(4,3,2))
>>> p.apply(np.sqrt)
Equivalent to p.sum(1), returning a DataFrame
>>> p.apply(lambda x: x.sum(), axis=1)
Equivalent to previous:
>>> p.apply(lambda x: x.sum(), axis='minor')
Return the shapes of each DataFrame over axis 2 (i.e the shapes of
items x major), as a Series
>>> p.apply(lambda x: x.shape, axis=(0,1))
Returns
-------
result : Panel, DataFrame, or Series
"""
if kwargs and not isinstance(func, np.ufunc):
f = lambda x: func(x, **kwargs)
else:
f = func
# 2d-slabs
if isinstance(axis, (tuple, list)) and len(axis) == 2:
return self._apply_2d(f, axis=axis)
axis = self._get_axis_number(axis)
# try ufunc like
if isinstance(f, np.ufunc):
try:
with np.errstate(all='ignore'):
result = np.apply_along_axis(func, axis, self.values)
return self._wrap_result(result, axis=axis)
except (AttributeError):
pass
# 1d
return self._apply_1d(f, axis=axis)
def _apply_1d(self, func, axis):
axis_name = self._get_axis_name(axis)
ndim = self.ndim
values = self.values
# iter thru the axes
slice_axis = self._get_axis(axis)
slice_indexer = [0] * (ndim - 1)
indexer = np.zeros(ndim, 'O')
indlist = list(range(ndim))
indlist.remove(axis)
indexer[axis] = slice(None, None)
indexer.put(indlist, slice_indexer)
planes = [self._get_axis(axi) for axi in indlist]
shape = np.array(self.shape).take(indlist)
# all the iteration points
points = cartesian_product(planes)
results = []
for i in range(np.prod(shape)):
# construct the object
pts = tuple(p[i] for p in points)
indexer.put(indlist, slice_indexer)
obj = Series(values[tuple(indexer)], index=slice_axis, name=pts)
result = func(obj)
results.append(result)
# increment the indexer
slice_indexer[-1] += 1
n = -1
while (slice_indexer[n] >= shape[n]) and (n > (1 - ndim)):
slice_indexer[n - 1] += 1
slice_indexer[n] = 0
n -= 1
# empty object
if not len(results):
return self._constructor(**self._construct_axes_dict())
# same ndim as current
if isinstance(results[0], Series):
arr = np.vstack([r.values for r in results])
arr = arr.T.reshape(tuple([len(slice_axis)] + list(shape)))
tranp = np.array([axis] + indlist).argsort()
arr = arr.transpose(tuple(list(tranp)))
return self._constructor(arr, **self._construct_axes_dict())
# ndim-1 shape
results = np.array(results).reshape(shape)
if results.ndim == 2 and axis_name != self._info_axis_name:
results = results.T
planes = planes[::-1]
return self._construct_return_type(results, planes)
def _apply_2d(self, func, axis):
""" handle 2-d slices, equiv to iterating over the other axis """
ndim = self.ndim
axis = [self._get_axis_number(a) for a in axis]
# construct slabs, in 2-d this is a DataFrame result
indexer_axis = list(range(ndim))
for a in axis:
indexer_axis.remove(a)
indexer_axis = indexer_axis[0]
slicer = [slice(None, None)] * ndim
ax = self._get_axis(indexer_axis)
results = []
for i, e in enumerate(ax):
slicer[indexer_axis] = i
sliced = self.iloc[tuple(slicer)]
obj = func(sliced)
results.append((e, obj))
return self._construct_return_type(dict(results))
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
if numeric_only:
raise NotImplementedError('Panel.{0} does not implement '
'numeric_only.'.format(name))
axis_name = self._get_axis_name(axis)
axis_number = self._get_axis_number(axis_name)
f = lambda x: op(x, axis=axis_number, skipna=skipna, **kwds)
with np.errstate(all='ignore'):
result = f(self.values)
axes = self._get_plane_axes(axis_name)
if result.ndim == 2 and axis_name != self._info_axis_name:
result = result.T
return self._construct_return_type(result, axes)
def _construct_return_type(self, result, axes=None):
""" return the type for the ndim of the result """
ndim = getattr(result, 'ndim', None)
# need to assume they are the same
if ndim is None:
if isinstance(result, dict):
ndim = getattr(list(compat.itervalues(result))[0], 'ndim', 0)
# have a dict, so top-level is +1 dim
if ndim != 0:
ndim += 1
# scalar
if ndim == 0:
return Series(result)
# same as self
elif self.ndim == ndim:
# return the construction dictionary for these axes
if axes is None:
return self._constructor(result)
return self._constructor(result, **self._construct_axes_dict())
# sliced
elif self.ndim == ndim + 1:
if axes is None:
return self._constructor_sliced(result)
return self._constructor_sliced(
result, **self._extract_axes_for_slice(self, axes))
raise ValueError('invalid _construct_return_type [self->{self}] '
'[result->{result}]'.format(self=self, result=result))
def _wrap_result(self, result, axis):
axis = self._get_axis_name(axis)
axes = self._get_plane_axes(axis)
if result.ndim == 2 and axis != self._info_axis_name:
result = result.T
return self._construct_return_type(result, axes)
@Appender(_shared_docs['reindex'] % _shared_doc_kwargs)
def reindex(self, *args, **kwargs):
major = kwargs.pop("major", None)
minor = kwargs.pop('minor', None)
if major is not None:
if kwargs.get("major_axis"):
raise TypeError("Cannot specify both 'major' and 'major_axis'")
kwargs['major_axis'] = major
if minor is not None:
if kwargs.get("minor_axis"):
raise TypeError("Cannot specify both 'minor' and 'minor_axis'")
kwargs['minor_axis'] = minor
axes = validate_axis_style_args(self, args, kwargs, 'labels',
'reindex')
if self.ndim >= 4:
# Hack for PanelND
axes = {}
kwargs.update(axes)
kwargs.pop('axis', None)
kwargs.pop('labels', None)
return super(Panel, self).reindex(**kwargs)
@Appender(_shared_docs['rename'] % _shared_doc_kwargs)
def rename(self, items=None, major_axis=None, minor_axis=None, **kwargs):
major_axis = (major_axis if major_axis is not None else
kwargs.pop('major', None))
minor_axis = (minor_axis if minor_axis is not None else
kwargs.pop('minor', None))
return super(Panel, self).rename(items=items, major_axis=major_axis,
minor_axis=minor_axis, **kwargs)
@Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)
def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
limit=None, fill_value=np.nan):
return super(Panel, self).reindex_axis(labels=labels, axis=axis,
method=method, level=level,
copy=copy, limit=limit,
fill_value=fill_value)
@Appender(_shared_docs['transpose'] % _shared_doc_kwargs)
def transpose(self, *args, **kwargs):
# check if a list of axes was passed in instead as a
# single *args element
if (len(args) == 1 and hasattr(args[0], '__iter__') and
not is_string_like(args[0])):
axes = args[0]
else:
axes = args
if 'axes' in kwargs and axes:
raise TypeError("transpose() got multiple values for "
"keyword argument 'axes'")
elif not axes:
axes = kwargs.pop('axes', ())
return super(Panel, self).transpose(*axes, **kwargs)
@Appender(_shared_docs['fillna'] % _shared_doc_kwargs)
def fillna(self, value=None, method=None, axis=None, inplace=False,
limit=None, downcast=None, **kwargs):
return super(Panel, self).fillna(value=value, method=method, axis=axis,
inplace=inplace, limit=limit,
downcast=downcast, **kwargs)
def count(self, axis='major'):
"""
Return number of observations over requested axis.
Parameters
----------
axis : {'items', 'major', 'minor'} or {0, 1, 2}
Returns
-------
count : DataFrame
"""
i = self._get_axis_number(axis)
values = self.values
mask = np.isfinite(values)
result = mask.sum(axis=i, dtype='int64')
return self._wrap_result(result, axis)
def shift(self, periods=1, freq=None, axis='major'):
"""
Shift index by desired number of periods with an optional time freq.
The shifted data will not include the dropped periods and the
shifted axis will be smaller than the original. This is different
from the behavior of DataFrame.shift()
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
freq : DateOffset, timedelta, or time rule string, optional
axis : {'items', 'major', 'minor'} or {0, 1, 2}
Returns
-------
shifted : Panel
"""
if freq:
return self.tshift(periods, freq, axis=axis)
return super(Panel, self).slice_shift(periods, axis=axis)
def tshift(self, periods=1, freq=None, axis='major'):
return super(Panel, self).tshift(periods, freq, axis)
def join(self, other, how='left', lsuffix='', rsuffix=''):
"""
Join items with other Panel either on major and minor axes column
Parameters
----------
other : Panel or list of Panels
Index should be similar to one of the columns in this one
how : {'left', 'right', 'outer', 'inner'}
How to handle indexes of the two objects. Default: 'left'
for joining on index, None otherwise
* left: use calling frame's index
* right: use input frame's index
* outer: form union of indexes
* inner: use intersection of indexes
lsuffix : string
Suffix to use from left frame's overlapping columns
rsuffix : string
Suffix to use from right frame's overlapping columns
Returns
-------
joined : Panel
"""
from pandas.core.reshape.concat import concat
if isinstance(other, Panel):
join_major, join_minor = self._get_join_index(other, how)
this = self.reindex(major=join_major, minor=join_minor)
other = other.reindex(major=join_major, minor=join_minor)
merged_data = this._data.merge(other._data, lsuffix, rsuffix)
return self._constructor(merged_data)
else:
if lsuffix or rsuffix:
raise ValueError('Suffixes not supported when passing '
'multiple panels')
if how == 'left':
how = 'outer'
join_axes = [self.major_axis, self.minor_axis]
elif how == 'right':
raise ValueError('Right join not supported with multiple '
'panels')
else:
join_axes = None
return concat([self] + list(other), axis=0, join=how,
join_axes=join_axes, verify_integrity=True)
def update(self, other, join='left', overwrite=True, filter_func=None,
raise_conflict=False):
"""
Modify Panel in place using non-NA values from passed
Panel, or object coercible to Panel. Aligns on items
Parameters
----------
other : Panel, or object coercible to Panel
        join : {'left', 'right', 'outer', 'inner'}, default 'left'
            How to join the individual DataFrames
overwrite : boolean, default True
If True then overwrite values for common keys in the calling panel
filter_func : callable(1d-array) -> 1d-array<boolean>, default None
Can choose to replace values other than NA. Return True for values
that should be updated
raise_conflict : bool
If True, will raise an error if a DataFrame and other both
contain data in the same place.
"""
if not isinstance(other, self._constructor):
other = self._constructor(other)
axis_name = self._info_axis_name
axis_values = self._info_axis
other = other.reindex(**{axis_name: axis_values})
for frame in axis_values:
self[frame].update(other[frame], join, overwrite, filter_func,
raise_conflict)
def _get_join_index(self, other, how):
if how == 'left':
join_major, join_minor = self.major_axis, self.minor_axis
elif how == 'right':
join_major, join_minor = other.major_axis, other.minor_axis
elif how == 'inner':
join_major = self.major_axis.intersection(other.major_axis)
join_minor = self.minor_axis.intersection(other.minor_axis)
elif how == 'outer':
join_major = self.major_axis.union(other.major_axis)
join_minor = self.minor_axis.union(other.minor_axis)
return join_major, join_minor
# miscellaneous data creation
@staticmethod
def _extract_axes(self, data, axes, **kwargs):
""" return a list of the axis indicies """
return [self._extract_axis(self, data, axis=i, **kwargs)
for i, a in enumerate(axes)]
@staticmethod
def _extract_axes_for_slice(self, axes):
""" return the slice dictionary for these axes """
return dict((self._AXIS_SLICEMAP[i], a)
for i, a in zip(
self._AXIS_ORDERS[self._AXIS_LEN - len(axes):],
axes))
@staticmethod
def _prep_ndarray(self, values, copy=True):
if not isinstance(values, np.ndarray):
values = np.asarray(values)
# NumPy strings are a pain, convert to object
if issubclass(values.dtype.type, compat.string_types):
values = np.array(values, dtype=object, copy=True)
else:
if copy:
values = values.copy()
if values.ndim != self._AXIS_LEN:
raise ValueError("The number of dimensions required is {0}, "
"but the number of dimensions of the "
"ndarray given was {1}".format(self._AXIS_LEN,
values.ndim))
return values
@staticmethod
def _homogenize_dict(self, frames, intersect=True, dtype=None):
"""
Conform set of _constructor_sliced-like objects to either
an intersection of indices / columns or a union.
Parameters
----------
frames : dict
intersect : boolean, default True
Returns
-------
        dict of aligned results & indices
"""
result = dict()
        # caller may pass a dict or an OrderedDict; preserve the input type
if isinstance(frames, OrderedDict):
result = OrderedDict()
adj_frames = OrderedDict()
for k, v in compat.iteritems(frames):
if isinstance(v, dict):
adj_frames[k] = self._constructor_sliced(v)
else:
adj_frames[k] = v
axes = self._AXIS_ORDERS[1:]
axes_dict = dict((a, ax) for a, ax in zip(axes, self._extract_axes(
self, adj_frames, axes, intersect=intersect)))
reindex_dict = dict(
[(self._AXIS_SLICEMAP[a], axes_dict[a]) for a in axes])
reindex_dict['copy'] = False
for key, frame in compat.iteritems(adj_frames):
if frame is not None:
result[key] = frame.reindex(**reindex_dict)
else:
result[key] = None
axes_dict['data'] = result
axes_dict['dtype'] = dtype
return axes_dict
@staticmethod
def _extract_axis(self, data, axis=0, intersect=False):
index = None
if len(data) == 0:
index = Index([])
elif len(data) > 0:
raw_lengths = []
have_raw_arrays = False
have_frames = False
for v in data.values():
if isinstance(v, self._constructor_sliced):
have_frames = True
elif v is not None:
have_raw_arrays = True
raw_lengths.append(v.shape[axis])
if have_frames:
index = _get_objs_combined_axis(data.values(), axis=axis,
intersect=intersect)
if have_raw_arrays:
lengths = list(set(raw_lengths))
if len(lengths) > 1:
raise ValueError('ndarrays must match shape on '
'axis {ax}'.format(ax=axis))
if have_frames:
if lengths[0] != len(index):
raise AssertionError('Length of data and index must match')
else:
index = Index(np.arange(lengths[0]))
if index is None:
index = Index([])
return _ensure_index(index)
@classmethod
def _add_aggregate_operations(cls, use_numexpr=True):
""" add the operations to the cls; evaluate the doc strings again """
def _panel_arith_method(op, name, str_rep=None, default_axis=None,
fill_zeros=None, **eval_kwargs):
def na_op(x, y):
import pandas.core.computation.expressions as expressions
try:
result = expressions.evaluate(op, str_rep, x, y,
errors='raise',
**eval_kwargs)
except TypeError:
result = op(x, y)
# handles discrepancy between numpy and numexpr on division/mod
# by 0 though, given that these are generally (always?)
# non-scalars, I'm not sure whether it's worth it at the moment
result = missing.fill_zeros(result, x, y, name, fill_zeros)
return result
if name in _op_descriptions:
op_name = name.replace('__', '')
op_desc = _op_descriptions[op_name]
if op_desc['reversed']:
equiv = 'other ' + op_desc['op'] + ' panel'
else:
equiv = 'panel ' + op_desc['op'] + ' other'
_op_doc = """
{desc} of series and other, element-wise (binary operator `{op_name}`).
Equivalent to ``{equiv}``.
Parameters
----------
other : {construct} or {cls_name}
axis : {{{axis_order}}}
Axis to broadcast over
Returns
-------
{cls_name}
See also
--------
{cls_name}.{reverse}\n"""
doc = _op_doc.format(
desc=op_desc['desc'], op_name=op_name, equiv=equiv,
construct=cls._constructor_sliced.__name__,
cls_name=cls.__name__, reverse=op_desc['reverse'],
axis_order=', '.join(cls._AXIS_ORDERS))
else:
                # doc string substitutions
_agg_doc = """
Wrapper method for {wrp_method}
Parameters
----------
other : {construct} or {cls_name}
axis : {{{axis_order}}}
Axis to broadcast over
Returns
-------
{cls_name}\n"""
doc = _agg_doc.format(
construct=cls._constructor_sliced.__name__,
cls_name=cls.__name__, wrp_method=name,
axis_order=', '.join(cls._AXIS_ORDERS))
@Appender(doc)
def f(self, other, axis=0):
return self._combine(other, na_op, axis=axis)
f.__name__ = name
return f
# add `div`, `mul`, `pow`, etc..
ops.add_flex_arithmetic_methods(
cls, _panel_arith_method, use_numexpr=use_numexpr,
flex_comp_method=ops._comp_method_PANEL)
Panel._setup_axes(axes=['items', 'major_axis', 'minor_axis'], info_axis=0,
stat_axis=1, aliases={'major': 'major_axis',
'minor': 'minor_axis'},
slicers={'major_axis': 'index',
'minor_axis': 'columns'})
ops.add_special_arithmetic_methods(Panel, **ops.panel_special_funcs)
Panel._add_aggregate_operations()
Panel._add_numeric_operations()
# legacy
class WidePanel(Panel):
def __init__(self, *args, **kwargs):
# deprecation, #10892
warnings.warn("WidePanel is deprecated. Please use Panel",
FutureWarning, stacklevel=2)
super(WidePanel, self).__init__(*args, **kwargs)
class LongPanel(DataFrame):
def __init__(self, *args, **kwargs):
# deprecation, #10892
warnings.warn("LongPanel is deprecated. Please use DataFrame",
FutureWarning, stacklevel=2)
super(LongPanel, self).__init__(*args, **kwargs)
| bsd-3-clause |
wasit7/recognition | pub/ss.py | 2 | 10760 | """
GNU GENERAL PUBLIC LICENSE Version 2
Created on Tue Oct 14 18:52:01 2014
@author: Wasit
"""
import numpy as np
import os
from PIL import Image
from scipy.ndimage import filters
try:
import json
except ImportError:
import simplejson as json
#1800
num_img=100
spi=5
rootdir="dataset"
mrec=64
mtran=64
margin=mrec+mtran
class dataset:
def __init__(self):
'''
To create and initialise
        self.dim_theta--(m) dimension of theta; theta is a column vector
        self.size------(n) number of samples in the root bag
        self.I---------preprocessed data
        self.samples---the matrix, which has size [(p+1) x n],
                       where p is the size of the vector that identifies the
                       location of a sample in self.I.
                       Note that the first row of self.samples is the label
'''
        #1 self.clmax: maximum number of classes
self.clmax=520
#2 self.spi: number of samples per image
self.spi=spi
#3 self.dim_theta: [r1,c1,r2,c2,bin]^T
self.dim_theta=5
self.dim_bin=2
#loading json files
jsf=[]
for root, dirs, files in os.walk(rootdir):
for subdir in dirs:
for iroot,idirs,ifiles in os.walk(os.path.join(root,subdir)):
for f in ifiles:
if f.endswith('json'):
jsf.append(os.path.join(iroot,f))
#set sampling-rate here
self.jsonfiles=[jsf[i] for i in np.random.permutation(len(jsf))[0:num_img]]
print "len(self.jsonfiles)=%d"%len(self.jsonfiles)
#4 self.size: number of all samples in the root bag
self.size=len(self.jsonfiles)*self.spi;
#5 self.I: the data
#6 self.samples: samples[x]=[class,img, row, column]^T
self.I=[]
self.samples=np.zeros((4,self.size),dtype=np.uint)
for i, jf in enumerate(self.jsonfiles):
#self.I: the data
f=open(jf,"r")
js=json.loads(f.read())
f.close()
##init and show
img_path=''
if js['path'][0:2]=='./':
img_path= rootdir + js['path'][1:]
elif js['path'][0]=='/':
img_path= rootdir + js['path']
else:
img_path= rootdir + '/' +js['path']
#print(img_path)
im=np.array(Image.open(img_path).convert('L'))
rmax,cmax=im.shape
#imx and imy are graduent images in x and y directions
imx = np.zeros(im.shape)
imy = np.zeros(im.shape)
#sigma for gausian window
sigma=2
filters.gaussian_filter(im, (sigma,sigma), (0,1), imx)
filters.gaussian_filter(im, (sigma,sigma), (1,0), imy)
            #categorise gradient signs into two binary bins (x>0 and y>0); the 4-group (sw,se,nw,ne) variant below is left commented out
I_tem=np.zeros((rmax,cmax,self.dim_bin),dtype=np.uint16)
# g= (0<imx).astype(np.uint16) + 2*(0<imy).astype(np.uint16)
# I_tem[:,:,0] = (g[:,:]==0).astype(np.uint16).cumsum(0).cumsum(1)
# I_tem[:,:,1] = (g[:,:]==1).astype(np.uint16).cumsum(0).cumsum(1)
# I_tem[:,:,2] = (g[:,:]==2).astype(np.uint16).cumsum(0).cumsum(1)
# I_tem[:,:,3] = (g[:,:]==3).astype(np.uint16).cumsum(0).cumsum(1)
I_tem[:,:,0] = (0<imx).astype(np.uint16).cumsum(0).cumsum(1)
I_tem[:,:,1] = (0<imy).astype(np.uint16).cumsum(0).cumsum(1)
self.I.append(I_tem)
#samples[x]=[class,img, row, column]^T
ki=i*self.spi
kf=ki+self.spi
#image index
self.samples[1,ki:kf]=i
#row
r=np.random.randint(margin,rmax-margin,self.spi)
self.samples[2,ki:kf]=r;
#column
c=np.random.randint(margin,cmax-margin,self.spi)
self.samples[3,ki:kf]=c;
#label
self.samples[0,ki:kf]=0
for s in range(self.spi):
for lb in js['labels']:
r1=lb['y']
r2=r1+lb['h']
c1=lb['x']
c2=c1+lb['w']
if r1<=r[s] and r[s]<r2 and c1<=c[s] and c[s]<c2:
#print("l:{} r,c:{},{}-->{},{},{},{}".format(lb['label'],r[s],c[s],r1,r2,c1,c2))
#label
self.samples[0,ki+s]=lb['label']
#self.I=np.array(self.I)
self.samples=self.samples.astype(np.uint16)
def __del__(self):
del self.clmax
del self.spi
del self.size
del self.I
del self.samples
def getX(self):
'''
input:
void
output:
[1D ndarray dtype=np.uint32]
'''
# return np.arange(0, self.size, dtype=np.uint32)
# return np.random.randint(0,self.size,size=self.size)
return np.random.permutation(self.size)
def getL(self,x):
'''
input:
[1D ndarray dtype=np.uint32]
output:
[1D ndarray dtype=np.uint32]
'''
return self.samples[0,x]
def setL(self,x,L):
self.samples[0,x]=L
def getIs(self,thetas,x):
'''
input:
x: [1D ndarray dtype=np.uint32]\n
thetas: [2D ndarray float]
output:
[1D ndarray dtype=np.uint32]
Description:
In spiral case, it uses only first row of the thetas
'''
#dataset.getParam() calls this
#theta and x have same number of column
#3 self.dim_theta: [0_r1, 1_c1, 2_r2, 3_c2, 4_bin]^T
# r1,r2 {margin~rmax-margin},
# c1,c2 {margin~cmax-margin},
# bin {0~3}
# L1(r1c1)----L2(r1c2)
# | |
# L3(r2c1)----L4(r2c2)
##########
#6 self.samples: samples[x]=[0_class, 1_img, 2_row, 3_column]^T
r1=self.samples[2,x]+thetas[0,:]
c1=self.samples[3,x]+thetas[1,:]
r2=self.samples[2,x]+thetas[2,:]
c2=self.samples[3,x]+thetas[3,:]
bins=thetas[self.dim_theta-1,:]
f=np.zeros(len(x))
for i,ix in enumerate(x):
img=self.samples[1,ix]
L1=self.I[img][r1[i],c1[i],bins[i]]
L2=self.I[img][r1[i],c2[i],bins[i]]
L3=self.I[img][r2[i],c1[i],bins[i]]
L4=self.I[img][r2[i],c2[i],bins[i]]
f[i]=float(L4+L1-L2-L3)
return f
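    # Note on the lookups above (descriptive only): self.I holds cumulative
    # sums (integral images), so the number of gradient pixels inside the
    # rectangle spanned by (r1, c1) and (r2, c2) is recovered with four
    # reads: I[r2, c2] + I[r1, c1] - I[r1, c2] - I[r2, c1].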
def getI(self,theta,x):
'''
input:
x: [1D ndarray dtype=np.uint32]\n
theta: [1D ndarray float]
output:
[1D ndarray dtype=np.uint32]
Description:
In spiral case, it uses only first row of the thetas
'''
#engine.getQH() call this
r1=self.samples[2,x]+theta[0]
c1=self.samples[3,x]+theta[1]
r2=self.samples[2,x]+theta[2]
c2=self.samples[3,x]+theta[3]
bins=theta[self.dim_theta-1]
f=np.zeros(len(x))
for i,ix in enumerate(x):
img=self.samples[1,ix]
L1=self.I[img][r1[i],c1[i],bins]
L2=self.I[img][r1[i],c2[i],bins]
L3=self.I[img][r2[i],c1[i],bins]
L4=self.I[img][r2[i],c2[i],bins]
f[i]=float(L4+L1-L2-L3)
return f
def getParam(self,x):
'''
input:
x: [1D ndarray dtype=np.uint32]
output:
thetas: [2D ndarray float] rmax=dim_theta, cmax=len(x)
taus: [1D ndarray dtype=np.uint32]
Description:
In spiral case, it uses only first row of the thetas
'''
#3 self.dim_theta: [0_r1, 1_c1, 2_r2, 3_c2, 4_bin]^T
#6 self.samples: samples[x]=[0_class, 1_img, 2_row, 3_column]^T
n_proposal=100
if len(x)>n_proposal:
x=np.random.permutation(x)[:n_proposal]
ux=np.random.randint(-mtran,mtran,size=len(x))
uy=np.random.randint(-mtran,mtran,size=len(x))
hx=np.random.randint(8,mrec,size=len(x))
hy=np.random.randint(8,mrec,size=len(x))
bins=np.random.randint(0,self.dim_bin,size=len(x))
thetas=np.zeros((self.dim_theta,len(x)))
thetas[0,:]=ux-hx
thetas[1,:]=uy-hy
thetas[2,:]=ux+hx
thetas[3,:]=uy+hy
thetas[self.dim_theta-1,:]=bins
        thetas = thetas.astype(int)  # astype returns a copy; assign it back so the offsets are integer indices
taus = self.getIs(thetas, x)
return thetas,taus
def show(self):
import matplotlib.pyplot as plt
print("number of images: {}".format(len(self.I)))
for i in xrange(len(self.jsonfiles)):
f=open(self.jsonfiles[i],"r")
js=json.loads(f.read())
f.close()
##init and show
img_path=''
if js['path'][0:2]=='./':
img_path= rootdir + js['path'][1:]
elif js['path'][0]=='/':
img_path= rootdir + js['path']
else:
img_path= rootdir + '/' +js['path']
print(img_path)
im=np.array(Image.open(img_path).convert('L'))
plt.hold(False)
plt.imshow(im)
plt.hold(True)
for j in range(self.size):
#samples[x]=[0_class,1_img, 2_row, 3_column]^T
if self.samples[1,j]==i:
plt.text(self.samples[3,j], self.samples[2,j], "%03d"%self.samples[0,j], fontsize=12,color='red')
#plt.plot(self.samples[3,j],self.samples[2,j],markers[self.samples[0,j]])
plt.set_cmap('gray')
plt.show()
plt.ginput(1)
plt.close('all')
if __name__ == '__main__':
# import matplotlib.pyplot as plt
dset=dataset()
x=dset.getX()
# print("number of images: {}".format(len(dset.I)))
# markers=['ko','ro','go','bo','po']
# for i in xrange(len(dset.jsonfiles)):
# f=open(dset.jsonfiles[i],"r")
# js=json.loads(f.read())
# f.close()
# img_path= rootdir + js['path'][1:]
# print(img_path)
# im=np.array(Image.open(img_path).convert('L'))
# plt.hold(False)
# plt.imshow(im)
# plt.hold(True)
# for j in range(dset.size):
# #samples[x]=[0_class,1_img, 2_row, 3_column]^T
# if dset.samples[1,j]==i:
# plt.plot(dset.samples[3,j],dset.samples[2,j],markers[dset.samples[0,j]])
# plt.set_cmap('gray')
# plt.show()
# plt.ginput()
# plt.close('all')
#-- | gpl-2.0 |
clemkoa/scikit-learn | examples/cluster/plot_color_quantization.py | 24 | 3360 | # -*- coding: utf-8 -*-
"""
==================================
Color Quantization using K-Means
==================================
Performs a pixel-wise Vector Quantization (VQ) of an image of the summer palace
(China), reducing the number of colors required to show the image from 96,615
unique colors to 64, while preserving the overall appearance quality.
In this example, pixels are represented in a 3D-space and K-means is used to
find 64 color clusters. In the image processing literature, the codebook
obtained from K-means (the cluster centers) is called the color palette. Using
a single byte, up to 256 colors can be addressed, whereas an RGB encoding
requires 3 bytes per pixel. The GIF file format, for example, uses such a
palette.
For comparison, a quantized image using a random codebook (colors picked up
randomly) is also shown.
"""
# Authors: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
#
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time
n_colors = 64
# Load the Summer Palace photo
china = load_sample_image("china.jpg")
# Convert to floats instead of the default 8-bit integer coding. Dividing by
# 255 is important so that plt.imshow works well on float data (it needs to
# be in the range [0-1])
china = np.array(china, dtype=np.float64) / 255
# Load Image and transform to a 2D numpy array.
w, h, d = original_shape = tuple(china.shape)
assert d == 3
image_array = np.reshape(china, (w * h, d))
print("Fitting model on a small sub-sample of the data")
t0 = time()
image_array_sample = shuffle(image_array, random_state=0)[:1000]
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)
print("done in %0.3fs." % (time() - t0))
# Get labels for all points
print("Predicting color indices on the full image (k-means)")
t0 = time()
labels = kmeans.predict(image_array)
print("done in %0.3fs." % (time() - t0))
codebook_random = shuffle(image_array, random_state=0)[:n_colors + 1]
print("Predicting color indices on the full image (random)")
t0 = time()
labels_random = pairwise_distances_argmin(codebook_random,
image_array,
axis=0)
print("done in %0.3fs." % (time() - t0))
def recreate_image(codebook, labels, w, h):
"""Recreate the (compressed) image from the code book & labels"""
d = codebook.shape[1]
image = np.zeros((w, h, d))
label_idx = 0
for i in range(w):
for j in range(h):
image[i][j] = codebook[labels[label_idx]]
label_idx += 1
return image
# Display all results, alongside original image
plt.figure(1)
plt.clf()
plt.axis('off')
plt.title('Original image (96,615 colors)')
plt.imshow(china)
plt.figure(2)
plt.clf()
plt.axis('off')
plt.title('Quantized image (64 colors, K-Means)')
plt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h))
plt.figure(3)
plt.clf()
plt.axis('off')
plt.title('Quantized image (64 colors, Random)')
plt.imshow(recreate_image(codebook_random, labels_random, w, h))
plt.show()
| bsd-3-clause |
sgenoud/scikit-learn | examples/linear_model/plot_iris_logistic.py | 5 | 1646 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier on the
`iris <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
datapoints are colored according to their labels.
"""
print __doc__
# Code source: Gaël Varoquaux
# Modified for Documentation merge by Jaques Grobler
# License: BSD
import numpy as np
import pylab as pl
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# we create an instance of the logistic-regression classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
pl.figure(1, figsize=(4, 3))
pl.pcolormesh(xx, yy, Z, cmap=pl.cm.Paired)
# Plot also the training points
pl.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=pl.cm.Paired)
pl.xlabel('Sepal length')
pl.ylabel('Sepal width')
pl.xlim(xx.min(), xx.max())
pl.ylim(yy.min(), yy.max())
pl.xticks(())
pl.yticks(())
pl.show()
| bsd-3-clause |
ak681443/mana-deep | ae_tries/new_train_dae.py | 1 | 4047 | from keras.layers import Input, Dense, Convolution2D, MaxPooling2D, UpSampling2D
from keras.models import Model
from keras.callbacks import TensorBoard, ModelCheckpoint
from keras import regularizers
import tensorflow as tf
tf.python.control_flow_ops = tf
import os
from os import listdir
from os.path import isfile, join
import numpy as np
from matplotlib import pyplot as plt
import cv2
import scipy.misc
import random
random.seed(9)
input_img = Input(shape=(224, 224,1))
x = Convolution2D(16, 3, 3, activation='relu', border_mode='same', input_shape=(224,224,1))(input_img)
x = MaxPooling2D((2, 2), border_mode='same')(x)
x = Convolution2D(8, 3, 3, activation='relu', border_mode='same')(x)
x = MaxPooling2D((2, 2), border_mode='same')(x)
x = Convolution2D(8, 3, 3, activation='relu', border_mode='same', activity_regularizer=regularizers.activity_l1(10e-5))(x)
encoded = MaxPooling2D((1, 1), border_mode='same')(x)
x = Convolution2D(8, 3, 3, activation='relu', border_mode='same')(encoded)
x = UpSampling2D((1, 1))(x)
x = Convolution2D(8, 3, 3, activation='relu', border_mode='same')(x)
x = UpSampling2D((2, 2))(x)
x = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(x)
x = UpSampling2D((2, 2))(x)
decoded = Convolution2D(1, 3, 3, activation='sigmoid', border_mode='same')(x)
autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
mypath = '/home/arvind/tt/cleaned_data/train/'
files = [f for f in listdir(mypath) if isfile(join(mypath, f))]
images = []
mask = np.zeros((224,224))
for filen in files:
img = cv2.imread(mypath+filen)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img[img<30] = 101
img[img<100] = 1
img[img>=100] = 0
mask = mask + img
img[img>0] = 1
images.append(np.array([img]))
images_train = np.array(images[:-100])
images_test = np.array(images[-100:])
images_train = images_train.astype('float32')#/ 255. #float(np.max(images_train))
images_test = images_test.astype('float32')#/ 255. #float(np.max(images_test))
images_train_masked = []
images_test_masked = []
av = np.average(mask)
for img in images_test:
_img = np.reshape(np.copy(img),(224,224))
_img[mask>av] = 0
images_test_masked.append(np.array([_img]))
for img in images_train:
_img = np.reshape(np.copy(img),(224,224))
_img[mask>av] = 0
images_train_masked.append(np.array([_img]))
#print np.max(images_train_op[0])
#plt.imshow(np.reshape(images_train_op[50],(224,224)))
#plt.show()
# images_train_op = np.array(images_train_op)
# images_test_op = np.array(images_test_op)
images_train = np.reshape(images_train, (len(images_train), 224, 224, 1))
images_test = np.reshape(images_test, (len(images_test), 224, 224, 1))
images_train_masked = np.reshape(images_train_masked, (len(images_train_masked), 224, 224, 1))
images_test_masked = np.reshape(images_test_masked, (len(images_test_masked), 224, 224, 1))
noise_factor = 0.01
x_train_noisy = images_train + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=images_train.shape)
x_test_noisy = images_test + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=images_test.shape)
x_train_noisy = np.clip(x_train_noisy, 0., 1.)
x_test_noisy = np.clip(x_test_noisy, 0., 1.)
#print images_test_op.shape
#print images_train_op.shape
print autoencoder.summary()
filepath="best_model_dae.h5"
checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')  # val_loss should be minimized; mode='max' would keep the worst weights
#autoencoder.load_weights('model_right1.h5')
history = autoencoder.fit(images_train, images_train_masked,
nb_epoch=1000,
batch_size=256,
shuffle=True,
validation_data=(images_test, images_test_masked),
callbacks=[TensorBoard(log_dir='/tmp/autoencoder'), checkpoint])
# serialize model to JSON
model_json = autoencoder.to_json()
with open("model.json", "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
autoencoder.save_weights("model_best_dae.h5")
print("Saved model to disk")
| apache-2.0 |
rs2/pandas | pandas/core/ops/array_ops.py | 1 | 15476 | """
Functions for arithmetic and comparison operations on NumPy arrays and
ExtensionArrays.
"""
from datetime import timedelta
from functools import partial
import operator
from typing import Any, Tuple
import warnings
import numpy as np
from pandas._libs import Timedelta, Timestamp, lib, ops as libops
from pandas._typing import ArrayLike
from pandas.core.dtypes.cast import (
construct_1d_object_array_from_listlike,
find_common_type,
maybe_upcast_putmask,
)
from pandas.core.dtypes.common import (
ensure_object,
is_bool_dtype,
is_integer_dtype,
is_list_like,
is_numeric_v_string_like,
is_object_dtype,
is_scalar,
)
from pandas.core.dtypes.generic import ABCExtensionArray, ABCIndex, ABCSeries
from pandas.core.dtypes.missing import isna, notna
from pandas.core.ops import missing
from pandas.core.ops.dispatch import should_extension_dispatch
from pandas.core.ops.invalid import invalid_comparison
from pandas.core.ops.roperator import rpow
def comp_method_OBJECT_ARRAY(op, x, y):
if isinstance(y, list):
y = construct_1d_object_array_from_listlike(y)
if isinstance(y, (np.ndarray, ABCSeries, ABCIndex)):
# Note: these checks can be for ABCIndex and not ABCIndexClass
# because that is the only object-dtype class.
if not is_object_dtype(y.dtype):
y = y.astype(np.object_)
if isinstance(y, (ABCSeries, ABCIndex)):
y = y._values
if x.shape != y.shape:
raise ValueError("Shapes must match", x.shape, y.shape)
result = libops.vec_compare(x.ravel(), y.ravel(), op)
else:
result = libops.scalar_compare(x.ravel(), y, op)
return result.reshape(x.shape)
def masked_arith_op(x: np.ndarray, y, op):
"""
If the given arithmetic operation fails, attempt it again on
only the non-null elements of the input array(s).
Parameters
----------
x : np.ndarray
y : np.ndarray, Series, Index
op : binary operator
"""
# For Series `x` is 1D so ravel() is a no-op; calling it anyway makes
# the logic valid for both Series and DataFrame ops.
xrav = x.ravel()
assert isinstance(x, np.ndarray), type(x)
if isinstance(y, np.ndarray):
dtype = find_common_type([x.dtype, y.dtype])
result = np.empty(x.size, dtype=dtype)
if len(x) != len(y):
raise ValueError(x.shape, y.shape)
else:
ymask = notna(y)
# NB: ravel() is only safe since y is ndarray; for e.g. PeriodIndex
# we would get int64 dtype, see GH#19956
yrav = y.ravel()
mask = notna(xrav) & ymask.ravel()
# See GH#5284, GH#5035, GH#19448 for historical reference
if mask.any():
with np.errstate(all="ignore"):
result[mask] = op(xrav[mask], yrav[mask])
else:
if not is_scalar(y):
raise TypeError(
f"Cannot broadcast np.ndarray with operand of type { type(y) }"
)
# mask is only meaningful for x
result = np.empty(x.size, dtype=x.dtype)
mask = notna(xrav)
# 1 ** np.nan is 1. So we have to unmask those.
if op is pow:
mask = np.where(x == 1, False, mask)
elif op is rpow:
mask = np.where(y == 1, False, mask)
if mask.any():
with np.errstate(all="ignore"):
result[mask] = op(xrav[mask], y)
result, _ = maybe_upcast_putmask(result, ~mask, np.nan)
result = result.reshape(x.shape) # 2D compat
return result
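# Illustrative sketch (assumed inputs, not part of the pandas test suite):
# positions where either operand is NA are skipped and come back as NaN.
#   x = np.array([1.0, np.nan, 3.0])
#   y = np.array([1.0, 2.0, np.nan])
#   masked_arith_op(x, y, operator.add)   # -> array([ 2., nan, nan])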
def na_arithmetic_op(left, right, op, is_cmp: bool = False):
"""
Return the result of evaluating op on the passed in values.
If native types are not compatible, try coercion to object dtype.
Parameters
----------
left : np.ndarray
right : np.ndarray or scalar
is_cmp : bool, default False
If this a comparison operation.
Returns
-------
array-like
Raises
------
TypeError : invalid operation
"""
import pandas.core.computation.expressions as expressions
try:
result = expressions.evaluate(op, left, right)
except TypeError:
if is_cmp:
# numexpr failed on comparison op, e.g. ndarray[float] > datetime
# In this case we do not fall back to the masked op, as that
# will handle complex numbers incorrectly, see GH#32047
raise
result = masked_arith_op(left, right, op)
if is_cmp and (is_scalar(result) or result is NotImplemented):
# numpy returned a scalar instead of operating element-wise
# e.g. numeric array vs str
return invalid_comparison(left, right, op)
return missing.dispatch_fill_zeros(op, left, right, result)
def arithmetic_op(left: ArrayLike, right: Any, op):
"""
Evaluate an arithmetic operation `+`, `-`, `*`, `/`, `//`, `%`, `**`, ...
Parameters
----------
left : np.ndarray or ExtensionArray
right : object
Cannot be a DataFrame or Index. Series is *not* excluded.
op : {operator.add, operator.sub, ...}
Or one of the reversed variants from roperator.
Returns
-------
ndarray or ExtensionArray
Or a 2-tuple of these in the case of divmod or rdivmod.
"""
# NB: We assume that extract_array has already been called
# on `left` and `right`.
lvalues = maybe_upcast_datetimelike_array(left)
rvalues = maybe_upcast_datetimelike_array(right)
rvalues = maybe_upcast_for_op(rvalues, lvalues.shape)
if should_extension_dispatch(lvalues, rvalues) or isinstance(rvalues, Timedelta):
# Timedelta is included because numexpr will fail on it, see GH#31457
res_values = op(lvalues, rvalues)
else:
with np.errstate(all="ignore"):
res_values = na_arithmetic_op(lvalues, rvalues, op)
return res_values
def comparison_op(left: ArrayLike, right: Any, op) -> ArrayLike:
"""
Evaluate a comparison operation `=`, `!=`, `>=`, `>`, `<=`, or `<`.
Parameters
----------
left : np.ndarray or ExtensionArray
right : object
Cannot be a DataFrame, Series, or Index.
op : {operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le}
Returns
-------
ndarray or ExtensionArray
"""
# NB: We assume extract_array has already been called on left and right
lvalues = maybe_upcast_datetimelike_array(left)
rvalues = right
rvalues = lib.item_from_zerodim(rvalues)
if isinstance(rvalues, list):
# TODO: same for tuples?
rvalues = np.asarray(rvalues)
if isinstance(rvalues, (np.ndarray, ABCExtensionArray)):
# TODO: make this treatment consistent across ops and classes.
# We are not catching all listlikes here (e.g. frozenset, tuple)
# The ambiguous case is object-dtype. See GH#27803
if len(lvalues) != len(rvalues):
raise ValueError(
"Lengths must match to compare", lvalues.shape, rvalues.shape
)
if should_extension_dispatch(lvalues, rvalues):
# Call the method on lvalues
res_values = op(lvalues, rvalues)
elif is_scalar(rvalues) and isna(rvalues):
# numpy does not like comparisons vs None
if op is operator.ne:
res_values = np.ones(lvalues.shape, dtype=bool)
else:
res_values = np.zeros(lvalues.shape, dtype=bool)
elif is_numeric_v_string_like(lvalues, rvalues):
# GH#36377 going through the numexpr path would incorrectly raise
return invalid_comparison(lvalues, rvalues, op)
elif is_object_dtype(lvalues.dtype):
res_values = comp_method_OBJECT_ARRAY(op, lvalues, rvalues)
else:
with warnings.catch_warnings():
# suppress warnings from numpy about element-wise comparison
warnings.simplefilter("ignore", DeprecationWarning)
with np.errstate(all="ignore"):
res_values = na_arithmetic_op(lvalues, rvalues, op, is_cmp=True)
return res_values
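# Illustrative sketch (assumed inputs): comparing an ndarray against a missing
# scalar short-circuits to an all-False (eq) or all-True (ne) boolean mask.
#   comparison_op(np.array([1, 2, 3]), np.nan, operator.eq)
#   # -> array([False, False, False])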
def na_logical_op(x: np.ndarray, y, op):
try:
# For exposition, write:
# yarr = isinstance(y, np.ndarray)
# yint = is_integer(y) or (yarr and y.dtype.kind == "i")
# ybool = is_bool(y) or (yarr and y.dtype.kind == "b")
# xint = x.dtype.kind == "i"
# xbool = x.dtype.kind == "b"
# Then Cases where this goes through without raising include:
# (xint or xbool) and (yint or bool)
result = op(x, y)
except TypeError:
if isinstance(y, np.ndarray):
# bool-bool dtype operations should be OK, should not get here
assert not (is_bool_dtype(x.dtype) and is_bool_dtype(y.dtype))
x = ensure_object(x)
y = ensure_object(y)
result = libops.vec_binop(x.ravel(), y.ravel(), op)
else:
# let null fall thru
assert lib.is_scalar(y)
if not isna(y):
y = bool(y)
try:
result = libops.scalar_binop(x, y, op)
except (
TypeError,
ValueError,
AttributeError,
OverflowError,
NotImplementedError,
) as err:
typ = type(y).__name__
raise TypeError(
f"Cannot perform '{op.__name__}' with a dtyped [{x.dtype}] array "
f"and scalar of type [{typ}]"
) from err
return result.reshape(x.shape)
def logical_op(left: ArrayLike, right: Any, op) -> ArrayLike:
"""
Evaluate a logical operation `|`, `&`, or `^`.
Parameters
----------
left : np.ndarray or ExtensionArray
right : object
Cannot be a DataFrame, Series, or Index.
op : {operator.and_, operator.or_, operator.xor}
Or one of the reversed variants from roperator.
Returns
-------
ndarray or ExtensionArray
"""
fill_int = lambda x: x
def fill_bool(x, left=None):
# if `left` is specifically not-boolean, we do not cast to bool
if x.dtype.kind in ["c", "f", "O"]:
# dtypes that can hold NA
mask = isna(x)
if mask.any():
x = x.astype(object)
x[mask] = False
if left is None or is_bool_dtype(left.dtype):
x = x.astype(bool)
return x
is_self_int_dtype = is_integer_dtype(left.dtype)
right = lib.item_from_zerodim(right)
if is_list_like(right) and not hasattr(right, "dtype"):
# e.g. list, tuple
right = construct_1d_object_array_from_listlike(right)
# NB: We assume extract_array has already been called on left and right
lvalues = maybe_upcast_datetimelike_array(left)
rvalues = right
if should_extension_dispatch(lvalues, rvalues):
# Call the method on lvalues
res_values = op(lvalues, rvalues)
else:
if isinstance(rvalues, np.ndarray):
is_other_int_dtype = is_integer_dtype(rvalues.dtype)
rvalues = rvalues if is_other_int_dtype else fill_bool(rvalues, lvalues)
else:
# i.e. scalar
is_other_int_dtype = lib.is_integer(rvalues)
# For int vs int `^`, `|`, `&` are bitwise operators and return
# integer dtypes. Otherwise these are boolean ops
filler = fill_int if is_self_int_dtype and is_other_int_dtype else fill_bool
res_values = na_logical_op(lvalues, rvalues, op)
# error: Cannot call function of unknown type
res_values = filler(res_values) # type: ignore[operator]
return res_values
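# Illustrative sketch (assumed inputs): a float operand containing NaN is
# coerced to bool (NaN -> False) before the logical operator is applied.
#   logical_op(np.array([True, False]), np.array([1.0, np.nan]), operator.or_)
#   # -> array([ True, False])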
def get_array_op(op):
"""
Return a binary array operation corresponding to the given operator op.
Parameters
----------
op : function
Binary operator from operator or roperator module.
Returns
-------
functools.partial
"""
if isinstance(op, partial):
# We get here via dispatch_to_series in DataFrame case
# TODO: avoid getting here
return op
op_name = op.__name__.strip("_").lstrip("r")
if op_name == "arith_op":
# Reached via DataFrame._combine_frame
return op
if op_name in {"eq", "ne", "lt", "le", "gt", "ge"}:
return partial(comparison_op, op=op)
elif op_name in {"and", "or", "xor", "rand", "ror", "rxor"}:
return partial(logical_op, op=op)
elif op_name in {
"add",
"sub",
"mul",
"truediv",
"floordiv",
"mod",
"divmod",
"pow",
}:
return partial(arithmetic_op, op=op)
else:
raise NotImplementedError(op_name)
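# Illustrative sketch (assumed usage): look up the array-level function for a
# comparison operator and apply it directly to plain ndarrays.
#   import operator
#   eq = get_array_op(operator.eq)
#   eq(np.array([1, 2, 3]), 2)   # -> array([False,  True, False])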
def maybe_upcast_datetimelike_array(obj: ArrayLike) -> ArrayLike:
"""
If we have an ndarray that is either datetime64 or timedelta64, wrap in EA.
Parameters
----------
obj : ndarray or ExtensionArray
Returns
-------
ndarray or ExtensionArray
"""
if isinstance(obj, np.ndarray):
if obj.dtype.kind == "m":
from pandas.core.arrays import TimedeltaArray
return TimedeltaArray._from_sequence(obj)
if obj.dtype.kind == "M":
from pandas.core.arrays import DatetimeArray
return DatetimeArray._from_sequence(obj)
return obj
def maybe_upcast_for_op(obj, shape: Tuple[int, ...]):
"""
Cast non-pandas objects to pandas types to unify behavior of arithmetic
and comparison operations.
Parameters
----------
obj: object
shape : tuple[int]
Returns
-------
out : object
Notes
-----
Be careful to call this *after* determining the `name` attribute to be
attached to the result of the arithmetic operation.
"""
from pandas.core.arrays import DatetimeArray, TimedeltaArray
if type(obj) is timedelta:
# GH#22390 cast up to Timedelta to rely on Timedelta
# implementation; otherwise operation against numeric-dtype
# raises TypeError
return Timedelta(obj)
elif isinstance(obj, np.datetime64):
# GH#28080 numpy casts integer-dtype to datetime64 when doing
# array[int] + datetime64, which we do not allow
if isna(obj):
# Avoid possible ambiguities with pd.NaT
obj = obj.astype("datetime64[ns]")
right = np.broadcast_to(obj, shape)
return DatetimeArray(right)
return Timestamp(obj)
elif isinstance(obj, np.timedelta64):
if isna(obj):
# wrapping timedelta64("NaT") in Timedelta returns NaT,
# which would incorrectly be treated as a datetime-NaT, so
# we broadcast and wrap in a TimedeltaArray
obj = obj.astype("timedelta64[ns]")
right = np.broadcast_to(obj, shape)
return TimedeltaArray(right)
# In particular non-nanosecond timedelta64 needs to be cast to
# nanoseconds, or else we get undesired behavior like
# np.timedelta64(3, 'D') / 2 == np.timedelta64(1, 'D')
return Timedelta(obj)
elif isinstance(obj, np.ndarray) and obj.dtype.kind == "m":
# GH#22390 Unfortunately we need to special-case right-hand
# timedelta64 dtypes because numpy casts integer dtypes to
# timedelta64 when operating with timedelta64
return TimedeltaArray._from_sequence(obj)
return obj
| bsd-3-clause |
bdh1011/wau | venv/lib/python2.7/site-packages/pandas/tseries/plotting.py | 1 | 7368 | """
Period formatters and locators adapted from scikits.timeseries by
Pierre GF Gerard-Marchant & Matt Knox
"""
#!!! TODO: Use the fact that axis can have units to simplify the process
from matplotlib import pylab
from pandas.tseries.period import Period
from pandas.tseries.offsets import DateOffset
import pandas.tseries.frequencies as frequencies
from pandas.tseries.index import DatetimeIndex
import pandas.core.common as com
from pandas.tseries.converter import (TimeSeries_DateLocator,
TimeSeries_DateFormatter)
#----------------------------------------------------------------------
# Plotting functions and monkey patches
def tsplot(series, plotf, **kwargs):
"""
Plots a Series on the given Matplotlib axes or the current axes
Parameters
----------
axes : Axes
series : Series
Notes
    -----
Supports same kwargs as Axes.plot
"""
    # Use inferred freq if possible; need a test case for inferred
if 'ax' in kwargs:
ax = kwargs.pop('ax')
else:
import matplotlib.pyplot as plt
ax = plt.gca()
freq = _get_freq(ax, series)
# resample against axes freq if necessary
if freq is None: # pragma: no cover
raise ValueError('Cannot use dynamic axis without frequency info')
else:
# Convert DatetimeIndex to PeriodIndex
if isinstance(series.index, DatetimeIndex):
series = series.to_period(freq=freq)
freq, ax_freq, series = _maybe_resample(series, ax, freq, plotf,
kwargs)
# Set ax with freq info
_decorate_axes(ax, freq, kwargs)
# how to make sure ax.clear() flows through?
if not hasattr(ax, '_plot_data'):
ax._plot_data = []
ax._plot_data.append((series, plotf, kwargs))
lines = plotf(ax, series.index._mpl_repr(), series.values, **kwargs)
# set date formatter, locators and rescale limits
format_dateaxis(ax, ax.freq)
# x and y coord info
ax.format_coord = lambda t, y: ("t = {0} "
"y = {1:8f}".format(Period(ordinal=int(t),
freq=ax.freq),
y))
return lines
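# Hedged usage sketch (variable names assumed, not exported by this module):
# plot a Series with a PeriodIndex/DatetimeIndex using period-aware x ticks.
#   import matplotlib.pyplot as plt
#   fig, ax = plt.subplots()
#   tsplot(ts, lambda ax, x, y, **kwds: ax.plot(x, y, **kwds), ax=ax)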
def _maybe_resample(series, ax, freq, plotf, kwargs):
ax_freq = _get_ax_freq(ax)
if ax_freq is not None and freq != ax_freq:
if frequencies.is_superperiod(freq, ax_freq): # upsample input
series = series.copy()
series.index = series.index.asfreq(ax_freq, how='s')
freq = ax_freq
elif _is_sup(freq, ax_freq): # one is weekly
how = kwargs.pop('how', 'last')
series = series.resample('D', how=how).dropna()
series = series.resample(ax_freq, how=how).dropna()
freq = ax_freq
elif frequencies.is_subperiod(freq, ax_freq) or _is_sub(freq, ax_freq):
_upsample_others(ax, freq, plotf, kwargs)
ax_freq = freq
else: # pragma: no cover
raise ValueError('Incompatible frequency conversion')
return freq, ax_freq, series
def _get_ax_freq(ax):
ax_freq = getattr(ax, 'freq', None)
if ax_freq is None:
if hasattr(ax, 'left_ax'):
ax_freq = getattr(ax.left_ax, 'freq', None)
elif hasattr(ax, 'right_ax'):
ax_freq = getattr(ax.right_ax, 'freq', None)
return ax_freq
def _is_sub(f1, f2):
return ((f1.startswith('W') and frequencies.is_subperiod('D', f2)) or
(f2.startswith('W') and frequencies.is_subperiod(f1, 'D')))
def _is_sup(f1, f2):
return ((f1.startswith('W') and frequencies.is_superperiod('D', f2)) or
(f2.startswith('W') and frequencies.is_superperiod(f1, 'D')))
def _upsample_others(ax, freq, plotf, kwargs):
legend = ax.get_legend()
lines, labels = _replot_ax(ax, freq, kwargs)
other_ax = None
if hasattr(ax, 'left_ax'):
other_ax = ax.left_ax
if hasattr(ax, 'right_ax'):
other_ax = ax.right_ax
if other_ax is not None:
rlines, rlabels = _replot_ax(other_ax, freq, kwargs)
lines.extend(rlines)
labels.extend(rlabels)
if (legend is not None and kwargs.get('legend', True) and
len(lines) > 0):
title = legend.get_title().get_text()
if title == 'None':
title = None
ax.legend(lines, labels, loc='best', title=title)
def _replot_ax(ax, freq, kwargs):
data = getattr(ax, '_plot_data', None)
ax._plot_data = []
ax.clear()
_decorate_axes(ax, freq, kwargs)
lines = []
labels = []
if data is not None:
for series, plotf, kwds in data:
series = series.copy()
idx = series.index.asfreq(freq, how='S')
series.index = idx
ax._plot_data.append(series)
lines.append(plotf(ax, series.index._mpl_repr(), series.values, **kwds)[0])
labels.append(com.pprint_thing(series.name))
return lines, labels
def _decorate_axes(ax, freq, kwargs):
ax.freq = freq
xaxis = ax.get_xaxis()
xaxis.freq = freq
if not hasattr(ax, 'legendlabels'):
ax.legendlabels = [kwargs.get('label', None)]
else:
ax.legendlabels.append(kwargs.get('label', None))
ax.view_interval = None
ax.date_axis_info = None
def _get_freq(ax, series):
# get frequency from data
freq = getattr(series.index, 'freq', None)
if freq is None:
freq = getattr(series.index, 'inferred_freq', None)
ax_freq = getattr(ax, 'freq', None)
# use axes freq if no data freq
if freq is None:
freq = ax_freq
# get the period frequency
if isinstance(freq, DateOffset):
freq = freq.rule_code
else:
freq = frequencies.get_base_alias(freq)
freq = frequencies.get_period_alias(freq)
return freq
# Patch methods for subplot. Only format_dateaxis is currently used.
# Do we need the rest for convenience?
def format_dateaxis(subplot, freq):
"""
Pretty-formats the date axis (x-axis).
Major and minor ticks are automatically set for the frequency of the
current underlying series. As the dynamic mode is activated by
default, changing the limits of the x axis will intelligently change
the positions of the ticks.
"""
majlocator = TimeSeries_DateLocator(freq, dynamic_mode=True,
minor_locator=False,
plot_obj=subplot)
minlocator = TimeSeries_DateLocator(freq, dynamic_mode=True,
minor_locator=True,
plot_obj=subplot)
subplot.xaxis.set_major_locator(majlocator)
subplot.xaxis.set_minor_locator(minlocator)
majformatter = TimeSeries_DateFormatter(freq, dynamic_mode=True,
minor_locator=False,
plot_obj=subplot)
minformatter = TimeSeries_DateFormatter(freq, dynamic_mode=True,
minor_locator=True,
plot_obj=subplot)
subplot.xaxis.set_major_formatter(majformatter)
subplot.xaxis.set_minor_formatter(minformatter)
pylab.draw_if_interactive()
| mit |
WarrenWeckesser/scipy | scipy/signal/_max_len_seq.py | 12 | 4962 | # Author: Eric Larson
# 2014
"""Tools for MLS generation"""
import numpy as np
from ._max_len_seq_inner import _max_len_seq_inner
__all__ = ['max_len_seq']
# These are definitions of linear shift register taps for use in max_len_seq()
_mls_taps = {2: [1], 3: [2], 4: [3], 5: [3], 6: [5], 7: [6], 8: [7, 6, 1],
9: [5], 10: [7], 11: [9], 12: [11, 10, 4], 13: [12, 11, 8],
14: [13, 12, 2], 15: [14], 16: [15, 13, 4], 17: [14],
18: [11], 19: [18, 17, 14], 20: [17], 21: [19], 22: [21],
23: [18], 24: [23, 22, 17], 25: [22], 26: [25, 24, 20],
27: [26, 25, 22], 28: [25], 29: [27], 30: [29, 28, 7],
31: [28], 32: [31, 30, 10]}
def max_len_seq(nbits, state=None, length=None, taps=None):
"""
Maximum length sequence (MLS) generator.
Parameters
----------
nbits : int
Number of bits to use. Length of the resulting sequence will
be ``(2**nbits) - 1``. Note that generating long sequences
(e.g., greater than ``nbits == 16``) can take a long time.
state : array_like, optional
If array, must be of length ``nbits``, and will be cast to binary
(bool) representation. If None, a seed of ones will be used,
producing a repeatable representation. If ``state`` is all
zeros, an error is raised as this is invalid. Default: None.
length : int, optional
Number of samples to compute. If None, the entire length
``(2**nbits) - 1`` is computed.
taps : array_like, optional
Polynomial taps to use (e.g., ``[7, 6, 1]`` for an 8-bit sequence).
If None, taps will be automatically selected (for up to
``nbits == 32``).
Returns
-------
seq : array
Resulting MLS sequence of 0's and 1's.
state : array
The final state of the shift register.
Notes
-----
The algorithm for MLS generation is generically described in:
https://en.wikipedia.org/wiki/Maximum_length_sequence
The default values for taps are specifically taken from the first
option listed for each value of ``nbits`` in:
https://web.archive.org/web/20181001062252/http://www.newwaveinstruments.com/resources/articles/m_sequence_linear_feedback_shift_register_lfsr.htm
.. versionadded:: 0.15.0
Examples
--------
MLS uses binary convention:
>>> from scipy.signal import max_len_seq
>>> max_len_seq(4)[0]
array([1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0], dtype=int8)
MLS has a white spectrum (except for DC):
>>> import matplotlib.pyplot as plt
>>> from numpy.fft import fft, ifft, fftshift, fftfreq
>>> seq = max_len_seq(6)[0]*2-1 # +1 and -1
>>> spec = fft(seq)
>>> N = len(seq)
>>> plt.plot(fftshift(fftfreq(N)), fftshift(np.abs(spec)), '.-')
>>> plt.margins(0.1, 0.1)
>>> plt.grid(True)
>>> plt.show()
Circular autocorrelation of MLS is an impulse:
>>> acorrcirc = ifft(spec * np.conj(spec)).real
>>> plt.figure()
>>> plt.plot(np.arange(-N/2+1, N/2+1), fftshift(acorrcirc), '.-')
>>> plt.margins(0.1, 0.1)
>>> plt.grid(True)
>>> plt.show()
Linear autocorrelation of MLS is approximately an impulse:
>>> acorr = np.correlate(seq, seq, 'full')
>>> plt.figure()
>>> plt.plot(np.arange(-N+1, N), acorr, '.-')
>>> plt.margins(0.1, 0.1)
>>> plt.grid(True)
>>> plt.show()
"""
if taps is None:
if nbits not in _mls_taps:
known_taps = np.array(list(_mls_taps.keys()))
raise ValueError('nbits must be between %s and %s if taps is None'
% (known_taps.min(), known_taps.max()))
taps = np.array(_mls_taps[nbits], np.intp)
else:
taps = np.unique(np.array(taps, np.intp))[::-1]
if np.any(taps < 0) or np.any(taps > nbits) or taps.size < 1:
raise ValueError('taps must be non-empty with values between '
'zero and nbits (inclusive)')
taps = np.array(taps) # needed for Cython and Pythran
n_max = (2**nbits) - 1
if length is None:
length = n_max
else:
length = int(length)
if length < 0:
raise ValueError('length must be greater than or equal to 0')
# We use int8 instead of bool here because NumPy arrays of bools
# don't seem to work nicely with Cython
if state is None:
state = np.ones(nbits, dtype=np.int8, order='c')
else:
# makes a copy if need be, ensuring it's 0's and 1's
state = np.array(state, dtype=bool, order='c').astype(np.int8)
if state.ndim != 1 or state.size != nbits:
raise ValueError('state must be a 1-D array of size nbits')
if np.all(state == 0):
raise ValueError('state must not be all zeros')
seq = np.empty(length, dtype=np.int8, order='c')
state = _max_len_seq_inner(taps, state, nbits, length, seq)
return seq, state
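# A quick self-check sketch of the properties documented above: an MLS of
# ``nbits`` bits has length ``2**nbits - 1`` and contains exactly one more 1
# than 0, and the returned ``state`` can be fed back in to generate the same
# sequence in chunks. The chunk size of 100 below is an arbitrary choice.
if __name__ == '__main__':
    seq, _ = max_len_seq(8)
    assert seq.size == 2**8 - 1
    assert int(seq.sum()) == 2**7  # 128 ones versus 127 zeros
    first, mid_state = max_len_seq(8, length=100)
    rest, _ = max_len_seq(8, state=mid_state, length=seq.size - 100)
    assert np.array_equal(seq, np.concatenate([first, rest]))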
| bsd-3-clause |
Unidata/MetPy | v0.4/_downloads/Wind_SLP_Interpolation.py | 3 | 5173 | # Copyright (c) 2008-2016 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
Wind and Sea Level Pressure Interpolation
=========================================
Interpolate sea level pressure, as well as wind component data,
to make a consistent looking analysis, featuring contours of pressure and wind barbs.
"""
import cartopy
import cartopy.crs as ccrs
from matplotlib.colors import BoundaryNorm
import matplotlib.pyplot as plt
import numpy as np
from metpy.calc import get_wind_components
from metpy.cbook import get_test_data
from metpy.gridding.gridding_functions import interpolate, remove_nan_observations
from metpy.units import units
from_proj = ccrs.Geodetic()
to_proj = ccrs.AlbersEqualArea(central_longitude=-97., central_latitude=38.)
def station_test_data(variable_names, proj_from=None, proj_to=None):
f = get_test_data('station_data.txt')
all_data = np.loadtxt(f, skiprows=1, delimiter=',',
usecols=(1, 2, 3, 4, 5, 6, 7, 17, 18, 19),
dtype=np.dtype([('stid', '3S'), ('lat', 'f'), ('lon', 'f'),
('slp', 'f'), ('air_temperature', 'f'),
('cloud_fraction', 'f'), ('dewpoint', 'f'),
('weather', '16S'),
('wind_dir', 'f'), ('wind_speed', 'f')]))
all_stids = [s.decode('ascii') for s in all_data['stid']]
data = np.concatenate([all_data[all_stids.index(site)].reshape(1, ) for site in all_stids])
value = data[variable_names]
lon = data['lon']
lat = data['lat']
if proj_from is not None and proj_to is not None:
proj_points = proj_to.transform_points(proj_from, lon, lat)
return proj_points[:, 0], proj_points[:, 1], value
return lon, lat, value
###########################################
# Get pressure information using the sample station data
xp, yp, pres = station_test_data(['slp'], from_proj, to_proj)
###########################################
# Remove all missing data from pressure
pres = np.array([p[0] for p in pres])
xp, yp, pres = remove_nan_observations(xp, yp, pres)
###########################################
# Interpolate pressure as usual
slpgridx, slpgridy, slp = interpolate(xp, yp, pres, interp_type='cressman',
minimum_neighbors=1, search_radius=400000, hres=100000)
###########################################
# Get wind information
x, y, wind = station_test_data(['wind_speed', 'wind_dir'], from_proj, to_proj)
###########################################
# Remove bad data from wind information
wind_speed = np.array([w[0] for w in wind])
wind_dir = np.array([w[1] for w in wind])
good_indices = np.where((~np.isnan(wind_dir)) & (~np.isnan(wind_speed)))
x = x[good_indices]
y = y[good_indices]
wind_speed = wind_speed[good_indices]
wind_dir = wind_dir[good_indices]
###########################################
# Calculate u and v components of wind and then interpolate both.
#
# Both will have the same underlying grid so throw away grid returned from v interpolation.
u, v = get_wind_components((wind_speed * units('m/s')).to('knots'),
wind_dir * units.degree)
windgridx, windgridy, uwind = interpolate(x, y, np.array(u), interp_type='cressman',
search_radius=400000, hres=100000)
_, _, vwind = interpolate(x, y, np.array(v), interp_type='cressman', search_radius=400000,
hres=100000)
###########################################
# Get temperature information
levels = list(range(-20, 20, 1))
cmap = plt.get_cmap('viridis')
norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)
xt, yt, t = station_test_data('air_temperature', from_proj, to_proj)
xt, yt, t = remove_nan_observations(xt, yt, t)
tempx, tempy, temp = interpolate(xt, yt, t, interp_type='cressman', minimum_neighbors=3,
search_radius=400000, hres=35000)
temp = np.ma.masked_where(np.isnan(temp), temp)
###########################################
# Set up the map and plot the interpolated grids appropriately.
fig = plt.figure(figsize=(20, 10))
view = fig.add_subplot(1, 1, 1, projection=to_proj)
view.set_extent([-120, -70, 20, 50])
view.add_feature(cartopy.feature.NaturalEarthFeature(category='cultural',
name='admin_1_states_provinces_lakes',
scale='50m', facecolor='none'))
view.add_feature(cartopy.feature.OCEAN)
view.add_feature(cartopy.feature.COASTLINE)
view.add_feature(cartopy.feature.BORDERS, linestyle=':')
cs = view.contour(slpgridx, slpgridy, slp, colors='k', levels=list(range(990, 1034, 4)))
plt.clabel(cs, inline=1, fontsize=12, fmt='%i')
mmb = view.pcolormesh(tempx, tempy, temp, cmap=cmap, norm=norm)
plt.colorbar(mmb, shrink=.4, pad=0.02, boundaries=levels)
view.barbs(windgridx, windgridy, uwind, vwind, alpha=.4, length=5)
plt.title('Surface Temperature (shaded), SLP, and Wind.')
plt.show()
| bsd-3-clause |
GuillaumeArruda/INF4705 | TP2/Python/Plot/Plot/Plot.py | 1 | 1476 | import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import functools
import csv
import scipy.optimize
import numpy
def main():
fxys = []
xs = []
ys = []
with open('d.csv', newline='') as file:
reader = csv.reader(file, delimiter=',')
for x, y, fxy in reader:
fxys.append(float(fxy))
xs.append(float(x))
ys.append(float(y))
points = []
for x, y, f in zip(xs, ys, fxys):
points.append((x, y, f))
params0 = [1, 1, 1]
fun = functools.partial(error, points=points)
res = scipy.optimize.minimize(fun, params0)
xx, yy = numpy.meshgrid(range(0, 5), range(0, 6))
z_plane = []
for x, y in zip(xx, yy):
z_plane.append(plane(x, y, res.x))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlabel("Log du nombre de locations")
ax.set_ylabel("Log de la prodcution maximale de poulet")
ax.set_zlabel("Log du temps de calcul (s)")
ax.scatter(xs, ys, zs=fxys)
ax.plot_surface(xx, yy, z_plane, color='g', alpha = 0.2)
print(res)
plt.title("Test de puissance de l'algorithme dynamique")
plt.show()
def plane(x, y, params):
a, b, c = params
return a*x + b*y + c
def error(params, points):
result = 0
for(x,y,z) in points:
plane_z = plane(x, y, params)
diff = abs(plane_z - z)
result += diff**2
return result
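# A cross-check sketch: the same best-fit plane can also be obtained in closed
# form with ordinary least squares instead of scipy.optimize.minimize. The
# helper name and the rcond argument below are illustrative choices and are not
# used elsewhere in this script.
def fit_plane_lstsq(points):
    pts = numpy.asarray(points)
    # One row [x, y, 1] per point, so the solution is (a, b, c) as in plane().
    A = numpy.column_stack([pts[:, 0], pts[:, 1], numpy.ones(len(pts))])
    params, _, _, _ = numpy.linalg.lstsq(A, pts[:, 2], rcond=None)
    return params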
if __name__ == "__main__":
main() | bsd-3-clause |
devanshdalal/scikit-learn | examples/ensemble/plot_random_forest_regression_multioutput.py | 46 | 2640 | """
============================================================
Comparing random forests and the multi-output meta estimator
============================================================
An example to compare multi-output regression with random forest and
the :ref:`multioutput.MultiOutputRegressor <multiclass>` meta-estimator.
This example illustrates the use of the
:ref:`multioutput.MultiOutputRegressor <multiclass>` meta-estimator
to perform multi-output regression. A random forest regressor is used,
which supports multi-output regression natively, so the results can be
compared.
The random forest regressor will only ever predict values within the
range of observations or closer to zero for each of the targets. As a
result, the predictions are biased towards the centre of the circle.
Using a single underlying feature, the model learns both the
x and y coordinates as output.
"""
print(__doc__)
# Author: Tim Head <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.multioutput import MultiOutputRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(600, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y += (0.5 - rng.rand(*y.shape))
X_train, X_test, y_train, y_test = train_test_split(X, y,
train_size=400,
random_state=4)
max_depth = 30
regr_multirf = MultiOutputRegressor(RandomForestRegressor(max_depth=max_depth,
random_state=0))
regr_multirf.fit(X_train, y_train)
regr_rf = RandomForestRegressor(max_depth=max_depth, random_state=2)
regr_rf.fit(X_train, y_train)
# Predict on new data
y_multirf = regr_multirf.predict(X_test)
y_rf = regr_rf.predict(X_test)
# Plot the results
plt.figure()
s = 50
a = 0.4
plt.scatter(y_test[:, 0], y_test[:, 1],
c="navy", s=s, marker="s", alpha=a, label="Data")
plt.scatter(y_multirf[:, 0], y_multirf[:, 1],
c="cornflowerblue", s=s, alpha=a,
label="Multi RF score=%.2f" % regr_multirf.score(X_test, y_test))
plt.scatter(y_rf[:, 0], y_rf[:, 1],
c="c", s=s, marker="^", alpha=a,
label="RF score=%.2f" % regr_rf.score(X_test, y_test))
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("target 1")
plt.ylabel("target 2")
plt.title("Comparing random forests and the multi-output meta estimator")
plt.legend()
plt.show()
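# A small follow-up sketch: the fitted MultiOutputRegressor keeps one forest
# per target in its ``estimators_`` attribute, so the per-target R^2 scores
# behind the combined score shown in the legend can be inspected individually.
for i, est in enumerate(regr_multirf.estimators_):
    print("Target %d R^2: %.2f" % (i, est.score(X_test, y_test[:, i])))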
| bsd-3-clause |
danmackinlay/AutoGP | experiments/cifar10.py | 2 | 2893 | import sklearn.cluster
import numpy as np
import autogp
from autogp import likelihoods
from autogp import kernels
import tensorflow as tf
from autogp import datasets
from autogp import losses
from autogp import util
import os
import subprocess
DATA_DIR = 'experiments/data/cifar-10-batches-py/'
def init_z(train_inputs, num_inducing):
# Initialize inducing points using clustering.
mini_batch = sklearn.cluster.MiniBatchKMeans(num_inducing)
cluster_indices = mini_batch.fit_predict(train_inputs)
inducing_locations = mini_batch.cluster_centers_
return inducing_locations
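# An alternative initialisation sketch for comparison (the helper below is
# illustrative and is not called elsewhere in this script): draw a random
# subset of the training inputs as inducing point locations instead of
# k-means centres.
def init_z_random(train_inputs, num_inducing, seed=0):
    rng = np.random.RandomState(seed)
    idx = rng.choice(train_inputs.shape[0], size=num_inducing, replace=False)
    return train_inputs[idx].copy()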
def get_cifar_data():
print "Getting cifar10 data ..."
os.chdir('experiments/data')
subprocess.call(["./get_cifar10_data.sh"])
os.chdir("../../")
print "done"
def load_cifar():
if os.path.isdir(DATA_DIR) is False: # directory does not exist, download the data
get_cifar_data()
import cPickle
train_X = np.empty([0, 3072], dtype=np.float32)
train_Y = np.empty([0, 10], dtype=np.float32)
for i in range(1, 6):
f = open(DATA_DIR + "data_batch_" + str(i))
d = cPickle.load(f)
f.close()
train_X = np.concatenate([train_X, d["data"]])
train_Y = np.concatenate([train_Y, np.eye(10)[d["labels"]]])
f = open(DATA_DIR + "test_batch")
d = cPickle.load(f)
f.close()
train_X = train_X / 255.0
test_X = np.array(d["data"], dtype=np.float32) / 255.0
test_Y = np.array(np.eye(10)[d["labels"]], dtype=np.float32)
return train_X, train_Y, test_X, test_Y
if __name__ == '__main__':
FLAGS = util.util.get_flags()
BATCH_SIZE = FLAGS.batch_size
LEARNING_RATE = FLAGS.learning_rate
DISPLAY_STEP = FLAGS.display_step
EPOCHS = FLAGS.n_epochs
NUM_SAMPLES = FLAGS.mc_train
NUM_INDUCING = FLAGS.n_inducing
IS_ARD = FLAGS.is_ard
train_X, train_Y, test_X, test_Y = load_cifar()
data = datasets.DataSet(train_X, train_Y)
test = datasets.DataSet(test_X, test_Y)
# Setup initial values for the model.
likelihood = likelihoods.Softmax()
kern = [kernels.RadialBasis(data.X.shape[1], lengthscale=10.0, input_scaling = IS_ARD) for i in range(10)]
# kern = [kernels.ArcCosine(X.shape[1], 2, 3, 5.0, 1.0, input_scaling=True) for i in range(10)] #RadialBasis(X.shape[1], input_scaling=True) for i in range(10)]
Z = init_z(data.X, NUM_INDUCING)
m = autogp.GaussianProcess(likelihood, kern, Z, num_samples=NUM_SAMPLES)
# setting up loss to be reported during training
error_rate = losses.ZeroOneLoss(data.Dout)
o = tf.train.RMSPropOptimizer(LEARNING_RATE)
m.fit(data, o, loo_steps=50, var_steps=50, epochs=EPOCHS, batch_size=BATCH_SIZE, display_step=DISPLAY_STEP, test=test,
loss=error_rate)
ypred = m.predict(test.X)[0]
print("Final " + error_rate.get_name() + "=" + "%.4f" % error_rate.eval(test.Y, ypred))
| apache-2.0 |
JackKelly/bayesianchangepoint | setup.py | 1 | 1725 | """
Copyright 2013 Jack Kelly (aka Daniel)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from setuptools import setup, find_packages, Extension
from os.path import join
setup(
name='bayesianchangepoint',
version='0.1',
packages = find_packages(),
install_requires = ['numpy', 'matplotlib'],
description='An implementation of Adams and MacKay 2007'
' "Bayesian Online Changepoint Detection"'
' in Python. This code is based on the beautifully commented'
' MATLAB implementation provided by Ryan Adams.',
author='Jack Kelly',
author_email='[email protected]',
url='https://github.com/JackKelly/bayesianchangepoint',
download_url = 'https://github.com/JackKelly/bayesianchangepoint/tarball'
'/master#egg=bayesianchangepoint-dev',
long_description=open('README.md').read(),
license='Apache 2.0',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache 2.0',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: Mathematics',
],
keywords='bayesian bayes changepoint'
)
| apache-2.0 |
andrewcbennett/iris | docs/iris/example_code/Meteorology/COP_maps.py | 6 | 5189 | """
Global average annual temperature maps
======================================
Produces maps of global temperature forecasts from the A1B and E1 scenarios.
The data used comes from the HadGEM2-AO model simulations for the A1B and E1 scenarios, both of which were derived using the IMAGE
Integrated Assessment Model (Johns et al. 2010; Lowe et al. 2009).
References
----------
Johns T.C., et al. (2010) Climate change under aggressive mitigation: The ENSEMBLES multi-model experiment. Climate
Dynamics (submitted)
Lowe J.A., C.D. Hewitt, D.P. Van Vuuren, T.C. Johns, E. Stehfest, J-F. Royer, and P. van der Linden, 2009. New
Study For Climate Modeling, Analyses, and Scenarios. Eos Trans. AGU, Vol 90, No. 21.
"""
from six.moves import zip
import os.path
import matplotlib.pyplot as plt
import numpy as np
import iris
import iris.coords as coords
import iris.plot as iplt
def cop_metadata_callback(cube, field, filename):
""" A function which adds an "Experiment" coordinate which comes from the filename. """
# Extract the experiment name (such as a1b or e1) from the filename (in this case it is just the parent folder's name)
containing_folder = os.path.dirname(filename)
experiment_label = os.path.basename(containing_folder)
# Create a coordinate with the experiment label in it
exp_coord = coords.AuxCoord(experiment_label, long_name='Experiment', units='no_unit')
# and add it to the cube
cube.add_aux_coord(exp_coord)
def main():
# Load e1 and a1 using the callback to update the metadata
e1 = iris.load_cube(iris.sample_data_path('E1.2098.pp'),
callback=cop_metadata_callback)
a1b = iris.load_cube(iris.sample_data_path('A1B.2098.pp'),
callback=cop_metadata_callback)
    # Load the pre-industrial global average data
global_avg = iris.load_cube(iris.sample_data_path('pre-industrial.pp'))
# Define evenly spaced contour levels: -2.5, -1.5, ... 15.5, 16.5 with the specific colours
levels = np.arange(20) - 2.5
red = np.array([0, 0, 221, 239, 229, 217, 239, 234, 228, 222, 205, 196, 161, 137, 116, 89, 77, 60, 51]) / 256.
green = np.array([16, 217, 242, 243, 235, 225, 190, 160, 128, 87, 72, 59, 33, 21, 29, 30, 30, 29, 26]) / 256.
blue = np.array([255, 255, 243, 169, 99, 51, 63, 37, 39, 21, 27, 23, 22, 26, 29, 28, 27, 25, 22]) / 256.
    # Put those colours into an array which can be passed to contourf as the specific colours for each level
colors = np.array([red, green, blue]).T
    # Subtract the global average from each scenario inside the loop below
# Iterate over each latitude longitude slice for both e1 and a1b scenarios simultaneously
for e1_slice, a1b_slice in zip(e1.slices(['latitude', 'longitude']),
a1b.slices(['latitude', 'longitude'])):
time_coord = a1b_slice.coord('time')
# Calculate the difference from the mean
delta_e1 = e1_slice - global_avg
delta_a1b = a1b_slice - global_avg
# Make a wider than normal figure to house two maps side-by-side
fig = plt.figure(figsize=(12, 5))
# Get the time datetime from the coordinate
time = time_coord.units.num2date(time_coord.points[0])
        # Set a title for the entire figure, giving the forecast year. Also, set the y value for the
# title so that it is not tight to the top of the plot.
fig.suptitle('Annual Temperature Predictions for ' + time.strftime("%Y"), y=0.9, fontsize=18)
# Add the first subplot showing the E1 scenario
plt.subplot(121)
plt.title('HadGEM2 E1 Scenario', fontsize=10)
iplt.contourf(delta_e1, levels, colors=colors, linewidth=0, extend='both')
plt.gca().coastlines()
# get the current axes' subplot for use later on
plt1_ax = plt.gca()
# Add the second subplot showing the A1B scenario
plt.subplot(122)
plt.title('HadGEM2 A1B-Image Scenario', fontsize=10)
contour_result = iplt.contourf(delta_a1b, levels, colors=colors, linewidth=0, extend='both')
plt.gca().coastlines()
# get the current axes' subplot for use later on
plt2_ax = plt.gca()
        # Now add a colourbar whose leftmost point is the same as the leftmost point of the left hand plot
# and rightmost point is the rightmost point of the right hand plot
# Get the positions of the 2nd plot and the left position of the 1st plot
left, bottom, width, height = plt2_ax.get_position().bounds
first_plot_left = plt1_ax.get_position().bounds[0]
# the width of the colorbar should now be simple
width = left - first_plot_left + width
# Add axes to the figure, to place the colour bar
colorbar_axes = fig.add_axes([first_plot_left, bottom + 0.07, width, 0.03])
# Add the colour bar
cbar = plt.colorbar(contour_result, colorbar_axes, orientation='horizontal')
# Label the colour bar and add ticks
cbar.set_label(e1_slice.units)
cbar.ax.tick_params(length=0)
iplt.show()
if __name__ == '__main__':
main()
| gpl-3.0 |
mattilyra/scikit-learn | sklearn/preprocessing/tests/test_data.py | 10 | 59957 |
# Authors:
#
# Giorgio Patrini
#
# License: BSD 3 clause
import warnings
import numpy as np
import numpy.linalg as la
from scipy import sparse
from distutils.version import LooseVersion
from sklearn.externals.six import u
from sklearn.utils import gen_batches
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import clean_warning_registry
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import skip_if_32bit
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.preprocessing.data import _transform_selected
from sklearn.preprocessing.data import _handle_zeros_in_scale
from sklearn.preprocessing.data import Binarizer
from sklearn.preprocessing.data import KernelCenterer
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing.data import normalize
from sklearn.preprocessing.data import OneHotEncoder
from sklearn.preprocessing.data import StandardScaler
from sklearn.preprocessing.data import scale
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.preprocessing.data import minmax_scale
from sklearn.preprocessing.data import MaxAbsScaler
from sklearn.preprocessing.data import maxabs_scale
from sklearn.preprocessing.data import RobustScaler
from sklearn.preprocessing.data import robust_scale
from sklearn.preprocessing.data import add_dummy_feature
from sklearn.preprocessing.data import PolynomialFeatures
from sklearn.exceptions import DataConversionWarning
from sklearn import datasets
iris = datasets.load_iris()
# Make some data to be used many times
rng = np.random.RandomState(0)
n_features = 30
n_samples = 1000
offsets = rng.uniform(-1, 1, size=n_features)
scales = rng.uniform(1, 10, size=n_features)
X_2d = rng.randn(n_samples, n_features) * scales + offsets
X_1row = X_2d[0, :].reshape(1, n_features)
X_1col = X_2d[:, 0].reshape(n_samples, 1)
X_list_1row = X_1row.tolist()
X_list_1col = X_1col.tolist()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def _check_dim_1axis(a):
if isinstance(a, list):
return np.array(a).shape[0]
return a.shape[0]
def assert_correct_incr(i, batch_start, batch_stop, n, chunk_size,
n_samples_seen):
if batch_stop != n:
assert_equal((i + 1) * chunk_size, n_samples_seen)
else:
assert_equal(i * chunk_size + (batch_stop - batch_start),
n_samples_seen)
def test_polynomial_features():
# Test Polynomial Features
X1 = np.arange(6)[:, np.newaxis]
P1 = np.hstack([np.ones_like(X1),
X1, X1 ** 2, X1 ** 3])
deg1 = 3
X2 = np.arange(6).reshape((3, 2))
x1 = X2[:, :1]
x2 = X2[:, 1:]
P2 = np.hstack([x1 ** 0 * x2 ** 0,
x1 ** 1 * x2 ** 0,
x1 ** 0 * x2 ** 1,
x1 ** 2 * x2 ** 0,
x1 ** 1 * x2 ** 1,
x1 ** 0 * x2 ** 2])
deg2 = 2
for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X)
assert_array_almost_equal(P_test, P)
P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X)
assert_array_almost_equal(P_test, P[:, 1:])
interact = PolynomialFeatures(2, interaction_only=True, include_bias=True)
X_poly = interact.fit_transform(X)
assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]])
assert_equal(interact.powers_.shape, (interact.n_output_features_,
interact.n_input_features_))
def test_polynomial_feature_names():
X = np.arange(30).reshape(10, 3)
poly = PolynomialFeatures(degree=2, include_bias=True).fit(X)
feature_names = poly.get_feature_names()
assert_array_equal(['1', 'x0', 'x1', 'x2', 'x0^2', 'x0 x1',
'x0 x2', 'x1^2', 'x1 x2', 'x2^2'],
feature_names)
poly = PolynomialFeatures(degree=3, include_bias=False).fit(X)
feature_names = poly.get_feature_names(["a", "b", "c"])
assert_array_equal(['a', 'b', 'c', 'a^2', 'a b', 'a c', 'b^2',
'b c', 'c^2', 'a^3', 'a^2 b', 'a^2 c',
'a b^2', 'a b c', 'a c^2', 'b^3', 'b^2 c',
'b c^2', 'c^3'], feature_names)
# test some unicode
poly = PolynomialFeatures(degree=1, include_bias=True).fit(X)
feature_names = poly.get_feature_names([u"\u0001F40D", u"\u262E", u"\u05D0"])
assert_array_equal([u"1", u"\u0001F40D", u"\u262E", u"\u05D0"],
feature_names)
def test_standard_scaler_1d():
# Test scaling of dataset along single axis
    for X in [X_1row, X_1col, X_list_1row, X_list_1col]:
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_almost_equal(scaler.mean_, X.ravel())
assert_almost_equal(scaler.scale_, np.ones(n_features))
assert_array_almost_equal(X_scaled.mean(axis=0),
np.zeros_like(n_features))
assert_array_almost_equal(X_scaled.std(axis=0),
np.zeros_like(n_features))
else:
assert_almost_equal(scaler.mean_, X.mean())
assert_almost_equal(scaler.scale_, X.std())
assert_array_almost_equal(X_scaled.mean(axis=0),
np.zeros_like(n_features))
assert_array_almost_equal(X_scaled.mean(axis=0), .0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones(5).reshape(5, 1)
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_almost_equal(scaler.mean_, 1.)
assert_almost_equal(scaler.scale_, 1.)
assert_array_almost_equal(X_scaled.mean(axis=0), .0)
assert_array_almost_equal(X_scaled.std(axis=0), .0)
assert_equal(scaler.n_samples_seen_, X.shape[0])
def test_scale_1d():
# 1-d inputs
X_list = [1., 3., 5., 0.]
X_arr = np.array(X_list)
for X in [X_list, X_arr]:
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(), 0.0)
assert_array_almost_equal(X_scaled.std(), 1.0)
assert_array_equal(scale(X, with_mean=False, with_std=False), X)
@skip_if_32bit
def test_standard_scaler_numerical_stability():
"""Test numerical stability of scaling"""
    # np.log(1e-5) is taken because its floating point representation
    # was empirically found to cause numerical problems with np.mean & np.std.
x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
if LooseVersion(np.__version__) >= LooseVersion('1.9'):
# This does not raise a warning as the number of samples is too low
# to trigger the problem in recent numpy
x_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(scale(x), np.zeros(8))
else:
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(8))
    # with 2 more samples, the std computation runs into numerical issues:
x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(10))
x = np.ones(10, dtype=np.float64) * 1e-100
x_small_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(x_small_scaled, np.zeros(10))
# Large values can cause (often recoverable) numerical stability issues:
x_big = np.ones(10, dtype=np.float64) * 1e100
w = "Dataset may contain too large values"
x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big)
assert_array_almost_equal(x_big_scaled, np.zeros(10))
assert_array_almost_equal(x_big_scaled, x_small_scaled)
x_big_centered = assert_warns_message(UserWarning, w, scale, x_big,
with_std=False)
assert_array_almost_equal(x_big_centered, np.zeros(10))
assert_array_almost_equal(x_big_centered, x_small_scaled)
def test_scaler_2d_arrays():
# Test scaling of 2d array along first axis
rng = np.random.RandomState(0)
n_features = 5
n_samples = 4
X = rng.randn(n_samples, n_features)
    X[:, 0] = 0.0  # first feature is always zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_equal(scaler.n_samples_seen_, n_samples)
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert_true(X_scaled is not X)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), n_samples * [1.0])
# Check that the data hasn't been modified
assert_true(X_scaled is not X)
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is X)
X = rng.randn(4, 5)
X[:, 0] = 1.0 # first feature is a constant, non zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
    # Check that X has not been modified (copy)
assert_true(X_scaled is not X)
def test_handle_zeros_in_scale():
s1 = np.array([0, 1, 2, 3])
s2 = _handle_zeros_in_scale(s1, copy=True)
assert_false(s1[0] == s2[0])
assert_array_equal(s1, np.array([0, 1, 2, 3]))
assert_array_equal(s2, np.array([1, 1, 2, 3]))
def test_minmax_scaler_partial_fit():
# Test if partial_fit run over many batches of size 1 and 50
# gives the same results as fit
X = X_2d
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
# Test mean at the end of the process
scaler_batch = MinMaxScaler().fit(X)
scaler_incr = MinMaxScaler()
for batch in gen_batches(n_samples, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_array_almost_equal(scaler_batch.data_min_,
scaler_incr.data_min_)
assert_array_almost_equal(scaler_batch.data_max_,
scaler_incr.data_max_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.data_range_,
scaler_incr.data_range_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_)
# Test std after 1 step
batch0 = slice(0, chunk_size)
scaler_batch = MinMaxScaler().fit(X[batch0])
scaler_incr = MinMaxScaler().partial_fit(X[batch0])
assert_array_almost_equal(scaler_batch.data_min_,
scaler_incr.data_min_)
assert_array_almost_equal(scaler_batch.data_max_,
scaler_incr.data_max_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.data_range_,
scaler_incr.data_range_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_)
    # Test std until the end of partial fits
scaler_batch = MinMaxScaler().fit(X)
scaler_incr = MinMaxScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n_samples, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
def test_standard_scaler_partial_fit():
# Test if partial_fit run over many batches of size 1 and 50
# gives the same results as fit
X = X_2d
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
# Test mean at the end of the process
scaler_batch = StandardScaler(with_std=False).fit(X)
scaler_incr = StandardScaler(with_std=False)
for batch in gen_batches(n_samples, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_array_almost_equal(scaler_batch.mean_, scaler_incr.mean_)
assert_equal(scaler_batch.var_, scaler_incr.var_) # Nones
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
# Test std after 1 step
batch0 = slice(0, chunk_size)
scaler_incr = StandardScaler().partial_fit(X[batch0])
if chunk_size == 1:
assert_array_almost_equal(np.zeros(n_features, dtype=np.float64),
scaler_incr.var_)
assert_array_almost_equal(np.ones(n_features, dtype=np.float64),
scaler_incr.scale_)
else:
assert_array_almost_equal(np.var(X[batch0], axis=0),
scaler_incr.var_)
assert_array_almost_equal(np.std(X[batch0], axis=0),
scaler_incr.scale_) # no constants
    # Test std until the end of partial fits
scaler_batch = StandardScaler().fit(X)
scaler_incr = StandardScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n_samples, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.var_, scaler_incr.var_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
def test_standard_scaler_partial_fit_numerical_stability():
    # Test if the incremental computation introduces significant errors
    # for large datasets with values of large magnitude
rng = np.random.RandomState(0)
n_features = 2
n_samples = 100
offsets = rng.uniform(-1e15, 1e15, size=n_features)
scales = rng.uniform(1e3, 1e6, size=n_features)
X = rng.randn(n_samples, n_features) * scales + offsets
scaler_batch = StandardScaler().fit(X)
scaler_incr = StandardScaler()
for chunk in X:
scaler_incr = scaler_incr.partial_fit(chunk.reshape(1, n_features))
    # Regardless of absolute values, they must not differ by more than 6 significant digits
tol = 10 ** (-6)
assert_allclose(scaler_incr.mean_, scaler_batch.mean_, rtol=tol)
assert_allclose(scaler_incr.var_, scaler_batch.var_, rtol=tol)
assert_allclose(scaler_incr.scale_, scaler_batch.scale_, rtol=tol)
# NOTE Be aware that for much larger offsets std is very unstable (last
# assert) while mean is OK.
# Sparse input
size = (100, 3)
scale = 1e20
X = rng.randint(0, 2, size).astype(np.float64) * scale
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
for X in [X_csr, X_csc]:
# with_mean=False is required with sparse input
scaler = StandardScaler(with_mean=False).fit(X)
scaler_incr = StandardScaler(with_mean=False)
for chunk in X:
# chunk = sparse.csr_matrix(data_chunks)
scaler_incr = scaler_incr.partial_fit(chunk)
        # Regardless of magnitude, they must not differ by more than 6 significant digits
tol = 10 ** (-6)
assert_true(scaler.mean_ is not None)
assert_allclose(scaler_incr.var_, scaler.var_, rtol=tol)
assert_allclose(scaler_incr.scale_, scaler.scale_, rtol=tol)
def test_partial_fit_sparse_input():
# Check that sparsity is not destroyed
X = np.array([[1.], [0.], [0.], [5.]])
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
for X in [X_csr, X_csc]:
X_null = null_transform.partial_fit(X).transform(X)
assert_array_equal(X_null.data, X.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_null.data)
assert_array_equal(X_orig.data, X.data)
def test_standard_scaler_transform_with_partial_fit():
# Check some postconditions after applying partial_fit and transform
X = X_2d[:100, :]
scaler_incr = StandardScaler()
for i, batch in enumerate(gen_batches(X.shape[0], 1)):
X_sofar = X[:(i + 1), :]
chunks_copy = X_sofar.copy()
scaled_batch = StandardScaler().fit_transform(X_sofar)
scaler_incr = scaler_incr.partial_fit(X[batch])
scaled_incr = scaler_incr.transform(X_sofar)
assert_array_almost_equal(scaled_batch, scaled_incr)
assert_array_almost_equal(X_sofar, chunks_copy) # No change
right_input = scaler_incr.inverse_transform(scaled_incr)
assert_array_almost_equal(X_sofar, right_input)
zero = np.zeros(X.shape[1])
epsilon = np.nextafter(0, 1)
assert_array_less(zero, scaler_incr.var_ + epsilon) # as less or equal
assert_array_less(zero, scaler_incr.scale_ + epsilon)
# (i+1) because the Scaler has been already fitted
assert_equal((i + 1), scaler_incr.n_samples_seen_)
def test_min_max_scaler_iris():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.max(axis=0), 1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# not default params: min=1, max=2
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 1)
assert_array_almost_equal(X_trans.max(axis=0), 2)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# min=-.5, max=.6
scaler = MinMaxScaler(feature_range=(-.5, .6))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), -.5)
assert_array_almost_equal(X_trans.max(axis=0), .6)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# raises on invalid range
scaler = MinMaxScaler(feature_range=(2, 1))
assert_raises(ValueError, scaler.fit, X)
def test_min_max_scaler_zero_variance_features():
# Check min max scaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
# default params
scaler = MinMaxScaler()
X_trans = scaler.fit_transform(X)
X_expected_0_1 = [[0., 0., 0.5],
[0., 0., 0.0],
[0., 0., 1.0]]
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
X_trans_new = scaler.transform(X_new)
X_expected_0_1_new = [[+0., 1., 0.500],
[-1., 0., 0.083],
[+0., 0., 1.333]]
assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
X_expected_1_2 = [[1., 1., 1.5],
[1., 1., 1.0],
[1., 1., 2.0]]
assert_array_almost_equal(X_trans, X_expected_1_2)
# function interface
X_trans = minmax_scale(X)
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans = minmax_scale(X, feature_range=(1, 2))
assert_array_almost_equal(X_trans, X_expected_1_2)
def test_minmax_scale_axis1():
X = iris.data
X_trans = minmax_scale(X, axis=1)
assert_array_almost_equal(np.min(X_trans, axis=1), 0)
assert_array_almost_equal(np.max(X_trans, axis=1), 1)
def test_min_max_scaler_1d():
# Test scaling of dataset along single axis
    for X in [X_1row, X_1col, X_list_1row, X_list_1col]:
scaler = MinMaxScaler(copy=True)
X_scaled = scaler.fit(X).transform(X)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_array_almost_equal(X_scaled.min(axis=0),
np.zeros(n_features))
assert_array_almost_equal(X_scaled.max(axis=0),
np.zeros(n_features))
else:
assert_array_almost_equal(X_scaled.min(axis=0), .0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones(5).reshape(5, 1)
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_greater_equal(X_scaled.min(), 0.)
assert_less_equal(X_scaled.max(), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# Function interface
X_1d = X_1row.ravel()
min_ = X_1d.min()
max_ = X_1d.max()
assert_array_almost_equal((X_1d - min_) / (max_ - min_),
minmax_scale(X_1d, copy=True))
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
assert_raises(ValueError, StandardScaler().fit, X_csr)
assert_raises(ValueError, StandardScaler().fit, X_csc)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csc.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_array_almost_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.var_, scaler_csr.var_)
assert_array_almost_equal(scaler.scale_, scaler_csr.scale_)
assert_array_almost_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.var_, scaler_csc.var_)
assert_array_almost_equal(scaler.scale_, scaler_csc.scale_)
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_int():
# test that scaler converts integer input to floating
# for both sparse and dense matrices
rng = np.random.RandomState(42)
X = rng.randint(20, size=(4, 5))
    X[:, 0] = 0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
clean_warning_registry()
with warnings.catch_warnings(record=True):
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csc.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_array_almost_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.var_, scaler_csr.var_)
assert_array_almost_equal(scaler.scale_, scaler_csr.scale_)
assert_array_almost_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.var_, scaler_csc.var_)
assert_array_almost_equal(scaler.scale_, scaler_csc.scale_)
assert_array_almost_equal(
X_scaled.mean(axis=0),
[0., 1.109, 1.856, 21., 1.559], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(
X_csr_scaled.astype(np.float), 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
# Check that StandardScaler.fit does not change input
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
X_csc_copy = X_csc.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csc)
assert_array_equal(X_csc.toarray(), X_csc_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
# check scaling and fit with direct calls on sparse data
assert_raises(ValueError, scale, X_csr, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)
assert_raises(ValueError, scale, X_csc, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csc)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
assert_raises(ValueError, scaler.transform, X_csr)
assert_raises(ValueError, scaler.transform, X_csc)
X_transformed_csr = sparse.csr_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
X_transformed_csc = sparse.csc_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csc)
def test_scale_input_finiteness_validation():
# Check if non finite inputs raise ValueError
X = [np.nan, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
X = [np.inf, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
def test_robust_scaler_2d_arrays():
"""Test robust scaling of 2d array along first axis"""
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
scaler = RobustScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0)[0], 0)
def test_robust_scaler_transform_one_row_csr():
# Check RobustScaler on transforming csr matrix with one row
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
single_row = np.array([[0.1, 1., 2., 0., -1.]])
scaler = RobustScaler(with_centering=False)
scaler = scaler.fit(X)
row_trans = scaler.transform(sparse.csr_matrix(single_row))
row_expected = single_row / scaler.scale_
assert_array_almost_equal(row_trans.toarray(), row_expected)
row_scaled_back = scaler.inverse_transform(row_trans)
assert_array_almost_equal(single_row, row_scaled_back.toarray())
def test_robust_scaler_iris():
X = iris.data
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(25, 75), axis=0)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert_false(np.any(np.isnan(X_scaled)))
X_csr_scaled = scale(X_csr, with_mean=False)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
# test csc has same outcome
X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
# raises value error on axis != 0
assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)
assert_array_almost_equal(X_scaled.mean(axis=0),
[0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
    # Check that X has not been modified (copy)
assert_true(X_scaled is not X)
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# null scale
X_csr_scaled = scale(X_csr, with_mean=False, with_std=False, copy=True)
assert_array_almost_equal(X_csr.toarray(), X_csr_scaled.toarray())
def test_robust_scale_axis1():
X = iris.data
X_trans = robust_scale(X, axis=1)
assert_array_almost_equal(np.median(X_trans, axis=1), 0)
q = np.percentile(X_trans, q=(25, 75), axis=1)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_zero_variance_features():
"""Check RobustScaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
# NOTE: for such a small sample size, what we expect in the third column
# depends HEAVILY on the method used to calculate quantiles. The values
    # here were calculated to fit the quantiles produced by np.percentile
    # using numpy 1.9. Calculating quantiles with
# scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles
# would yield very different results!
X_expected = [[0., 0., +0.0],
[0., 0., -1.0],
[0., 0., +1.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 1., +0.],
[-1., 0., -0.83333],
[+0., 0., +1.66667]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3)
def test_maxabs_scaler_zero_variance_features():
"""Check MaxAbsScaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.3],
[0., 1., +1.5],
[0., 0., +0.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 2.0, 1.0 / 3.0],
[-1., 1.0, 0.0],
[+0., 1.0, 1.0]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2)
# function interface
X_trans = maxabs_scale(X)
assert_array_almost_equal(X_trans, X_expected)
# sparse data
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_trans_csr = scaler.fit_transform(X_csr)
X_trans_csc = scaler.fit_transform(X_csc)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans_csr.A, X_expected)
assert_array_almost_equal(X_trans_csc.A, X_expected)
X_trans_csr_inv = scaler.inverse_transform(X_trans_csr)
X_trans_csc_inv = scaler.inverse_transform(X_trans_csc)
assert_array_almost_equal(X, X_trans_csr_inv.A)
assert_array_almost_equal(X, X_trans_csc_inv.A)
def test_maxabs_scaler_large_negative_value():
# Check MaxAbsScaler on toy data with a large negative value
X = [[0., 1., +0.5, -1.0],
[0., 1., -0.3, -0.5],
[0., 1., -100.0, 0.0],
[0., 0., +0.0, -2.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 0.005, -0.5],
[0., 1., -0.003, -0.25],
[0., 1., -1.0, 0.0],
[0., 0., 0.0, -1.0]]
assert_array_almost_equal(X_trans, X_expected)
def test_maxabs_scaler_transform_one_row_csr():
# Check MaxAbsScaler on transforming csr matrix with one row
X = sparse.csr_matrix([[0.5, 1., 1.]])
scaler = MaxAbsScaler()
scaler = scaler.fit(X)
X_trans = scaler.transform(X)
X_expected = sparse.csr_matrix([[1., 1., 1.]])
assert_array_almost_equal(X_trans.toarray(), X_expected.toarray())
X_scaled_back = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X.toarray(), X_scaled_back.toarray())
@ignore_warnings
def test_deprecation_minmax_scaler():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
scaler = MinMaxScaler().fit(X)
depr_message = ("Attribute data_range will be removed in "
"0.19. Use ``data_range_`` instead")
data_range = assert_warns_message(DeprecationWarning, depr_message,
getattr, scaler, "data_range")
assert_array_equal(data_range, scaler.data_range)
depr_message = ("Attribute data_min will be removed in "
"0.19. Use ``data_min_`` instead")
data_min = assert_warns_message(DeprecationWarning, depr_message,
getattr, scaler, "data_min")
assert_array_equal(data_min, scaler.data_min)
def test_warning_scaling_integers():
# Check warning when scaling integer data
X = np.array([[1, 2, 0],
[0, 0, 0]], dtype=np.uint8)
w = "Data with input dtype uint8 was converted to float64"
clean_warning_registry()
assert_warns_message(DataConversionWarning, w, scale, X)
assert_warns_message(DataConversionWarning, w, StandardScaler().fit, X)
assert_warns_message(DataConversionWarning, w, MinMaxScaler().fit, X)
def test_maxabs_scaler_1d():
# Test scaling of dataset along single axis
    for X in [X_1row, X_1col, X_list_1row, X_list_1col]:
scaler = MaxAbsScaler(copy=True)
X_scaled = scaler.fit(X).transform(X)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)),
np.ones(n_features))
else:
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones(5).reshape(5, 1)
scaler = MaxAbsScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# function interface
X_1d = X_1row.ravel()
max_abs = np.abs(X_1d).max()
assert_array_almost_equal(X_1d / max_abs, maxabs_scale(X_1d, copy=True))
def test_maxabs_scaler_partial_fit():
# Test if partial_fit run over many batches of size 1 and 50
# gives the same results as fit
X = X_2d[:100, :]
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
# Test mean at the end of the process
scaler_batch = MaxAbsScaler().fit(X)
scaler_incr = MaxAbsScaler()
scaler_incr_csr = MaxAbsScaler()
scaler_incr_csc = MaxAbsScaler()
for batch in gen_batches(n, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
X_csr = sparse.csr_matrix(X[batch])
scaler_incr_csr = scaler_incr_csr.partial_fit(X_csr)
X_csc = sparse.csc_matrix(X[batch])
scaler_incr_csc = scaler_incr_csc.partial_fit(X_csc)
assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_)
assert_array_almost_equal(scaler_batch.max_abs_,
scaler_incr_csr.max_abs_)
assert_array_almost_equal(scaler_batch.max_abs_,
scaler_incr_csc.max_abs_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_equal(scaler_batch.n_samples_seen_,
scaler_incr_csr.n_samples_seen_)
assert_equal(scaler_batch.n_samples_seen_,
scaler_incr_csc.n_samples_seen_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csr.scale_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csc.scale_)
assert_array_almost_equal(scaler_batch.transform(X),
scaler_incr.transform(X))
# Test std after 1 step
batch0 = slice(0, chunk_size)
scaler_batch = MaxAbsScaler().fit(X[batch0])
scaler_incr = MaxAbsScaler().partial_fit(X[batch0])
assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.transform(X),
scaler_incr.transform(X))
    # Test std until the end of partial fits
scaler_batch = MaxAbsScaler().fit(X)
scaler_incr = MaxAbsScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert_true(X_norm is not X)
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert_true(X_norm is X)
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='l1', copy=False).transform(X)
        assert_true(X_norm is not X)
        assert_true(isinstance(X_norm, sparse.csr_matrix))
        X_norm = toarray(X_norm)
        row_sums = np.abs(X_norm).sum(axis=1)
        for i in range(3):
            assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_max():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='max', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='max', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
row_maxs = X_norm.max(axis=1)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(row_maxs[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='max', copy=False).transform(X)
        assert_true(X_norm is not X)
        assert_true(isinstance(X_norm, sparse.csr_matrix))
        X_norm = toarray(X_norm)
        row_maxs = X_norm.max(axis=1)
        for i in range(3):
            assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalize():
# Test normalize function
# Only tests functionality not used by the tests for Normalizer.
X = np.random.RandomState(37).randn(3, 2)
assert_array_equal(normalize(X, copy=False),
normalize(X.T, axis=0, copy=False).T)
assert_raises(ValueError, normalize, [[0]], axis=2)
assert_raises(ValueError, normalize, [[0]], norm='l3')
rs = np.random.RandomState(0)
X_dense = rs.randn(10, 5)
X_sparse = sparse.csr_matrix(X_dense)
ones = np.ones((10))
for X in (X_dense, X_sparse):
for dtype in (np.float32, np.float64):
for norm in ('l1', 'l2'):
X = X.astype(dtype)
X_norm = normalize(X, norm=norm)
assert_equal(X_norm.dtype, dtype)
X_norm = toarray(X_norm)
if norm == 'l1':
row_sums = np.abs(X_norm).sum(axis=1)
else:
X_norm_squared = X_norm**2
row_sums = X_norm_squared.sum(axis=1)
assert_array_almost_equal(row_sums, ones)
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, -1]])
for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 4)
assert_equal(np.sum(X_bin == 1), 2)
X_bin = binarizer.transform(X)
assert_equal(sparse.issparse(X), sparse.issparse(X_bin))
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert_true(X_bin is not X)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert_true(X_bin is not X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
if init is not list:
assert_true(X_bin is X)
binarizer = Binarizer(copy=False)
X_float = np.array([[1, 0, 5], [2, 3, -1]], dtype=np.float64)
X_bin = binarizer.transform(X_float)
if init is not list:
assert_true(X_bin is X_float)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(threshold=-0.5, copy=True)
for init in (np.array, list):
X = init(X_.copy())
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 1)
assert_equal(np.sum(X_bin == 1), 5)
X_bin = binarizer.transform(X)
# Cannot use threshold < 0 for sparse
assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X))
def test_center_kernel():
# Test that KernelCenterer is equivalent to StandardScaler
# in feature space
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
scaler = StandardScaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
K_fit = np.dot(X_fit, X_fit.T)
# center fit time matrix
centerer = KernelCenterer()
K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
K_fit_centered2 = centerer.fit_transform(K_fit)
assert_array_almost_equal(K_fit_centered, K_fit_centered2)
# center predict time matrix
X_pred = rng.random_sample((2, 4))
K_pred = np.dot(X_pred, X_fit.T)
X_pred_centered = scaler.transform(X_pred)
K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
K_pred_centered2 = centerer.transform(K_pred)
assert_array_almost_equal(K_pred_centered, K_pred_centered2)
def test_fit_transform():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for obj in ((StandardScaler(), Normalizer(), Binarizer())):
X_transformed = obj.fit(X).transform(X)
X_transformed2 = obj.fit_transform(X)
assert_array_equal(X_transformed, X_transformed2)
def test_deprecation_standard_scaler():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
scaler = StandardScaler().fit(X)
depr_message = ("Function std_ is deprecated; Attribute ``std_`` will be "
"removed in 0.19. Use ``scale_`` instead")
std_ = assert_warns_message(DeprecationWarning, depr_message, getattr,
scaler, "std_")
assert_array_equal(std_, scaler.scale_)
def test_add_dummy_feature():
X = [[1, 0], [0, 1], [0, 1]]
X = add_dummy_feature(X)
assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_coo(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csc(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csr(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_one_hot_encoder_sparse():
# Test OneHotEncoder's fit and transform.
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder()
# discover max values automatically
X_trans = enc.fit_transform(X).toarray()
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
[[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]])
    # max value given as 4
enc = OneHotEncoder(n_values=4)
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 4 * 3))
assert_array_equal(enc.feature_indices_, [0, 4, 8, 12])
# max value given per feature
enc = OneHotEncoder(n_values=[3, 2, 2])
X = [[1, 0, 1], [0, 1, 1]]
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 3 + 2 + 2))
assert_array_equal(enc.n_values_, [3, 2, 2])
# check that testing with larger feature works:
X = np.array([[2, 0, 1], [0, 1, 1]])
enc.transform(X)
# test that an error is raised when out of bounds:
X_too_large = [[0, 2, 1], [0, 1, 1]]
assert_raises(ValueError, enc.transform, X_too_large)
error_msg = "unknown categorical feature present \[2\] during transform."
assert_raises_regex(ValueError, error_msg, enc.transform, X_too_large)
assert_raises(ValueError, OneHotEncoder(n_values=2).fit_transform, X)
# test that error is raised when wrong number of features
assert_raises(ValueError, enc.transform, X[:, :-1])
# test that error is raised when wrong number of features in fit
# with prespecified n_values
assert_raises(ValueError, enc.fit, X[:, :-1])
# test exception on wrong init param
assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X)
enc = OneHotEncoder()
# test negative input to fit
assert_raises(ValueError, enc.fit, [[0], [-1]])
# test negative input to transform
enc.fit([[0], [1]])
assert_raises(ValueError, enc.transform, [[0], [-1]])
def test_one_hot_encoder_dense():
# check for sparse=False
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder(sparse=False)
# discover max values automatically
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
np.array([[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]]))
def _check_transform_selected(X, X_expected, sel):
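    # Apply the Binarizer to only the columns picked out by `sel`, for both
    # dense and CSR input, and compare the result to the expected output.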
for M in (X, sparse.csr_matrix(X)):
Xtr = _transform_selected(M, Binarizer().transform, sel)
assert_array_equal(toarray(Xtr), X_expected)
def test_transform_selected():
X = [[3, 2, 1], [0, 1, 1]]
X_expected = [[1, 2, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0])
_check_transform_selected(X, X_expected, [True, False, False])
X_expected = [[1, 1, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0, 1, 2])
_check_transform_selected(X, X_expected, [True, True, True])
_check_transform_selected(X, X_expected, "all")
_check_transform_selected(X, X, [])
_check_transform_selected(X, X, [False, False, False])
def _run_one_hot(X, X2, cat):
enc = OneHotEncoder(categorical_features=cat)
Xtr = enc.fit_transform(X)
X2tr = enc.transform(X2)
return Xtr, X2tr
def _check_one_hot(X, X2, cat, n_features):
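    # Run the encoder twice -- once with a boolean mask and once with integer
    # indices for categorical_features -- and check that both give encodings
    # of the expected width with identical contents.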
ind = np.where(cat)[0]
# With mask
A, B = _run_one_hot(X, X2, cat)
# With indices
C, D = _run_one_hot(X, X2, ind)
# Check shape
assert_equal(A.shape, (2, n_features))
assert_equal(B.shape, (1, n_features))
assert_equal(C.shape, (2, n_features))
assert_equal(D.shape, (1, n_features))
# Check that mask and indices give the same results
assert_array_equal(toarray(A), toarray(C))
assert_array_equal(toarray(B), toarray(D))
def test_one_hot_encoder_categorical_features():
X = np.array([[3, 2, 1], [0, 1, 1]])
X2 = np.array([[1, 1, 1]])
cat = [True, False, False]
_check_one_hot(X, X2, cat, 4)
# Edge case: all non-categorical
cat = [False, False, False]
_check_one_hot(X, X2, cat, 3)
# Edge case: all categorical
cat = [True, True, True]
_check_one_hot(X, X2, cat, 5)
def test_one_hot_encoder_unknown_transform():
X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]])
y = np.array([[4, 1, 1]])
# Test that one hot encoder raises error for unknown features
# present during transform.
oh = OneHotEncoder(handle_unknown='error')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
# Test the ignore option, ignores unknown features.
oh = OneHotEncoder(handle_unknown='ignore')
oh.fit(X)
assert_array_equal(
oh.transform(y).toarray(),
np.array([[0., 0., 0., 0., 1., 0., 0.]]))
    # Raise an error if handle_unknown is neither ignore nor error.
oh = OneHotEncoder(handle_unknown='42')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
def test_fit_cold_start():
X = iris.data
X_2d = X[:, :2]
# Scalers that have a partial_fit method
scalers = [StandardScaler(with_mean=False, with_std=False),
MinMaxScaler(),
MaxAbsScaler()]
for scaler in scalers:
scaler.fit_transform(X)
# with a different shape, this may break the scaler unless the internal
# state is reset
scaler.fit_transform(X_2d)
| bsd-3-clause |
chenyyx/scikit-learn-doc-zh | examples/en/cluster/plot_face_segmentation.py | 26 | 2561 | """
===================================================
Segmenting the picture of a raccoon face in regions
===================================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discretize' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <[email protected]>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
# load the raccoon face as a numpy array
try:  # SciPy >= 0.16 has face in misc
from scipy.misc import face
face = face(gray=True)
except ImportError:
face = sp.face(gray=True)
# Resize it to 10% of the original size to speed up the processing
face = sp.misc.imresize(face, 0.10) / 255.
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(face)
# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a Voronoi tessellation.
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / graph.data.std()) + eps
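# With this weighting, edges that cross strong gradients get weights close to
# eps, while edges between similar pixels stay close to 1 + eps; the small eps
# term mainly keeps every edge weight strictly positive.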
# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 25
#############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
t0 = time.time()
labels = spectral_clustering(graph, n_clusters=N_REGIONS,
assign_labels=assign_labels, random_state=1)
t1 = time.time()
labels = labels.reshape(face.shape)
plt.figure(figsize=(5, 5))
plt.imshow(face, cmap=plt.cm.gray)
for l in range(N_REGIONS):
plt.contour(labels == l, contours=1,
colors=[plt.cm.spectral(l / float(N_REGIONS))])
plt.xticks(())
plt.yticks(())
title = 'Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0))
print(title)
plt.title(title)
plt.show()
| gpl-3.0 |
arjoly/scikit-learn | examples/linear_model/plot_polynomial_interpolation.py | 251 | 1895 | #!/usr/bin/env python
"""
========================
Polynomial interpolation
========================
This example demonstrates how to approximate a function with a polynomial of
degree n_degree by using ridge regression. Concretely, from n_samples 1d
points, it suffices to build the Vandermonde matrix, which is n_samples x
n_degree+1 and has the following form:
[[1, x_1, x_1 ** 2, x_1 ** 3, ...],
[1, x_2, x_2 ** 2, x_2 ** 3, ...],
...]
Intuitively, this matrix can be interpreted as a matrix of pseudo features (the
points raised to some power). The matrix is akin to (but different from) the
matrix induced by a polynomial kernel.
This example shows that you can do non-linear regression with a linear model,
using a pipeline to add non-linear features. Kernel methods extend this idea
and can induce very high (even infinite) dimensional feature spaces.
"""
print(__doc__)
# Author: Mathieu Blondel
# Jake Vanderplas
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
def f(x):
""" function to approximate by polynomial interpolation"""
return x * np.sin(x)
# generate points used to plot
x_plot = np.linspace(0, 10, 100)
# generate points and keep a subset of them
x = np.linspace(0, 10, 100)
rng = np.random.RandomState(0)
rng.shuffle(x)
x = np.sort(x[:20])
y = f(x)
# create matrix versions of these arrays
X = x[:, np.newaxis]
X_plot = x_plot[:, np.newaxis]
plt.plot(x_plot, f(x_plot), label="ground truth")
plt.scatter(x, y, label="training points")
for degree in [3, 4, 5]:
model = make_pipeline(PolynomialFeatures(degree), Ridge())
model.fit(X, y)
y_plot = model.predict(X_plot)
plt.plot(x_plot, y_plot, label="degree %d" % degree)
plt.legend(loc='lower left')
plt.show()
| bsd-3-clause |
kyleam/seaborn | seaborn/tests/test_utils.py | 2 | 11252 | """Tests for plotting utilities."""
import warnings
import tempfile
import shutil
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import nose
import nose.tools as nt
from nose.tools import assert_equal, raises
import numpy.testing as npt
import pandas.util.testing as pdt
from distutils.version import LooseVersion
pandas_has_categoricals = LooseVersion(pd.__version__) >= "0.15"
from pandas.util.testing import network
from ..utils import get_dataset_names, load_dataset
try:
from bs4 import BeautifulSoup
except ImportError:
BeautifulSoup = None
from .. import utils, rcmod
a_norm = np.random.randn(100)
def test_pmf_hist_basics():
"""Test the function to return barplot args for pmf hist."""
out = utils.pmf_hist(a_norm)
assert_equal(len(out), 3)
x, h, w = out
assert_equal(len(x), len(h))
# Test simple case
a = np.arange(10)
x, h, w = utils.pmf_hist(a, 10)
nose.tools.assert_true(np.all(h == h[0]))
def test_pmf_hist_widths():
"""Test histogram width is correct."""
x, h, w = utils.pmf_hist(a_norm)
assert_equal(x[1] - x[0], w)
def test_pmf_hist_normalization():
"""Test that output data behaves like a PMF."""
x, h, w = utils.pmf_hist(a_norm)
nose.tools.assert_almost_equal(sum(h), 1)
nose.tools.assert_less_equal(h.max(), 1)
def test_pmf_hist_bins():
"""Test bin specification."""
x, h, w = utils.pmf_hist(a_norm, 20)
assert_equal(len(x), 20)
def test_ci_to_errsize():
"""Test behavior of ci_to_errsize."""
cis = [[.5, .5],
[1.25, 1.5]]
heights = [1, 1.5]
actual_errsize = np.array([[.5, 1],
[.25, 0]])
test_errsize = utils.ci_to_errsize(cis, heights)
npt.assert_array_equal(actual_errsize, test_errsize)
def test_desaturate():
"""Test color desaturation."""
out1 = utils.desaturate("red", .5)
assert_equal(out1, (.75, .25, .25))
out2 = utils.desaturate("#00FF00", .5)
assert_equal(out2, (.25, .75, .25))
out3 = utils.desaturate((0, 0, 1), .5)
assert_equal(out3, (.25, .25, .75))
out4 = utils.desaturate("red", .5)
assert_equal(out4, (.75, .25, .25))
@raises(ValueError)
def test_desaturation_prop():
"""Test that pct outside of [0, 1] raises exception."""
utils.desaturate("blue", 50)
def test_saturate():
"""Test performance of saturation function."""
out = utils.saturate((.75, .25, .25))
assert_equal(out, (1, 0, 0))
def test_iqr():
"""Test the IQR function."""
a = np.arange(5)
iqr = utils.iqr(a)
assert_equal(iqr, 2)
class TestSpineUtils(object):
sides = ["left", "right", "bottom", "top"]
outer_sides = ["top", "right"]
inner_sides = ["left", "bottom"]
offset = 10
original_position = ("outward", 0)
offset_position = ("outward", offset)
def test_despine(self):
f, ax = plt.subplots()
for side in self.sides:
nt.assert_true(ax.spines[side].get_visible())
utils.despine()
for side in self.outer_sides:
nt.assert_true(~ax.spines[side].get_visible())
for side in self.inner_sides:
nt.assert_true(ax.spines[side].get_visible())
utils.despine(**dict(zip(self.sides, [True] * 4)))
for side in self.sides:
nt.assert_true(~ax.spines[side].get_visible())
plt.close("all")
def test_despine_specific_axes(self):
f, (ax1, ax2) = plt.subplots(2, 1)
utils.despine(ax=ax2)
for side in self.sides:
nt.assert_true(ax1.spines[side].get_visible())
for side in self.outer_sides:
nt.assert_true(~ax2.spines[side].get_visible())
for side in self.inner_sides:
nt.assert_true(ax2.spines[side].get_visible())
plt.close("all")
def test_despine_with_offset(self):
f, ax = plt.subplots()
for side in self.sides:
nt.assert_equal(ax.spines[side].get_position(),
self.original_position)
utils.despine(ax=ax, offset=self.offset)
for side in self.sides:
is_visible = ax.spines[side].get_visible()
new_position = ax.spines[side].get_position()
if is_visible:
nt.assert_equal(new_position, self.offset_position)
else:
nt.assert_equal(new_position, self.original_position)
plt.close("all")
def test_despine_with_offset_specific_axes(self):
f, (ax1, ax2) = plt.subplots(2, 1)
utils.despine(offset=self.offset, ax=ax2)
for side in self.sides:
nt.assert_equal(ax1.spines[side].get_position(),
self.original_position)
if ax2.spines[side].get_visible():
nt.assert_equal(ax2.spines[side].get_position(),
self.offset_position)
else:
nt.assert_equal(ax2.spines[side].get_position(),
self.original_position)
plt.close("all")
def test_despine_trim_spines(self):
f, ax = plt.subplots()
ax.plot([1, 2, 3], [1, 2, 3])
ax.set_xlim(.75, 3.25)
utils.despine(trim=True)
for side in self.inner_sides:
bounds = ax.spines[side].get_bounds()
nt.assert_equal(bounds, (1, 3))
plt.close("all")
def test_despine_trim_inverted(self):
f, ax = plt.subplots()
ax.plot([1, 2, 3], [1, 2, 3])
ax.set_ylim(.85, 3.15)
ax.invert_yaxis()
utils.despine(trim=True)
for side in self.inner_sides:
bounds = ax.spines[side].get_bounds()
nt.assert_equal(bounds, (1, 3))
plt.close("all")
def test_despine_trim_noticks(self):
f, ax = plt.subplots()
ax.plot([1, 2, 3], [1, 2, 3])
ax.set_yticks([])
utils.despine(trim=True)
nt.assert_equal(ax.get_yticks().size, 0)
def test_offset_spines_warns(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", category=UserWarning)
f, ax = plt.subplots()
utils.offset_spines(offset=self.offset)
nt.assert_true('deprecated' in str(w[0].message))
nt.assert_true(issubclass(w[0].category, UserWarning))
plt.close('all')
def test_offset_spines(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", category=UserWarning)
f, ax = plt.subplots()
for side in self.sides:
nt.assert_equal(ax.spines[side].get_position(),
self.original_position)
utils.offset_spines(offset=self.offset)
for side in self.sides:
nt.assert_equal(ax.spines[side].get_position(),
self.offset_position)
plt.close("all")
def test_offset_spines_specific_axes(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", category=UserWarning)
f, (ax1, ax2) = plt.subplots(2, 1)
utils.offset_spines(offset=self.offset, ax=ax2)
for side in self.sides:
nt.assert_equal(ax1.spines[side].get_position(),
self.original_position)
nt.assert_equal(ax2.spines[side].get_position(),
self.offset_position)
plt.close("all")
def test_ticklabels_overlap():
rcmod.set()
f, ax = plt.subplots(figsize=(2, 2))
f.tight_layout() # This gets the Agg renderer working
assert not utils.axis_ticklabels_overlap(ax.get_xticklabels())
big_strings = "abcdefgh", "ijklmnop"
ax.set_xlim(-.5, 1.5)
ax.set_xticks([0, 1])
ax.set_xticklabels(big_strings)
assert utils.axis_ticklabels_overlap(ax.get_xticklabels())
x, y = utils.axes_ticklabels_overlap(ax)
assert x
assert not y
def test_categorical_order():
x = ["a", "c", "c", "b", "a", "d"]
order = ["a", "b", "c", "d"]
out = utils.categorical_order(x)
nt.assert_equal(out, ["a", "c", "b", "d"])
out = utils.categorical_order(x, order)
nt.assert_equal(out, order)
out = utils.categorical_order(x, ["b", "a"])
nt.assert_equal(out, ["b", "a"])
out = utils.categorical_order(np.array(x))
nt.assert_equal(out, ["a", "c", "b", "d"])
out = utils.categorical_order(pd.Series(x))
nt.assert_equal(out, ["a", "c", "b", "d"])
if pandas_has_categoricals:
x = pd.Categorical(x, order)
out = utils.categorical_order(x)
nt.assert_equal(out, list(x.categories))
x = pd.Series(x)
out = utils.categorical_order(x)
nt.assert_equal(out, list(x.cat.categories))
out = utils.categorical_order(x, ["b", "a"])
nt.assert_equal(out, ["b", "a"])
x = ["a", np.nan, "c", "c", "b", "a", "d"]
out = utils.categorical_order(x)
nt.assert_equal(out, ["a", "c", "b", "d"])
if LooseVersion(pd.__version__) >= "0.15":
def check_load_dataset(name):
ds = load_dataset(name, cache=False)
assert(isinstance(ds, pd.DataFrame))
def check_load_cached_dataset(name):
# Test the cacheing using a temporary file.
# With Python 3.2+, we could use the tempfile.TemporaryDirectory()
# context manager instead of this try...finally statement
tmpdir = tempfile.mkdtemp()
try:
# download and cache
ds = load_dataset(name, cache=True, data_home=tmpdir)
# use cached version
ds2 = load_dataset(name, cache=True, data_home=tmpdir)
pdt.assert_frame_equal(ds, ds2)
finally:
shutil.rmtree(tmpdir)
@network(url="https://github.com/mwaskom/seaborn-data")
def test_get_dataset_names():
if not BeautifulSoup:
raise nose.SkipTest("No BeautifulSoup available for parsing html")
names = get_dataset_names()
assert(len(names) > 0)
assert(u"titanic" in names)
@network(url="https://github.com/mwaskom/seaborn-data")
def test_load_datasets():
if not BeautifulSoup:
raise nose.SkipTest("No BeautifulSoup available for parsing html")
# Heavy test to verify that we can load all available datasets
for name in get_dataset_names():
# unfortunately @network somehow obscures this generator so it
# does not get in effect, so we need to call explicitly
# yield check_load_dataset, name
check_load_dataset(name)
@network(url="https://github.com/mwaskom/seaborn-data")
def test_load_cached_datasets():
if not BeautifulSoup:
raise nose.SkipTest("No BeautifulSoup available for parsing html")
# Heavy test to verify that we can load all available datasets
for name in get_dataset_names():
# unfortunately @network somehow obscures this generator so it
# does not get in effect, so we need to call explicitly
# yield check_load_dataset, name
check_load_cached_dataset(name)
| bsd-3-clause |
Ernestyj/PyStudy | finance/WeekTest/AdaboostSGDTest.py | 1 | 2612 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import talib
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 30)
pd.set_option('precision', 7)
pd.options.display.float_format = '{:,.3f}'.format
from WeekDataPrepare import readWSDFile, readWSDIndexFile, prepareData, optimizeSVM
from sklearn import preprocessing, cross_validation, metrics, pipeline, grid_search
from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier, ExtraTreesClassifier, BaggingClassifier
from sklearn.svm import SVC
from sklearn.linear_model import SGDClassifier, LogisticRegression, RidgeClassifier
from sklearn.tree import DecisionTreeClassifier
baseDir = '/Users/eugene/Downloads/Data/'
instruments = ['000300.SH', '000016.SH', '000905.SH']
i = 2
startYear = 2015
yearNum = 1
df = readWSDFile(baseDir, instruments[i], startYear, yearNum)
print 'Day count:', len(df)
# print df.head(5)
dfi = readWSDIndexFile(baseDir, instruments[i], startYear, yearNum)
X, y, actionDates = prepareData(df, dfi)
print np.shape(X)
normalizer = preprocessing.Normalizer().fit(X) # fit does nothing
X_norm = normalizer.transform(X)
def optimizeAdaBoostSGD(X_norm, y, kFolds=10):
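    # Tune only the regularisation strength (alpha) of the SGD base estimator;
    # the 'base_estimator__alpha' key uses scikit-learn's double-underscore
    # convention to reach the SGDClassifier nested inside AdaBoostClassifier.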
    # grid search over multiple parameters
parameters = {
# 'base_estimator__alpha': 10.0 ** (-np.arange(1, 7)),
'base_estimator__alpha': np.logspace(-8, -1, 8),
# 'n_estimators': np.linspace(1, 100, 10, dtype=np.dtype(np.int16)),
}
# sgd = SGDClassifier(loss='log', n_iter=np.ceil(10**6/len(X_norm)))
sgd = SGDClassifier(loss='log', n_iter=5, random_state=47)
clf = AdaBoostClassifier(base_estimator=sgd, n_estimators=200, random_state=47)
gs = grid_search.GridSearchCV(clf, parameters, verbose=1, refit=False, cv=kFolds)
gs.fit(X_norm, y)
return gs.best_params_['base_estimator__alpha'], gs.best_score_
def evaluate_cross_validation(clf, X, y, K):
from scipy.stats import sem
cv = cross_validation.KFold(len(y), K, shuffle=True, random_state=0)
scores = cross_validation.cross_val_score(clf, X, y, cv=cv)
print '*********************************evaluate_cross_validation*********************************'
print "scores:", scores
print ("Mean score: {0:.3f} (+/-{1:.3f})").format(np.mean(scores), sem(scores))
clf = AdaBoostClassifier(base_estimator=SGDClassifier(loss='log'), n_estimators=200)
# evaluate_cross_validation(clf, X_norm, y, 10)
# alpha, score = optimizeAdaBoostSGD(X_norm, y, kFolds=10)
# print 'alpha',alpha, 'score=',score
| apache-2.0 |
JoshDaly/scriptShed | phage_genome_plotter2.py | 1 | 33041 | #!/usr/bin/env python
###############################################################################
#
# phage_genome_plotter2.py - Tell me all about a phage and the spacers and snps
#
###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
from __future__ import division, print_function
__author__ = "Connor Skennerton"
__copyright__ = "Copyright 2013"
__credits__ = ["Connor Skennerton"]
__license__ = "GPL3"
__version__ = "0.0.1"
__maintainer__ = "Connor Skennerton"
__email__ = "[email protected]"
__status__ = "Development"
###############################################################################
import argparse
import sys
from collections import defaultdict
import os
import sqlite3
#import errno
import numpy as np
np.seterr(all='raise')
import matplotlib as mpl
mpl.use('Agg')
from matplotlib.ticker import NullFormatter, MaxNLocator
import matplotlib.pyplot as plt
#from mpl_toolkits.mplot3d import axes3d, Axes3D
#from pylab import plot,subplot,axis,stem,show,figure
import vcf
from datetime import date
###############################################################################
###############################################################################
###############################################################################
###############################################################################
names_to_times = {
'M92206': date(2011, 6, 22),
'M92705': date(2011, 5, 27),
'M90108': date(2011, 8, 1),
'M80509': date(2011, 9, 5),
'M81612': date(2011, 12, 16),
'M81706': date(2011, 6, 17),
'M91801': date(2012, 1, 18),
'M90809': date(2011, 9, 8),
'M92511': date(2011, 11, 25),
'V90102': date(2012, 2, 1),
'V90104': date(2011, 4, 1),
'V90106': date(2011, 6, 1),
'V90308': date(2011, 8, 3),
'V90401': date(2012, 1, 4),
'V90709': date(2011, 9, 7),
'V90806': date(2011, 6, 8),
'V90903': date(2011, 3, 9),
'V91210': date(2011, 10, 12),
'V91307': date(2011, 7, 13),
'V91409': date(2011, 9, 14),
'V91412': date(2011, 12, 14),
'V91506': date(2011, 6, 15),
'V91801': date(2012, 1, 18),
'V91802': date(2011, 2, 18),
'V91805': date(2011, 5, 18),
'V92007': date(2011, 7, 20),
'V92206': date(2011, 6, 22),
'V92704': date(2011, 4, 27),
'V92809': date(2011, 9, 28),
'V92810': date(2011, 10, 28),
'V93108': date(2011, 8, 31)
}
names_to_times['ACSBR9'] = names_to_times['V91506']
class PlotFormatter(object):
"""docstring for PlotFormatter"""
def __init__(self):
super(PlotFormatter, self).__init__()
self.combinations = []
self.index = 0
mpl_line_styles = ['-' , '--' , '-.' , ':']
mpl_line_marker_styles = [ 'o', 'v', '^', '<', '>', '1', '2', '3', '4', 's', 'p', '*', 'h', 'H', '+', 'x', 'D', 'd']
mpl_line_color_styles = ['b', 'g', 'r', 'c', 'm', 'y']
for k in mpl_line_styles:
for i in mpl_line_marker_styles:
for j in mpl_line_color_styles:
self.combinations.append((i, j, k))
def __len__(self):
return len(self.combinations)
def __call__(self):
return self.format()
def format(self):
f = self.combinations[self.index]
self.index += 1
if self.index >= len(self):
self.index = 0
return f
class Crispr(object):
"""holds many spacers and some metadata about formatting for plots"""
def __init__(self, format):
super(Crispr, self).__init__()
self.spacer_clusters = dict()
self.host = None
self.format = format
self.marker = self.format[0]
self.line = self.format[2]
self.colour = self.format[1]
def __len__(self):
return len(self.spacer_clusters)
def __getitem__(self, key):
return self.spacer_clusters[key]
def __setitem__(self, key, value):
self.spacer_clusters[key] = value
def __delitem__(self, key):
del self.spacer_clusters[key]
def __contains__(self, item):
return item in self.spacer_clusters
def __iter__(self):
return iter(self.spacer_clusters)
class Snp(object):
def __init__(self, pos, alt, timepoints=None):
super(Snp, self).__init__()
self.pos = pos
self.alt = alt
if timepoints is None:
self.timepoints = []
else:
self.timepoints = timepoints
class Spacer(object):
def __init__(self, spid=None, start=0, end=0, contig=None, crispr=None, cluster=None, host=None, timepoint=None, cov=None):
if start > end:
self.start = end
self.end = start
else:
self.start = start
self.end = end
self.id = spid
self.contig = contig
self.timepoint = timepoint
self.host = host
self.crispr = crispr
self.cluster = cluster
self.cov = cov
class Contig(object):
def __init__(self, Id, Name, length, timepoint=None):
super(Contig, self).__init__()
self.id = Id
self.Name = Name
self.length = length
self.timepoint = timepoint
# list of 2-tuple of the positions of and timepoints of snps and
# spacers
self.snps = []
self.spacers = {}
self._relabs = 0.0
def __len__(self):
return self.length
@property
def relative_abundance(self):
return self._relabs
@relative_abundance.setter
def relative_abundance(self, value):
self._relabs = value
def get_spacers(self):
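        # Group this contig's spacers by CRISPR cluster; each entry records the
        # host (when known) and a list of [start position, timepoint] pairs.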
ret = {} #defaultdict(dict)
for sp in self.spacers.values():
if sp.crispr in ret:
ret[sp.crispr]['host'] = sp.host
ret[sp.crispr]['data'].append([sp.start, sp.timepoint])
else:
ret[sp.crispr] = dict()
ret[sp.crispr]['host'] = sp.host
ret[sp.crispr]['data'] = []
ret[sp.crispr]['data'].append([sp.start, sp.timepoint])
return ret
def get_spacer_ids(self):
ret = set()
for spid in self.spacers.keys():
ret.add(spid)
return ret
def get_spacer_positions(self):
ret = {}
for sp in self.spacers.values():
ret[sp.cluster] = sp.start
return ret
def has_spacer_cluster(self, spid):
if spid in self.spacers:
return True
return False
def get_spacer_clusters(self):
ret = set()
for sp in self.spacers.values():
ret.add(sp.cluster)
return ret
def get_crisprs(self, formatter):
"""Return a mapping of crispr clusters and their spacers as Crispr objects"""
ret = {}
for sp in self.spacers.values():
if sp.crispr not in ret:
ret[sp.crispr] = Crispr(formatter.format())
ret[sp.crispr][sp.id] = sp
ret[sp.crispr].host = sp.host
return ret
def get_spacer_from_id(self, spid):
try:
return self.spacers[spid]
except KeyError:
return None
def get_spacers_from_spacer_cluster(self, cid):
for sp in self.spacers.values():
if sp.cluster == cid:
yield sp
class ScaffoldFragment(Contig):
def __init__(self, Id, Name, start, Length, complement, timepoint=None):
super(ScaffoldFragment, self).__init__(Id, Name, Length,
timepoint=timepoint)
self.start = start
self.complement = complement
class Scaffold(object):
def __init__(self, Id, Name, Length, timepoint=None):
super(Scaffold, self).__init__()
self.id = Id
self.name = Name
self.length = Length
self.fragments = {}
self.timepoint = timepoint
self.snps = []
def __len__(self):
return self.length
@property
def relative_abundance(self):
        total = 0.0
        for fragobj in self.fragments.values():
            total += fragobj.relative_abundance
        return total / len(self.fragments)
def get_spacers(self):
ret = {} #defaultdict(dict)
for frag in self.fragments.values():
for sp in frag.spacers.values():
if frag.complement:
spacer_pos = sp.start + frag.start - frag.length
else:
spacer_pos = sp.start + frag.start
if sp.crispr in ret:
ret[sp.crispr]['host'] = sp.host
ret[sp.crispr]['data'].append([spacer_pos, sp.timepoint])
else:
ret[sp.crispr] = dict()
ret[sp.crispr]['host'] = sp.host
ret[sp.crispr]['data'] = []
ret[sp.crispr]['data'].append([spacer_pos, sp.timepoint])
return ret
def get_spacer_positions(self):
ret = {}
for frag in self.fragments.values():
for sp in frag.spacers.values():
if frag.complement:
spacer_pos = sp.start + frag.start - frag.length
else:
spacer_pos = sp.start + frag.start
ret[sp.cluster] = spacer_pos
return ret
def get_spacer_ids(self):
ret = set()
for frag in self.fragments.values():
for spid in frag.spacers.keys():
ret.add(spid)
return ret
def has_spacer_cluster(self, spid):
for frag in self.fragments.values():
if spid in frag.spacers:
return True
return False
def get_spacer_clusters(self):
ret = set()
for frag in self.fragments.values():
for sp in frag.spacers.values():
ret.add(sp.cluster)
return ret
def get_crisprs(self, formatter):
"""Return a mapping of crispr clusters and their spacers as Crispr objects"""
ret = {}
for frag in self.fragments.values():
for sp in frag.spacers.values():
if sp.crispr not in ret:
ret[sp.crispr] = Crispr(formatter.format())
ret[sp.crispr][sp.id] = sp
ret[sp.crispr].host = sp.host
return ret
def get_spacer_from_id(self, spid):
for frag in self.fragments.values():
try:
return frag.spacers[spid]
except KeyError:
pass
return None
def get_spacers_from_spacer_cluster(self, cid):
for frag in self.fragments.values():
for sp in frag.spacers.values():
if sp.cluster == cid:
yield sp
class ContigCluster(object):
''' Hold all the contigs for a cluster
The main job of this class is to hold some metadata about contig
clusters such as the 'reference' contig, which is the one that is
called the reference in the VCF file for this cluster, from which the
snp and spacer positions will be determined.
'''
def __init__(self):
self.contigs = {}
self.reference = None
self.spacer_matrix = {}
self.spacer_pos = {}
def __len__(self):
return len(self.contigs)
def __getitem__(self, key):
return self.contigs[key]
def __setitem__(self, key, value):
self.contigs[key] = value
def __delitem__(self, key):
del self.contigs[key]
def __contains__(self, item):
return item in self.contigs
def __iter__(self):
return iter(self.contigs)
def values(self):
return self.contigs.values()
def keys(self):
return self.contigs.keys()
def get_contigs_times(self, timepoint):
for contig in self.contigs.values():
if contig.timepoint == timepoint:
yield contig
#return [contig if contig.timepoint == timepoint for contig in self.contigs.values()]
def populate_spacer_matrix(self, sorted_times):
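        # Build a {crispr cluster: {timepoint: set(spacer clusters)}} matrix
        # restricted to spacer clusters seen on the reference contig, record
        # each spacer cluster's position on the reference, and store the host
        # (under the 'host' key) when one is known.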
reference_spacers = self.reference.get_spacer_ids()
reference_spacer_clusters = self.reference.get_spacer_clusters()
self.spacer_pos = self.reference.get_spacer_positions()
for spid in reference_spacers:
spacer = self.reference.get_spacer_from_id(spid)
if spacer.crispr not in self.spacer_matrix:
self.spacer_matrix[spacer.crispr] = dict()
if self.reference.timepoint not in self.spacer_matrix[spacer.crispr]:
self.spacer_matrix[spacer.crispr][self.reference.timepoint] = set()
self.spacer_matrix[spacer.crispr][self.reference.timepoint].add(spacer.cluster)
if spacer.host is not None:
#print "Host for crispr %s is %s" % (str(spacer.crispr), str(spacer.host))
self.spacer_matrix[spacer.crispr]['host'] = spacer.host
#self.spacer_pos[spacer.cluster] = spacer.start
for contig in self.contigs.values():
if contig.timepoint == self.reference.timepoint:
continue
spacer_clusters_for_current = reference_spacer_clusters & contig.get_spacer_clusters()
for spc in spacer_clusters_for_current:
for spacer in contig.get_spacers_from_spacer_cluster(spc):
if spacer.crispr not in self.spacer_matrix:
#self.spacer_matrix[spacer.crispr] = dict()
#print "WARNING: CRISPR %s in timepoint %s not in reference %s" % (str(spacer.crispr), str(contig.timepoint), str(self.reference.name))
continue
if contig.timepoint not in self.spacer_matrix[spacer.crispr]:
self.spacer_matrix[spacer.crispr][contig.timepoint] = set()
self.spacer_matrix[spacer.crispr][contig.timepoint].add(spacer.cluster)
@property
def relative_abundance(self):
times = defaultdict(list)
for contig_data in self.contigs.values():
times[contig_data.timepoint].append(contig_data.relative_abundance)
for t, data in times.items():
s = sum(data)
times[t] = s / len(data)
sorted_times = sorted(times.keys())
sorted_points = [times[x] for x in sorted_times ]
return sorted_times, sorted_points
def generate_placeholders(l):
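    # Build a comma-separated run of '?' placeholders, one per element of l
    # (e.g. three items -> "?, ?, ?"), for parameterised IN (...) queries.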
    placeholder = '?'  # For SQLite. See DBAPI paramstyle.
    placeholders = ', '.join(placeholder * len(l))
return placeholders
def get_contig_length(cur, name):
cur.execute('''SELECT Length FROM contigs WHERE Id = ?''', (name,))
result = cur.fetchone()
if not result:
raise RuntimeError('could not find contigs name %s in the database' % name)
return result[0]
def get_spacers_for_contig(cur, name):
cur.execute('''CREATE TEMP VIEW IF NOT EXISTS spacer_match_times AS
SELECT
ContigID,
SpacerID,
SpacerCluster,
SpacerTp,
contigs.Timepoint AS ContigTp,
Start,
End,
CrisprClusterID,
HostID
FROM
contigs
JOIN
(
SELECT
spacer_matches.ContigID,
crispr_host_connector.SpacerID,
SpacerCluster,
SpacerTp,
spacer_matches.Start,
spacer_matches.End,
crispr_host_connector.CrisprClusterID,
crispr_host_connector.HostID
FROM
crispr_host_connector
JOIN
spacer_matches
ON
crispr_host_connector.SpacerID = spacer_matches.SpacerID
)
ON
ContigID = contigs.Id'''
)
cur.execute('''select CrisprClusterID, HostID, Start, End, ContigTp, SpacerID, SpacerCluster from
spacer_match_times where ContigID = ?''', (name,))
result = cur.fetchall()
if not result:
return dict()
#raise RuntimeError('problem with returning spacers for %s' % name)
ret = {}
for row in result:
ret[row[5]] = Spacer(row[5], row[2], row[3], name, row[0], host=row[1], timepoint=row[4], cluster=row[6])
return ret
def get_relative_abundance(cur, name):
cur.execute(''' SELECT
Relab
FROM
times
JOIN
(
SELECT
Timepoint AS Tp,
Relab
FROM
contig_relabs
WHERE
ContigID = ?
) On times.Id=Tp''', (name,))
result = cur.fetchone()
if not result:
raise RuntimeError('cannot get the relative abundance of %s' % name)
#print result
return result[0]
def get_contigs_for_scaffold(cur, scaff_id):
fragments = {}
cur.execute('''select contigs.Name, contigs.Id, scaffold_fragments.Start, contigs.Length, scaffold_fragments.Oreintation
from scaffold_fragments
join contigs
on ContigID = Id
where ScaffoldID = ?''', (scaff_id,))
result = cur.fetchall()
if result is None:
raise RuntimeError('Cannot find contigs for scaffold %d' % scaff_id)
else:
for row in result:
complement = False
if row[4] == '-':
complement = True
fragments[row[1]] = ScaffoldFragment(row[1], row[0], row[2], row[3], complement)
return fragments
def generate_contig_query_list(name, name_file):
query = []
if name is None and name_file is None:
raise RuntimeError('must provide at least one of name or name_file')
if name is not None:
if isinstance(name, list):
query.extend(name)
if isinstance(name, str):
query.append(name)
if name_file is not None:
for line in name_file:
line = line.rstrip()
query.append(line)
return query
def get_cluster_id_from_contigs(cur, name=None, name_file=None):
cl = generate_contig_query_list(name, name_file)
query = "SELECT\
Cluster, Name\
FROM\
contigs\
WHERE\
Name IN (%s)" % generate_placeholders(cl)
cur.execute(query, tuple(cl))
result = cur.fetchall()
if not result:
raise RuntimeError("Cannot identify any clusters for given contigs")
else:
ret = {}
for row in result:
if row[0] not in ret:
ret[row[0]] = ContigCluster()
ret[row[0]].reference = row[1]
#ret = [ x[0] if x[0] for x in result ]
return ret
def get_contigs_for_clusters(cur, clusters):
query ="SELECT \
contigs.Name,\
contigs.Id, \
contigs.Flag,\
contigs.Length,\
contigs.Cluster,\
times.Timepoint\
FROM \
contigs\
JOIN\
times\
ON\
contigs.Timepoint = times.Id\
WHERE \
Cluster IN (%s)" % generate_placeholders(clusters.keys())
cur.execute(query, tuple(clusters))
result = cur.fetchall()
for row in result:
if row[2] & 4:
clusters[row[4]][row[0]] = Scaffold(row[1], row[0], row[3], timepoint=row[5])
clusters[row[4]][row[0]].fragments = get_contigs_for_scaffold(cur, row[1])
else:
clusters[row[4]][row[0]] = Contig(row[1], row[0], row[3], timepoint=row[5])
if row[0] == clusters[row[4]].reference:
clusters[row[4]].reference = clusters[row[4]][row[0]]
return clusters
def generate_contig_ids(cur, name=None, name_file=None):
id_map = {}
query = generate_contig_query_list(name, name_file)
try:
cur.execute('''SELECT
contigs.Name,
contigs.Id,
contigs.Flag,
contigs.Length,
contigs.Cluster,
times.Timepoint
FROM
contigs
JOIN
times
ON
contigs.Timepoint = times.Id
WHERE
Name IN (?)''', tuple(query))
#cur.execute('''SELECT Name, Id, Flag, Length FROM contigs WHERE Name IN (?)''', tuple(query))
except sqlite3.ProgrammingError:
print(query)
return {}
result = cur.fetchall()
if not result:
raise RuntimeError('wierd')
for row in result:
if row[2] & 4:
id_map[row[0]] = Scaffold(row[1], row[0], row[3], timepoint=row[5])
id_map[row[0]].fragments = get_contigs_for_scaffold(cur, row[1])
else:
id_map[row[0]] = Contig(row[1], row[0], row[3], timepoint=row[5])
return id_map
def get_all_timepoints(cur):
cur.execute('''SELECT Timepoint FROM times''')
result = cur.fetchall()
if not result:
raise RuntimeError(" can't get times, database corrupt")
return sorted([x[0] for x in result])
def calc_host_relab(cur, host):
cur.execute('''SELECT Relab, times.Timepoint
FROM times JOIN (
SELECT Timepoint AS Tp, Relab
FROM host_relabs WHERE HostID = ?
) ON Tp=times.Id WHERE times.Reactor='SBR9' AND times.flag & 4''', (host,))
result = cur.fetchall()
if not result:
raise RuntimeError('cannot get the relative abundance of %s' % host)
return result
def unzip(data):
v, h = zip(*data)
v = [x if x is not None else 0 for x in v ]
return v, h
def set_plot(sorted_dates, title=None, yscale=None):
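    # Lay out a main scatter axis (genome position vs. sampling date) plus a
    # narrow side axis sharing the date range, used for relative-abundance
    # traces.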
# definitions for the axes
left, width = 0.1, 0.65
bottom, height = 0.1, 0.65
left_h = left+width+0.02
rect_scatter = [left, bottom, width, height]
rect_histy = [left_h, bottom, 0.2, height]
fig = plt.figure()
axScatter = fig.add_axes(rect_scatter)
axHisty = fig.add_axes(rect_histy)
axHisty.set_xlabel('relative abundance')
axHisty.yaxis.set_major_formatter(NullFormatter())
axHisty.set_ylim((mpl.dates.date2num(sorted_dates[0]),
mpl.dates.date2num(sorted_dates[-1])))
axHisty.xaxis.set_major_locator(MaxNLocator(4))
axHisty2 = axHisty.twiny()
axHisty2.set_ylim((mpl.dates.date2num(sorted_dates[0]),
mpl.dates.date2num(sorted_dates[-1])))
axHisty2.xaxis.set_major_locator(MaxNLocator(4))
axHisty2.yaxis.set_major_formatter(NullFormatter())
labels = axHisty2.get_xticklabels()
for label in labels:
label.set_rotation(-30)
label.set_horizontalalignment('right')
label.set_size('small')
return fig, axScatter, axHisty, axHisty2
###############################################################################
###############################################################################
###############################################################################
###############################################################################
def doWork( args ):
""" Main wrapper"""
conn=sqlite3.connect(args.database, detect_types=sqlite3.PARSE_DECLTYPES)
cur=conn.cursor()
sorted_dates = get_all_timepoints(cur)
clusters = get_cluster_id_from_contigs(cur, args.contigs, args.contigs_file)
clusters = get_contigs_for_clusters(cur, clusters)
for cluster_name, contig_data in clusters.items():
for contigobj in contig_data.values():
if isinstance(contigobj, Scaffold):
for fragid, fragobj in contigobj.fragments.items():
fragobj.spacers.update(get_spacers_for_contig(cur, fragid))
fragobj.relative_abundance = get_relative_abundance(cur, fragid)
else:
contigobj.spacers.update(get_spacers_for_contig(cur, contigobj.id))
contigobj.relative_abundance = get_relative_abundance(cur, contigobj.id)
cluster_reference = None
    if args.snps and os.path.exists(args.snps) and os.stat(args.snps).st_size != 0:
vcf_reader = vcf.Reader(open(args.snps))
for record in vcf_reader:
if cluster_reference is None:
for clust_id, clust in clusters.items():
try:
cluster_reference = clust[record.CHROM]
clusters[clust_id].reference = clust[record.CHROM]
#print clusters[clust_id].reference
#print cluster_reference
except KeyError:
print("Cannot find %s for cluster" % (record.CHROM, ))
if record.QUAL is not None and int(record.QUAL) < int(args.snp_quality):
continue
if cluster_reference is None:
continue
for sample in record.samples:
if sample['GT'] != '0/0' and sample['GT'] != './.' and sample['GT'] != '0':
cluster_reference.snps.append([record.POS,
names_to_times[sample.sample]])
#-----
# make a 2d plot
for cluster_id, cluster_data in clusters.items():
fig, axScatter, axHisty, axHisty2 = set_plot(sorted_dates)
ra_time, ra_point = cluster_data.relative_abundance
ra_time = mpl.dates.date2num(ra_time)
#print(ra_time, ra_point, sep="\t")
axHisty.plot_date(ra_point, ra_time, xdate=False,
ydate=True, color='0.7', marker=' ', linestyle='--')
axScatter.set_xlim([0, len(cluster_data.reference)])
axScatter.set_ylim((mpl.dates.date2num(sorted_dates[0]),
mpl.dates.date2num(sorted_dates[-1])))
axScatter.set_xlabel('genome position (bp)')
axScatter.set_title("Phage %s" % (str(cluster_id)))
plotted_host = False
marker_style_index = 0
formatter = PlotFormatter()
cluster_data.populate_spacer_matrix(sorted_dates)
for crispr_cluster, crispr_data in cluster_data.spacer_matrix.items():
#print(crispr_cluster, crispr_data, sep=' ')
format = formatter()
try:
if crispr_data['host'] is not None:
#print "Crispr %s has host %s" % (str(crispr_cluster), str(crispr_data['host']))
host_relab_data = calc_host_relab(cur, crispr_data['host'])
v, h = unzip(host_relab_data)
h = list(h)
h = mpl.dates.date2num(h)
axHisty2.plot_date(v, h, xdate=False, ydate=True,
label=str(crispr_data['host']), alpha=0.5,
ls=format[2],
marker=format[0],
color=format[1])
plotted_host = True
except KeyError:
pass
for timepoint, spacers in crispr_data.items():
if timepoint == 'host':
continue
sp_dates = []
sp_points = []
for spcid in spacers:
sp_dates.append(timepoint)
try:
sp_points.append(cluster_data.spacer_pos[spcid])
                    except TypeError as e:
print(e, file=sys.stderr)
print(cluster_data.spacer_pos, spcid, sep=' ')
print(cluster_id, cluster_data.reference, sep="\t")
for timepoint, spacers in crispr_data.items():
for spcid in spacers:
print(timepoint, spcid)
sys.exit(1)
sp_dates = mpl.dates.date2num(sp_dates)
axScatter.plot_date(sp_points, sp_dates, label=crispr_cluster, alpha=0.5,
xdate=False, ydate=True,
marker=format[0],
color=format[1],
ms=5)
if plotted_host is False:
axHisty2.xaxis.set_major_formatter(NullFormatter())
if len(cluster_data.reference.snps):
sn_points, sn_dates = unzip(cluster_data.reference.snps)
sn_dates = mpl.dates.date2num(sn_dates)
axScatter.plot_date(sn_points, sn_dates, color='0.7', marker='.',
xdate=False, ydate=True)
axScatter.tick_params(axis='y', labelsize='small')
axHisty.tick_params(axis='y', labelsize='small')
#
# Change the formatting of the xlabels to make them pretty
#
labels = axScatter.get_xticklabels()
for label in labels:
label.set_rotation(30)
label.set_horizontalalignment('right')
label.set_size('small')
labels = axHisty.get_xticklabels()
for label in labels:
label.set_rotation(30)
label.set_horizontalalignment('right')
label.set_size('small')
plt.savefig(os.path.join(args.output, "cluster_%d.png" % cluster_id), dpi=300,
format='png')
#-----
# clean up!
plt.close(fig)
del fig
return 0
###############################################################################
###############################################################################
###############################################################################
###############################################################################
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('database',
help="path to sqlite database containing information about phage")
parser.add_argument('-c', '--contigs', dest='contigs_file', type=argparse.FileType('r'),
help='A file containing contigs to consider.')
parser.add_argument('-q', '--snp-quality', dest='snp_quality', default=20,
help="Minimum quality for a SNP")
parser.add_argument('-s', '--snp', dest='snps',
help="vcf file containing SNPs")
parser.add_argument('-o', '--output', dest='output', default='.',
help="output directory for the image")
parser.add_argument('contigs', nargs='*', help='Name of contigs to consider')
# parse the arguments
args = parser.parse_args()
# do what we came here to do
doWork(args)
###############################################################################
###############################################################################
###############################################################################
###############################################################################
| gpl-2.0 |
ltiao/scikit-learn | sklearn/kernel_ridge.py | 37 | 6556 | """Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
# Authors: Mathieu Blondel <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, RegressorMixin
from .metrics.pairwise import pairwise_kernels
from .linear_model.ridge import _solve_cholesky_kernel
from .utils import check_X_y
from .utils.validation import check_is_fitted
class KernelRidge(BaseEstimator, RegressorMixin):
"""Kernel ridge regression.
Kernel ridge regression (KRR) combines ridge regression (linear least
squares with l2-norm regularization) with the kernel trick. It thus
learns a linear function in the space induced by the respective kernel and
the data. For non-linear kernels, this corresponds to a non-linear
function in the original space.
The form of the model learned by KRR is identical to support vector
regression (SVR). However, different loss functions are used: KRR uses
squared error loss while support vector regression uses epsilon-insensitive
loss, both combined with l2 regularization. In contrast to SVR, fitting a
KRR model can be done in closed-form and is typically faster for
medium-sized datasets. On the other hand, the learned model is non-sparse
and thus slower than SVR, which learns a sparse model for epsilon > 0, at
prediction-time.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <kernel_ridge>`.
Parameters
----------
alpha : {float, array-like}, shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
kernel : string or callable, default="linear"
Kernel mapping used internally. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
gamma : float, default=None
Gamma parameter for the RBF, laplacian, polynomial, exponential chi2
and sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
Attributes
----------
dual_coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s) in kernel space
X_fit_ : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data, which is also required for prediction
References
----------
* Kevin P. Murphy
"Machine Learning: A Probabilistic Perspective", The MIT Press
chapter 14.4.3, pp. 492-493
See also
--------
Ridge
Linear ridge regression.
SVR
Support Vector Regression implemented using libsvm.
Examples
--------
>>> from sklearn.kernel_ridge import KernelRidge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = KernelRidge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
KernelRidge(alpha=1.0, coef0=1, degree=3, gamma=None, kernel='linear',
kernel_params=None)
"""
def __init__(self, alpha=1, kernel="linear", gamma=None, degree=3, coef0=1,
kernel_params=None):
self.alpha = alpha
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
@property
def _pairwise(self):
return self.kernel == "precomputed"
def fit(self, X, y=None, sample_weight=None):
"""Fit Kernel Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample, ignored if None is passed.
Returns
-------
self : returns an instance of self.
"""
# Convert data
X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True,
y_numeric=True)
K = self._get_kernel(X)
alpha = np.atleast_1d(self.alpha)
ravel = False
if len(y.shape) == 1:
y = y.reshape(-1, 1)
ravel = True
copy = self.kernel == "precomputed"
self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha,
sample_weight,
copy)
if ravel:
self.dual_coef_ = self.dual_coef_.ravel()
self.X_fit_ = X
return self
def predict(self, X):
"""Predict using the the kernel ridge model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, ["X_fit_", "dual_coef_"])
K = self._get_kernel(X, self.X_fit_)
return np.dot(K, self.dual_coef_)
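# --- Illustrative sketch, not part of the original module ---
# The docstring above notes that a KRR model can be fit in closed form. A
# minimal NumPy version of that closed form -- the dual solution that
# _solve_cholesky_kernel computes -- is sketched below, assuming a single
# scalar alpha, no sample weights and a linear kernel. All names here are
# hypothetical.
def _kernel_ridge_closed_form_demo():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 5)
    y = rng.randn(20)
    alpha = 1.0
    K = X.dot(X.T)                        # linear kernel on the training data
    # Solve (K + alpha * I) dual_coef = y, the closed-form dual problem.
    dual_coef = np.linalg.solve(K + alpha * np.eye(X.shape[0]), y)
    X_new = rng.randn(3, 5)
    K_new = X_new.dot(X.T)                # kernel between test and training points
    return np.dot(K_new, dual_coef)       # same form as predict() above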
| bsd-3-clause |
Myasuka/scikit-learn | sklearn/utils/testing.py | 84 | 24860 | """Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# License: BSD 3 clause
import os
import inspect
import pkgutil
import warnings
import sys
import re
import platform
import scipy as sp
import scipy.io
from functools import wraps
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import tempfile
import shutil
import os.path as op
import atexit
# WindowsError only exist on Windows
try:
WindowsError
except NameError:
WindowsError = None
import sklearn
from sklearn.base import BaseEstimator
from sklearn.externals import joblib
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
"assert_raises_regexp", "raises", "with_setup", "assert_true",
"assert_false", "assert_almost_equal", "assert_array_equal",
"assert_array_almost_equal", "assert_array_less",
"assert_less", "assert_less_equal",
"assert_greater", "assert_greater_equal"]
try:
from nose.tools import assert_in, assert_not_in
except ImportError:
# Nose < 1.0.0
def assert_in(x, container):
assert_true(x in container, msg="%r in %r" % (x, container))
def assert_not_in(x, container):
assert_false(x in container, msg="%r in %r" % (x, container))
try:
from nose.tools import assert_raises_regex
except ImportError:
# for Python 2
def assert_raises_regex(expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Helper function to check for message patterns in exceptions"""
not_raised = False
try:
callable_obj(*args, **kwargs)
not_raised = True
except expected_exception as e:
error_message = str(e)
if not re.compile(expected_regexp).search(error_message):
raise AssertionError("Error message should match pattern "
"%r. %r does not." %
(expected_regexp, error_message))
if not_raised:
raise AssertionError("%s not raised by %s" %
(expected_exception.__name__,
callable_obj.__name__))
# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex, but let's keep the backward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
def assert_less_equal(a, b, msg=None):
message = "%r is not lower than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a <= b, message
def assert_greater_equal(a, b, msg=None):
message = "%r is not greater than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a >= b, message
def assert_warns(warning_class, func, *args, **kw):
"""Test that a certain warning occurs.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
func : callable
Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
Returns
-------
result : the return value of `func`
"""
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = any(warning.category is warning_class for warning in w)
if not found:
raise AssertionError("%s did not give warning: %s( is %s)"
% (func.__name__, warning_class, w))
return result
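# Illustrative usage sketch, not part of the original module: checking that a
# function emits a DeprecationWarning with assert_warns. The deprecated
# function below is hypothetical; `warnings` is the module-level import above.
def _assert_warns_demo():
    def deprecated_add(a, b):
        warnings.warn("deprecated_add is deprecated", DeprecationWarning)
        return a + b
    # Raises AssertionError if no DeprecationWarning is seen; otherwise returns
    # whatever the wrapped call returned.
    result = assert_warns(DeprecationWarning, deprecated_add, 1, 2)
    assert result == 3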
def assert_warns_message(warning_class, message, func, *args, **kw):
# very important to avoid uncontrolled state propagation
"""Test that a certain warning occurs and with a certain message.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
message : str | callable
The entire message or a substring to test for. If callable,
it takes a string as argument and will trigger an assertion error
if it returns `False`.
func : callable
Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`.
Returns
-------
result : the return value of `func`
"""
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
if hasattr(np, 'VisibleDeprecationWarning'):
# Let's not catch the numpy internal DeprecationWarnings
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = [issubclass(warning.category, warning_class) for warning in w]
if not any(found):
raise AssertionError("No warning raised for %s with class "
"%s"
% (func.__name__, warning_class))
message_found = False
# Check whether the message of any warning of warning_class matches
for index in [i for i, x in enumerate(found) if x]:
# substring will match, the entire message with typo won't
msg = w[index].message # For Python 3 compatibility
msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
if callable(message): # add support for certain tests
check_in_message = message
else:
check_in_message = lambda msg: message in msg
if check_in_message(msg):
message_found = True
break
if not message_found:
raise AssertionError("Did not receive the message you expected "
"('%s') for <%s>, got: '%s'"
% (message, func.__name__, msg))
return result
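# Illustrative usage sketch, not part of the original module: matching the
# warning text either with a plain substring or with a callable predicate, as
# described in the docstring above. The warning-raising function is hypothetical.
def _assert_warns_message_demo():
    def noisy():
        warnings.warn("n_components is too large", UserWarning)
        return 42
    # Substring match on the warning message.
    assert assert_warns_message(UserWarning, "too large", noisy) == 42
    # Callable match: receives the message string and must return True to pass.
    assert_warns_message(UserWarning, lambda msg: msg.startswith("n_components"), noisy)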
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
# XXX: once we may depend on python >= 2.6, this can be replaced by the
# warnings module context manager.
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: %s"
% (func.__name__, w))
return result
def ignore_warnings(obj=None):
""" Context manager and decorator to ignore warnings
Note. Using this (in both variants) will clear all warnings
from all python modules loaded. In case you need to test
cross-module-warning-logging this is not your tool of choice.
Examples
--------
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')
>>> def nasty_warn():
... warnings.warn('buhuhuhu')
... print(42)
>>> ignore_warnings(nasty_warn)()
42
"""
if callable(obj):
return _ignore_warnings(obj)
else:
return _IgnoreWarnings()
def _ignore_warnings(fn):
"""Decorator to catch and hide warnings without visual nesting"""
@wraps(fn)
def wrapper(*args, **kwargs):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
return fn(*args, **kwargs)
w[:] = []
return wrapper
class _IgnoreWarnings(object):
"""Improved and simplified Python warnings context manager
Copied from Python 2.7.5 and modified as required.
"""
def __init__(self):
"""
Parameters
==========
category : warning class
The category to filter. Defaults to Warning. If None,
all categories will be muted.
"""
self._record = True
self._module = sys.modules['warnings']
self._entered = False
self.log = []
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
clean_warning_registry() # be safe and not propagate state + chaos
warnings.simplefilter('always')
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
self.log = []
def showwarning(*args, **kwargs):
self.log.append(warnings.WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return self.log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
self.log[:] = []
clean_warning_registry() # be safe and not propagate state + chaos
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
assert_allclose = np.testing.assert_allclose
else:
assert_allclose = _assert_allclose
def assert_raise_message(exceptions, message, function, *args, **kwargs):
"""Helper function to test error messages in exceptions
Parameters
----------
exceptions : exception or tuple of exception
The expected exception(s) raised by `function`.
function : callable
Callable object expected to raise the error.
*args : the positional arguments to `function`.
**kwargs : the keyword arguments to `function`.
"""
try:
function(*args, **kwargs)
except exceptions as e:
error_message = str(e)
if message not in error_message:
raise AssertionError("Error message does not include the expected"
" string: %r. Observed error message: %r" %
(message, error_message))
else:
# concatenate exception names
if isinstance(exceptions, tuple):
names = " or ".join(e.__name__ for e in exceptions)
else:
names = exceptions.__name__
raise AssertionError("%s not raised by %s" %
(names, function.__name__))
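# Illustrative usage sketch, not part of the original module: verifying both
# the exception type and a substring of its message with assert_raise_message.
# The raising function below is hypothetical.
def _assert_raise_message_demo():
    def fit_empty():
        raise ValueError("Found array with 0 sample(s)")
    # Passes silently: ValueError is raised and its message contains the string.
    assert_raise_message(ValueError, "0 sample(s)", fit_empty)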
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
"""Create a fake mldata data set.
Parameters
----------
columns_dict : dict, keys=str, values=ndarray
Contains data as columns_dict[column_name] = array of data.
dataname : string
Name of data set.
matfile : string or file object
The file name string or the file-like object of the output file.
ordering : list, default None
List of column_names, determines the ordering in the data set.
Notes
-----
This function transposes all arrays, while fetch_mldata only transposes
'data', keep that into account in the tests.
"""
datasets = dict(columns_dict)
# transpose all variables
for name in datasets:
datasets[name] = datasets[name].T
if ordering is None:
ordering = sorted(list(datasets.keys()))
# NOTE: setting up this array is tricky, because of the way Matlab
# re-packages 1D arrays
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
dtype='object')
for i, name in enumerate(ordering):
datasets['mldata_descr_ordering'][0, i] = name
scipy.io.savemat(matfile, datasets, oned_as='column')
class mock_mldata_urlopen(object):
def __init__(self, mock_datasets):
"""Object that mocks the urlopen function to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
{dataset_name: (data_dict, ordering)}.
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata` for details).
When requesting a dataset with a name that is in mock_datasets,
this object creates a fake dataset in a StringIO object and
returns it. Otherwise, it raises an HTTPError.
"""
self.mock_datasets = mock_datasets
def __call__(self, urlname):
dataset_name = urlname.split('/')[-1]
if dataset_name in self.mock_datasets:
resource_name = '_' + dataset_name
from io import BytesIO
matfile = BytesIO()
dataset = self.mock_datasets[dataset_name]
ordering = None
if isinstance(dataset, tuple):
dataset, ordering = dataset
fake_mldata(dataset, resource_name, matfile, ordering)
matfile.seek(0)
return matfile
else:
raise HTTPError(urlname, 404, dataset_name + " is not available",
[], None)
def install_mldata_mock(mock_datasets):
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = urlopen
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier",
"OutputCodeClassifier", "OneVsRestClassifier", "RFE",
"RFECV", "BaseEnsemble"]
# estimators that there is no way to default-construct sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV",
"RandomizedSearchCV"]
# some strange ones
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
'LabelBinarizer', 'LabelEncoder',
'MultiLabelBinarizer', 'TfidfTransformer',
'TfidfVectorizer', 'IsotonicRegression',
'OneHotEncoder', 'RandomTreesEmbedding',
'FeatureHasher', 'DummyClassifier', 'DummyRegressor',
'TruncatedSVD', 'PolynomialFeatures',
'GaussianRandomProjectionHash', 'HashingVectorizer',
'CheckingClassifier', 'PatchExtractor', 'CountVectorizer',
# GradientBoosting base estimators, maybe should
# exclude them in another way
'ZeroEstimator', 'ScaledLogOddsEstimator',
'QuantileEstimator', 'MeanEstimator',
'LogOddsEstimator', 'PriorProbabilityEstimator',
'_SigmoidCalibration', 'VotingClassifier']
def all_estimators(include_meta_estimators=False,
include_other=False, type_filter=None,
include_dont_test=False):
"""Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
include_meta_estimators : boolean, default=False
Whether to include meta-estimators that can be constructed using
an estimator as their first argument. These are currently
BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
OneVsRestClassifier, RFE, RFECV.
include_other : boolean, default=False
Whether to include meta-estimators that are somehow special and can
not be default-constructed sensibly. These are currently
Pipeline, FeatureUnion and GridSearchCV
include_dont_test : boolean, default=False
Whether to include "special" label estimators or test processors.
type_filter : string, list of string, or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types, or a list of these to
get the estimators that fit at least one of the types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
and ``class`` is the actual type of the class.
"""
def is_abstract(c):
if not(hasattr(c, '__abstractmethods__')):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
# get parent folder
path = sklearn.__path__
for importer, modname, ispkg in pkgutil.walk_packages(
path=path, prefix='sklearn.', onerror=lambda x: None):
if ".tests." in modname:
continue
module = __import__(modname, fromlist="dummy")
classes = inspect.getmembers(module, inspect.isclass)
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [c for c in all_classes
if (issubclass(c[1], BaseEstimator)
and c[0] != 'BaseEstimator')]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if not include_dont_test:
estimators = [c for c in estimators if not c[0] in DONT_TEST]
if not include_other:
estimators = [c for c in estimators if not c[0] in OTHER]
# possibly get rid of meta estimators
if not include_meta_estimators:
estimators = [c for c in estimators if not c[0] in META_ESTIMATORS]
if type_filter is not None:
if not isinstance(type_filter, list):
type_filter = [type_filter]
else:
type_filter = list(type_filter) # copy
filtered_estimators = []
filters = {'classifier': ClassifierMixin,
'regressor': RegressorMixin,
'transformer': TransformerMixin,
'cluster': ClusterMixin}
for name, mixin in filters.items():
if name in type_filter:
type_filter.remove(name)
filtered_estimators.extend([est for est in estimators
if issubclass(est[1], mixin)])
estimators = filtered_estimators
if type_filter:
raise ValueError("Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or None, got"
" %s." % repr(type_filter))
# drop duplicates, sort for reproducibility
return sorted(set(estimators))
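# Illustrative usage sketch, not part of the original module: listing every
# classifier that all_estimators discovers in the installed scikit-learn.
def _all_estimators_demo():
    classifiers = all_estimators(type_filter='classifier')
    # Each entry is a (name, class) tuple, e.g. ('LogisticRegression', <class ...>).
    return [name for name, cls in classifiers]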
def set_random_state(estimator, random_state=0):
if "random_state" in estimator.get_params().keys():
estimator.set_params(random_state=random_state)
def if_matplotlib(func):
"""Test decorator that skips test if matplotlib not installed. """
@wraps(func)
def run_test(*args, **kwargs):
try:
import matplotlib
matplotlib.use('Agg', warn=False)
# this fails if no $DISPLAY specified
import matplotlib.pyplot as plt
plt.figure()
except ImportError:
raise SkipTest('Matplotlib not available.')
else:
return func(*args, **kwargs)
return run_test
def if_not_mac_os(versions=('10.7', '10.8', '10.9'),
message='Multi-process bug in Mac OS X >= 10.7 '
'(see issue #636)'):
"""Test decorator that skips test if OS is Mac OS X and its
major version is one of ``versions``.
"""
mac_version, _, _ = platform.mac_ver()
skip = '.'.join(mac_version.split('.')[:2]) in versions
def decorator(func):
if skip:
@wraps(func)
def func(*args, **kwargs):
raise SkipTest(message)
return func
return decorator
def clean_warning_registry():
"""Safe way to reset warnings """
warnings.resetwarnings()
reg = "__warningregistry__"
for mod_name, mod in list(sys.modules.items()):
if 'six.moves' in mod_name:
continue
if hasattr(mod, reg):
getattr(mod, reg).clear()
def check_skip_network():
if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)):
raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
"""Skip test if being run on Travis."""
if os.environ.get('TRAVIS') == "true":
raise SkipTest("This test needs to be skipped on Travis")
def _delete_folder(folder_path, warn=False):
"""Utility function to cleanup a temporary folder if still existing.
Copy from joblib.pool (for independance)"""
try:
if os.path.exists(folder_path):
# This can fail under windows,
# but will succeed when called by atexit
shutil.rmtree(folder_path)
except WindowsError:
if warn:
warnings.warn("Could not delete temporary folder %s" % folder_path)
class TempMemmap(object):
def __init__(self, data, mmap_mode='r'):
self.temp_folder = tempfile.mkdtemp(prefix='sklearn_testing_')
self.mmap_mode = mmap_mode
self.data = data
def __enter__(self):
fpath = op.join(self.temp_folder, 'data.pkl')
joblib.dump(self.data, fpath)
data_read_only = joblib.load(fpath, mmap_mode=self.mmap_mode)
atexit.register(lambda: _delete_folder(self.temp_folder, warn=True))
return data_read_only
def __exit__(self, exc_type, exc_val, exc_tb):
_delete_folder(self.temp_folder)
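# Illustrative usage sketch, not part of the original module: memory-mapping an
# array for the duration of a test with TempMemmap. The array contents are
# hypothetical.
def _temp_memmap_demo():
    data = np.arange(10, dtype=np.float64)
    with TempMemmap(data) as data_read_only:
        # data_read_only is a read-only memmap backed by a temporary folder
        # that is deleted again when the context manager exits.
        assert data_read_only.sum() == data.sum()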
with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
| bsd-3-clause |
mrlb05/Nifty4Gemini | nifty/pipeline/steps/routines/nifsMakeTelluric.py | 4 | 13051 | import sys, glob, shutil, getopt, os, time, logging, sgmllib, urllib, re, traceback, pkg_resources
import pexpect as p
from pyraf import iraf, iraffunctions
import astropy.io.fits
from astropy.io.fits import getdata, getheader
import numpy as np
from scipy.interpolate import interp1d
from scipy import arange, array, exp
from scipy.ndimage.interpolation import shift
import pylab as pl
import matplotlib.pyplot as plt
# LOCAL
# Import config parsing.
from ..configobj.configobj import ConfigObj
# Import custom Nifty functions.
from ..nifsUtils import datefmt, listit, writeList, checkLists, makeSkyList, MEFarith, convertRAdec
# Import Nifty python data cube merging script.
from .nifsMerge import mergeCubes
# Define constants
# Paths to Nifty data.
RECIPES_PATH = pkg_resources.resource_filename('nifty', 'recipes/')
RUNTIME_DATA_PATH = pkg_resources.resource_filename('nifty', 'runtimeData/')
def makeTelluricCorrection(
telluricDirectory, path, continuuminter, hlineinter, tempInter, hline_method="vega", spectemp="",
mag="", log="test.log", over=False):
"""FLUX CALIBRATION
Consists of this start function and six required functions at the end of
this file.
"""
"""iraf.gemini(_doprint=0, motd="no")
iraf.gnirs(_doprint=0)
iraf.imutil(_doprint=0)
iraf.onedspec(_doprint=0)
iraf.nsheaders('nifs',Stdout='/dev/null')"""
# Overview of Telluric Correction procedure:
# We make a telluric correction by:
# Remove H-lines from combined 1D standard star spectrum.
# Divide by H-line corrected standard spectrum by continuum fit.
# We apply a telluric correction by:
# Dividing the cube by the correction spectrum (with iraf.telluric) to figure out the shift and scaling.
# Dividing again by the continuum to add a continuum shape back in.
# Telluric correction done.
# Overview of flux calibration procedure:
# Make a blackbody spectrum.
# Scale to the observed magnitude of the standard.
# Multiply telluric corrected target spectrum by this scaled blackbody.
# Done!
iraffunctions.chdir(telluricDirectory)
logging.info('I am starting to create telluric correction spectrum and blackbody spectrum')
logging.info('I am starting to create telluric correction spectrum and blackbody spectrum ')
# Open the combine extracted 1d spectrum.
try:
combined_extracted_1d_spectra = str(open('telluricfile', 'r').readlines()[0]).strip()
except:
logging.info("No telluricfile found in " + str(telluricDirectory) + "Skipping telluric correction and flux calibration.")
return
if not os.path.exists('scienceMatchedTellsList'):
logging.info("No scienceMatchedTellsList found in " + str(telluricDirectory))
return
telheader = astropy.io.fits.open(combined_extracted_1d_spectra+'.fits')
grating = telheader[0].header['GRATING'][0]
# Get standard star spectral type, teff, and magnitude from the interwebs. Go forth, brave parser!
getStandardInfo(path, mag, grating, spectemp)
hLineCorrection(combined_extracted_1d_spectra, grating, path, hlineinter, tempInter, hline_method, log, over)
# Fit a continuum from the standard star spectrum, saving both continuum and continuum divided standard spectrum.
fitContinuum(continuuminter, tempInter, grating)
# Divide the standard star spectrum by the continuum to normalize it.
if os.path.exists("telluricCorrection.fits"):
os.remove("telluricCorrection.fits")
iraf.imarith('final_tel_no_hlines_no_norm', "/", 'fit', result='telluricCorrection',title='',divzero=0.0,hparams='',pixtype='',calctype='',verbose='no',noact='no',mode='al')
# Done deriving telluric correction! We have two new products:
# 1) A continuum-normalized telluric correction spectrum, telluricCorrection.fits, and
# 2) The continuum we used to normalize it, fit.fits.
def hLineCorrection(combined_extracted_1d_spectra, grating, path, hlineinter, tempInter, hline_method, log, over, airmass_std=1.0):
"""
Remove hydrogen lines from the spectrum of a telluric standard,
using a model of vega's atmosphere.
"""
# File for recording shift/scale from calls to "telluric"
telluric_shift_scale_record = open('telluric_hlines.txt', 'w')
# Remove H lines from standard star correction spectrum
no_hline = False
if os.path.exists("final_tel_no_hlines_no_norm.fits"):
if over:
iraf.delete("final_tel_no_hlines_no_norm.fits")
else:
no_hline = True
logging.info("Output file exists and -over- not set - skipping H line removal")
if hline_method == "vega" and not no_hline:
vega(combined_extracted_1d_spectra, grating, path, hlineinter, telluric_shift_scale_record, log, over)
#if hline_method == "linefitAuto" and not no_hline:
# linefitAuto(combined_extracted_1d_spectra, grating)
# Disabled and untested because interactive scripted iraf tasks are broken...
#if hline_method == "linefitManual" and not no_hline:
# linefitManual(combined_extracted_1d_spectra+'[sci,1]', grating)
#if hline_method == "vega_tweak" and not no_hline:
#run vega removal automatically first, then give user chance to interact with spectrum as well
# vega(combined_extracted_1d_spectra,grating, path, hlineinter, telluric_shift_scale_record, log, over)
# linefitManual("final_tel_no_hlines_no_norm", grating)
#if hline_method == "linefit_tweak" and not no_hline:
#run Lorentz removal automatically first, then give user chance to interact with spectrum as well
# linefitAuto(combined_extracted_1d_spectra,grating)
# linefitManual("final_tel_no_hlines_no_norm", grating)
if hline_method == "none" and not no_hline:
#need to copy files so have right names for later use
iraf.imcopy(input=combined_extracted_1d_spectra+'[sci,'+str(1)+']', output="final_tel_no_hlines_no_norm", verbose='no')
# Plot the non-hline corrected spectrum and the h-line corrected spectrum.
uncorrected = astropy.io.fits.open(combined_extracted_1d_spectra+'.fits')[1].data
corrected = astropy.io.fits.open("final_tel_no_hlines_no_norm.fits")[0].data
if hlineinter or tempInter:
plt.title('Before and After HLine Correction')
plt.plot(uncorrected)
plt.plot(corrected)
plt.show()
def vega(spectrum, band, path, hlineinter, telluric_shift_scale_record, log, over, airmass=1.0):
"""
Use iraf.telluric to remove H lines from standard star, then remove
normalization added by telluric with iraf.imarith.
The extension for vega_ext.fits is specified from band (from header of
telluricfile.fits).
Args:
spectrum (string): filename from 'telluricfile'.
band: from telluricfile .fits header. Eg 'K', 'H', 'J'.
path: usually top directory with Nifty scripts.
hlineinter (boolean): Interactive H line fitting. Specified with -i at
command line. Default False.
airmass: from telluricfile .fits header.
telluric_shift_scale_record: "pointer" to telluric_hlines.txt.
log: path to logfile.
over (boolean): overwrite old files. Specified at command line.
"""
if band=='K':
ext = '1'
sample = "21537:21778"
scale = 0.8
if band=='H':
ext = '2'
sample = "16537:17259"
scale = 0.7
if band=='J':
ext = '3'
sample = "11508:13492"
scale = 0.885
if band=='Z':
ext = '4'
sample = "*"
scale = 0.8
if os.path.exists("tell_nolines.fits"):
if over:
os.remove("tell_nolines.fits")
tell_info = iraf.telluric(input=spectrum+"[1]", output='tell_nolines', cal= RUNTIME_DATA_PATH+'vega_ext.fits['+ext+']', xcorr='yes', tweakrms='yes', airmass=airmass, inter=hlineinter, sample=sample, threshold=0.1, lag=3, shift=0., dshift=0.05, scale=scale, dscale=0.05, offset=0., smooth=1, cursor='', mode='al', Stdout=1)
else:
logging.info("Output file exists and -over not set - skipping H line correction")
else:
tell_info = iraf.telluric(input=spectrum+"[1]", output='tell_nolines', cal= RUNTIME_DATA_PATH+'vega_ext.fits['+ext+']', xcorr='yes', tweakrms='yes', inter=hlineinter, airmass=airmass, sample=sample, threshold=0.1, lag=3, shift=0., dshift=0.05, scale=scale, dscale=0.05, offset=0., smooth=1, cursor='', mode='al', Stdout=1)
# need this check to identify telluric output containing a warning about pixels outside calibration limits (different formatting)
if "limits" in tell_info[-1].split()[-1]:
norm=tell_info[-2].split()[-1]
else:
norm=tell_info[-1].split()[-1]
if os.path.exists("final_tel_no_hlines_no_norm.fits"):
if over:
os.remove("final_tel_no_hlines_no_norm.fits")
iraf.imarith(operand1='tell_nolines', op='/', operand2=norm, result='final_tel_no_hlines_no_norm', title='', divzero=0.0, hparams='', pixtype='', calctype='', verbose='yes', noact='no', mode='al')
else:
logging.info("Output file exists and -over not set - skipping H line normalization")
else:
iraf.imarith(operand1='tell_nolines', op='/', operand2=norm, result='final_tel_no_hlines_no_norm', title='', divzero=0.0, hparams='', pixtype='', calctype='', verbose='yes', noact='no', mode='al')
# TODO(nat): linefitAuto and linefitManual could be useful at some point.
def linefitAuto(spectrum, band):
"""automatically fit Lorentz profiles to lines defined in existing cur* files
Go to x position in cursor file and use space bar to find spectrum at each of those points
"""
specpos = iraf.bplot(images=spectrum+'[SCI,1]', cursor='cur'+band, Stdout=1, StdoutG='/dev/null')
specpose = str(specpos).split("'x,y,z(x):")
nextcur = 'nextcur'+band+'.txt'
# Write line x,y info to file containing Lorentz fitting commands for bplot
write_line_positions(nextcur, specpos)
iraf.delete('final_tel_no_hlines_no_norm.fits',ver="no",go_ahead='yes',Stderr='/dev/null')
# Fit and subtract Lorentz profiles. Might as well write output to file.
iraf.bplot(images=spectrum+'[sci,1]',cursor='nextcur'+band+'.txt', new_image='final_tel_no_hlines_no_norm', overwrite="yes",StdoutG='/dev/null',Stdout='Lorentz'+band)
def linefitManual(spectrum, band):
""" Enter splot so the user can fit and subtract lorents (or, actually, any) profiles
"""
iraf.splot(images=spectrum, new_image='final_tel_no_hlines_no_norm', save_file='../PRODUCTS/lorentz_hlines.txt', overwrite='yes')
# it's easy to forget to use the 'i' key to actually write out the line-free spectrum, so check that it exists:
# with the 'tweak' options, the line-free spectrum will already exists, so this lets the user simply 'q' and move on w/o editing (too bad if they edit and forget to hit 'i'...)
while True:
try:
with open("final_tel_no_hlines_no_norm.fits") as f: pass
break
except IOError as e:
logging.info("It looks as if you didn't use the i key to write out the lineless spectrum. We'll have to try again. --> Re-entering splot")
iraf.splot(images=spectrum, new_image='final_tel_no_hlines_no_norm', save_file='../PRODUCTS/lorentz_hlines.txt', overwrite='yes')
def fitContinuum(continuuminter, tempInter, grating):
"""
Fit a continuum to the telluric correction spectrum to normalize it. The continuum
fitting regions were derived by eye and can be improved.
Results are in fit<Grating>.fits
"""
# These were found to fit the curves well by hand. You can probably improve them; feel free to fiddle around!
if grating == "K":
order = 5
sample = "20279:20395,20953:24283"
elif grating == "J":
order = 5
sample = "11561:12627,12745:12792,12893:13566"
elif grating == "H":
order = 5
sample = "*"
elif grating == "Z":
order = 5
sample = "9453:10015,10106:10893,10993:11553"
if os.path.exists("fit.fits"):
os.remove("fit.fits")
iraf.continuum(input='final_tel_no_hlines_no_norm',output='fit',ask='yes',lines='*',bands='1',type="fit",replace='no',wavescale='yes',logscale='no',override='no',listonly='no',logfiles='',inter=continuuminter,sample=sample,naverage=1,func='spline3',order=order,low_rej=1.0,high_rej=3.0,niterate=2,grow=1.0,markrej='yes',graphics='stdgraph',cursor='',mode='ql')
# Plot the telluric correction spectrum with the continuum fit.
final_tel_no_hlines_no_norm = astropy.io.fits.open('final_tel_no_hlines_no_norm.fits')[0].data
fit = astropy.io.fits.open('fit.fits')[0].data
if continuuminter or tempInter:
plt.title('Unnormalized Telluric Correction and Continuum fit Used to Normalize')
plt.plot(final_tel_no_hlines_no_norm)
plt.plot(fit)
plt.show()
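# Illustrative sketch, not part of the original module: the idea behind
# fitContinuum in plain NumPy -- fit a smooth low-order curve to selected pixel
# windows of the standard-star spectrum and divide it out. This is a simplified
# stand-in for the iraf.continuum call above; the window list and polynomial
# degree are hypothetical choices.
def _continuum_fit_demo(spectrum, windows, degree=5):
    pixels = np.arange(len(spectrum))
    mask = np.zeros(len(spectrum), dtype=bool)
    for lo, hi in windows:
        mask[lo:hi] = True                 # keep only pixels inside the continuum windows
    coeffs = np.polyfit(pixels[mask], spectrum[mask], degree)
    continuum = np.polyval(coeffs, pixels)
    return spectrum / continuum, continuum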
def divideByContinuum(inputSpectra, divisor, )
| mit |