repo_name | path | copies | size | content | license |
---|---|---|---|---|---|
migueldiascosta/pymatgen | docs/_static/pmg_install.py | 1 | 6875 | #!/usr/bin/env python
"""
This is a simple installation script for casual users of pymatgen who simply
plan to use pymatgen as a basic analysis library and are not planning to
develop on it. This script should work on most Linux and Mac systems that
have Python 2.7+ and setuptools installed. These are the only required
prerequisites. Once those are installed, the script should take care of the
remainder of the installation process.
There are only a few options in this script. Please note that you probably
have to *run all commands with sudo* for the installation to proceed correctly.
Simply running:
./pmg_install
will install pymatgen with the basic dependencies.
Running:
./pmg_install -f
will install pymatgen with a few more optional packages and also start an
initial setup process that guides you through basic configuration
for POTCAR and Materials API support.
Report any issues or suggestions for this script to [email protected].
"""
__author__ = "Shyue Ping Ong"
__version__ = "1.0"
__email__ = "[email protected]"
__date__ = "Apr 28, 2013"
import sys
import subprocess
import urllib
import os
import shutil
def build_enum(fortran_command="gfortran"):
enumlib_url = "http://downloads.sourceforge.net/project/enum/enum/enum.tar.gz"
currdir = os.getcwd()
state = True
try:
os.makedirs("enumlib")
os.chdir("enumlib")
urllib.urlretrieve(enumlib_url, "enum.tar.gz")
subprocess.call(["tar", "-zxf", "enum.tar.gz"])
os.chdir("celib")
os.chdir("trunk")
os.environ["F90"] = fortran_command
subprocess.call(["make"])
os.chdir(os.path.join("..", ".."))
enumpath = os.path.join("enumlib", "trunk")
os.chdir(enumpath)
subprocess.call(["make"])
for f in ["multienum.x", "makestr.x"]:
subprocess.call(["make", f])
shutil.copy(f, os.path.join("..", "..", ".."))
except Exception as ex:
print(str(ex))
state = False
finally:
os.chdir(currdir)
shutil.rmtree("enumlib")
return state
def build_bader(fortran_command="gfortran"):
bader_url = "http://theory.cm.utexas.edu/henkelman/code/bader/download/bader.tar.gz"
currdir = os.getcwd()
state = True
try:
urllib.urlretrieve(bader_url, "bader.tar.gz")
subprocess.call(["tar", "-zxf", "bader.tar.gz"])
os.chdir("bader")
subprocess.call(["cp", "makefile.osx_"+fortran_command, "makefile"])
subprocess.call(["make"])
shutil.copy("bader", os.path.join("..", "bader_exe"))
os.chdir("..")
shutil.rmtree("bader")
os.remove("bader.tar.gz")
shutil.move("bader_exe", "bader")
except Exception as ex:
print(str(ex))
state = False
finally:
os.chdir(currdir)
return state
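# Illustrative note (not executed by this script): both builders can also be
# called directly, e.g. from an interactive session, to rebuild the external
# tools on their own. The compiler name below is just the script's default.
#
#     if build_enum("gfortran") and build_bader("gfortran"):
#         print("Built enumlib and bader successfully")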
try:
py_ver = sys.version_info
print("Detected Python version %s" % ".".join(["%s" % i for i in py_ver]))
if py_ver < (2, 7) or py_ver >= (2, 8):
print("Python version 2.7+ required. Download and install the necessary "
"python version from http://www.python.org/download/.")
sys.exit(-1)
except:
print("Python version 2.7+ required. Download and install the necessary "
"python version from http://www.python.org/download/.")
sys.exit(-1)
try:
import setuptools
print("Detected setuptools version {}".format(setuptools.__version__))
except ImportError:
print("setuptools not detected. Get it from https://pypi.python"
".org/pypi/setuptools and follow the instructions to install first.")
sys.exit(-1)
try:
gcc_ver = subprocess.Popen(["gcc", "--version"], stdout=subprocess.PIPE)\
.communicate()[0]
except:
print("gcc not found in PATH. gcc is needed for installation of numpy "
"and C extensions. For Mac users, please install Xcode and its "
"corresponding command-line tools first.")
sys.exit(-1)
try:
import pip
print("Detected pip version {}".format(pip.__version__))
except ImportError:
print("pip not detected. Installing...")
subprocess.call(["easy_install", "pip"])
try:
import numpy
from numpy.distutils.misc_util import get_numpy_include_dirs
print("Detected numpy version {}".format(numpy.__version__))
except ImportError:
print("numpy.distutils.misc_util cannot be imported. Installing...")
subprocess.call(["pip", "install", "-q", "numpy>=1.8.0"])
from numpy.distutils.misc_util import get_numpy_include_dirs
for pk in ["pyhull>=1.3.6", "pyyaml", "PyCifRW>=3.3", "requests>=1.0",
"pybtex>=0.16"]:
print("Installing {}".format(pk))
ret = subprocess.call(["pip", "install", "-q", pk])
if ret != 0:
ret = subprocess.call(["easy_install", pk])
if ret != 0:
print("Error installing required dependency {}".format(pk))
sys.exit(-1)
print
if subprocess.call(["pip", "install", "pymatgen"]) != 0:
print("Error installing pymatgen")
sys.exit(-1)
print
enum = False
bader = False
if "-f" in sys.argv:
for pk in ["matplotlib>1.1"]:
if subprocess.call(["pip", "install", pk]) != 0:
print("Unable to install {}. Skipping...".format(pk))
if subprocess.call([
"pip", "install", "-Ivq",
"https://wiki.fysik.dtu.dk/ase-files/python-ase-3.6.0.2515.tar.gz"]
) != 0:
print("Unable to install ASE. Skipping...")
print
fortran_command = None
try:
if subprocess.call(["ifort", "--version"]) == 0:
print("Found ifort")
fortran_command = "ifort"
elif subprocess.call(["gfortran", "--version"]) == 0:
print("Found gfortran")
fortran_command = "gfortran"
except:
fortran_command = None
if fortran_command is not None:
print("Building enumlib")
enum = build_enum(fortran_command)
print
print("Building bader")
bader = build_bader(fortran_command)
print
else:
print("No fortran compiler found. Skipping enumlib and bader build.")
print("Performing POTCAR setup. Press Ctrl-C at any prompt to skip this "
"step.")
try:
subprocess.call(["potcar_setup"])
except:
print("Skipping POTCAR setup.")
print
print("------------ Setup complete --------------")
print("You still need to perform a few manual changes.")
print
if enum or bader:
print("Please add {} to your PATH or move the executables multinum.x, "
"makestr.x and bader to a location in your PATH."
.format(os.path.abspath(".")))
print
print("To use the Materials API, get your Materials API key at "
"https://www.materialsproject.org/profile and add it to your "
"environment")
print("export MAPI_KEY=YOUR_API_KEY")
print | mit |
McCabeJM/sklearn_pycon2015 | notebooks/fig_code/sgd_separator.py | 54 | 1148 | import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.datasets.samples_generator import make_blobs
def plot_sgd_separator():
# we create 50 separable points
X, Y = make_blobs(n_samples=50, centers=2,
random_state=0, cluster_std=0.60)
# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01,
n_iter=200, fit_intercept=True)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
Z = np.empty(X1.shape)
for (i, j), val in np.ndenumerate(X1):
x1 = val
x2 = X2[i, j]
p = clf.decision_function([[x1, x2]])  # decision_function expects a 2D array of samples
Z[i, j] = p[0]
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
ax = plt.axes()
ax.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
ax.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
ax.axis('tight')
if __name__ == '__main__':
plot_sgd_separator()
plt.show()
| bsd-3-clause |
thkhavi/leafangle_supplement | v_sorghum/scripts/calculate_eabs.py | 2 | 2603 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# SKT 10/2014
# Input:
# (a) output file (Eabs) from WriteTable of Caribu in OpenAlea
# Output:
# (a) file that contains Eabs (energy absorbed) per layer, x.
from pylab import *
import matplotlib.pyplot as plt
from operator import itemgetter
import scipy.stats as sp
from collections import defaultdict
import numpy as np
import sys
def consume(iterator, n):
collections.deque(itertools.islice(iterator, n))
if len(sys.argv) <= 1:
print("calculate_eabs.py input_from_caribu.ssv")
sys.exit()
if sys.argv[1] == "--help" or sys.argv[1] == "-h":
print("calculate_eabs.py input_from_caribu.ssv")
sys.exit()
try:
# read in LILI_caribu_input
LILI_caribu_input = [line.strip() for line in open(sys.argv[1])]
LILI_caribu_input = [element.split(' ') for element in LILI_caribu_input]
except IOError:
print("Cannot open target file. Please check your input:")
print("\t$ python calculate_eabs.py input_from_caribu.ssv")
sys.exit()
# write input into LILI_caribu_input
li_tsv_temp=[]
li_tsv_filtered=[]
for i in range(len(LILI_caribu_input)):
if i == 0:
continue # header
for j in range(len(LILI_caribu_input[i])):
li_tsv_temp.append(float(LILI_caribu_input[i][9])) # Eabs
li_tsv_temp.append(float(LILI_caribu_input[i][3])) # plant
li_tsv_temp.append(float(LILI_caribu_input[i][4])) # leaf ?
li_tsv_temp.append(float(LILI_caribu_input[i][5])) # area discretized
li_tsv_filtered.append(li_tsv_temp)
li_tsv_temp=[]
LILI_caribu_input = li_tsv_filtered
i_eabs=0.0
i_area=0.0
li_eabs=[]
# For each plant
for i in range (len(LILI_caribu_input)):
if i == len(LILI_caribu_input)-1:
i_eabs = i_eabs + LILI_caribu_input[i][0]
i_area = i_area + LILI_caribu_input[i][3]
li_tsv_temp.append(i_eabs)
li_eabs.append(li_tsv_temp)
continue
# for each phytomer (leaf or stem)
if (LILI_caribu_input[i][1] == LILI_caribu_input[i+1][1]):
if (LILI_caribu_input[i][2] ==LILI_caribu_input[i+1][2]):
i_eabs = i_eabs + LILI_caribu_input[i][0]
i_area = i_area + LILI_caribu_input[i][3]
else:
li_tsv_temp.append(i_eabs)
i_eabs=0.0
i_area=0.0
else:
i_eabs = i_eabs + LILI_caribu_input[i][0]
i_area = i_area + LILI_caribu_input[i][3]
li_tsv_temp.append(i_eabs)
i_eabs=0.0
i_area=0.0
li_eabs.append(li_tsv_temp)
li_tsv_temp=[]
print '\n'.join(('\t'.join(str(i) for i in item[0:])) for item in li_eabs)
| gpl-2.0 |
elijah513/scikit-learn | examples/decomposition/plot_ica_blind_source_separation.py | 349 | 2228 | """
=====================================
Blind source separation using FastICA
=====================================
An example of estimating sources from noisy data.
:ref:`ICA` is used to estimate sources given noisy measurements.
Imagine 3 instruments playing simultaneously and 3 microphones
recording the mixed signals. ICA is used to recover the sources
i.e. what is played by each instrument. Importantly, PCA fails
at recovering our `instruments` since the related signals reflect
non-Gaussian processes.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from sklearn.decomposition import FastICA, PCA
###############################################################################
# Generate sample data
np.random.seed(0)
n_samples = 2000
time = np.linspace(0, 8, n_samples)
s1 = np.sin(2 * time) # Signal 1 : sinusoidal signal
s2 = np.sign(np.sin(3 * time)) # Signal 2 : square signal
s3 = signal.sawtooth(2 * np.pi * time) # Signal 3: saw tooth signal
S = np.c_[s1, s2, s3]
S += 0.2 * np.random.normal(size=S.shape) # Add noise
S /= S.std(axis=0) # Standardize data
# Mix data
A = np.array([[1, 1, 1], [0.5, 2, 1.0], [1.5, 1.0, 2.0]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
# Compute ICA
ica = FastICA(n_components=3)
S_ = ica.fit_transform(X) # Reconstruct signals
A_ = ica.mixing_ # Get estimated mixing matrix
# We can `prove` that the ICA model applies by reverting the unmixing.
assert np.allclose(X, np.dot(S_, A_.T) + ica.mean_)
# For comparison, compute PCA
pca = PCA(n_components=3)
H = pca.fit_transform(X) # Reconstruct signals based on orthogonal components
###############################################################################
# Plot results
plt.figure()
models = [X, S, S_, H]
names = ['Observations (mixed signal)',
'True Sources',
'ICA recovered signals',
'PCA recovered signals']
colors = ['red', 'steelblue', 'orange']
for ii, (model, name) in enumerate(zip(models, names), 1):
plt.subplot(4, 1, ii)
plt.title(name)
for sig, color in zip(model.T, colors):
plt.plot(sig, color=color)
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.46)
plt.show()
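# Optional sanity check (not part of the original example): ICA recovers the
# sources only up to permutation and scaling, so compare absolute correlations
# between the true and estimated signals rather than expecting S_ == S.
#
#     cross_corr = np.abs(np.corrcoef(S.T, S_.T))[:3, 3:]
#     print(cross_corr.round(2))  # expect one strong entry per row/column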
| bsd-3-clause |
Quantipy/quantipy | tests/test_chain_old.py | 1 | 12467 | import unittest
import os.path
import test_helper
import pandas as pd
from pandas.util.testing import assert_frame_equal
from quantipy.core.stack import Stack
from quantipy.core.chain import Chain
from quantipy.core.link import Link
from quantipy.core.helpers.functions import load_json
from quantipy.core.view_generators.view_maps import QuantipyViews
from quantipy.core.helpers import functions
class TestChainObject(unittest.TestCase):
def setUp(self):
self.path = './tests/'
self.path_chain = './temp.chain'.format(self.path)
# self.path = ''
project_name = 'Example Data (A)'
# Load Example Data (A) data and meta into self
name_data = '%s.csv' % (project_name)
path_data = '%s%s' % (self.path, name_data)
self.example_data_A_data = pd.DataFrame.from_csv(path_data)
name_meta = '%s.json' % (project_name)
path_meta = '%s%s' % (self.path, name_meta)
self.example_data_A_meta = load_json(path_meta)
# The minimum list of variables required to populate a stack with all single*delimited set variations
self.minimum = ['q2b', 'Wave', 'q2', 'q3', 'q5_1']
self.setup_stack_Example_Data_A()
self.setup_chains_Example_Data_A()
def test_save_chain(self):
self.setup_chains_Example_Data_A()
for chain in self.chains:
# Create a dictionary with the attribute structure of the chain
chain_attributes = chain.__dict__
chain_described = chain.describe()
# Save and then load a copy of the chain
chain.save(path=self.path_chain)
loaded_chain = Chain.load(self.path_chain)
# Ensure that we are not comparing the same variable (in memory)
self.assertNotEqual(id(chain), id(loaded_chain))
# Create a dictionary with the attribute structure of the chain
loaded_chain_attributes = loaded_chain.__dict__
loaded_chain_described = loaded_chain.describe()
# Confirm that the chains contain the same views
sort_order = ['data', 'filter', 'x', 'y', 'view']
if pd.__version__ == '0.19.2':
actual = chain_described.sort_values(sort_order).values.tolist()
expected = loaded_chain_described.sort_values(sort_order).values.tolist()
else:
actual = chain_described.sort(sort_order).values.tolist()
expected = loaded_chain_described.sort(sort_order).values.tolist()
self.assertSequenceEqual(actual, expected)
# Make sure that this is working by altering the loaded_stack_attributes
# and comparing the result. (It should fail)
# Change a 'value' in the dict
loaded_chain_attributes['name'] = 'SomeOtherName'
with self.assertRaises(AssertionError):
self.assertEqual(chain_attributes, loaded_chain_attributes)
# reset the value
loaded_chain_attributes['name'] = chain_attributes['name']
self.assertEqual(chain_attributes, loaded_chain_attributes)
# Change a 'key' in the dict
del loaded_chain_attributes['name']
loaded_chain_attributes['new_name'] = chain_attributes['name']
with self.assertRaises(AssertionError):
self.assertEqual(chain_attributes, loaded_chain_attributes)
# reset the value
del loaded_chain_attributes['new_name']
loaded_chain_attributes['name'] = chain_attributes['name']
self.assertEqual(chain_attributes, loaded_chain_attributes)
# Remove a key/value pair
del loaded_chain_attributes['name']
with self.assertRaises(AssertionError):
self.assertEqual(chain_attributes, loaded_chain_attributes)
# Cleanup
if os.path.exists('./tests/{0}.chain'.format(chain.name)):
os.remove('./tests/{0}.chain'.format(chain.name))
def test_auto_orientation(self):
fk = 'no_filter'
xk = self.minimum
yk = ['@'] + self.minimum
views = ['cbase', 'counts', 'c%']
# If multiple x and y keys are given without orient_on
# x-orientation chains are assumed.
chain = self.stack.get_chain(
name='y',
data_keys=self.stack.name,
filters=fk,
x=xk,
y=yk,
views=views
)
self.assertTrue(chain.orientation=='x')
def test_lazy_name(self):
fk = 'no_filter'
xk = self.minimum
yk = ['@'] + self.minimum
views = ['cbase', 'counts', 'c%']
# get chain but do not name - y orientation
chain_y = self.stack.get_chain(
data_keys=self.stack.name,
filters=fk,
x=xk,
y=yk[0],
views=views
)
# get chain but do not name - x orientation
chain_x = self.stack.get_chain(
data_keys=self.stack.name,
filters=fk,
x=xk[0],
y=yk,
views=views
)
# check lazy_name is working as it should be
self.assertEqual(chain_y.name, '[email protected]_1.cbase.counts.c%')
self.assertEqual(chain_x.name, '[email protected]_1.cbase.counts.c%')
def test_derive_attributes(self):
# check chain attributes
self.assertEqual(self.chains[0].name, '@')
self.assertEqual(self.chains[0].orientation, 'y')
self.assertEqual(self.chains[0].source_name, '@')
self.assertEqual(self.chains[0].len_of_axis, 5)
self.assertEqual(self.chains[0].content_of_axis, ['q2b', 'Wave', 'q2', 'q3', 'q5_1'])
self.assertEqual(self.chains[0].views, ['x|f|x:|||cbase', 'x|f|:|||counts', 'x|f|:|y||c%'])
self.assertEqual(self.chains[0].data_key, 'Example Data (A)')
self.assertEqual(self.chains[0].filter, 'no_filter')
self.assertEqual(self.chains[0].source_type, None)
self.assertEqual(self.chains[-1].name, 'q5_1')
self.assertEqual(self.chains[-1].orientation, 'x')
self.assertEqual(self.chains[-1].source_name, 'q5_1')
self.assertEqual(self.chains[-1].len_of_axis, 6)
self.assertEqual(self.chains[-1].content_of_axis, ['@', 'q2b', 'Wave', 'q2', 'q3', 'q5_1'])
self.assertEqual(self.chains[-1].views, ['x|f|x:|||cbase', 'x|f|:|||counts', 'x|f|:|y||c%'])
self.assertEqual(self.chains[-1].data_key, 'Example Data (A)')
self.assertEqual(self.chains[-1].filter, 'no_filter')
self.assertEqual(self.chains[-1].source_type, None)
def test_describe(self):
fk = 'no_filter'
for chain in self.chains:
chain_described = chain.describe()
#test describe() returns a dataframe
self.assertIsInstance(chain_described, pd.DataFrame)
#test describe() returns the expected dataframe - *no args*
if chain.orientation == 'y':
keys = chain[self.stack.name][fk].keys()
views = chain[self.stack.name][fk][keys[0]][chain.source_name].keys()
data = [self.stack.name]*(len(keys)*len(views))
filters = [fk]*(len(keys)*len(views))
x = []
for key in keys:
x.extend([key]*len(views))
y = [chain.source_name]*(len(keys)*len(views))
view = [v for v in views]*len(keys)
ones = [1]*(len(keys)*len(views))
df = pd.DataFrame({'data': data,
'filter': filters,
'x': x,
'y': y,
'view': view,
'#': ones})
df = df[chain_described.columns.tolist()]
assert_frame_equal(chain_described, df)
elif chain.orientation == 'x':
keys = chain[self.stack.name][fk][chain.source_name].keys()
views = chain[self.stack.name][fk][chain.source_name][keys[0]].keys()
data = [self.stack.name]*(len(keys)*len(views))
filters = [fk]*(len(keys)*len(views))
y = []
for key in keys:
y.extend([key]*len(views))
x = [chain.source_name]*(len(keys)*len(views))
view = [v for v in views]*len(keys)
ones = [1]*(len(keys)*len(views))
df = pd.DataFrame({'data': data,
'filter': filters,
'x': x,
'y': y,
'view': view,
'#': ones})
df = df[chain_described.columns.tolist()]
assert_frame_equal(chain_described, df)
@classmethod
def tearDownClass(self):
self.stack = Stack("StackName")
filepath ='./tests/'+self.stack.name+'.stack'
if os.path.exists(filepath):
os.remove(filepath)
def is_empty(self, any_structure):
if any_structure:
#print('Structure is not empty.')
return False
else:
#print('Structure is empty.')
return True
def create_key_stack(self, branch_pos="data"):
""" Creates a dictionary that has the structure of the keys in the Stack
It is used to loop through the stack without affecting it.
"""
key_stack = {}
for data_key in self.stack:
key_stack[data_key] = {}
for the_filter in self.stack[data_key][branch_pos]:
key_stack[data_key][the_filter] = {}
for x in self.stack[data_key][branch_pos][the_filter]:
key_stack[data_key][the_filter][x] = []
for y in self.stack[data_key][branch_pos][the_filter][x]:
link = self.stack[data_key][branch_pos][the_filter][x][y]
if not isinstance(link, Link):
continue
key_stack[data_key][the_filter][x].append(y)
return key_stack
def setup_stack_Example_Data_A(self, fk=None, xk=None, yk=None, views=None, weights=None):
if fk is None:
fk = 'no_filter'
if xk is None:
xk = self.minimum
if yk is None:
yk = ['@'] + self.minimum
if views is None:
views = ['default', 'cbase', 'counts', 'c%']
if not isinstance(weights, list):
weights = [weights]
self.stack = Stack(name="Example Data (A)")
self.stack.add_data(
data_key=self.stack.name,
meta=self.example_data_A_meta,
data=self.example_data_A_data
)
for weight in weights:
self.stack.add_link(
data_keys=self.stack.name,
filters=fk,
x=xk,
y=yk,
views=QuantipyViews(views),
weights=weight
)
def setup_chains_Example_Data_A(self, fk=None, xk=None, yk=None, views=None, orient_on=None):
if fk is None:
fk = 'no_filter'
if xk is None:
xk = self.minimum
if yk is None:
yk = ['@'] + self.minimum
if views is None:
views = [
'x|f|x:|||cbase',
'x|f|:|||counts',
'x|f|:|y||c%'
]
self.chains = []
for y in yk:
self.chains.append(
self.stack.get_chain(
name=y,
data_keys=self.stack.name,
filters='no_filter',
x=xk,
y=y,
views=views
)
)
for x in xk:
self.chains.append(
self.stack.get_chain(
name=x,
data_keys=self.stack.name,
filters='no_filter',
x=x,
y=yk,
views=views
)
)
if __name__ == '__main__':
unittest.main()
| mit |
bmorris3/friedrich | friedrich/fitting.py | 1 | 16547 | # Licensed under the MIT License - see LICENSE.rst
"""
Methods for fitting transit light curves, spot occultations, or both, using
`scipy` minimizers and `emcee`.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import emcee
from scipy import optimize, signal
import matplotlib.pyplot as plt
import batman
from copy import deepcopy
from emcee.utils import MPIPool
import sys
def gaussian(times, amplitude, t0, sigma):
"""
Gaussian function.
Parameters
----------
times : `numpy.ndarray`
Times
amplitude : float
Amplitude of gaussian (not normalized)
t0 : float
Central time in units of `times`
sigma : float
Gaussian width.
Returns
-------
y : `numpy.ndarray`
Gaussian evaluated at `times`
"""
return amplitude * np.exp(-0.5*(times - t0)**2/sigma**2)
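# Minimal usage sketch (added for illustration; the numbers are arbitrary and
# not part of the original module): evaluate a single spot-like bump on a
# day-long time grid.
def _example_gaussian():
    times = np.linspace(0, 1.0, 500)                       # times in days
    bump = gaussian(times, 1e-3, 1.0 / 24, 2.0 / 60 / 24)  # amplitude, t0, sigma
    return bump.max()                                       # ~1e-3, reached near t0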
def peak_finder_chi2(theta, x, y, yerr):
"""
Chi^2 model given parameters `theta` and data {`x`, `y`, `yerr`}
Parameters
----------
theta : list
Trial parameters
x : `numpy.ndarray`
Times [JD]
y : `numpy.ndarray`
Fluxes
yerr : `numpy.ndarray`
Uncertainties on fluxes
Returns
-------
chi2 : float
Chi^2 of the model
"""
model = summed_gaussians(x, theta)
return np.sum((y-model)**2/yerr**2)
def peak_finder(times, residuals, errors, transit_params, n_peaks=4,
plots=False, verbose=False, skip_priors=False):
"""
Find peaks in the residuals from a fit to a transit light curve, which
correspond to starspot occultations.
Parameters
----------
times : `numpy.ndarray`
Times [JD]
residuals : `numpy.ndarray`
Fluxes
errors : `numpy.ndarray`
Uncertainties on residuals
transit_params : `~batman.TransitParams`
Transit light curve parameters
n_peaks : bool (optional)
Number of peaks to search for. If more than `n_peaks` are found, return
only the `n_peaks` largest amplitude peaks.
plots : bool (optional)
Show diagnostic plots
verbose : bool (optional)
Warn if no peaks are found
Returns
-------
result_in_transit : list or `None`
List of all spot parameters in [amp, t0, sig, amp, t0, sig, ...] order
for spots detected.
Notes
-----
Review of minimizers tried for `peak_finder`:
`~scipy.optimize.fmin` gets amplitudes right, but doesn't vary sigmas much.
For this reason, it tends to do a better job of finding nearby, semi-
overlapping spots.
`~scipy.optimize.fmin_powell` varies amplitudes and sigmas lots, but
as a result, sometimes two nearby spots are fit with one wide gaussian.
"""
# http://stackoverflow.com/a/25666951
# Convolve residuals with a gaussian, find relative maxima
n_points_kernel = 100
window = signal.general_gaussian(n_points_kernel+1, p=1, sig=10)
filtered = signal.fftconvolve(window, residuals)
filtered = (np.max(residuals) / np.max(filtered)) * filtered
filtered = np.roll(filtered, int(-n_points_kernel/2))[:len(residuals)]
maxes = signal.argrelmax(filtered)[0]
# Only take maxima, not minima
maxes = maxes[filtered[maxes] > 0]
lower_t_bound, upper_t_bound = get_in_transit_bounds(times, transit_params)
maxes_in_transit = maxes[(times[maxes] < upper_t_bound) &
(times[maxes] > lower_t_bound)]
# Only take the `n_peaks` highest peaks
if len(maxes_in_transit) > n_peaks:
highest_maxes_in_transit = maxes_in_transit[np.argsort(filtered[maxes_in_transit])][-n_peaks:]
else:
highest_maxes_in_transit = maxes_in_transit
# plt.plot(times, filtered)
# plt.plot(times, residuals, '.')
# plt.plot(times[maxes_in_transit], filtered[maxes_in_transit], 'ro')
# [plt.axvline(times[m], color='k') for m in maxes]
# [plt.axvline(times[m], color='m') for m in maxes_in_transit]
# if len(maxes_in_transit) > n_peaks:
# [plt.axvline(times[m], color='b') for m in highest_maxes_in_transit]
# plt.axvline(upper_t_bound, color='r')
# plt.axvline(lower_t_bound, color='r')
# plt.show()
if len(maxes_in_transit) == 0:
if verbose:
print('no maxes found')
return None
peak_times = times[highest_maxes_in_transit]
peak_amplitudes = residuals[highest_maxes_in_transit]
peak_sigmas = np.zeros(len(peak_times)) + 2./60/24 # 2 min
input_parameters = np.vstack([peak_amplitudes, peak_times,
peak_sigmas]).T.ravel()
result = optimize.fmin_powell(peak_finder_chi2, input_parameters,
disp=False, args=(times, residuals, errors),
xtol=0.00001, ftol=0.00001)
# if np.all(result == input_parameters):
# print('oh no!, fmin didnt produce a fit')
# Only use gaussians that occur in transit (fmin fit is unbounded in time)
# and amplitude is positive:
split_result = np.split(result, len(input_parameters)/3)
result_in_transit = []
for amplitude, t0, sigma in split_result:
depth = transit_params.rp**2
trial_params = np.array([amplitude, t0, sigma])
if not np.isinf(lnprior(trial_params, residuals, lower_t_bound,
upper_t_bound, transit_params, skip_priors)):
result_in_transit.extend([amplitude, t0, np.abs(sigma)])
result_in_transit = np.array(result_in_transit)
if len(result_in_transit) == 0:
return None
if plots:
fig, ax = plt.subplots(3, 1, figsize=(8, 10), sharex=True)
ax[0].errorbar(times, residuals, fmt='.', color='k')
[ax[0].axvline(t) for t in result_in_transit[1::3]]
ax[0].plot(times, summed_gaussians(times, input_parameters), 'r')
ax[0].axhline(0, color='k', ls='--')
ax[0].set_ylabel('Transit Residuals')
ax[1].errorbar(times, residuals, fmt='.', color='k')
ax[1].plot(times, summed_gaussians(times, result_in_transit), 'r')
ax[1].axhline(0, color='k', ls='--')
ax[1].set_ylabel('Residuals')
ax[2].errorbar(times,
residuals - summed_gaussians(times, result_in_transit),
fmt='.', color='k')
#ax[1].errorbar(times, gaussian_model, fmt='.', color='r')
ax[2].axhline(0, color='k', ls='--')
ax[2].set_ylabel('Residuals')
for axis in ax:
axis.axvline(upper_t_bound, color='r')
axis.axvline(lower_t_bound, color='r')
fig.tight_layout()
plt.show()
return result_in_transit
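# Illustration only (not called anywhere in friedrich): the Notes above compare
# scipy minimizers, so this sketch minimizes peak_finder_chi2 on a synthetic
# single-spot residual with both fmin and fmin_powell. All values are invented.
def _example_minimizer_comparison():
    times = np.linspace(0, 0.1, 200)
    errors = np.zeros_like(times) + 1e-4
    residuals = gaussian(times, 5e-4, 0.05, 3.0 / 60 / 24)
    guess = np.array([3e-4, 0.045, 2.0 / 60 / 24])
    fit_fmin = optimize.fmin(peak_finder_chi2, guess, disp=False,
                             args=(times, residuals, errors))
    fit_powell = optimize.fmin_powell(peak_finder_chi2, guess, disp=False,
                                      args=(times, residuals, errors))
    return fit_fmin, fit_powell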
def generate_lc(times, transit_params):
"""
Make a transit light curve.
Parameters
----------
times : `numpy.ndarray`
Times in JD
transit_params : `~batman.TransitParams`
Transit light curve parameters
Returns
-------
model_flux : `numpy.ndarray`
Fluxes from model transit light curve
"""
exp_time = 1./60/24 # 1 minute cadence -> [days]
m = batman.TransitModel(transit_params, times, supersample_factor=7,
exp_time=exp_time)
model_flux = m.light_curve(transit_params)
return model_flux
def summed_gaussians(times, spot_parameters):
"""
Take a list of gaussian input parameters (3 parameters per gaussian), make
a model of the sum of all of those gaussians.
Parameters
----------
times : `numpy.ndarray`
Times in JD
spot_parameters : list
List of all spot parameters in [amp, t0, sig, amp, t0, sig, ...] order
Returns
-------
model : `numpy.ndarray`
Sum of gaussians
"""
model = np.zeros(len(times), dtype=np.float128)
if spot_parameters is not None and len(spot_parameters) % 3 == 0:
split_input_parameters = np.split(np.array(spot_parameters),
len(spot_parameters)/3)
for amplitude, t0, sigma in split_input_parameters:
model += gaussian(times, amplitude, t0, sigma)
return model
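# Usage sketch (illustrative values only): two overlapping spot signatures are
# described by a flat [amp, t0, sigma, amp, t0, sigma] list, which is the
# ordering peak_finder and the MCMC routines below expect.
def _example_summed_gaussians():
    times = np.linspace(0, 0.1, 300)
    spot_parameters = [4e-4, 0.03, 2.0 / 60 / 24,
                       2e-4, 0.05, 3.0 / 60 / 24]
    return summed_gaussians(times, spot_parameters)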
def get_in_transit_bounds(times, params, duration_fraction=0.9):
"""
Approximate the boundaries of "in-transit" for a transit occurring
during times `times`.
Parameters
----------
times : `numpy.ndarray`
Times in JD
params : `~batman.TransitParams`
Transit light curve parameters
duration_fraction : float
Fraction of the full transit duration to consider "in-transit"
Returns
-------
lower_t_bound : float
Earliest in-transit time [JD]
upper_t_bound : float
Latest in-transit time [JD]
"""
phased = (times - params.t0) % params.per
near_transit = ((phased < params.duration*(0.5*duration_fraction)) |
(phased > params.per -
params.duration*(0.5*duration_fraction)))
if np.count_nonzero(near_transit) == 0:
near_transit = 0
return times[near_transit].min(), times[near_transit].max()
def lnprior(theta, y, lower_t_bound, upper_t_bound, transit_params,
skip_priors):
"""
Log prior for `emcee` runs.
Parameters
----------
theta : list
Fitting parameters
y : `numpy.ndarray`
Fluxes
lower_t_bound : float
Earliest in-transit time [JD]
upper_t_bound : float
Latest in-transit time [JD]
skip_priors : bool
Should the priors be skipped?
Returns
-------
lnpr : float
Log-prior for trial parameters `theta`
"""
spot_params = theta
amplitudes = spot_params[::3]
t0s = spot_params[1::3]
sigmas = spot_params[2::3]
depth = transit_params.rp**2
min_sigma = 1.5/60/24
max_sigma = transit_params.duration # 6.0e-3 # upper_t_bound - lower_t_bound
t0_ok = ((lower_t_bound < t0s) & (t0s < upper_t_bound)).all()
sigma_ok = ((min_sigma < sigmas) & (sigmas < max_sigma)).all()
if not skip_priors:
amplitude_ok = ((0 <= amplitudes) & (amplitudes < depth)).all()
else:
amplitude_ok = (amplitudes >= 0).all()
if amplitude_ok and t0_ok and sigma_ok:
return 0.0
return -np.inf
def lnlike(theta, x, y, yerr, transit_params, skip_priors=False):
"""
Log-likelihood of data given model.
Parameters
----------
theta : list
Trial parameters
x : `numpy.ndarray`
Times in JD
y : `numpy.ndarray`
Fluxes
yerr : `numpy.ndarray`
Uncertainties on fluxes
transit_params : `~batman.TransitParams`
Transit light curve parameters
Returns
-------
lnp : float
Log-likelihood of data given model, i.e. ln( P(x | theta) )
"""
model = spotted_transit_model(theta, x, transit_params, skip_priors)
return -0.5*np.sum((y-model)**2/yerr**2)
def lnprob(theta, x, y, yerr, lower_t_bound, upper_t_bound, transit_params,
skip_priors):
"""
Log probability.
Parameters
----------
theta : list
Trial parameters
x : `numpy.ndarray`
Times in JD
y : `numpy.ndarray`
Fluxes
yerr : `numpy.ndarray`
Uncertainties on fluxes
lower_t_bound : float
Earliest in-transit time [JD]
upper_t_bound : float
Latest in-transit time [JD]
transit_params : `~batman.TransitParams`
Transit light curve parameters
Returns
-------
"""
lp = lnprior(theta, y, lower_t_bound, upper_t_bound, transit_params,
skip_priors)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(theta, x, y, yerr, transit_params, skip_priors)
def spotted_transit_model(theta, times, transit_params, skip_priors=False):
"""
Compute sum of spot model and transit model
Parameters
----------
theta : list
Trial parameters
times : `numpy.ndarray`
Times in JD
transit_params : `~batman.TransitParams`
Transit light curve parameters
Returns
-------
f : `numpy.ndarray`
Model fluxes
"""
spot_params = theta
# Set depth according to input parameters, compute transit model
lower_t_bound, upper_t_bound = get_in_transit_bounds(times, transit_params,
duration_fraction=1.0)
transit_model = generate_lc(times, transit_params)
spot_model = summed_gaussians(times, spot_params)
# Sum the models only where planet is in transit
transit_plus_spot_model = transit_model
in_transit_times = (times < upper_t_bound) & (times > lower_t_bound)
transit_plus_spot_model[in_transit_times] += spot_model[in_transit_times]
if not skip_priors:
# Force all model fluxes <=1
transit_plus_spot_model[transit_plus_spot_model > 1] = 1.0
return transit_plus_spot_model
def spotted_transit_model_individuals(theta, times, transit_params):
"""
Compute sum of each spot model and the transit model individually,
return a list of each.
Parameters
----------
theta : list
Trial parameters
times : `numpy.ndarray`
Times in JD
transit_params : `~batman.TransitParams`
Transit light curve parameters
Returns
-------
f_list : list
List of model fluxes
"""
spot_params = theta
split_spot_params = np.split(spot_params, len(spot_params)/3)
return [spotted_transit_model(spot_params, times, transit_params)
for spot_params in split_spot_params]
def run_emcee_seeded(light_curve, transit_params, spot_parameters, n_steps,
n_walkers, output_path, burnin=0.7,
n_extra_spots=1, skip_priors=False):
"""
Fit for transit depth and spot parameters given initial guess informed by
results from `peak_finder`
Parameters
----------
light_curve : `friedrich.lightcurve.TransitLightCurve`
Light curve to fit
transit_params : `~batman.TransitParams`
Transit light curve parameters
spot_parameters : list
List of all spot parameters in [amp, t0, sig, amp, t0, sig, ...] order
n_steps : int
Number of MCMC steps to take
n_walkers : int
Number of MCMC walkers to initialize (must be even, more than twice the
number of free params in fit)
output_path : str
Path to HDF5 archive output for storing results
burnin : float
Fraction of total number of steps to save to output (will truncate
the first `burnin` of the light curve)
n_extra_spots : int
Add `n_extra_spots` extra spots to the fit to soak up spots not
predicted by `peak_finder`
skip_priors : bool
Should a prior be applied to the depth parameter?
Returns
-------
sampler : `emcee.EnsembleSampler`
Sampler object returned by `emcee`
"""
times = light_curve.times.jd
fluxes = light_curve.fluxes
errors = light_curve.errors
lower_t_bound, upper_t_bound = get_in_transit_bounds(times, transit_params)
amps = spot_parameters[::3]
init_depth = transit_params.rp**2
extra_spot_params = [0.1*np.min(amps), np.mean(times),
0.05*(upper_t_bound-lower_t_bound)]
fit_params = np.concatenate([spot_parameters,
n_extra_spots*extra_spot_params])
ndim, nwalkers = len(fit_params), n_walkers
pos = []
while len(pos) < nwalkers:
realization = fit_params + 1e-5*np.random.randn(ndim)
if not np.isinf(lnprior(realization, fluxes, lower_t_bound,
upper_t_bound, transit_params, skip_priors)):
pos.append(realization)
print('Begin MCMC...')
pool = MPIPool(loadbalance=True)
if not pool.is_master():
pool.wait()
sys.exit(0)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob,
args=(times, fluxes, errors, lower_t_bound,
upper_t_bound, transit_params,
skip_priors),
pool=pool)
sampler.run_mcmc(pos, n_steps)
print('Finished MCMC...')
pool.close()
burnin_len = int(burnin*n_steps)
from .storage import create_results_archive
create_results_archive(output_path, light_curve, sampler, burnin_len, ndim)
return sampler
| mit |
xpspectre/multiple-myeloma | prep_baseline_clinical_data.py | 1 | 3680 | # Run this after prep_clinical_data.py
import os
import pandas as pd
import numpy as np
# from fancyimpute import KNN, MICE
data_dir = 'data/processed'
# Load study data - to keep just the CoMMpass patients
study_data = pd.read_csv(os.path.join(data_dir, 'patient_study.csv'))
study_data.set_index('PUBLIC_ID', inplace=True)
# Load demographic data
demo_data = pd.read_csv(os.path.join(data_dir, 'patient_data.csv'))
demo_data.set_index('PUBLIC_ID', inplace=True)
# Load visit data
visit_data = pd.read_csv(os.path.join(data_dir, 'clinical_data.csv'))
# Select just the baseline/screening data
visit_data = visit_data.loc[visit_data['VISIT'] <= 0]
# Combine rows for each patient
# Averaging the rows takes care of missing data/NaNs properly
# unique_ids = data['PUBLIC_ID'].unique()
# print(unique_ids)
visit_data = visit_data.groupby('PUBLIC_ID').mean()
visit_data.drop(['VISIT', 'VISITDY'], axis=1, inplace=True)
# Combine demographic and visit data
data = demo_data
data = data.join(visit_data)
# Only keep CoMMpass patients
data = data[study_data['STUDY_ID'] == 1]
# Drop cols that represent change over last visit
data.drop(['AT_INCREASEOF25F', 'AT_SERUMMCOMPONE', 'AT_URINEMCOMPONE', 'AT_ONLYINPATIENT', 'AT_ONLYINPATIENT2', 'AT_DEVELOPMENTOF'], axis=1, inplace=True)
# Keep only the cols that have >= threshold fraction of entries present
# Set this to determine how much we have to consider missing data
# A smallish amount of missing data should be pretty straightforward imputation
# More missing data is harder
# Which cols can be used for regression of missing data? (if our method requires that)
keep_thres = 0.5
cols = list(data)
present = []
N, N_cols = data.shape
for col in cols:
n = pd.notnull(data[col]).sum()
present.append(float(n)/N)
present = np.array(present)
drop_cols = np.where(present < keep_thres)[0]
data.drop(data.columns[drop_cols], axis=1, inplace=True)
print('Dropped {n}/{N} cols that had less than {x} frac of values'.format(n=drop_cols.size, N=N_cols, x=keep_thres))
# Load endpoints and join/split with data
endp_data = pd.read_csv(os.path.join(data_dir, 'patient_endp.csv'))
endp_data.set_index('PUBLIC_ID', inplace=True)
endp_cols = list(endp_data)
data_ = data
data_ = data_.join(endp_data)
data = data_.drop(endp_cols, axis=1)
endp_data = data_[endp_cols]
# Save combined baseline patient data
data.to_csv(os.path.join(data_dir, 'baseline_clinical_data.csv'))
endp_data.to_csv(os.path.join(data_dir, 'baseline_clinical_endp.csv'))
# Impute missing data
# If all the cols are allowed to be treated as numeric vals, then this is OK as is
# Otherwise, if some cols still need to be categorical/indicator, then threshold and convert
# Not sure if these funs below are supposed to return multiple datasets?
# May want to recombine categorical cols into 1 col, then multinomial or softmax logistic regression on them in MI,
# then resplit
do_mi = False
if do_mi:
cols = list(data)
inds = data.index.values
X = data.as_matrix()
X_filled_knn = KNN(k=3).complete(X)
data_filled_knn = pd.DataFrame(data=X_filled_knn, columns=cols)
data_filled_knn.insert(0, 'PUBLIC_ID', inds)
data_filled_knn.set_index('PUBLIC_ID', inplace=True)
X_filled_mice = MICE().complete(X)
data_filled_mice = pd.DataFrame(data=X_filled_mice, columns=cols)
data_filled_mice.insert(0, 'PUBLIC_ID', inds)
data_filled_mice.set_index('PUBLIC_ID', inplace=True)
# Save imputed data ready for standard analysis
data_filled_knn.to_csv(os.path.join(data_dir, 'baseline_clinical_data_imputed_knn.csv'))
data_filled_mice.to_csv(os.path.join(data_dir, 'baseline_clinical_data_imputed_mice.csv'))
| mit |
samuel1208/scikit-learn | sklearn/__init__.py | 154 | 3014 | """
Machine learning module for Python
==================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.org for complete documentation.
"""
import sys
import re
import warnings
# Make sure that DeprecationWarning within this package always gets printed
warnings.filterwarnings('always', category=DeprecationWarning,
module='^{0}\.'.format(re.escape(__name__)))
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.17.dev0'
try:
# This variable is injected in the __builtins__ by the build
# process. It used to enable importing subpackages of sklearn when
# the binaries are not built
__SKLEARN_SETUP__
except NameError:
__SKLEARN_SETUP__ = False
if __SKLEARN_SETUP__:
sys.stderr.write('Partial import of sklearn during the build process.\n')
# We are not importing the rest of the scikit during the build
# process, as it may not be compiled yet
else:
from . import __check_build
from .base import clone
__check_build # avoid flakes unused variable error
__all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition',
'cross_validation', 'datasets', 'decomposition', 'dummy',
'ensemble', 'externals', 'feature_extraction',
'feature_selection', 'gaussian_process', 'grid_search',
'isotonic', 'kernel_approximation', 'kernel_ridge',
'lda', 'learning_curve',
'linear_model', 'manifold', 'metrics', 'mixture', 'multiclass',
'naive_bayes', 'neighbors', 'neural_network', 'pipeline',
'preprocessing', 'qda', 'random_projection', 'semi_supervised',
'svm', 'tree',
# Non-modules:
'clone']
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs"""
import os
import numpy as np
import random
# It could have been provided in the environment
_random_seed = os.environ.get('SKLEARN_SEED', None)
if _random_seed is None:
_random_seed = np.random.uniform() * (2 ** 31 - 1)
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
| bsd-3-clause |
AWPorter/aima-python | grading/bayesian-submissions.py | 15 | 2415 | import importlib
import traceback
from grading.util import roster, print_table
# from logic import FolKB
# from utils import expr
import os
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
def indent(howMuch = 1):
space = ' '
for i in range(1, howMuch):
space += ' '
return space
def printKB(label, kb):
print(indent(), label + ' example:')
print(indent(2), 'knowledge base:')
for clause in kb.clauses:
print(indent(3), str(clause))
def printResults(query, gen, limit=3):
for count in range(limit):
try:
long = next(gen)
except StopIteration:
print()
return
short = {}
for v in long:
if v in query.args:
short[v] = long[v]
print(short, end=' ')
print('...')
def tryOne(label, frame):
fit = gnb.fit(frame.data, frame.target)
print('')
print_table(fit.theta_,
header=[frame.feature_names],
topLeft=['Means:'],
leftColumn=frame.target_names,
numfmt='%6.3f',
njust='center',
tjust='rjust',
)
y_pred = fit.predict(frame.data)
print("Number of mislabeled points out of a total %d points : %d"
% (len(frame.data), (frame.target != y_pred).sum()))
def tryExamples(examples):
for label in examples:
tryOne(label, examples[label])
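# Sketch of the expected submission format (hypothetical, for reference only):
# each student's myBayes module exposes an `Examples` dict whose values look
# like sklearn dataset bundles, i.e. objects with .data, .target,
# .feature_names and .target_names attributes.
#
#     from sklearn import datasets
#     example_submission = {'iris': datasets.load_iris()}
#     tryExamples(example_submission)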
submissions = {}
scores = {}
message1 = 'Submissions that compile:'
root = os.getcwd()
for student in roster:
try:
os.chdir(root + '/submissions/' + student)
# http://stackoverflow.com/a/17136796/2619926
mod = importlib.import_module('submissions.' + student + '.myBayes')
submissions[student] = mod.Examples
message1 += ' ' + student
except ImportError:
pass
except:
traceback.print_exc()
os.chdir(root)
print(message1)
print('----------------------------------------')
for student in roster:
if not student in submissions.keys():
continue
scores[student] = []
try:
examples = submissions[student]
print('Bayesian Networks from:', student)
tryExamples(examples)
except:
traceback.print_exc()
print(student + ' scores ' + str(scores[student]) + ' = ' + str(sum(scores[student])))
print('----------------------------------------')
| mit |
dalejung/pandas-composition | pandas_composition/test/test_series.py | 1 | 4278 | from unittest import TestCase
import pickle
import pandas as pd
import pandas.util.testing as tm
import numpy as np
import pandas_composition as composition
UserSeries = composition.UserSeries
from pandas_composition.util.tempdir import TemporaryDirectory
class TestSeries(TestCase):
def __init__(self, *args, **kwargs):
TestCase.__init__(self, *args, **kwargs)
def runTest(self):
pass
def setUp(self):
pass
def test_timeseries_vs_series(self):
"""
Due to the auto changing of Series to TimeSeries when
having a DatetimeIndex my _wrap check had a problem
with its direct check. Technically, UserSeries has
two pandas types, pd.Series and pd.TimeSeries
"""
class SubSeries(UserSeries):
pass
# check pd.Series
s = SubSeries(range(10))
bools = s > 0
assert type(bools) is SubSeries
# check TimeSeries
ind = pd.date_range(start="2000", freq="D", periods=10)
s = SubSeries(range(10), index=ind)
bools = s > 0
assert type(bools) is SubSeries
def test_series_pickle(self):
"""
Test that the UserSeries pickles correctly
"""
s = UserSeries(range(10))
s.frank = '123'
with TemporaryDirectory() as td:
fn = td + '/test.save'
with open(fn, 'wb') as f:
pickle.dump(s, f, protocol=0)
with open(fn, 'rb') as f:
test = pickle.load(f)
tm.assert_almost_equal(s, test)
assert isinstance(test, UserSeries)
assert test.frank == '123'
def test_init_args(self):
"""
Support init params for things like `series + 1`. While metadata propagates,
currently (2013/07/01) wrapping fails because it calls the constructor instead
of calling .view
"""
class SubSeries(UserSeries):
def __init__(self, *args, **kwargs):
bob = kwargs.pop('bob')
self.bob = bob
super(SubSeries, self).__init__(*args, **kwargs)
ss = SubSeries(range(10), bob=123)
assert ss.bob == 123
test = ss + 1 # currently errors
assert test.bob == 123
def test_init_args_set_meta_check(self):
"""
Support init params for things like `series + 1`. While metadata propagates,
currently (2013/07/01) wrapping fails because it calls the constructor instead
of calling .view
"""
class SubSeries(UserSeries):
def __init__(self, *args, **kwargs):
bob = kwargs.pop('bob')
self.bob = bob
super(SubSeries, self).__init__(*args, **kwargs)
ss = SubSeries(range(10), bob=123)
assert ss.bob == 123
# pandas constructors vars go to pandas object
# this is due to the fact that pandas sets its init args as
# member variables
ss.copy = True
assert 'copy' not in ss.meta
assert 'copy' in ss.pobj.__dict__
try:
ss.set_meta('copy', False)
except:
pass
else:
assert False, 'copy should fail as it is a constructor arg'
def test_monkeyed_pandas_object(self):
"""
A monkey-patched method on base pandas object is callable
but will pass in that base type instead of the subclass
"""
return # not sure if this is error
def type_method(self):
return type(self)
pd.Series.type_method = type_method
class SubSeries(UserSeries):
pass
s = SubSeries(range(10))
t = s.type_method()
assert t is SubSeries
def test_np_where(self):
us = UserSeries(range(10))
bools = us > 5
tvals = np.repeat(1, len(us))
fvals = np.repeat(0, len(us))
wh = np.where(bools, tvals, fvals)
assert isinstance(wh, UserSeries)
def test_series_view(self):
"""
"""
us = UserSeries(range(10))
us.view('i8')
class SubSeries(UserSeries):
pass
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],exit=False)
| mit |
airanmehr/bio | Scripts/Miscellaneous/CSE280A/Assigment1.py | 1 | 3969 | '''
Copyleft Jan 15, 2016 Arya Iranmehr, PhD Student, Bafna Lab, UC San Diego, Email: [email protected]
'''
import numpy as np; np.set_printoptions(linewidth=200, precision=5, suppress=True)
import pandas as pd; pd.options.display.max_rows=60;pd.options.display.max_columns=50;pd.options.display.expand_frame_repr=False
import pylab as plt; import matplotlib as mpl
import os; home=os.path.expanduser('~') +'/'
path=home+'datasets/cse280a/'
from time import time
def problem3():
b=np.log(np.array([18,21,12,7,3,5])/200.)
A=np.array([[1, 1, 0, 0],
[1, 0, 1, 0],
[1, 0, 0, 1],
[0, 1, 1, 0],
[0, 1, 0, 1],
[0, 0, 1, 1]])
print 'Problem 3 Solution:',np.exp(np.linalg.pinv(A).dot(b))
def fastPylogeny(A):
A=radixSort(A.T).values
cladeAncestor=0
for i in range(A.shape[0]):
if (A[cladeAncestor] == A[i]).all(): continue
if (A[cladeAncestor]* A[i]).sum(): #has an intersection
if ((1-A[cladeAncestor])* A[i]).sum()!=0:
return False
else:
cladeAncestor=i
return True
def problem4():
def loadData(fname):
return pd.DataFrame(map(lambda x: list(str.strip(x)), open(path+fname).readlines())).astype(int)
a=loadData('a1data{}.txt'.format(1))
print 'Problem 4',
print 'reading data...'
Data=[loadData('a1data{}.txt'.format(i)) for i in range(1,7)]
print 'testing for perfect pylogeny...'
times=[]
for i,a in enumerate(Data):
start=time()
testResult=fastPylogeny(a)
elapsed=time()-start
print 'Dataset {} does{} have a perfect phylogeny. Done in {:.1f} secs, mn={:.0e}).'.format(i+1,(' NOT','')[testResult],elapsed, a.shape[0]*a.shape[1])
times.append((a.shape[0]*a.shape[1],elapsed))
a=pd.DataFrame(times); a.set_index(0,inplace=True);
a.plot(style='-o',logx=True,logy=True,grid=True,legend=False,linewidth=2,markersize=10);plt.xlabel('n x m');plt.ylabel('Time');plt.xlim(80,1.2*Data[-1].shape[0]*Data[-1].shape[1])
plt.show()
def radixSort(A):
idx=np.arange(A.shape[0])
for j in range(A.shape[1])[::-1]:
idx=idx[np.append(np.where(A.iloc[idx,j].values)[0],np.where(1-A.iloc[idx,j].values)[0])]
return A.iloc[idx]
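# Small illustration (toy values, not used by the assignment): radixSort stably
# orders the rows of a 0/1 DataFrame so that rows with 1s in earlier columns
# come first (descending lexicographic order); fastPylogeny applies it to the
# transposed character matrix before scanning for conflicting clades.
def _radix_sort_example():
    toy = pd.DataFrame([[0, 1, 1],
                        [1, 0, 1],
                        [1, 1, 0]])
    return radixSort(toy)  # rows come back in the order [1,1,0], [1,0,1], [0,1,1]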
# problem4()
def getLevelTree(a):
def getBranch(a,mu,allele=0):
idx=a.iloc[:,mu]==allele
if idx.sum():
return getLevelTree(a.ix[idx,a.columns!=a.columns[mu]])
else:
return None,None
if a.shape[0]==1: return [a.index[0],None,None], None
a=a.ix[:,a.sum(0)!=a.shape[0]]
root = a.index[a.sum(1) == 0][0]
# print ','.join([root] + list(a.index[ a.duplicated()]))
a=a.loc[a.index!=root]
a.sum(0)
mu = a.sum(0).values.argsort()[-1]
left,leftg=getBranch(a, mu, allele=0)
right, rightg=getBranch(a, mu, allele=1)
if root=='12':
root='12,15'
left=None;leftg=None
graph={root:[]}
if left is not None : graph[root].append(str(left[0]))
if right is not None: graph[root].append(str(right[0]))
if leftg is not None: graph.update(leftg)
if rightg is not None: graph.update(rightg)
if root==1:
graph= {str(k):v for k,v in graph.items()}
print graph
return [root,left, right ],graph
def problem5():
a=pd.read_csv(path+'a1globedata.txt',sep=' ',header=0,index_col=0)
a.index=a.index.astype(str)
d=getLevelTree(a)[1]
import pygraphviz as pg
# d={'11': ['4'], '10': ['6', '5'], '13': ['11', '8'], '12,15': ['3'], '16': ['9'], '1': ['14', '7'], '2': ['12,15'], '4': ['10'], '7': ['16', '13'], '8': ['2']}
A=pg.AGraph(d,strict=False,directed=True)
A.node_attr['fontsize']='16'
A.node_attr['color']='goldenrod2'
A.node_attr['style']='filled'
A.layout(prog='dot')
A.draw(home+'migration.png')
problem3()
problem4()
problem5()
| mit |
BMJHayward/infusionsoft_xpmt | examples/scrap.py | 1 | 3586 | import glob
import os, os.path
import sys
import csv
import dataserv
import matplotchart as mpc
import statistics
import sqlite3
from datetime import date, datetime
import time
from dateutil import parser
def filelinecount(filename):
if type(filename) != str:
filename = str(filename)
with open(filename) as file:
for i, l in enumerate(file):
pass
return i + 1
def linecount():
numlines = []
for file in list(os.walk('.'))[0][2]:
if file.endswith('py'):
try:
numlines.append(filelinecount(file))
except: pass
return sum(numlines)
def plottest():
import matplotchart as mpc
import matplotlib.pyplot as plt
import pandas as pd
qdata = mpc.sqltopandas()
dates, values = qdata['Order Date'], qdata['Order Total']
mpc.plotdates(dates, values)
def datefromcsv(file):
from collections import defaultdict
values = defaultdict(int)  # date string -> line count (was referenced without being initialised)
reader = csv.DictReader(file, delimiter=';')  # read the csv file
for row in reader:
date = datetime.strptime(row['Order Date'], '%Y-%m-%d %H:%M:%S') #datetime value in the right date format
values[date.strftime('%Y-%m-%d')] += 1 #increment the date with a step of 1
for date, value in sorted(values.items()):
result = (value / 3927.2) * 100  # SLA calculation using the theoretical number of lines
print('Date: {}'.format(date))  # SLA display
print('Result: {}'.format(result))
class SQLtoPandas:
''' Basic SQL tasks in pandas DataFrames. '''
# Reference notes: common SQL statements and their pandas equivalents
# (col1/col2/col3 and <table> are placeholders for real column/table names).
#
# Summary statistics of each numeric column
#     df.describe()
# SELECT * FROM <table> LIMIT 10
#     df[:10]
# SELECT col1, col2 FROM <table> LIMIT 3
#     df[[col1, col2]][:3]
# SELECT col1, col2, col3 FROM <table> ORDER BY col3 LIMIT 3
#     df.sort(col3)[[col1, col2, col3]][:3]
# SELECT col1, col2, col3 FROM <table> ORDER BY col3 DESC LIMIT 3
#     df.sort(col3, ascending=False)[[col1, col2, col3]][:3]
# SELECT * FROM <table> WHERE col1='somevalue' OR col1='someother' ORDER BY col2 DESC LIMIT 5
#     df[(df[col1]=='somevalue') | (df[col1]=='someother')].sort(col2, ascending=False)[:5]
# SELECT * FROM <table> WHERE col1 < 9000 ORDER BY col3 DESC LIMIT 1
#     df[df[col1]<9000].sort(col3, ascending=False)[:1]
# SELECT COUNT(DISTINCT(col1)) FROM <table>
#     len(df[col1].unique())
# SELECT DISTINCT(col1) FROM (SELECT * FROM <table> ORDER BY col2 DESC LIMIT 20)
#     df.sort(col2, ascending=False)[:20][col1].unique()
# SELECT col1, COUNT(col1) AS alt_col1_name FROM
#     (SELECT * FROM <table> ORDER BY col3 LIMIT 100) GROUP BY col1 ORDER BY alt_col1_name DESC
#     df.sort(col3)[:100][col1].value_counts()
# SELECT col1, AVG(col2), AVG(col3) FROM
#     (SELECT * FROM <table> ORDER BY col3 DESC LIMIT 100) GROUP BY col1
#     df.sort(col3, ascending=False)[:100].groupby(col1).mean()[[col2, col3]]
# SELECT col1, COUNT(col1) FROM <table> GROUP BY col1
#     df.groupby(col1).count()
# SELECT * FROM <table1> LEFT JOIN <table2> ON <table1>.col1=<table2>.col1 LIMIT 1
#     df1 = pd.read_csv('df1.csv'); df2 = pd.read_csv('df2.csv'); df1.merge(df2, on=col1)[:1]
def __init__(self, *csvfiles):
""" Read each CSV file into a DataFrame and keep them all in self.df_array. """
import pandas as pd
self.df_array = []
for csvfile in csvfiles:
self.df = pd.read_csv(csvfile)
self.df.head()
self.df_array.append(self.df)
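# Hypothetical usage sketch (file and column names are placeholders): load one
# or more CSV files, then apply the pandas equivalents listed above to the
# resulting frames.
#
#     tables = SQLtoPandas('orders.csv', 'customers.csv')
#     orders = tables.df_array[0]
#     orders.sort('Order Total', ascending=False)[:3]  # ORDER BY "Order Total" DESC LIMIT 3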
if __name__ == "__main__":
x = linecount()
print(x)
| mit |
datapythonista/pandas | pandas/tests/frame/methods/test_count.py | 3 | 1081 | from pandas import (
DataFrame,
Series,
)
import pandas._testing as tm
class TestDataFrameCount:
def test_count(self):
# corner case
frame = DataFrame()
ct1 = frame.count(1)
assert isinstance(ct1, Series)
ct2 = frame.count(0)
assert isinstance(ct2, Series)
# GH#423
df = DataFrame(index=range(10))
result = df.count(1)
expected = Series(0, index=df.index)
tm.assert_series_equal(result, expected)
df = DataFrame(columns=range(10))
result = df.count(0)
expected = Series(0, index=df.columns)
tm.assert_series_equal(result, expected)
df = DataFrame()
result = df.count()
expected = Series(0, index=[])
tm.assert_series_equal(result, expected)
def test_count_objects(self, float_string_frame):
dm = DataFrame(float_string_frame._series)
df = DataFrame(float_string_frame._series)
tm.assert_series_equal(dm.count(), df.count())
tm.assert_series_equal(dm.count(1), df.count(1))
| bsd-3-clause |
rehrlich/roary_analysis | plot_fsgm_results.py | 1 | 4288 | # Author: Rachel Ehrlich
# This program takes as input an output directory (where it expects to find fsgm
# output with the same nickname) and a nickname for the analysis. It creates
# two plots of the data, the likelihood of various N values and the number of
# expected new genes per strain sequenced.
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import operator
import sys
# Input is a list of strings that are ints with possible white space
# Returns a list of ints
def strip_ints_list(data):
data = [x.strip() for x in data]
data = [int(x) for x in data if len(x) > 0]
return data
# input is the connand window file from the fsgm program
# returns the number of new, core and total genes for each number of genomes
def get_genes_per_genome_data(my_file):
in_core = False
core = list()
in_new_genes = False
new_genes = list()
in_total = False
total = list()
with open(my_file, 'rU') as f:
for line in f:
if line.startswith("core_stdv ="):
in_total = False
if in_total:
total.append(line)
if line.startswith("total ="):
in_new_genes = False
in_total = True
if in_new_genes:
new_genes.append(line)
if line.startswith("new ="):
in_core = False
in_new_genes = True
if in_core:
core.append(line)
if line.startswith("core ="):
in_core = True
return (core, new_genes, total)
# Input is a tuple with lists of counts for core, new and total genes, an
# output file name and the most likely value for n. Writes a pdf plot to the
# output file
def plot_genes_per_genome((core, new_genes, total), out_file, best_n=0):
num_genomes = range(1, len(new_genes) + 1)
with PdfPages(out_file) as pdf:
plt.plot(num_genomes, new_genes, 'ro', label='new')
plt.plot(num_genomes, total, 'bs', label='total')
plt.plot(num_genomes, core, 'g^', label='core')
if best_n > 0:
plt.axhline(best_n, color = 'k')
plt.annotate(s='N=' + str(best_n), xy=(5, best_n - 120))
plt.xlabel("Number of genomes")
plt.ylabel('Number of genes')
plt.title("Estimated size of distributed genome per strain sequenced")
plt.legend(loc=0, numpoints=1)
pdf.savefig()
plt.close()
# Input is the likelihood file from the fsgm program
# returns a list of n values and a lis of their likelihoods
def get_lik_data(my_file):
with open(my_file, 'rU') as f:
data = f.read()
split_data = [x.split('\t') for x in data.split('\n') if len(x) > 0]
n = [int(x[0]) for x in split_data]
lik = [float(x[1]) for x in split_data]
return n, lik
# Graphs the n against lik and saves the results to out_file
# Returns the most likely value of n
def plot_lik_vs_n(n, lik, out_file):
with PdfPages(out_file) as pdf:
plt.plot(n, lik, 'ko')
plt.xlabel("Number of genes")
plt.ylabel('Log likelihood')
max_index, max_lik = max(enumerate(lik), key=operator.itemgetter(1))
best_n = n[max_index]
label = '(' + str(best_n) + ' ,' + str(max_lik) + ')'
plt.annotate(s=label, xy=(best_n, max_lik),
xytext=(best_n + 500, max_lik - 40),
arrowprops=dict(facecolor='red', shrink=0.05))
plt.title("Number of genes in the pan genome")
pdf.savefig()
plt.close()
return best_n
def main():
out_dir = sys.argv[1]
nickname = sys.argv[2]
fsgm_lik_file = out_dir + "/N_vs_likelihood_" + nickname + ".txt"
fsgm_file = out_dir + "/CommandWindow_" + nickname + ".txt"
n, lik = get_lik_data(fsgm_lik_file)
out_file = out_dir + '/' + nickname + '_genes_in_pan_genome.pdf'
best_n = plot_lik_vs_n(n, lik, out_file)
genes_per_genome = get_genes_per_genome_data(fsgm_file)
genes_per_genome = [strip_ints_list(x) for x in genes_per_genome]
out_file = out_dir + '/' + nickname + '_new_genes_per_sequenced_genome.pdf'
plot_genes_per_genome(genes_per_genome, out_file, best_n)
if __name__ == "__main__":
main()
| gpl-2.0 |
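A small sketch of how plot_lik_vs_n could be exercised with synthetic data (the file name and values are made up; the script targets Python 2 and its real inputs come from the fsgm output files):
# assumes the functions above are in scope (e.g. run in the same module)
n = list(range(3000, 8000, 100))
lik = [-((x - 5000) / 1000.0) ** 2 for x in n]   # synthetic curve peaking at 5000
best_n = plot_lik_vs_n(n, lik, 'synthetic_pan_genome.pdf')
assert best_n == 5000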
dhuang/incubator-airflow | airflow/hooks/druid_hook.py | 1 | 5386 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import requests
import time
from pydruid.db import connect
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.hooks.dbapi_hook import DbApiHook
class DruidHook(BaseHook):
"""
Connection to Druid overlord for ingestion
:param druid_ingest_conn_id: The connection id to the Druid overlord machine
which accepts index jobs
:type druid_ingest_conn_id: string
:param timeout: The interval between polling
the Druid job for the status of the ingestion job
:type timeout: int
:param max_ingestion_time: The maximum ingestion time before assuming the job failed
:type max_ingestion_time: int
"""
def __init__(
self,
druid_ingest_conn_id='druid_ingest_default',
timeout=1,
max_ingestion_time=None):
self.druid_ingest_conn_id = druid_ingest_conn_id
self.timeout = timeout
self.max_ingestion_time = max_ingestion_time
self.header = {'content-type': 'application/json'}
def get_conn_url(self):
conn = self.get_connection(self.druid_ingest_conn_id)
host = conn.host
port = conn.port
schema = conn.extra_dejson.get('schema', 'http')
endpoint = conn.extra_dejson.get('endpoint', '')
return "http://{host}:{port}/{endpoint}".format(**locals())
def submit_indexing_job(self, json_index_spec):
url = self.get_conn_url()
req_index = requests.post(url, data=json_index_spec, headers=self.header)
if (req_index.status_code != 200):
raise AirflowException("Did not get 200 when submitting the Druid job to {}".format(url))
req_json = req_index.json()
# Wait until the job is completed
druid_task_id = req_json['task']
running = True
sec = 0
while running:
req_status = requests.get("{0}/{1}/status".format(url, druid_task_id))
self.log.info("Job still running for %s seconds...", sec)
sec = sec + 1
if self.max_ingestion_time and sec > self.max_ingestion_time:
# ensure that the job gets killed if the max ingestion time is exceeded
requests.post("{0}/{1}/shutdown".format(url, druid_task_id))
raise AirflowException('Druid ingestion took more than %s seconds', self.max_ingestion_time)
time.sleep(self.timeout)
status = req_status.json()['status']['status']
if status == 'RUNNING':
running = True
elif status == 'SUCCESS':
running = False # Great success!
elif status == 'FAILED':
raise AirflowException('Druid indexing job failed, check console for more info')
else:
raise AirflowException('Could not get status of the job, got %s', status)
self.log.info('Successful index')
class DruidDbApiHook(DbApiHook):
"""
Interact with Druid broker
This hook is purely for users to query druid broker.
For ingestion, please use druidHook.
"""
conn_name_attr = 'druid_broker_conn_id'
default_conn_name = 'druid_broker_default'
supports_autocommit = False
def __init__(self, *args, **kwargs):
super(DruidDbApiHook, self).__init__(*args, **kwargs)
def get_conn(self):
"""
Establish a connection to druid broker.
"""
conn = self.get_connection(self.druid_broker_conn_id)
druid_broker_conn = connect(
host=conn.host,
port=conn.port,
path=conn.extra_dejson.get('endpoint', '/druid/v2/sql'),
scheme=conn.extra_dejson.get('schema', 'http')
)
        self.log.info('Get the connection to druid broker on {host}'.format(host=conn.host))
return druid_broker_conn
def get_uri(self):
"""
Get the connection uri for druid broker.
e.g: druid://localhost:8082/druid/v2/sql/
"""
conn = self.get_connection(getattr(self, self.conn_name_attr))
host = conn.host
if conn.port is not None:
host += ':{port}'.format(port=conn.port)
conn_type = 'druid' if not conn.conn_type else conn.conn_type
endpoint = conn.extra_dejson.get('endpoint', 'druid/v2/sql')
return '{conn_type}://{host}/{endpoint}'.format(
conn_type=conn_type, host=host, endpoint=endpoint)
def set_autocommit(self, conn, autocommit):
raise NotImplementedError()
def get_pandas_df(self, sql, parameters=None):
raise NotImplementedError()
def insert_rows(self, table, rows, target_fields=None, commit_every=1000):
raise NotImplementedError()
| apache-2.0 |
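A hedged usage sketch of the two hooks above, assuming the module is importable (the connection ids, datasource name, and ingestion spec are placeholders; real values come from Airflow connections and a Druid index spec):
import json

spec = json.dumps({"type": "index_hadoop", "spec": {}})  # placeholder spec
hook = DruidHook(druid_ingest_conn_id='druid_ingest_default',
                 timeout=5, max_ingestion_time=3600)
hook.submit_indexing_job(spec)  # polls until SUCCESS/FAILED or times out

broker = DruidDbApiHook(druid_broker_conn_id='druid_broker_default')
rows = broker.get_records("SELECT COUNT(*) FROM my_datasource")  # SQL via pydruid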
corochann/chainerex | chainerex/utils/visualize/plot.py | 1 | 2950 | """
2017.11.25
Note: the original `plot_roc_auc_curve` method has been renamed to
`plot_roc_auc_curve_by_fpr_tpr`, and `plot_roc_auc_curve` is now
implemented as a more convenient wrapper function.
"""
import matplotlib.pyplot as plt
from sklearn import metrics
def plot_roc_auc_curve(filepath, label, prob, pos_label=1, title=None):
"""Plot ROC-AUC curve, and save in png file.
Ref: http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html#sphx-glr-auto-examples-model-selection-plot-roc-py
    Examples:
>>> import numpy as np
>>> test_label = np.array([1, 1, 0, 0])
>>> test_score = np.array([0.1, 0.4, 0.35, 0.8])
>>> plot_roc_auc_curve('roc.png', test_label, test_score)
Args:
filepath:
label:
prob:
title:
Returns:
"""
roc_auc = metrics.roc_auc_score(label, prob)
fpr, tpr, thresholds = metrics.roc_curve(label, prob, pos_label=pos_label)
plot_roc_auc_curve_by_fpr_tpr(filepath, fpr, tpr, roc_auc=roc_auc,
title=title)
return roc_auc, fpr, tpr, thresholds
def plot_roc_auc_curve_by_fpr_tpr(filepath, fpr, tpr, roc_auc=None,
title=None):
"""Plot ROC-AUC curve, and save in png file.
Ref: http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html#sphx-glr-auto-examples-model-selection-plot-roc-py
    Examples:
>>> import numpy as np
>>> from sklearn import metrics
>>> test_label = np.array([1, 1, 0, 0])
>>> test_score = np.array([0.1, 0.4, 0.35, 0.8])
    >>> roc_auc = metrics.roc_auc_score(test_label, test_score)
    >>> fpr, tpr, thresholds = metrics.roc_curve(test_label, test_score, pos_label=1)
>>> plot_roc_auc_curve_by_fpr_tpr('roc.png', fpr, tpr, roc_auc)
Args:
filepath (str):
fpr (numpy.ndarray):
tpr (numpy.ndarray):
roc_auc (float or None):
title (str or None):
"""
lw = 2
title = title or 'Receiver operating characteristic'
if roc_auc is not None:
label = 'ROC curve (area = {:0.2f})'.format(roc_auc)
else:
label = 'ROC curve'
plt.figure()
plt.plot(fpr, tpr, color='darkorange', lw=lw, label=label)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title(title)
plt.legend(loc="lower right")
plt.savefig(filepath)
if __name__ == '__main__':
import numpy as np
test_label = np.array([0, 0, 1, 1])
test_score = np.array([0.1, 0.4, 0.35, 0.8])
roc_auc = metrics.roc_auc_score(test_label, test_score)
fpr, tpr, thresholds = metrics.roc_curve(test_label, test_score, pos_label=1)
plot_roc_auc_curve_by_fpr_tpr('roc.png', fpr, tpr, roc_auc)
| mit |
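For reference, the AUC for the toy labels and scores in the __main__ block above works out to 0.75 (three of the four positive/negative score pairs are ordered correctly); a quick check, assuming scikit-learn is available:
import numpy as np
from sklearn import metrics

labels = np.array([0, 0, 1, 1])
scores = np.array([0.1, 0.4, 0.35, 0.8])
assert np.isclose(metrics.roc_auc_score(labels, scores), 0.75)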
EricssonResearch/scott-eu | simulation-ros/src/turtlebot2i/turtlebot2i_msdn/msdn/faster_rcnn/rpn_msr/generate.py | 6 | 4403 | # --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import numpy as np
import cv2
import matplotlib.pyplot as plt
from ..utils.blob import im_list_to_blob
from ..utils.timer import Timer
# TODO: make fast_rcnn irrelevant
# >>>> obsolete, because it depends on sth outside of this project
from ..fast_rcnn.config import cfg
# <<<< obsolete
def _vis_proposals(im, dets, thresh=0.5):
"""Draw detected bounding boxes."""
inds = np.where(dets[:, -1] >= thresh)[0]
if len(inds) == 0:
return
class_name = 'obj'
im = im[:, :, (2, 1, 0)]
fig, ax = plt.subplots(figsize=(12, 12))
ax.imshow(im, aspect='equal')
for i in inds:
bbox = dets[i, :4]
score = dets[i, -1]
ax.add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='red', linewidth=3.5)
)
ax.text(bbox[0], bbox[1] - 2,
'{:s} {:.3f}'.format(class_name, score),
bbox=dict(facecolor='blue', alpha=0.5),
fontsize=14, color='white')
ax.set_title(('{} detections with '
'p({} | box) >= {:.1f}').format(class_name, class_name,
thresh),
fontsize=14)
plt.axis('off')
plt.tight_layout()
plt.draw()
def _get_image_blob(im):
"""Converts an image into a network input.
Arguments:
im (ndarray): a color image in BGR order
Returns:
blob (ndarray): a data blob holding an image pyramid
im_scale_factors (list): list of image scales (relative to im) used
in the image pyramid
"""
im_orig = im.astype(np.float32, copy=True)
im_orig -= cfg.PIXEL_MEANS
processed_ims = []
assert len(cfg.TEST.SCALES_BASE) == 1
im_scale = cfg.TRAIN.SCALES_BASE[0]
im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)
im_info = np.hstack((im.shape[:2], im_scale))[np.newaxis, :]
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, im_info
def im_proposals(net, im):
"""Generate RPN proposals on a single image."""
blobs = {}
blobs['data'], blobs['im_info'] = _get_image_blob(im)
net.blobs['data'].reshape(*(blobs['data'].shape))
net.blobs['im_info'].reshape(*(blobs['im_info'].shape))
blobs_out = net.forward(
data=blobs['data'].astype(np.float32, copy=False),
im_info=blobs['im_info'].astype(np.float32, copy=False))
scale = blobs['im_info'][0, 2]
boxes = blobs_out['rois'][:, 1:].copy() / scale
scores = blobs_out['scores'].copy()
return boxes, scores
def imdb_proposals(net, imdb):
"""Generate RPN proposals on all images in an imdb."""
_t = Timer()
imdb_boxes = [[] for _ in xrange(imdb.num_images)]
for i in xrange(imdb.num_images):
im = cv2.imread(imdb.image_path_at(i))
_t.tic()
imdb_boxes[i], scores = im_proposals(net, im)
_t.toc()
print 'im_proposals: {:d}/{:d} {:.3f}s' \
.format(i + 1, imdb.num_images, _t.average_time)
if 0:
dets = np.hstack((imdb_boxes[i], scores))
# from IPython import embed; embed()
_vis_proposals(im, dets[:3, :], thresh=0.9)
plt.show()
return imdb_boxes
def imdb_proposals_det(net, imdb):
"""Generate RPN proposals on all images in an imdb."""
_t = Timer()
imdb_boxes = [[] for _ in xrange(imdb.num_images)]
for i in xrange(imdb.num_images):
im = cv2.imread(imdb.image_path_at(i))
_t.tic()
boxes, scores = im_proposals(net, im)
_t.toc()
print 'im_proposals: {:d}/{:d} {:.3f}s' \
.format(i + 1, imdb.num_images, _t.average_time)
dets = np.hstack((boxes, scores))
imdb_boxes[i] = dets
if 0:
# from IPython import embed; embed()
_vis_proposals(im, dets[:3, :], thresh=0.9)
plt.show()
return imdb_boxes
| apache-2.0 |
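A stripped-down sketch of the preprocessing performed by _get_image_blob above (the pixel means and scale are illustrative stand-ins for cfg.PIXEL_MEANS and cfg.TRAIN.SCALES_BASE[0]):
import numpy as np
import cv2

def simple_image_blob(im, pixel_means=(102.98, 115.95, 122.77), im_scale=1.0):
    """Mean-subtract, rescale, and wrap a BGR image as a 1 x H x W x C blob."""
    im = im.astype(np.float32, copy=True)
    im -= np.array(pixel_means, dtype=np.float32)      # per-channel mean removal
    im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale,
                    interpolation=cv2.INTER_LINEAR)
    im_info = np.hstack((im.shape[:2], im_scale))[np.newaxis, :]
    return im[np.newaxis, ...], im_info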
KevinHCChen/wireless-aoa | scripts/mlAOA.py | 1 | 4026 | import numpy as np
import matplotlib.pyplot as plt
import scipy
from scipy.ndimage.morphology import binary_dilation, binary_erosion
from matplotlib import colors
import sys
import itertools
from sklearn.cross_validation import LeavePOut
# from sklearn.svm import LinearSVC as SVM
from sklearn.svm import SVC as SVM
from sklearn.linear_model import LogisticRegression as LR
from sklearn.neighbors import KNeighborsClassifier as KNN
from MLP import MLPClassifier
from MLP import MLPRegressor
import math
# 96 inches (indoor)
# 28.5 feet (outdoor)
rx_num = 4
distance_between_rx = 0.088
frequency = 916e6
speedoflight = 3e8
wavelength = speedoflight/frequency
def angle2c(theta):
return 1j*np.sin(theta)+np.cos(theta)
def steeringVector(theta):
phase_diff = distance_between_rx*np.cos(theta)*2*np.pi/wavelength
return angle2c(np.array([rx*phase_diff for rx in range(rx_num)]))
def angleDiff(signal1, signal2):
return np.mod(np.angle(signal1)-np.angle(signal2)+np.pi, 2*np.pi)-np.pi
def angular_diff(samples):
return [angleDiff( samples[i+1], samples[i] ) for i in range(len(samples)-1) ]
def shift2degree(x):
return np.arccos(x)/np.pi*180.
def degree2shift(x):
return (np.cos(x/180.*np.pi))
def identity(x):
return x
plt.ion()
DataFolder = '../data/0620-outdoor/'
# DataFolder = '../data/0621-indoor/'
data = np.loadtxt(DataFolder+'ANGDIFF.txt',delimiter=',')
mlp = MLPClassifier(hidden_layer_sizes=(50, 50), activation='tanh', algorithm='adam', alpha=0.0001)
mlp_reg = MLPRegressor(hidden_layer_sizes=(200, 50, 50), activation='relu', verbose=True,
algorithm='adam', alpha=0.000, tol=1e-5, early_stopping=True)
# from sklearn.svm import SVC as SVM
svm = SVM(C=100)
# classifier = svm
classifier = mlp
classifier = mlp_reg
if False:
X = data[:,-3:]
Y = data[:,0].astype(int)
# Y = np.cos(Y/180.*np.pi)
# Y = degree2shift(Y)
ID = data[:,1]
f = identity
else:
Y = np.random.randint(800,1200, size=(300))/10.
X = np.vstack( [ angular_diff(steeringVector(theta)) for theta in Y/180.*np.pi ] )
X = X + np.random.randn(X.shape[0], X.shape[1])*5e-2
# Y = np.cos(Y/180.*np.pi)
ID = Y
f = identity
# Y = degree2shift(Y)
# f = shift2degree
# Start doing the standard thing
# X = np.hstack([X**p/math.factorial(p) for p in range(1,5)])
X = np.hstack([X**p for p in range(1,4)])
plt.clf()
plt.plot(f(Y), f(Y) ,'o-', alpha=0.3)
plt.plot(f(Y), f(classifier.fit(X,Y).predict(X)) ,'.')
# assert(0)
score = []
labels = []
max_run = 100
for train_index, test_index in LeavePOut(len(X), 1):
max_run -= 1
if max_run <1:
break
classifier.fit(X[train_index,:],Y[train_index])
score.append( classifier.score(X[test_index,:],Y[test_index]) )
# print classifier.score(X[test_index,:],Y[test_index]), Y[test_index], ID[test_index]
labels.append([classifier.predict(X[test_index,:]), Y[test_index]])
if score[-1] != 1:
print Y[test_index], classifier.predict(X[test_index,:]), ID[test_index]
from sklearn.metrics import confusion_matrix as cm
labels = np.hstack(labels)
error = (labels[0]-labels[1])
error_ml = np.abs(error)
# plt.plot(Y, Y ,'o-', alpha=0.3)
plt.plot(f(labels[1]), f(labels[0]),'.')
if False:
Y = (np.random.randint(300,1200, size=(1500)))/10.
X = np.vstack( [ angular_diff(steeringVector(theta)) for theta in Y/180.*np.pi ] )
X = np.hstack([X**p for p in range(1,4)])
Y = np.cos((180-Y)/180.*np.pi)
ID = Y
classifier.fit(X,Y)
X = data[:,-3:]
X = np.hstack([X**p for p in range(1,4)])
Y = data[:,0].astype(int)
Y = np.cos(Y/180.*np.pi)
ID = data[:,1]
plt.clf(); plt.plot(f(Y), f(classifier.predict(X)),'.')
labels = [f(Y), f(classifier.predict(X))]
error = (labels[0]-labels[1])
error_ml = np.abs(error)
print np.mean(error_ml)
| mit |
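A worked check of the steering-vector phase model used above (the 60-degree arrival angle is arbitrary): with d = 0.088 m and f = 916 MHz, each additional antenna adds a phase of d*cos(theta)*2*pi/lambda.
import numpy as np

d, f, c = 0.088, 916e6, 3e8
lam = c / f                                        # ~0.328 m
theta = np.deg2rad(60.0)
step = d * np.cos(theta) * 2 * np.pi / lam         # ~0.844 rad per element
sv = np.exp(1j * step * np.arange(4))              # equivalent to steeringVector(theta)
diffs = np.angle(sv[1:] * np.conj(sv[:-1]))        # ~[0.844, 0.844, 0.844]
print(np.round(diffs, 3))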
anurag313/scikit-learn | examples/svm/plot_custom_kernel.py | 171 | 1546 | """
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
def my_kernel(X, Y):
"""
We create a custom kernel:
(2 0)
k(X, Y) = X ( ) Y.T
(0 1)
"""
M = np.array([[2, 0], [0, 1.0]])
return np.dot(np.dot(X, M), Y.T)
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.title('3-Class classification using Support Vector Machine with custom'
' kernel')
plt.axis('tight')
plt.show()
| bsd-3-clause |
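A quick sanity check of the kernel definition, assuming my_kernel from the example above is in scope: it is just the Gram matrix X M Y^T, so it must agree with the plain matrix product (the small matrices here are made up).
import numpy as np

A = np.array([[1.0, 2.0], [3.0, 4.0]])
B = np.array([[5.0, 6.0]])
M = np.array([[2.0, 0.0], [0.0, 1.0]])
assert np.allclose(my_kernel(A, B), A.dot(M).dot(B.T))  # shape (2, 1)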
low-sky/colira | plots/galplots.py | 1 | 1206 | from astropy.table import Table
import matplotlib.pyplot as p
import numpy as np
from matplotlib import rc
#rc('text', usetex=True)
rc('font',family='serif')
def galscatter2d():
t = Table.read('brs.bygal2d.txt',format='ascii')
fig = p.figure(figsize=(4.5,4.5))
t['R21+'] = t['R21+']-t['R21']
t['R21-'] = t['R21']-t['R21-']
t['R32+'] = t['R32+']-t['R32']
t['R32-'] = t['R32']-t['R32-']
ax = p.subplot(111)
#ax.set_xlim(6e-4,1e-1)
ax.set_xlabel(r'$R_{21}$')
ax.set_ylabel(r'$R_{32}$')
marker = np.ones(len(t['Name'])).astype('str')
marker[:]='4'
p.errorbar(t['R21'],t['R32'],
xerr=[t['R21-'],t['R21+']],
yerr=[t['R32-'],t['R32+']],
linestyle='None',marker=marker,color='grey',
label=np.asarray(t['Name']))
p.legend()
# for ii,gal in enumerate(t['Name']):
# p.errorbar(t['R21'][ii],t['R32'][ii],
# xerr=[t['R21-'][ii],t['R21+'][ii]],
# yerr=[t['R32-'][ii],t['R32+'][ii]],
# linestyle='None',marker='o',color='grey',label=gal)
p.tight_layout()
    p.savefig('ratio_bygal2d.pdf', bbox_inches='tight')
p.close()
| gpl-2.0 |
musically-ut/statsmodels | statsmodels/sandbox/tsa/examples/ex_mle_garch.py | 31 | 10676 | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 12 01:01:50 2010
Author: josef-pktd
latest result
-------------
all are very close
garch0 has different parameterization of constant
ordering of parameters is different
seed 2780185
h.shape (2000,)
Optimization terminated successfully.
Current function value: 2093.813397
Iterations: 387
Function evaluations: 676
ggres.params [-0.6146253 0.1914537 0.01039355 0.78802188]
Optimization terminated successfully.
Current function value: 2093.972953
Iterations: 201
Function evaluations: 372
ggres0.params [-0.61537527 0.19635128 4.00706058]
Warning: Desired error not necessarily achieveddue to precision loss
Current function value: 2093.972953
Iterations: 51
Function evaluations: 551
Gradient evaluations: 110
ggres0.params [-0.61537855 0.19635265 4.00694669]
Optimization terminated successfully.
Current function value: 2093.751420
Iterations: 103
Function evaluations: 187
[ 0.78671519 0.19692222 0.61457171]
-2093.75141963
Final Estimate:
LLH: 2093.750 norm LLH: 2.093750
omega alpha1 beta1
0.7867438 0.1970437 0.6145467
long run variance comparison
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
R
>>> 0.7867438/(1- 0.1970437- 0.6145467)
4.1757097302897526
Garch (gjr) asymmetric, long-run var?
>>> 1/(1-0.6146253 - 0.1914537 - 0.01039355) * 0.78802188
4.2937548579245242
>>> 1/(1-0.6146253 - 0.1914537 + 0.01039355) * 0.78802188
3.8569053452140345
Garch0
>>> (1-0.61537855 - 0.19635265) * 4.00694669
0.7543830449902722
>>> errgjr4.var() #for different random seed
4.0924199964716106
todo: add code and verify, check for longer lagpolys
"""
from __future__ import print_function
import numpy as np
from numpy.testing import assert_almost_equal
import matplotlib.pyplot as plt
import numdifftools as ndt
import statsmodels.api as sm
from statsmodels.sandbox import tsa
from statsmodels.sandbox.tsa.garch import * # local import
nobs = 1000
examples = ['garch', 'rpyfit']
if 'garch' in examples:
err,h = generate_kindofgarch(nobs, [1.0, -0.95], [1.0, 0.1], mu=0.5)
plt.figure()
plt.subplot(211)
plt.plot(err)
plt.subplot(212)
plt.plot(h)
#plt.show()
seed = 3842774 #91234 #8837708
seed = np.random.randint(9999999)
print('seed', seed)
np.random.seed(seed)
ar1 = -0.9
err,h = generate_garch(nobs, [1.0, ar1], [1.0, 0.50], mu=0.0,scale=0.1)
# plt.figure()
# plt.subplot(211)
# plt.plot(err)
# plt.subplot(212)
# plt.plot(h)
# plt.figure()
# plt.subplot(211)
# plt.plot(err[-400:])
# plt.subplot(212)
# plt.plot(h[-400:])
#plt.show()
garchplot(err, h)
garchplot(err[-400:], h[-400:])
np.random.seed(seed)
errgjr,hgjr, etax = generate_gjrgarch(nobs, [1.0, ar1],
[[1,0],[0.5,0]], mu=0.0,scale=0.1)
garchplot(errgjr[:nobs], hgjr[:nobs], 'GJR-GARCH(1,1) Simulation - symmetric')
garchplot(errgjr[-400:nobs], hgjr[-400:nobs], 'GJR-GARCH(1,1) Simulation - symmetric')
np.random.seed(seed)
errgjr2,hgjr2, etax = generate_gjrgarch(nobs, [1.0, ar1],
[[1,0],[0.1,0.9]], mu=0.0,scale=0.1)
garchplot(errgjr2[:nobs], hgjr2[:nobs], 'GJR-GARCH(1,1) Simulation')
garchplot(errgjr2[-400:nobs], hgjr2[-400:nobs], 'GJR-GARCH(1,1) Simulation')
np.random.seed(seed)
errgjr3,hgjr3, etax3 = generate_gjrgarch(nobs, [1.0, ar1],
[[1,0],[0.1,0.9],[0.1,0.9],[0.1,0.9]], mu=0.0,scale=0.1)
garchplot(errgjr3[:nobs], hgjr3[:nobs], 'GJR-GARCH(1,3) Simulation')
garchplot(errgjr3[-400:nobs], hgjr3[-400:nobs], 'GJR-GARCH(1,3) Simulation')
np.random.seed(seed)
errgjr4,hgjr4, etax4 = generate_gjrgarch(nobs, [1.0, ar1],
[[1., 1,0],[0, 0.1,0.9],[0, 0.1,0.9],[0, 0.1,0.9]],
mu=0.0,scale=0.1)
garchplot(errgjr4[:nobs], hgjr4[:nobs], 'GJR-GARCH(1,3) Simulation')
garchplot(errgjr4[-400:nobs], hgjr4[-400:nobs], 'GJR-GARCH(1,3) Simulation')
varinno = np.zeros(100)
varinno[0] = 1.
errgjr5,hgjr5, etax5 = generate_gjrgarch(100, [1.0, -0.],
[[1., 1,0],[0, 0.1,0.8],[0, 0.05,0.7],[0, 0.01,0.6]],
mu=0.0,scale=0.1, varinnovation=varinno)
garchplot(errgjr5[:20], hgjr5[:20], 'GJR-GARCH(1,3) Simulation')
#garchplot(errgjr4[-400:nobs], hgjr4[-400:nobs], 'GJR-GARCH(1,3) Simulation')
#plt.show()
seed = np.random.randint(9999999) # 9188410
print('seed', seed)
x = np.arange(20).reshape(10,2)
x3 = np.column_stack((np.ones((x.shape[0],1)),x))
y, inp = miso_lfilter([1., 0],np.array([[-2.0,3,1],[0.0,0.0,0]]),x3)
nobs = 1000
warmup = 1000
np.random.seed(seed)
ar = [1.0, -0.7]#7, -0.16, -0.1]
#ma = [[1., 1, 0],[0, 0.6,0.1],[0, 0.1,0.1],[0, 0.1,0.1]]
ma = [[1., 0, 0],[0, 0.8,0.0]] #,[0, 0.9,0.0]]
# errgjr4,hgjr4, etax4 = generate_gjrgarch(warmup+nobs, [1.0, -0.99],
# [[1., 1, 0],[0, 0.6,0.1],[0, 0.1,0.1],[0, 0.1,0.1]],
# mu=0.2, scale=0.25)
errgjr4,hgjr4, etax4 = generate_gjrgarch(warmup+nobs, ar, ma,
mu=0.4, scale=1.01)
errgjr4,hgjr4, etax4 = errgjr4[warmup:], hgjr4[warmup:], etax4[warmup:]
garchplot(errgjr4[:nobs], hgjr4[:nobs], 'GJR-GARCH(1,3) Simulation - DGP')
ggmod = Garch(errgjr4-errgjr4.mean())#hgjr4[:nobs])#-hgjr4.mean()) #errgjr4)
ggmod.nar = 1
ggmod.nma = 1
ggmod._start_params = np.array([-0.6, 0.1, 0.2, 0.0])
ggres = ggmod.fit(start_params=np.array([-0.6, 0.1, 0.2, 0.0]), maxiter=1000)
print('ggres.params', ggres.params)
garchplot(ggmod.errorsest, ggmod.h, title='Garch estimated')
ggmod0 = Garch0(errgjr4-errgjr4.mean())#hgjr4[:nobs])#-hgjr4.mean()) #errgjr4)
ggmod0.nar = 1
ggmod.nma = 1
start_params = np.array([-0.6, 0.2, 0.1])
ggmod0._start_params = start_params #np.array([-0.6, 0.1, 0.2, 0.0])
ggres0 = ggmod0.fit(start_params=start_params, maxiter=2000)
print('ggres0.params', ggres0.params)
ggmod0 = Garch0(errgjr4-errgjr4.mean())#hgjr4[:nobs])#-hgjr4.mean()) #errgjr4)
ggmod0.nar = 1
ggmod.nma = 1
start_params = np.array([-0.6, 0.2, 0.1])
ggmod0._start_params = start_params #np.array([-0.6, 0.1, 0.2, 0.0])
ggres0 = ggmod0.fit(start_params=start_params, method='bfgs', maxiter=2000)
print('ggres0.params', ggres0.params)
g11res = optimize.fmin(lambda params: -loglike_GARCH11(params, errgjr4-errgjr4.mean())[0], [0.93, 0.9, 0.2])
print(g11res)
llf = loglike_GARCH11(g11res, errgjr4-errgjr4.mean())
print(llf[0])
if 'rpyfit' in examples:
from rpy import r
r.library('fGarch')
f = r.formula('~garch(1, 1)')
fit = r.garchFit(f, data = errgjr4-errgjr4.mean(), include_mean=False)
if 'rpysim' in examples:
from rpy import r
f = r.formula('~garch(1, 1)')
#fit = r.garchFit(f, data = errgjr4)
x = r.garchSim( n = 500)
print('R acf', tsa.acf(np.power(x,2))[:15])
arma3 = Arma(np.power(x,2))
arma3res = arma3.fit(start_params=[-0.2,0.1,0.5],maxiter=5000)
print(arma3res.params)
arma3b = Arma(np.power(x,2))
arma3bres = arma3b.fit(start_params=[-0.2,0.1,0.5],maxiter=5000, method='bfgs')
print(arma3bres.params)
xr = r.garchSim( n = 100)
x = np.asarray(xr)
ggmod = Garch(x-x.mean())
ggmod.nar = 1
ggmod.nma = 1
ggmod._start_params = np.array([-0.6, 0.1, 0.2, 0.0])
ggres = ggmod.fit(start_params=np.array([-0.6, 0.1, 0.2, 0.0]), maxiter=1000)
print('ggres.params', ggres.params)
g11res = optimize.fmin(lambda params: -loglike_GARCH11(params, x-x.mean())[0], [0.6, 0.6, 0.2])
print(g11res)
llf = loglike_GARCH11(g11res, x-x.mean())
print(llf[0])
garchplot(ggmod.errorsest, ggmod.h, title='Garch estimated')
fit = r.garchFit(f, data = x-x.mean(), include_mean=False, trace=False)
print(r.summary(fit))
'''based on R default simulation
model = list(omega = 1e-06, alpha = 0.1, beta = 0.8)
nobs = 1000
(with nobs=500, gjrgarch doesn't do well
>>> ggres = ggmod.fit(start_params=np.array([-0.6, 0.1, 0.2, 0.0]), maxiter=1000)
Optimization terminated successfully.
Current function value: -448.861335
Iterations: 385
Function evaluations: 690
>>> print('ggres.params', ggres.params)
ggres.params [ -7.75090330e-01 1.57714749e-01 -9.60223930e-02 8.76021411e-07]
rearranged
8.76021411e-07 1.57714749e-01(-9.60223930e-02) 7.75090330e-01
>>> print(g11res)
[ 2.97459808e-06 7.83128600e-01 2.41110860e-01]
>>> llf = loglike_GARCH11(g11res, x-x.mean())
>>> print(llf[0])
442.603541936
Log Likelihood:
-448.9376 normalized: -4.489376
omega alpha1 beta1
1.01632e-06 1.02802e-01 7.57537e-01
'''
''' the following is for errgjr4-errgjr4.mean()
ggres.params [-0.54510407 0.22723132 0.06482633 0.82325803]
Final Estimate:
LLH: 2065.56 norm LLH: 2.06556
mu omega alpha1 beta1
0.07229732 0.83069480 0.26313883 0.53986167
ggres.params [-0.50779163 0.2236606 0.00700036 1.154832
Final Estimate:
LLH: 2116.084 norm LLH: 2.116084
mu omega alpha1 beta1
-4.759227e-17 1.145404e+00 2.288348e-01 5.085949e-01
run3
DGP
0.4/?? 0.8 0.7
gjrgarch:
ggres.params [-0.45196579 0.2569641 0.02201904 1.11942636]
rearranged
const/omega ma1/alpha1 ar1/beta1
1.11942636 0.2569641(+0.02201904) 0.45196579
g11:
[ 1.10262688 0.26680468 0.45724957]
-2055.73912687
R:
Final Estimate:
LLH: 2055.738 norm LLH: 2.055738
mu omega alpha1 beta1
-1.665226e-17 1.102396e+00 2.668712e-01 4.573224e-01
fit = r.garchFit(f, data = errgjr4-errgjr4.mean())
rpy.RPy_RException: Error in solve.default(fit$hessian) :
Lapack routine dgesv: system is exactly singular
run4
DGP:
mu=0.4, scale=1.01
ma = [[1., 0, 0],[0, 0.8,0.0]], ar = [1.0, -0.7]
maybe something wrong with simulation
gjrgarch
ggres.params [-0.50554663 0.24449867 -0.00521004 1.00796791]
rearranged
1.00796791 0.24449867(-0.00521004) 0.50554663
garch11:
[ 1.01258264 0.24149155 0.50479994]
-2056.3877404
R include_constant=False
Final Estimate:
LLH: 2056.397 norm LLH: 2.056397
omega alpha1 beta1
1.0123560 0.2409589 0.5049154
'''
erro,ho, etaxo = generate_gjrgarch(20, ar, ma, mu=0.04, scale=0.01,
varinnovation = np.ones(20))
if 'sp500' in examples:
import tabular as tb
import scikits.timeseries as ts
a = tb.loadSV(r'C:\Josef\work-oth\gspc_table.csv')
s = ts.time_series(a[0]['Close'][::-1],
dates=ts.date_array(a[0]['Date'][::-1],freq="D"))
sp500 = a[0]['Close'][::-1]
sp500r = np.diff(np.log(sp500))
#plt.show()
| bsd-3-clause |
rjeli/scikit-image | skimage/external/tifffile/tifffile.py | 7 | 219462 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# tifffile.py
# Copyright (c) 2008-2016, Christoph Gohlke
# Copyright (c) 2008-2016, The Regents of the University of California
# Produced at the Laboratory for Fluorescence Dynamics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Read image and meta data from (bio)TIFF files. Save numpy arrays as TIFF.
Image and metadata can be read from TIFF, BigTIFF, OME-TIFF, STK, LSM, NIH,
SGI, ImageJ, MicroManager, FluoView, SEQ and GEL files.
Only a subset of the TIFF specification is supported, mainly uncompressed
and losslessly compressed 2**(0 to 6) bit integer, 16, 32 and 64-bit float,
grayscale and RGB(A) images, which are commonly used in bio-scientific imaging.
Specifically, reading JPEG and CCITT compressed image data, chroma subsampling,
or EXIF, IPTC, GPS, and XMP metadata is not implemented. Only primary info
records are read for STK, FluoView, MicroManager, and NIH Image formats.
TIFF, the Tagged Image File Format aka Thousands of Incompatible File Formats,
is under the control of Adobe Systems. BigTIFF allows for files greater than
4 GB. STK, LSM, FluoView, SGI, SEQ, GEL, and OME-TIFF, are custom extensions
defined by Molecular Devices (Universal Imaging Corporation), Carl Zeiss
MicroImaging, Olympus, Silicon Graphics International, Media Cybernetics,
Molecular Dynamics, and the Open Microscopy Environment consortium
respectively.
For command line usage run `python tifffile.py --help`
:Author:
`Christoph Gohlke <http://www.lfd.uci.edu/~gohlke/>`_
:Organization:
Laboratory for Fluorescence Dynamics, University of California, Irvine
:Version: 2016.04.18
Requirements
------------
* `CPython 2.7 or 3.5 <http://www.python.org>`_ (64 bit recommended)
* `Numpy 1.10 <http://www.numpy.org>`_
* `Matplotlib 1.5 <http://www.matplotlib.org>`_ (optional for plotting)
* `Tifffile.c 2016.04.13 <http://www.lfd.uci.edu/~gohlke/>`_
(recommended for faster decoding of PackBits and LZW encoded strings)
Revisions
---------
2016.04.18
Pass 1932 tests.
TiffWriter, imread, and imsave accept open binary file streams.
2016.04.13
Correctly handle reversed fill order in 2 and 4 bps images (bug fix).
Implement reverse_bitorder in C.
2016.03.18
Fixed saving additional ImageJ metadata.
2016.02.22
Pass 1920 tests.
Write 8 bytes double tag values using offset if necessary (bug fix).
Add option to disable writing second image description tag.
Detect tags with incorrect counts.
Disable color mapping for LSM.
2015.11.13
Read LSM 6 mosaics.
Add option to specify directory of memory-mapped files.
Add command line options to specify vmin and vmax values for colormapping.
2015.10.06
New helper function to apply colormaps.
Renamed is_palette attributes to is_indexed (backwards incompatible).
Color-mapped samples are now contiguous (backwards incompatible).
Do not color-map ImageJ hyperstacks (backwards incompatible).
Towards supporting Leica SCN.
2015.09.25
Read images with reversed bit order (fill_order is lsb2msb).
2015.09.21
Read RGB OME-TIFF.
Warn about malformed OME-XML.
2015.09.16
Detect some corrupted ImageJ metadata.
Better axes labels for 'shaped' files.
Do not create TiffTags for default values.
Chroma subsampling is not supported.
Memory-map data in TiffPageSeries if possible (optional).
2015.08.17
Pass 1906 tests.
Write ImageJ hyperstacks (optional).
Read and write LZMA compressed data.
Specify datetime when saving (optional).
Save tiled and color-mapped images (optional).
Ignore void byte_counts and offsets if possible.
Ignore bogus image_depth tag created by ISS Vista software.
Decode floating point horizontal differencing (not tiled).
Save image data contiguously if possible.
Only read first IFD from ImageJ files if possible.
Read ImageJ 'raw' format (files larger than 4 GB).
TiffPageSeries class for pages with compatible shape and data type.
Try to read incomplete tiles.
Open file dialog if no filename is passed on command line.
Ignore errors when decoding OME-XML.
Rename decoder functions (backwards incompatible)
2014.08.24
TiffWriter class for incremental writing images.
Simplified examples.
2014.08.19
Add memmap function to FileHandle.
Add function to determine if image data in TiffPage is memory-mappable.
Do not close files if multifile_close parameter is False.
2014.08.10
Pass 1730 tests.
Return all extrasamples by default (backwards incompatible).
Read data from series of pages into memory-mapped array (optional).
Squeeze OME dimensions (backwards incompatible).
Workaround missing EOI code in strips.
Support image and tile depth tags (SGI extension).
Better handling of STK/UIC tags (backwards incompatible).
Disable color mapping for STK.
Julian to datetime converter.
TIFF ASCII type may be NULL separated.
Unwrap strip offsets for LSM files greater than 4 GB.
Correct strip byte counts in compressed LSM files.
Skip missing files in OME series.
Read embedded TIFF files.
2014.02.05
Save rational numbers as type 5 (bug fix).
2013.12.20
Keep other files in OME multi-file series closed.
FileHandle class to abstract binary file handle.
Disable color mapping for bad OME-TIFF produced by bio-formats.
Read bad OME-XML produced by ImageJ when cropping.
2013.11.03
Allow zlib compress data in imsave function (optional).
Memory-map contiguous image data (optional).
2013.10.28
Read MicroManager metadata and little endian ImageJ tag.
Save extra tags in imsave function.
Save tags in ascending order by code (bug fix).
2012.10.18
Accept file like objects (read from OIB files).
2012.08.21
Rename TIFFfile to TiffFile and TIFFpage to TiffPage.
TiffSequence class for reading sequence of TIFF files.
Read UltraQuant tags.
Allow float numbers as resolution in imsave function.
2012.08.03
Read MD GEL tags and NIH Image header.
2012.07.25
Read ImageJ tags.
...
Notes
-----
The API is not stable yet and might change between revisions.
Tested on little-endian platforms only.
Other Python packages and modules for reading bio-scientific TIFF files:
* `Imread <https://github.com/luispedro/imread>`_
* `PyLibTiff <https://github.com/pearu/pylibtiff>`_
* `SimpleITK <http://www.simpleitk.org>`_
* `PyLSM <https://launchpad.net/pylsm>`_
* `PyMca.TiffIO.py <https://github.com/vasole/pymca>`_ (same as fabio.TiffIO)
* `BioImageXD.Readers <http://www.bioimagexd.net/>`_
* `Cellcognition.io <http://cellcognition.org/>`_
* `CellProfiler.bioformats
<https://github.com/CellProfiler/python-bioformats>`_
Acknowledgements
----------------
* Egor Zindy, University of Manchester, for cz_lsm_scan_info specifics.
* Wim Lewis for a bug fix and some read_cz_lsm functions.
* Hadrien Mary for help on reading MicroManager files.
* Christian Kliche for help writing tiled and color-mapped files.
References
----------
(1) TIFF 6.0 Specification and Supplements. Adobe Systems Incorporated.
http://partners.adobe.com/public/developer/tiff/
(2) TIFF File Format FAQ. http://www.awaresystems.be/imaging/tiff/faq.html
(3) MetaMorph Stack (STK) Image File Format.
http://support.meta.moleculardevices.com/docs/t10243.pdf
(4) Image File Format Description LSM 5/7 Release 6.0 (ZEN 2010).
Carl Zeiss MicroImaging GmbH. BioSciences. May 10, 2011
(5) File Format Description - LSM 5xx Release 2.0.
http://ibb.gsf.de/homepage/karsten.rodenacker/IDL/Lsmfile.doc
(6) The OME-TIFF format.
http://www.openmicroscopy.org/site/support/file-formats/ome-tiff
(7) UltraQuant(r) Version 6.0 for Windows Start-Up Guide.
http://www.ultralum.com/images%20ultralum/pdf/UQStart%20Up%20Guide.pdf
(8) Micro-Manager File Formats.
http://www.micro-manager.org/wiki/Micro-Manager_File_Formats
(9) Tags for TIFF and Related Specifications. Digital Preservation.
http://www.digitalpreservation.gov/formats/content/tiff_tags.shtml
Examples
--------
>>> data = numpy.random.rand(5, 301, 219)
>>> imsave('temp.tif', data)
>>> image = imread('temp.tif')
>>> numpy.testing.assert_array_equal(image, data)
>>> with TiffFile('temp.tif') as tif:
... images = tif.asarray()
... for page in tif:
... for tag in page.tags.values():
... t = tag.name, tag.value
... image = page.asarray()
"""
from __future__ import division, print_function
import sys
import os
import re
import glob
import math
import zlib
import time
import json
import struct
import warnings
import tempfile
import datetime
import collections
from fractions import Fraction
from xml.etree import cElementTree as etree
import numpy
try:
import lzma
except ImportError:
try:
import backports.lzma as lzma
except ImportError:
lzma = None
try:
if __package__:
from . import _tifffile
else:
import _tifffile
except ImportError:
warnings.warn(
"failed to import the optional _tifffile C extension module.\n"
"Loading of some compressed images will be very slow.\n"
"Tifffile.c can be obtained at http://www.lfd.uci.edu/~gohlke/")
__version__ = '2016.04.18'
__docformat__ = 'restructuredtext en'
__all__ = (
'imsave', 'imread', 'imshow', 'TiffFile', 'TiffWriter', 'TiffSequence',
# utility functions used in oiffile and czifile
'FileHandle', 'lazyattr', 'natural_sorted', 'decode_lzw', 'stripnull')
def imsave(file, data, **kwargs):
"""Write image data to TIFF file.
Refer to the TiffWriter class and member functions for documentation.
Parameters
----------
file : str or binary stream
File name or writable binary stream, such as a open file or BytesIO.
data : array_like
Input image. The last dimensions are assumed to be image depth,
height, width, and samples.
kwargs : dict
Parameters 'byteorder', 'bigtiff', 'software', and 'imagej', are passed
to the TiffWriter class.
Parameters 'photometric', 'planarconfig', 'resolution', 'compress',
'colormap', 'tile', 'description', 'datetime', 'metadata', 'contiguous'
and 'extratags' are passed to the TiffWriter.save function.
Examples
--------
>>> data = numpy.random.rand(2, 5, 3, 301, 219)
>>> imsave('temp.tif', data, compress=6, metadata={'axes': 'TZCYX'})
"""
tifargs = {}
for key in ('byteorder', 'bigtiff', 'software', 'imagej'):
if key in kwargs:
tifargs[key] = kwargs[key]
del kwargs[key]
if 'bigtiff' not in tifargs and 'imagej' not in tifargs and (
data.size*data.dtype.itemsize > 2000*2**20):
tifargs['bigtiff'] = True
with TiffWriter(file, **tifargs) as tif:
tif.save(data, **kwargs)
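# A small usage sketch (an illustrative addition, not part of the public API):
# it only exercises parameters documented above; the file name is arbitrary.
def _imsave_example(path='temp_example.tif'):
    """Write a zlib-compressed float32 stack with resolution and axes metadata."""
    data = numpy.random.rand(4, 64, 64).astype('float32')
    # 'bigtiff'/'byteorder' would be routed to TiffWriter; the rest go to save()
    imsave(path, data, compress=6, resolution=(300.0, 300.0),
           metadata={'axes': 'ZYX'})
    return imread(path)  # round-trip read back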
class TiffWriter(object):
"""Write image data to TIFF file.
TiffWriter instances must be closed using the 'close' method, which is
automatically called when using the 'with' statement.
Examples
--------
>>> data = numpy.random.rand(2, 5, 3, 301, 219)
>>> with TiffWriter('temp.tif', bigtiff=True) as tif:
... for i in range(data.shape[0]):
... tif.save(data[i], compress=6)
"""
TYPES = {'B': 1, 's': 2, 'H': 3, 'I': 4, '2I': 5, 'b': 6,
'h': 8, 'i': 9, 'f': 11, 'd': 12, 'Q': 16, 'q': 17}
TAGS = {
'new_subfile_type': 254, 'subfile_type': 255,
'image_width': 256, 'image_length': 257, 'bits_per_sample': 258,
'compression': 259, 'photometric': 262, 'document_name': 269,
'image_description': 270, 'strip_offsets': 273, 'orientation': 274,
'samples_per_pixel': 277, 'rows_per_strip': 278,
'strip_byte_counts': 279, 'x_resolution': 282, 'y_resolution': 283,
'planar_configuration': 284, 'page_name': 285, 'resolution_unit': 296,
'software': 305, 'datetime': 306, 'predictor': 317, 'color_map': 320,
'tile_width': 322, 'tile_length': 323, 'tile_offsets': 324,
'tile_byte_counts': 325, 'extra_samples': 338, 'sample_format': 339,
'smin_sample_value': 340, 'smax_sample_value': 341,
'image_depth': 32997, 'tile_depth': 32998}
def __init__(self, file, bigtiff=False, byteorder=None,
software='tifffile.py', imagej=False):
"""Open a TIFF file for writing.
Use bigtiff=True when creating files larger than 2 GB.
Parameters
----------
file : str, binary stream, or FileHandle
File name or writable binary stream, such as a open file
or BytesIO.
bigtiff : bool
If True, the BigTIFF format is used.
byteorder : {'<', '>'}
The endianness of the data in the file.
By default this is the system's native byte order.
software : str
Name of the software used to create the file.
Saved with the first page in the file only.
imagej : bool
If True, write an ImageJ hyperstack compatible file.
This format can handle data types uint8, uint16, or float32 and
data shapes up to 6 dimensions in TZCYXS order.
RGB images (S=3 or S=4) must be uint8.
ImageJ's default byte order is big endian but this implementation
uses the system's native byte order by default.
ImageJ does not support BigTIFF format or LZMA compression.
The ImageJ file format is undocumented.
"""
if byteorder not in (None, '<', '>'):
raise ValueError("invalid byteorder %s" % byteorder)
if byteorder is None:
byteorder = '<' if sys.byteorder == 'little' else '>'
if imagej and bigtiff:
warnings.warn("writing incompatible bigtiff ImageJ")
self._byteorder = byteorder
self._software = software
self._imagej = bool(imagej)
self._metadata = None
self._colormap = None
self._description_offset = 0
self._description_len_offset = 0
self._description_len = 0
self._tags = None
self._shape = None # normalized shape of data in consecutive pages
self._data_shape = None # shape of data in consecutive pages
self._data_dtype = None # data type
self._data_offset = None # offset to data
self._data_byte_counts = None # byte counts per plane
self._tag_offsets = None # strip or tile offset tag code
self._fh = FileHandle(file, mode='wb', size=0)
self._fh.write({'<': b'II', '>': b'MM'}[byteorder])
if bigtiff:
self._bigtiff = True
self._offset_size = 8
self._tag_size = 20
self._numtag_format = 'Q'
self._offset_format = 'Q'
self._value_format = '8s'
self._fh.write(struct.pack(byteorder+'HHH', 43, 8, 0))
else:
self._bigtiff = False
self._offset_size = 4
self._tag_size = 12
self._numtag_format = 'H'
self._offset_format = 'I'
self._value_format = '4s'
self._fh.write(struct.pack(byteorder+'H', 42))
# first IFD
self._ifd_offset = self._fh.tell()
self._fh.write(struct.pack(byteorder+self._offset_format, 0))
def save(self, data, photometric=None, planarconfig=None, resolution=None,
compress=0, colormap=None, tile=None, datetime=None,
description=None, metadata={}, contiguous=True, extratags=()):
"""Write image data and tags to TIFF file.
Image data are written in one stripe per plane by default.
Dimensions larger than 2 to 4 (depending on photometric mode, planar
configuration, and SGI mode) are flattened and saved as separate pages.
The 'sample_format' and 'bits_per_sample' tags are derived from
the data type.
Parameters
----------
data : numpy.ndarray
Input image. The last dimensions are assumed to be image depth,
height (length), width, and samples.
If a colormap is provided, the dtype must be uint8 or uint16 and
the data values are indices into the last dimension of the
colormap.
photometric : {'minisblack', 'miniswhite', 'rgb', 'palette'}
The color space of the image data.
By default this setting is inferred from the data shape and the
value of colormap.
planarconfig : {'contig', 'planar'}
Specifies if samples are stored contiguous or in separate planes.
By default this setting is inferred from the data shape.
'contig': last dimension contains samples.
'planar': third last dimension contains samples.
resolution : (float, float) or ((int, int), (int, int))
X and Y resolution in dots per inch as float or rational numbers.
compress : int or 'lzma'
Values from 0 to 9 controlling the level of zlib compression.
If 0, data are written uncompressed (default).
Compression cannot be used to write contiguous files.
If 'lzma', LZMA compression is used, which is not available on
all platforms.
colormap : numpy.ndarray
RGB color values for the corresponding data value.
Must be of shape (3, 2**(data.itemsize*8)) and dtype uint16.
tile : tuple of int
The shape (depth, length, width) of image tiles to write.
If None (default), image data are written in one stripe per plane.
The tile length and width must be a multiple of 16.
If the tile depth is provided, the SGI image_depth and tile_depth
            tags are used to save volume data. Few software packages can read the
SGI format, e.g. MeVisLab.
datetime : datetime
Date and time of image creation. Saved with the first page only.
If None (default), the current date and time is used.
description : str
The subject of the image. Saved with the first page only.
Cannot be used with the ImageJ format.
metadata : dict
Additional meta data to be saved along with shape information
in JSON or ImageJ formats in an image_description tag.
If None, do not write second image_description tag.
contiguous : bool
If True (default) and the data and parameters are compatible with
previous ones, if any, the data are stored contiguously after
the previous one. Parameters 'photometric' and 'planarconfig' are
ignored.
extratags : sequence of tuples
Additional tags as [(code, dtype, count, value, writeonce)].
code : int
The TIFF tag Id.
dtype : str
Data type of items in 'value' in Python struct format.
One of B, s, H, I, 2I, b, h, i, f, d, Q, or q.
count : int
Number of data values. Not used for string values.
value : sequence
'Count' values compatible with 'dtype'.
writeonce : bool
If True, the tag is written to the first page only.
"""
# TODO: refactor this function
fh = self._fh
byteorder = self._byteorder
numtag_format = self._numtag_format
value_format = self._value_format
offset_format = self._offset_format
offset_size = self._offset_size
tag_size = self._tag_size
data = numpy.asarray(data, dtype=byteorder+data.dtype.char, order='C')
# just append contiguous data if possible
if self._data_shape:
if (not contiguous or
self._data_shape[1:] != data.shape or
self._data_dtype != data.dtype or
(compress and self._tags) or
tile or
not numpy.array_equal(colormap, self._colormap)):
# incompatible shape, dtype, compression mode, or colormap
self._write_remaining_pages()
self._write_image_description()
self._description_offset = 0
self._description_len_offset = 0
self._data_shape = None
self._colormap = None
if self._imagej:
raise ValueError(
"ImageJ does not support non-contiguous data")
else:
# consecutive mode
self._data_shape = (self._data_shape[0] + 1,) + data.shape
if not compress:
# write contiguous data, write ifds/tags later
fh.write_array(data)
return
if photometric not in (None, 'minisblack', 'miniswhite',
'rgb', 'palette'):
raise ValueError("invalid photometric %s" % photometric)
if planarconfig not in (None, 'contig', 'planar'):
raise ValueError("invalid planarconfig %s" % planarconfig)
# prepare compression
if not compress:
compress = False
compress_tag = 1
elif compress == 'lzma':
compress = lzma.compress
compress_tag = 34925
if self._imagej:
raise ValueError("ImageJ can not handle LZMA compression")
elif not 0 <= compress <= 9:
raise ValueError("invalid compression level %s" % compress)
elif compress:
def compress(data, level=compress):
return zlib.compress(data, level)
compress_tag = 32946
# prepare ImageJ format
if self._imagej:
if description:
warnings.warn("not writing description to ImageJ file")
description = None
volume = False
if data.dtype.char not in 'BHhf':
raise ValueError("ImageJ does not support data type '%s'"
% data.dtype.char)
ijrgb = photometric == 'rgb' if photometric else None
if data.dtype.char not in 'B':
ijrgb = False
ijshape = imagej_shape(data.shape, ijrgb)
if ijshape[-1] in (3, 4):
photometric = 'rgb'
if data.dtype.char not in 'B':
raise ValueError("ImageJ does not support data type '%s' "
"for RGB" % data.dtype.char)
elif photometric is None:
photometric = 'minisblack'
planarconfig = None
if planarconfig == 'planar':
raise ValueError("ImageJ does not support planar images")
else:
planarconfig = 'contig' if ijrgb else None
# verify colormap and indices
if colormap is not None:
if data.dtype.char not in 'BH':
raise ValueError("invalid data dtype for palette mode")
colormap = numpy.asarray(colormap, dtype=byteorder+'H')
if colormap.shape != (3, 2**(data.itemsize * 8)):
raise ValueError("invalid color map shape")
self._colormap = colormap
# verify tile shape
if tile:
tile = tuple(int(i) for i in tile[:3])
volume = len(tile) == 3
if (len(tile) < 2 or tile[-1] % 16 or tile[-2] % 16 or
any(i < 1 for i in tile)):
raise ValueError("invalid tile shape")
else:
tile = ()
volume = False
# normalize data shape to 5D or 6D, depending on volume:
# (pages, planar_samples, [depth,] height, width, contig_samples)
data_shape = shape = data.shape
data = numpy.atleast_2d(data)
samplesperpixel = 1
extrasamples = 0
if volume and data.ndim < 3:
volume = False
if colormap is not None:
photometric = 'palette'
planarconfig = None
if photometric is None:
if planarconfig:
photometric = 'rgb'
elif data.ndim > 2 and shape[-1] in (3, 4):
photometric = 'rgb'
elif self._imagej:
photometric = 'minisblack'
elif volume and data.ndim > 3 and shape[-4] in (3, 4):
photometric = 'rgb'
elif data.ndim > 2 and shape[-3] in (3, 4):
photometric = 'rgb'
else:
photometric = 'minisblack'
if planarconfig and len(shape) <= (3 if volume else 2):
planarconfig = None
photometric = 'minisblack'
if photometric == 'rgb':
if len(shape) < 3:
raise ValueError("not a RGB(A) image")
if len(shape) < 4:
volume = False
if planarconfig is None:
if shape[-1] in (3, 4):
planarconfig = 'contig'
elif shape[-4 if volume else -3] in (3, 4):
planarconfig = 'planar'
elif shape[-1] > shape[-4 if volume else -3]:
planarconfig = 'planar'
else:
planarconfig = 'contig'
if planarconfig == 'contig':
data = data.reshape((-1, 1) + shape[(-4 if volume else -3):])
samplesperpixel = data.shape[-1]
else:
data = data.reshape(
(-1,) + shape[(-4 if volume else -3):] + (1,))
samplesperpixel = data.shape[1]
if samplesperpixel > 3:
extrasamples = samplesperpixel - 3
elif planarconfig and len(shape) > (3 if volume else 2):
if planarconfig == 'contig':
data = data.reshape((-1, 1) + shape[(-4 if volume else -3):])
samplesperpixel = data.shape[-1]
else:
data = data.reshape(
(-1,) + shape[(-4 if volume else -3):] + (1,))
samplesperpixel = data.shape[1]
extrasamples = samplesperpixel - 1
else:
planarconfig = None
# remove trailing 1s
while len(shape) > 2 and shape[-1] == 1:
shape = shape[:-1]
if len(shape) < 3:
volume = False
if False and (
photometric != 'palette' and
len(shape) > (3 if volume else 2) and shape[-1] < 5 and
all(shape[-1] < i
for i in shape[(-4 if volume else -3):-1])):
# DISABLED: non-standard TIFF, e.g. (220, 320, 2)
planarconfig = 'contig'
samplesperpixel = shape[-1]
data = data.reshape((-1, 1) + shape[(-4 if volume else -3):])
else:
data = data.reshape(
(-1, 1) + shape[(-3 if volume else -2):] + (1,))
# normalize shape to 6D
assert len(data.shape) in (5, 6)
if len(data.shape) == 5:
data = data.reshape(data.shape[:2] + (1,) + data.shape[2:])
shape = data.shape
if tile and not volume:
tile = (1, tile[-2], tile[-1])
if photometric == 'palette':
if (samplesperpixel != 1 or extrasamples or
shape[1] != 1 or shape[-1] != 1):
raise ValueError("invalid data shape for palette mode")
if samplesperpixel == 2:
warnings.warn("writing non-standard TIFF (samplesperpixel 2)")
bytestr = bytes if sys.version[0] == '2' else (
lambda x: bytes(x, 'utf-8') if isinstance(x, str) else x)
tags = [] # list of (code, ifdentry, ifdvalue, writeonce)
strip_or_tile = 'tile' if tile else 'strip'
tag_byte_counts = TiffWriter.TAGS[strip_or_tile + '_byte_counts']
tag_offsets = TiffWriter.TAGS[strip_or_tile + '_offsets']
self._tag_offsets = tag_offsets
def pack(fmt, *val):
return struct.pack(byteorder+fmt, *val)
def addtag(code, dtype, count, value, writeonce=False):
# Compute ifdentry & ifdvalue bytes from code, dtype, count, value
# Append (code, ifdentry, ifdvalue, writeonce) to tags list
code = int(TiffWriter.TAGS.get(code, code))
try:
tifftype = TiffWriter.TYPES[dtype]
except KeyError:
raise ValueError("unknown dtype %s" % dtype)
rawcount = count
if dtype == 's':
value = bytestr(value) + b'\0'
count = rawcount = len(value)
rawcount = value.find(b'\0\0')
if rawcount < 0:
rawcount = count
else:
rawcount += 1 # length of string without buffer
value = (value,)
if len(dtype) > 1:
count *= int(dtype[:-1])
dtype = dtype[-1]
ifdentry = [pack('HH', code, tifftype),
pack(offset_format, rawcount)]
ifdvalue = None
if struct.calcsize(dtype) * count <= offset_size:
# value(s) can be written directly
if count == 1:
if isinstance(value, (tuple, list, numpy.ndarray)):
value = value[0]
ifdentry.append(pack(value_format, pack(dtype, value)))
else:
ifdentry.append(pack(value_format,
pack(str(count)+dtype, *value)))
else:
# use offset to value(s)
ifdentry.append(pack(offset_format, 0))
if isinstance(value, numpy.ndarray):
assert value.size == count
assert value.dtype.char == dtype
ifdvalue = value.tobytes()
elif isinstance(value, (tuple, list)):
ifdvalue = pack(str(count)+dtype, *value)
else:
ifdvalue = pack(dtype, value)
tags.append((code, b''.join(ifdentry), ifdvalue, writeonce))
def rational(arg, max_denominator=1000000):
# return nominator and denominator from float or two integers
try:
f = Fraction.from_float(arg)
except TypeError:
f = Fraction(arg[0], arg[1])
f = f.limit_denominator(max_denominator)
return f.numerator, f.denominator
if description:
# user provided description
addtag('image_description', 's', 0, description, writeonce=True)
# write shape and metadata to image_description
self._metadata = {} if not metadata else metadata
if self._imagej:
description = imagej_description(
data_shape, shape[-1] in (3, 4), self._colormap is not None,
**self._metadata)
elif metadata or metadata == {}:
description = image_description(
data_shape, self._colormap is not None, **self._metadata)
else:
description = None
if description:
# add 32 bytes buffer
# the image description might be updated later with the final shape
description += b'\0'*32
self._description_len = len(description)
addtag('image_description', 's', 0, description, writeonce=True)
if self._software:
addtag('software', 's', 0, self._software, writeonce=True)
self._software = None # only save to first page in file
if datetime is None:
datetime = self._now()
addtag('datetime', 's', 0, datetime.strftime("%Y:%m:%d %H:%M:%S"),
writeonce=True)
addtag('compression', 'H', 1, compress_tag)
addtag('image_width', 'I', 1, shape[-2])
addtag('image_length', 'I', 1, shape[-3])
if tile:
addtag('tile_width', 'I', 1, tile[-1])
addtag('tile_length', 'I', 1, tile[-2])
if tile[0] > 1:
addtag('image_depth', 'I', 1, shape[-4])
addtag('tile_depth', 'I', 1, tile[0])
addtag('new_subfile_type', 'I', 1, 0)
addtag('sample_format', 'H', 1,
{'u': 1, 'i': 2, 'f': 3, 'c': 6}[data.dtype.kind])
addtag('photometric', 'H', 1, {'miniswhite': 0, 'minisblack': 1,
'rgb': 2, 'palette': 3}[photometric])
if colormap is not None:
addtag('color_map', 'H', colormap.size, colormap)
addtag('samples_per_pixel', 'H', 1, samplesperpixel)
if planarconfig and samplesperpixel > 1:
addtag('planar_configuration', 'H', 1, 1
if planarconfig == 'contig' else 2)
addtag('bits_per_sample', 'H', samplesperpixel,
(data.dtype.itemsize * 8,) * samplesperpixel)
else:
addtag('bits_per_sample', 'H', 1, data.dtype.itemsize * 8)
if extrasamples:
if photometric == 'rgb' and extrasamples == 1:
addtag('extra_samples', 'H', 1, 1) # associated alpha channel
else:
addtag('extra_samples', 'H', extrasamples, (0,) * extrasamples)
if resolution:
addtag('x_resolution', '2I', 1, rational(resolution[0]))
addtag('y_resolution', '2I', 1, rational(resolution[1]))
addtag('resolution_unit', 'H', 1, 2)
if not tile:
addtag('rows_per_strip', 'I', 1, shape[-3]) # * shape[-4]
if tile:
# use one chunk per tile per plane
tiles = ((shape[2] + tile[0] - 1) // tile[0],
(shape[3] + tile[1] - 1) // tile[1],
(shape[4] + tile[2] - 1) // tile[2])
numtiles = product(tiles) * shape[1]
strip_byte_counts = [
product(tile) * shape[-1] * data.dtype.itemsize] * numtiles
addtag(tag_byte_counts, offset_format, numtiles, strip_byte_counts)
addtag(tag_offsets, offset_format, numtiles, [0] * numtiles)
# allocate tile buffer
chunk = numpy.empty(tile + (shape[-1],), dtype=data.dtype)
else:
# use one strip per plane
strip_byte_counts = [
data[0, 0].size * data.dtype.itemsize] * shape[1]
addtag(tag_byte_counts, offset_format, shape[1], strip_byte_counts)
addtag(tag_offsets, offset_format, shape[1], [0] * shape[1])
# add extra tags from user
for t in extratags:
addtag(*t)
# TODO: check TIFFReadDirectoryCheckOrder warning in files containing
# multiple tags of same code
# the entries in an IFD must be sorted in ascending order by tag code
tags = sorted(tags, key=lambda x: x[0])
if not (self._bigtiff or self._imagej) and (
fh.tell() + data.size*data.dtype.itemsize > 2**31-1):
raise ValueError("data too large for standard TIFF file")
# if not compressed or tiled, write the first ifd and then all data
# contiguously; else, write all ifds and data interleaved
for pageindex in range(shape[0] if (compress or tile) else 1):
# update pointer at ifd_offset
pos = fh.tell()
fh.seek(self._ifd_offset)
fh.write(pack(offset_format, pos))
fh.seek(pos)
# write ifdentries
fh.write(pack(numtag_format, len(tags)))
tag_offset = fh.tell()
fh.write(b''.join(t[1] for t in tags))
self._ifd_offset = fh.tell()
fh.write(pack(offset_format, 0)) # offset to next IFD
# write tag values and patch offsets in ifdentries, if necessary
for tagindex, tag in enumerate(tags):
if tag[2]:
pos = fh.tell()
fh.seek(tag_offset + tagindex*tag_size + offset_size + 4)
fh.write(pack(offset_format, pos))
fh.seek(pos)
if tag[0] == tag_offsets:
strip_offsets_offset = pos
elif tag[0] == tag_byte_counts:
strip_byte_counts_offset = pos
elif tag[0] == 270 and tag[2].endswith(b'\0\0\0\0'):
# image description buffer
self._description_offset = pos
self._description_len_offset = (
tag_offset + tagindex * tag_size + 4)
fh.write(tag[2])
# write image data
data_offset = fh.tell()
if compress:
strip_byte_counts = []
if tile:
for plane in data[pageindex]:
for tz in range(tiles[0]):
for ty in range(tiles[1]):
for tx in range(tiles[2]):
c0 = min(tile[0], shape[2] - tz*tile[0])
c1 = min(tile[1], shape[3] - ty*tile[1])
c2 = min(tile[2], shape[4] - tx*tile[2])
chunk[c0:, c1:, c2:] = 0
chunk[:c0, :c1, :c2] = plane[
tz*tile[0]:tz*tile[0]+c0,
ty*tile[1]:ty*tile[1]+c1,
tx*tile[2]:tx*tile[2]+c2]
if compress:
t = compress(chunk)
strip_byte_counts.append(len(t))
fh.write(t)
else:
fh.write_array(chunk)
fh.flush()
elif compress:
for plane in data[pageindex]:
plane = compress(plane)
strip_byte_counts.append(len(plane))
fh.write(plane)
else:
fh.write_array(data)
# update strip/tile offsets and byte_counts if necessary
pos = fh.tell()
for tagindex, tag in enumerate(tags):
if tag[0] == tag_offsets: # strip/tile offsets
if tag[2]:
fh.seek(strip_offsets_offset)
strip_offset = data_offset
for size in strip_byte_counts:
fh.write(pack(offset_format, strip_offset))
strip_offset += size
else:
fh.seek(tag_offset + tagindex*tag_size +
offset_size + 4)
fh.write(pack(offset_format, data_offset))
elif tag[0] == tag_byte_counts: # strip/tile byte_counts
if compress:
if tag[2]:
fh.seek(strip_byte_counts_offset)
for size in strip_byte_counts:
fh.write(pack(offset_format, size))
else:
fh.seek(tag_offset + tagindex*tag_size +
offset_size + 4)
fh.write(pack(offset_format, strip_byte_counts[0]))
break
fh.seek(pos)
fh.flush()
# remove tags that should be written only once
if pageindex == 0:
tags = [tag for tag in tags if not tag[-1]]
# if uncompressed, write remaining ifds/tags later
if not (compress or tile):
self._tags = tags
self._shape = shape
self._data_shape = (1,) + data_shape
self._data_dtype = data.dtype
self._data_offset = data_offset
self._data_byte_counts = strip_byte_counts
def _write_remaining_pages(self):
"""Write outstanding IFDs and tags to file."""
if not self._tags:
return
fh = self._fh
byteorder = self._byteorder
numtag_format = self._numtag_format
offset_format = self._offset_format
offset_size = self._offset_size
tag_size = self._tag_size
data_offset = self._data_offset
page_data_size = sum(self._data_byte_counts)
tag_bytes = b''.join(t[1] for t in self._tags)
numpages = self._shape[0] * self._data_shape[0] - 1
pos = fh.tell()
if not self._bigtiff and pos + len(tag_bytes) * numpages > 2**32 - 256:
if self._imagej:
warnings.warn("truncating ImageJ file")
return
raise ValueError("data too large for non-bigtiff file")
def pack(fmt, *val):
return struct.pack(byteorder+fmt, *val)
for _ in range(numpages):
# update pointer at ifd_offset
pos = fh.tell()
fh.seek(self._ifd_offset)
fh.write(pack(offset_format, pos))
fh.seek(pos)
# write ifd entries
fh.write(pack(numtag_format, len(self._tags)))
tag_offset = fh.tell()
fh.write(tag_bytes)
self._ifd_offset = fh.tell()
fh.write(pack(offset_format, 0)) # offset to next IFD
# offset to image data
data_offset += page_data_size
# write tag values and patch offsets in ifdentries, if necessary
for tagindex, tag in enumerate(self._tags):
if tag[2]:
pos = fh.tell()
fh.seek(tag_offset + tagindex*tag_size + offset_size + 4)
fh.write(pack(offset_format, pos))
fh.seek(pos)
if tag[0] == self._tag_offsets:
strip_offsets_offset = pos
fh.write(tag[2])
# update strip/tile offsets if necessary
pos = fh.tell()
for tagindex, tag in enumerate(self._tags):
if tag[0] == self._tag_offsets: # strip/tile offsets
if tag[2]:
fh.seek(strip_offsets_offset)
strip_offset = data_offset
for size in self._data_byte_counts:
fh.write(pack(offset_format, strip_offset))
strip_offset += size
else:
fh.seek(tag_offset + tagindex*tag_size +
offset_size + 4)
fh.write(pack(offset_format, data_offset))
break
fh.seek(pos)
self._tags = None
self._data_dtype = None
self._data_offset = None
self._data_byte_counts = None
# do not reset _shape or _data_shape
def _write_image_description(self):
"""Write meta data to image_description tag."""
if (not self._data_shape or self._data_shape[0] == 1 or
self._description_offset <= 0):
return
colormapped = self._colormap is not None
if self._imagej:
isrgb = self._shape[-1] in (3, 4)
description = imagej_description(
self._data_shape, isrgb, colormapped, **self._metadata)
else:
description = image_description(
self._data_shape, colormapped, **self._metadata)
# rewrite description and its length to file
description = description[:self._description_len-1]
pos = self._fh.tell()
self._fh.seek(self._description_offset)
self._fh.write(description)
self._fh.seek(self._description_len_offset)
self._fh.write(struct.pack(self._byteorder+self._offset_format,
len(description)+1))
self._fh.seek(pos)
self._description_offset = 0
self._description_len_offset = 0
self._description_len = 0
def _now(self):
"""Return current date and time."""
return datetime.datetime.now()
def close(self, truncate=False):
"""Write remaining pages (if not truncate) and close file handle."""
if not truncate:
self._write_remaining_pages()
self._write_image_description()
self._fh.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def imread(files, **kwargs):
"""Return image data from TIFF file(s) as numpy array.
The first image series is returned if no arguments are provided.
Parameters
----------
files : str, binary stream, or sequence
File name, seekable binary stream, glob pattern, or sequence of
file names.
key : int, slice, or sequence of page indices
Defines which pages to return as array.
series : int
Defines which series of pages in file to return as array.
multifile : bool
If True (default), OME-TIFF data may include pages from multiple files.
pattern : str
Regular expression pattern that matches axes names and indices in
file names.
kwargs : dict
Additional parameters passed to the TiffFile or TiffSequence asarray
function.
Examples
--------
>>> imsave('temp.tif', numpy.random.rand(3, 4, 301, 219))
>>> im = imread('temp.tif', key=0)
>>> im.shape
(4, 301, 219)
>>> ims = imread(['temp.tif', 'temp.tif'])
>>> ims.shape
(2, 3, 4, 301, 219)
"""
kwargs_file = {}
if 'multifile' in kwargs:
kwargs_file['multifile'] = kwargs['multifile']
del kwargs['multifile']
else:
kwargs_file['multifile'] = True
kwargs_seq = {}
if 'pattern' in kwargs:
kwargs_seq['pattern'] = kwargs['pattern']
del kwargs['pattern']
if isinstance(files, basestring) and any(i in files for i in '?*'):
files = glob.glob(files)
if not files:
raise ValueError('no files found')
if not hasattr(files, 'seek') and len(files) == 1:
files = files[0]
if isinstance(files, basestring) or hasattr(files, 'seek'):
with TiffFile(files, **kwargs_file) as tif:
return tif.asarray(**kwargs)
else:
with TiffSequence(files, **kwargs_seq) as imseq:
return imseq.asarray(**kwargs)
class lazyattr(object):
"""Lazy object attribute whose value is computed on first access."""
__slots__ = ('func',)
def __init__(self, func):
self.func = func
def __get__(self, instance, owner):
if instance is None:
return self
value = self.func(instance)
if value is NotImplemented:
return getattr(super(owner, instance), self.func.__name__)
setattr(instance, self.func.__name__, value)
return value
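# Illustrative sketch of how 'lazyattr' is used (hypothetical 'Example' class):
# the decorated method runs once on first access; its result is then cached as
# a regular instance attribute, so later accesses bypass the descriptor.
#
#     class Example(object):
#         @lazyattr
#         def answer(self):
#             return 6 * 7  # computed only on the first access of 'answer'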
class TiffFile(object):
"""Read image and metadata from TIFF, STK, LSM, and FluoView files.
TiffFile instances must be closed using the 'close' method, which is
automatically called when using the 'with' statement.
Attributes
----------
pages : list of TiffPage
All TIFF pages in file.
series : list of TiffPageSeries
TIFF pages with compatible shapes and types.
micromanager_metadata: dict
        Extra MicroManager non-TIFF metadata in the file, if it exists.
All attributes are read-only.
Examples
--------
>>> with TiffFile('temp.tif') as tif:
... data = tif.asarray()
... data.shape
(5, 301, 219)
"""
def __init__(self, arg, name=None, offset=None, size=None,
multifile=True, multifile_close=True, maxpages=None,
fastij=True):
"""Initialize instance from file.
Parameters
----------
arg : str or open file
Name of file or open file object.
The file objects are closed in TiffFile.close().
name : str
Optional name of file in case 'arg' is a file handle.
offset : int
Optional start position of embedded file. By default this is
the current file position.
size : int
Optional size of embedded file. By default this is the number
of bytes from the 'offset' to the end of the file.
multifile : bool
If True (default), series may include pages from multiple files.
Currently applies to OME-TIFF only.
multifile_close : bool
If True (default), keep the handles of other files in multifile
series closed. This is inefficient when few files refer to
many pages. If False, the C runtime may run out of resources.
maxpages : int
Number of pages to read (default: no limit).
fastij : bool
If True (default), try to use only the metadata from the first page
of ImageJ files. Significantly speeds up loading movies with
thousands of pages.
"""
self._fh = FileHandle(arg, mode='rb',
name=name, offset=offset, size=size)
self.offset_size = None
self.pages = []
self._multifile = bool(multifile)
self._multifile_close = bool(multifile_close)
self._files = {self._fh.name: self} # cache of TiffFiles
try:
self._fromfile(maxpages, fastij)
except Exception:
self._fh.close()
raise
@property
def filehandle(self):
"""Return file handle."""
return self._fh
@property
def filename(self):
"""Return name of file handle."""
return self._fh.name
def close(self):
"""Close open file handle(s)."""
for tif in self._files.values():
tif._fh.close()
self._files = {}
def _fromfile(self, maxpages=None, fastij=True):
"""Read TIFF header and all page records from file."""
self._fh.seek(0)
try:
self.byteorder = {b'II': '<', b'MM': '>'}[self._fh.read(2)]
except KeyError:
raise ValueError("not a valid TIFF file")
self._is_native = self.byteorder == {'big': '>',
'little': '<'}[sys.byteorder]
version = struct.unpack(self.byteorder+'H', self._fh.read(2))[0]
if version == 43:
# BigTiff
self.offset_size, zero = struct.unpack(self.byteorder+'HH',
self._fh.read(4))
if zero or self.offset_size != 8:
raise ValueError("not a valid BigTIFF file")
elif version == 42:
self.offset_size = 4
else:
raise ValueError("not a TIFF file")
self.pages = []
while True:
try:
page = TiffPage(self)
self.pages.append(page)
except StopIteration:
break
if maxpages and len(self.pages) > maxpages:
break
if fastij and page.is_imagej:
if page._patch_imagej():
break # only read the first page of ImageJ files
fastij = False
if not self.pages:
raise ValueError("empty TIFF file")
if self.is_micromanager:
# MicroManager files contain metadata not stored in TIFF tags.
self.micromanager_metadata = read_micromanager_metadata(self._fh)
if self.is_lsm:
self._fix_lsm_strip_offsets()
self._fix_lsm_strip_byte_counts()
def _fix_lsm_strip_offsets(self):
"""Unwrap strip offsets for LSM files greater than 4 GB."""
        # each series and position requires separate unwrapping (undocumented)
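        # sketch of the unwrap: offsets were written as 32-bit values, so
        # whenever an offset is smaller than its predecessor, 2**32 is added
        # to it and to all following offsets of that position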
for series in self.series:
positions = 1
for i in 0, 1:
if series.axes[i] in 'PM':
positions *= series.shape[i]
positions = len(series.pages) // positions
for i, page in enumerate(series.pages):
if not i % positions:
wrap = 0
previous_offset = 0
strip_offsets = []
for current_offset in page.strip_offsets:
if current_offset < previous_offset:
wrap += 2**32
strip_offsets.append(current_offset + wrap)
previous_offset = current_offset
page.strip_offsets = tuple(strip_offsets)
def _fix_lsm_strip_byte_counts(self):
"""Set strip_byte_counts to size of compressed data.
The strip_byte_counts tag in LSM files contains the number of bytes
for the uncompressed data.
"""
if not self.pages:
return
strips = {}
for page in self.pages:
assert len(page.strip_offsets) == len(page.strip_byte_counts)
for offset, bytecount in zip(page.strip_offsets,
page.strip_byte_counts):
strips[offset] = bytecount
offsets = sorted(strips.keys())
offsets.append(min(offsets[-1] + strips[offsets[-1]], self._fh.size))
for i, offset in enumerate(offsets[:-1]):
strips[offset] = min(strips[offset], offsets[i+1] - offset)
for page in self.pages:
if page.compression:
page.strip_byte_counts = tuple(
strips[offset] for offset in page.strip_offsets)
def asarray(self, key=None, series=None, memmap=False, tempdir=None):
"""Return image data from multiple TIFF pages as numpy array.
By default the first image series is returned.
Parameters
----------
key : int, slice, or sequence of page indices
Defines which pages to return as array.
series : int or TiffPageSeries
Defines which series of pages to return as array.
memmap : bool
            If True, return a read-only array stored in a binary file on disk
if possible. The TIFF file is used if possible, else a temporary
file is created.
tempdir : str
The directory where the memory-mapped file will be created.
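        Examples
        --------
        A minimal usage sketch (assumes a 'temp.tif' file is available, for
        example the one created in the imread examples):
        >>> with TiffFile('temp.tif') as tif:  # doctest: +SKIP
        ...     series0 = tif.asarray(series=0)  # first series
        ...     page0 = tif.asarray(key=0)  # first page only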
"""
if key is None and series is None:
series = 0
if series is not None:
try:
series = self.series[series]
except (KeyError, TypeError):
pass
pages = series.pages
else:
pages = self.pages
if key is None:
pass
elif isinstance(key, int):
pages = [pages[key]]
elif isinstance(key, slice):
pages = pages[key]
elif isinstance(key, collections.Iterable):
pages = [pages[k] for k in key]
else:
raise TypeError("key must be an int, slice, or sequence")
if not len(pages):
raise ValueError("no pages selected")
if self.is_nih:
if pages[0].is_indexed:
result = stack_pages(pages, colormapped=False, squeeze=False)
result = apply_colormap(result, pages[0].color_map)
else:
result = stack_pages(pages, memmap=memmap, tempdir=tempdir,
colormapped=False, squeeze=False)
elif len(pages) == 1:
result = pages[0].asarray(memmap=memmap)
elif self.is_ome:
assert not self.is_indexed, "color mapping disabled for ome-tiff"
if any(p is None for p in pages):
# zero out missing pages
firstpage = next(p for p in pages if p)
nopage = numpy.zeros_like(
firstpage.asarray(memmap=False))
if memmap:
with tempfile.NamedTemporaryFile() as fh:
result = numpy.memmap(fh, series.dtype, shape=series.shape)
result = result.reshape(-1)
else:
result = numpy.empty(series.shape, series.dtype).reshape(-1)
index = 0
class KeepOpen:
# keep Tiff files open between consecutive pages
def __init__(self, parent, close):
self.master = parent
self.parent = parent
self._close = close
def open(self, page):
if self._close and page and page.parent != self.parent:
if self.parent != self.master:
self.parent.filehandle.close()
self.parent = page.parent
self.parent.filehandle.open()
def close(self):
if self._close and self.parent != self.master:
self.parent.filehandle.close()
keep = KeepOpen(self, self._multifile_close)
for page in pages:
keep.open(page)
if page:
a = page.asarray(memmap=False, colormapped=False,
reopen=False)
else:
a = nopage
try:
result[index:index + a.size] = a.reshape(-1)
except ValueError as e:
warnings.warn("ome-tiff: %s" % e)
break
index += a.size
keep.close()
elif key is None and series and series.offset:
result = self.filehandle.memmap_array(series.dtype, series.shape,
series.offset)
else:
result = stack_pages(pages, memmap=memmap, tempdir=tempdir)
if key is None:
try:
result.shape = series.shape
except ValueError:
try:
warnings.warn("failed to reshape %s to %s" % (
result.shape, series.shape))
# try series of expected shapes
result.shape = (-1,) + series.shape
except ValueError:
# revert to generic shape
result.shape = (-1,) + pages[0].shape
elif len(pages) == 1:
result.shape = pages[0].shape
else:
result.shape = (-1,) + pages[0].shape
return result
@lazyattr
def series(self):
"""Return pages with compatible properties as TiffPageSeries."""
if not self.pages:
return []
series = []
if self.is_ome:
series = self._ome_series()
elif self.is_fluoview:
series = self._fluoview_series()
elif self.is_lsm:
series = self._lsm_series()
elif self.is_imagej:
series = self._imagej_series()
elif self.is_nih:
series = self._nih_series()
if not series:
# generic detection of series
shapes = []
pages = {}
index = 0
for page in self.pages:
if not page.shape:
continue
if page.is_shaped:
index += 1 # shape starts a new series
shape = page.shape + (index, page.axes,
page.compression in TIFF_DECOMPESSORS)
if shape in pages:
pages[shape].append(page)
else:
shapes.append(shape)
pages[shape] = [page]
series = []
for s in shapes:
shape = ((len(pages[s]),) + s[:-3] if len(pages[s]) > 1
else s[:-3])
axes = (('I' + s[-2]) if len(pages[s]) > 1 else s[-2])
page0 = pages[s][0]
if page0.is_shaped:
metadata = image_description_dict(page0.is_shaped)
reshape = metadata['shape']
if 'axes' in metadata:
reaxes = metadata['axes']
if len(reaxes) == len(reshape):
axes = reaxes
shape = reshape
else:
warnings.warn("axes do not match shape")
try:
axes = reshape_axes(axes, shape, reshape)
shape = reshape
except ValueError as e:
                        warnings.warn(str(e))
series.append(
TiffPageSeries(pages[s], shape, page0.dtype, axes))
# remove empty series, e.g. in MD Gel files
series = [s for s in series if sum(s.shape) > 0]
return series
def _fluoview_series(self):
"""Return image series in FluoView file."""
page0 = self.pages[0]
dims = {
b'X': 'X', b'Y': 'Y', b'Z': 'Z', b'T': 'T',
b'WAVELENGTH': 'C', b'TIME': 'T', b'XY': 'R',
b'EVENT': 'V', b'EXPOSURE': 'L'}
mmhd = list(reversed(page0.mm_header.dimensions))
axes = ''.join(dims.get(i[0].strip().upper(), 'Q')
for i in mmhd if i[1] > 1)
shape = tuple(int(i[1]) for i in mmhd if i[1] > 1)
return [TiffPageSeries(self.pages, shape, page0.dtype, axes)]
def _lsm_series(self):
"""Return image series in LSM file."""
page0 = self.pages[0]
lsmi = page0.cz_lsm_info
axes = CZ_SCAN_TYPES[lsmi.scan_type]
if page0.is_rgb:
axes = axes.replace('C', '').replace('XY', 'XYC')
if hasattr(lsmi, 'dimension_p') and lsmi.dimension_p > 1:
axes += 'P'
if hasattr(lsmi, 'dimension_m') and lsmi.dimension_m > 1:
axes += 'M'
axes = axes[::-1]
shape = tuple(getattr(lsmi, CZ_DIMENSIONS[i]) for i in axes)
pages = [p for p in self.pages if not p.is_reduced]
dtype = pages[0].dtype
series = [TiffPageSeries(pages, shape, dtype, axes)]
if len(pages) != len(self.pages): # reduced RGB pages
pages = [p for p in self.pages if p.is_reduced]
cp = 1
i = 0
while cp < len(pages) and i < len(shape)-2:
cp *= shape[i]
i += 1
shape = shape[:i] + pages[0].shape
axes = axes[:i] + 'CYX'
dtype = pages[0].dtype
series.append(TiffPageSeries(pages, shape, dtype, axes))
return series
def _imagej_series(self):
"""Return image series in ImageJ file."""
# ImageJ's dimension order is always TZCYXS
# TODO: fix loading of color, composite or palette images
shape = []
axes = []
page0 = self.pages[0]
ij = page0.imagej_tags
if 'frames' in ij:
shape.append(ij['frames'])
axes.append('T')
if 'slices' in ij:
shape.append(ij['slices'])
axes.append('Z')
if 'channels' in ij and not (self.is_rgb and not
ij.get('hyperstack', False)):
shape.append(ij['channels'])
axes.append('C')
remain = ij.get('images', len(self.pages)) // (product(shape)
if shape else 1)
if remain > 1:
shape.append(remain)
axes.append('I')
if page0.axes[0] == 'I':
# contiguous multiple images
shape.extend(page0.shape[1:])
axes.extend(page0.axes[1:])
elif page0.axes[:2] == 'SI':
# color-mapped contiguous multiple images
shape = page0.shape[0:1] + tuple(shape) + page0.shape[2:]
axes = list(page0.axes[0]) + axes + list(page0.axes[2:])
else:
shape.extend(page0.shape)
axes.extend(page0.axes)
return [TiffPageSeries(self.pages, shape, page0.dtype, axes)]
def _nih_series(self):
"""Return image series in NIH file."""
page0 = self.pages[0]
if len(self.pages) == 1:
shape = page0.shape
axes = page0.axes
else:
shape = (len(self.pages),) + page0.shape
axes = 'I' + page0.axes
return [TiffPageSeries(self.pages, shape, page0.dtype, axes)]
def _ome_series(self):
"""Return image series in OME-TIFF file(s)."""
omexml = self.pages[0].tags['image_description'].value
try:
root = etree.fromstring(omexml)
except etree.ParseError as e:
# TODO: test this
warnings.warn("ome-xml: %s" % e)
omexml = omexml.decode('utf-8', 'ignore').encode('utf-8')
root = etree.fromstring(omexml)
uuid = root.attrib.get('UUID', None)
self._files = {uuid: self}
dirname = self._fh.dirname
modulo = {}
series = []
for element in root:
if element.tag.endswith('BinaryOnly'):
warnings.warn("ome-xml: not an ome-tiff master file")
break
if element.tag.endswith('StructuredAnnotations'):
for annot in element:
if not annot.attrib.get('Namespace',
'').endswith('modulo'):
continue
for value in annot:
for modul in value:
for along in modul:
if not along.tag[:-1].endswith('Along'):
continue
axis = along.tag[-1]
newaxis = along.attrib.get('Type', 'other')
newaxis = AXES_LABELS[newaxis]
if 'Start' in along.attrib:
labels = range(
int(along.attrib['Start']),
int(along.attrib['End']) + 1,
int(along.attrib.get('Step', 1)))
else:
labels = [label.text for label in along
if label.tag.endswith('Label')]
modulo[axis] = (newaxis, labels)
if not element.tag.endswith('Image'):
continue
for pixels in element:
if not pixels.tag.endswith('Pixels'):
continue
atr = pixels.attrib
dtype = atr.get('Type', None)
axes = ''.join(reversed(atr['DimensionOrder']))
shape = list(int(atr['Size'+ax]) for ax in axes)
size = product(shape[:-2])
ifds = [None] * (size // self.pages[0].samples_per_pixel)
for data in pixels:
if not data.tag.endswith('TiffData'):
continue
atr = data.attrib
ifd = int(atr.get('IFD', 0))
num = int(atr.get('NumPlanes', 1 if 'IFD' in atr else 0))
num = int(atr.get('PlaneCount', num))
idx = [int(atr.get('First'+ax, 0)) for ax in axes[:-2]]
try:
idx = numpy.ravel_multi_index(idx, shape[:-2])
except ValueError:
# ImageJ produces invalid ome-xml when cropping
warnings.warn("ome-xml: invalid TiffData index")
continue
for uuid in data:
if not uuid.tag.endswith('UUID'):
continue
if uuid.text not in self._files:
if not self._multifile:
# abort reading multifile OME series
# and fall back to generic series
return []
fname = uuid.attrib['FileName']
try:
tif = TiffFile(os.path.join(dirname, fname))
                            except (IOError, ValueError):
                                # note: 'tif' is unbound here if opening failed
                                warnings.warn(
                                    "ome-xml: failed to read '%s'" % fname)
                                break
self._files[uuid.text] = tif
if self._multifile_close:
tif.close()
pages = self._files[uuid.text].pages
try:
for i in range(num if num else len(pages)):
ifds[idx + i] = pages[ifd + i]
except IndexError:
warnings.warn("ome-xml: index out of range")
# only process first uuid
break
else:
pages = self.pages
try:
for i in range(num if num else len(pages)):
ifds[idx + i] = pages[ifd + i]
except IndexError:
warnings.warn("ome-xml: index out of range")
if all(i is None for i in ifds):
# skip images without data
continue
dtype = next(i for i in ifds if i).dtype
series.append(TiffPageSeries(ifds, shape, dtype, axes, self))
for serie in series:
shape = list(serie.shape)
for axis, (newaxis, labels) in modulo.items():
i = serie.axes.index(axis)
size = len(labels)
if shape[i] == size:
serie.axes = serie.axes.replace(axis, newaxis, 1)
else:
shape[i] //= size
shape.insert(i+1, size)
serie.axes = serie.axes.replace(axis, axis+newaxis, 1)
serie.shape = tuple(shape)
# squeeze dimensions
for serie in series:
serie.shape, serie.axes = squeeze_axes(serie.shape, serie.axes)
return series
def __len__(self):
"""Return number of image pages in file."""
return len(self.pages)
def __getitem__(self, key):
"""Return specified page."""
return self.pages[key]
def __iter__(self):
"""Return iterator over pages."""
return iter(self.pages)
def __str__(self):
"""Return string containing information about file."""
result = [
self._fh.name.capitalize(),
format_size(self._fh.size),
{'<': 'little endian', '>': 'big endian'}[self.byteorder]]
if self.is_bigtiff:
result.append("bigtiff")
if len(self.pages) > 1:
result.append("%i pages" % len(self.pages))
if len(self.series) > 1:
result.append("%i series" % len(self.series))
if len(self._files) > 1:
result.append("%i files" % (len(self._files)))
return ", ".join(result)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
@lazyattr
def fstat(self):
try:
return os.fstat(self._fh.fileno())
except Exception: # io.UnsupportedOperation
return None
@lazyattr
def is_bigtiff(self):
"""File has BigTIFF format."""
return self.offset_size != 4
@lazyattr
def is_rgb(self):
"""File contains only RGB images."""
return all(p.is_rgb for p in self.pages)
@lazyattr
def is_indexed(self):
"""File contains only indexed images."""
return all(p.is_indexed for p in self.pages)
@lazyattr
def is_mdgel(self):
"""File has MD Gel format."""
return any(p.is_mdgel for p in self.pages)
@lazyattr
def is_mediacy(self):
"""File was created by Media Cybernetics software."""
return any(p.is_mediacy for p in self.pages)
@lazyattr
def is_stk(self):
"""File has MetaMorph STK format."""
return all(p.is_stk for p in self.pages)
@lazyattr
def is_lsm(self):
"""File was created by Carl Zeiss software."""
return len(self.pages) and self.pages[0].is_lsm
@lazyattr
def is_vista(self):
"""File was created by ISS Vista."""
return len(self.pages) and self.pages[0].is_vista
@lazyattr
def is_imagej(self):
"""File has ImageJ format."""
return len(self.pages) and self.pages[0].is_imagej
@lazyattr
def is_micromanager(self):
"""File was created by MicroManager."""
return len(self.pages) and self.pages[0].is_micromanager
@lazyattr
def is_nih(self):
"""File has NIH Image format."""
return len(self.pages) and self.pages[0].is_nih
@lazyattr
def is_fluoview(self):
"""File was created by Olympus FluoView."""
return len(self.pages) and self.pages[0].is_fluoview
@lazyattr
def is_ome(self):
"""File has OME-TIFF format."""
return len(self.pages) and self.pages[0].is_ome
@lazyattr
def is_scn(self):
"""File has Leica SCN format."""
return len(self.pages) and self.pages[0].is_scn
class TiffPage(object):
"""A TIFF image file directory (IFD).
Attributes
----------
index : int
Index of page in file.
dtype : str {TIFF_SAMPLE_DTYPES}
Data type of image, color-mapped if applicable.
shape : tuple
Dimensions of the image array in TIFF page,
color-mapped and with extra samples if applicable.
axes : str
Axes label codes:
'X' width, 'Y' height, 'S' sample, 'I' image series|page|plane,
'Z' depth, 'C' color|em-wavelength|channel, 'E' ex-wavelength|lambda,
'T' time, 'R' region|tile, 'A' angle, 'P' phase, 'H' lifetime,
'L' exposure, 'V' event, 'Q' unknown, '_' missing
tags : TiffTags
Dictionary of tags in page.
Tag values are also directly accessible as attributes.
    color_map : numpy.ndarray
        Color look-up table, if it exists.
    cz_lsm_scan_info: Record(dict)
        LSM scan info attributes, if they exist.
    imagej_tags: Record(dict)
        Consolidated ImageJ description and metadata tags, if they exist.
    uic_tags: Record(dict)
        Consolidated MetaMorph STK/UIC tags, if they exist.
All attributes are read-only.
Notes
-----
    The internal, normalized '_shape' attribute is 6-dimensional:
    0. number of planes/images (stk, ij).
1. planar samples_per_pixel.
2. image_depth Z (sgi).
3. image_length Y.
4. image_width X.
5. contig samples_per_pixel.
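    Examples
    --------
    A minimal sketch (pages are normally obtained from an open TiffFile;
    assumes a 'temp.tif' file is available):
    >>> with TiffFile('temp.tif') as tif:  # doctest: +SKIP
    ...     page = tif.pages[0]
    ...     image = page.asarray()
    ...     page.shape, page.axes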
"""
def __init__(self, parent):
"""Initialize instance from file."""
self.parent = parent
self.index = len(parent.pages)
self.shape = self._shape = ()
self.dtype = self._dtype = None
self.axes = ""
self.tags = TiffTags()
self._offset = 0
self._fromfile()
self._process_tags()
def _fromfile(self):
"""Read TIFF IFD structure and its tags from file.
File cursor must be at storage position of IFD offset and is left at
offset to next IFD.
Raises StopIteration if offset (first bytes read) is 0
or a corrupted page list is encountered.
"""
fh = self.parent.filehandle
byteorder = self.parent.byteorder
offset_size = self.parent.offset_size
# read offset to this IFD
fmt = {4: 'I', 8: 'Q'}[offset_size]
offset = struct.unpack(byteorder + fmt, fh.read(offset_size))[0]
if not offset:
raise StopIteration()
if offset >= fh.size:
warnings.warn("invalid page offset > file size")
raise StopIteration()
self._offset = offset
# read standard tags
tags = self.tags
fh.seek(offset)
fmt, size = {4: ('H', 2), 8: ('Q', 8)}[offset_size]
try:
numtags = struct.unpack(byteorder + fmt, fh.read(size))[0]
if numtags > 4096:
raise ValueError("suspicious number of tags")
except Exception:
warnings.warn("corrupted page list at offset %i" % offset)
raise StopIteration()
tagcode = 0
for _ in range(numtags):
try:
tag = TiffTag(self.parent)
except TiffTag.Error as e:
warnings.warn(str(e))
continue
if tagcode > tag.code:
# expected for early LSM and tifffile versions
warnings.warn("tags are not ordered by code")
tagcode = tag.code
if tag.name not in tags:
tags[tag.name] = tag
else:
# some files contain multiple IFD with same code
# e.g. MicroManager files contain two image_description
i = 1
while True:
name = "%s_%i" % (tag.name, i)
if name not in tags:
tags[name] = tag
break
pos = fh.tell() # where offset to next IFD can be found
if self.is_lsm or (self.index and self.parent.is_lsm):
# correct non standard LSM bitspersample tags
self.tags['bits_per_sample']._fix_lsm_bitspersample(self)
if self.is_lsm:
# read LSM info subrecords
for name, reader in CZ_LSM_INFO_READERS.items():
try:
offset = self.cz_lsm_info['offset_'+name]
except KeyError:
continue
if offset < 8:
# older LSM revision
continue
fh.seek(offset)
try:
setattr(self, 'cz_lsm_'+name, reader(fh))
except ValueError:
pass
elif self.is_stk and 'uic1tag' in tags and not tags['uic1tag'].value:
# read uic1tag now that plane count is known
uic1tag = tags['uic1tag']
fh.seek(uic1tag.value_offset)
tags['uic1tag'].value = Record(
read_uic1tag(fh, byteorder, uic1tag.dtype, uic1tag.count,
tags['uic2tag'].count))
fh.seek(pos)
def _process_tags(self):
"""Validate standard tags and initialize attributes.
Raise ValueError if tag values are not supported.
"""
tags = self.tags
for code, (name, default, dtype, count, validate) in TIFF_TAGS.items():
if name in tags:
#tags[name] = TiffTag(code, dtype=dtype, count=count,
# value=default, name=name)
if validate:
try:
if tags[name].count == 1:
setattr(self, name, validate[tags[name].value])
else:
setattr(self, name, tuple(
validate[value] for value in tags[name].value))
except KeyError:
raise ValueError("%s.value (%s) not supported" %
(name, tags[name].value))
elif default is not None:
setattr(self, name, validate[default] if validate else default)
if 'bits_per_sample' in tags:
tag = tags['bits_per_sample']
if tag.count == 1:
self.bits_per_sample = tag.value
else:
# LSM might list more items than samples_per_pixel
value = tag.value[:self.samples_per_pixel]
if any((v-value[0] for v in value)):
self.bits_per_sample = value
else:
self.bits_per_sample = value[0]
if 'sample_format' in tags:
tag = tags['sample_format']
if tag.count == 1:
self.sample_format = TIFF_SAMPLE_FORMATS[tag.value]
else:
value = tag.value[:self.samples_per_pixel]
if any((v-value[0] for v in value)):
self.sample_format = [TIFF_SAMPLE_FORMATS[v]
for v in value]
else:
self.sample_format = TIFF_SAMPLE_FORMATS[value[0]]
if 'photometric' not in tags:
self.photometric = None
if 'image_length' in tags:
self.strips_per_image = int(math.floor(
float(self.image_length + self.rows_per_strip - 1) /
self.rows_per_strip))
else:
self.strips_per_image = 0
key = (self.sample_format, self.bits_per_sample)
self.dtype = self._dtype = TIFF_SAMPLE_DTYPES.get(key, None)
if 'image_length' not in self.tags or 'image_width' not in self.tags:
# some GEL file pages are missing image data
self.image_length = 0
self.image_width = 0
self.image_depth = 0
self.strip_offsets = 0
self._shape = ()
self.shape = ()
self.axes = ''
if self.is_vista or self.parent.is_vista:
# ISS Vista writes wrong image_depth tag
self.image_depth = 1
if self.is_indexed:
self.dtype = self.tags['color_map'].dtype[1]
self.color_map = numpy.array(self.color_map, self.dtype)
dmax = self.color_map.max()
if dmax < 256:
self.dtype = numpy.uint8
self.color_map = self.color_map.astype(self.dtype)
#else:
# self.dtype = numpy.uint8
# self.color_map >>= 8
# self.color_map = self.color_map.astype(self.dtype)
# TODO: support other photometric modes than RGB
self.color_map.shape = (3, -1)
# determine shape of data
image_length = self.image_length
image_width = self.image_width
image_depth = self.image_depth
samples_per_pixel = self.samples_per_pixel
if self.is_stk:
assert self.image_depth == 1
planes = self.tags['uic2tag'].count
if self.is_contig:
self._shape = (planes, 1, 1, image_length, image_width,
samples_per_pixel)
if samples_per_pixel == 1:
self.shape = (planes, image_length, image_width)
self.axes = 'YX'
else:
self.shape = (planes, image_length, image_width,
samples_per_pixel)
self.axes = 'YXS'
else:
self._shape = (planes, samples_per_pixel, 1, image_length,
image_width, 1)
if samples_per_pixel == 1:
self.shape = (planes, image_length, image_width)
self.axes = 'YX'
else:
self.shape = (planes, samples_per_pixel, image_length,
image_width)
self.axes = 'SYX'
# detect type of series
if planes == 1:
self.shape = self.shape[1:]
elif numpy.all(self.uic2tag.z_distance != 0):
self.axes = 'Z' + self.axes
elif numpy.all(numpy.diff(self.uic2tag.time_created) != 0):
self.axes = 'T' + self.axes
else:
self.axes = 'I' + self.axes
# DISABLED
if self.is_indexed:
assert False, "color mapping disabled for stk"
if self.color_map.shape[1] >= 2**self.bits_per_sample:
if image_depth == 1:
self.shape = (planes, image_length, image_width,
self.color_map.shape[0])
else:
self.shape = (planes, image_depth, image_length,
image_width, self.color_map.shape[0])
self.axes = self.axes + 'S'
else:
warnings.warn("palette cannot be applied")
self.is_indexed = False
elif self.is_indexed:
samples = 1
if 'extra_samples' in self.tags:
samples += self.tags['extra_samples'].count
if self.is_contig:
self._shape = (1, 1, image_depth, image_length, image_width,
samples)
else:
self._shape = (1, samples, image_depth, image_length,
image_width, 1)
if self.color_map.shape[1] >= 2**self.bits_per_sample:
if image_depth == 1:
self.shape = (image_length, image_width,
self.color_map.shape[0])
self.axes = 'YXS'
else:
self.shape = (image_depth, image_length, image_width,
self.color_map.shape[0])
self.axes = 'ZYXS'
else:
warnings.warn("palette cannot be applied")
self.is_indexed = False
if image_depth == 1:
self.shape = (image_length, image_width)
self.axes = 'YX'
else:
self.shape = (image_depth, image_length, image_width)
self.axes = 'ZYX'
elif self.is_rgb or samples_per_pixel > 1:
if self.is_contig:
self._shape = (1, 1, image_depth, image_length, image_width,
samples_per_pixel)
if image_depth == 1:
self.shape = (image_length, image_width, samples_per_pixel)
self.axes = 'YXS'
else:
self.shape = (image_depth, image_length, image_width,
samples_per_pixel)
self.axes = 'ZYXS'
else:
self._shape = (1, samples_per_pixel, image_depth,
image_length, image_width, 1)
if image_depth == 1:
self.shape = (samples_per_pixel, image_length, image_width)
self.axes = 'SYX'
else:
self.shape = (samples_per_pixel, image_depth,
image_length, image_width)
self.axes = 'SZYX'
if False and self.is_rgb and 'extra_samples' in self.tags:
# DISABLED: only use RGB and first alpha channel if exists
extra_samples = self.extra_samples
if self.tags['extra_samples'].count == 1:
extra_samples = (extra_samples,)
for exs in extra_samples:
if exs in ('unassalpha', 'assocalpha', 'unspecified'):
if self.is_contig:
self.shape = self.shape[:-1] + (4,)
else:
self.shape = (4,) + self.shape[1:]
break
else:
self._shape = (1, 1, image_depth, image_length, image_width, 1)
if image_depth == 1:
self.shape = (image_length, image_width)
self.axes = 'YX'
else:
self.shape = (image_depth, image_length, image_width)
self.axes = 'ZYX'
if not self.compression and 'strip_byte_counts' not in tags:
self.strip_byte_counts = (
product(self.shape) * (self.bits_per_sample // 8),)
assert len(self.shape) == len(self.axes)
def _patch_imagej(self):
"""Return if ImageJ data are contiguous and adjust page attributes.
Patch 'strip_offsets' and 'strip_byte_counts' tags to span the
complete contiguous data.
ImageJ stores all image metadata in the first page and image data is
stored contiguously before the second page, if any. No need to
read other pages.
"""
if not self.is_imagej or not self.is_contiguous or self.parent.is_ome:
return
images = self.imagej_tags.get('images', 0)
if images <= 1:
return
offset, count = self.is_contiguous
shape = self.shape
if self.is_indexed:
shape = shape[:-1]
if (count != product(shape) * self.bits_per_sample // 8 or
offset + count*images > self.parent.filehandle.size):
self.is_imagej = False
warnings.warn("corrupted ImageJ metadata or file")
return
pre = 'tile' if self.is_tiled else 'strip'
self.tags[pre+'_offsets'].value = (offset,)
self.tags[pre+'_byte_counts'].value = (count * images,)
self.shape = (images,) + self.shape
self._shape = (images,) + self._shape[1:]
self.axes = 'I' + self.axes
return True
def asarray(self, squeeze=True, colormapped=True, rgbonly=False,
scale_mdgel=False, memmap=False, reopen=True,
maxsize=64*1024*1024*1024):
"""Read image data from file and return as numpy array.
Raise ValueError if format is unsupported.
If any of 'squeeze', 'colormapped', or 'rgbonly' are not the default,
the shape of the returned array might be different from the page shape.
Parameters
----------
squeeze : bool
If True, all length-1 dimensions (except X and Y) are
squeezed out from result.
colormapped : bool
If True, color mapping is applied for palette-indexed images.
rgbonly : bool
If True, return RGB(A) image without additional extra samples.
memmap : bool
If True, use numpy.memmap to read arrays from file if possible.
            Intended for 64-bit systems and files with large contiguous data.
reopen : bool
If True and the parent file handle is closed, the file is
temporarily re-opened (and closed if no exception occurs).
scale_mdgel : bool
If True, MD Gel data will be scaled according to the private
metadata in the second TIFF page. The dtype will be float32.
maxsize: int or None
Maximum size of data before a ValueError is raised.
            Can be used to guard against denial-of-service attacks.
            Default: 64 GB.
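        Examples
        --------
        A minimal sketch (assumes a 'temp.tif' file is available):
        >>> page = TiffFile('temp.tif').pages[0]  # doctest: +SKIP
        >>> data = page.asarray(squeeze=True, colormapped=True)  # doctest: +SKIP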
"""
if not self._shape:
return
if maxsize and product(self._shape) > maxsize:
raise ValueError("data is too large %s" % str(self._shape))
if self.dtype is None:
raise ValueError("data type not supported: %s%i" % (
self.sample_format, self.bits_per_sample))
if self.compression not in TIFF_DECOMPESSORS:
raise ValueError("cannot decompress %s" % self.compression)
if 'sample_format' in self.tags:
tag = self.tags['sample_format']
if tag.count != 1 and any((i-tag.value[0] for i in tag.value)):
raise ValueError("sample formats do not match %s" % tag.value)
if self.is_chroma_subsampled:
# TODO: implement chroma subsampling
raise NotImplementedError("chroma subsampling not supported")
fh = self.parent.filehandle
closed = fh.closed
if closed:
if reopen:
fh.open()
else:
raise IOError("file handle is closed")
dtype = self._dtype
shape = self._shape
image_width = self.image_width
image_length = self.image_length
image_depth = self.image_depth
typecode = self.parent.byteorder + dtype
bits_per_sample = self.bits_per_sample
lsb2msb = self.fill_order == 'lsb2msb'
byte_counts, offsets = self._byte_counts_offsets
if self.is_tiled:
tile_width = self.tile_width
tile_length = self.tile_length
tile_depth = self.tile_depth if 'tile_depth' in self.tags else 1
tw = (image_width + tile_width - 1) // tile_width
tl = (image_length + tile_length - 1) // tile_length
td = (image_depth + tile_depth - 1) // tile_depth
shape = (shape[0], shape[1],
td*tile_depth, tl*tile_length, tw*tile_width, shape[-1])
tile_shape = (tile_depth, tile_length, tile_width, shape[-1])
runlen = tile_width
else:
runlen = image_width
if memmap and self._is_memmappable(rgbonly, colormapped):
result = fh.memmap_array(typecode, shape, offset=offsets[0])
elif self.is_contiguous:
fh.seek(offsets[0])
result = fh.read_array(typecode, product(shape))
result = result.astype('=' + dtype)
if lsb2msb:
reverse_bitorder(result)
else:
if self.is_contig:
runlen *= self.samples_per_pixel
if bits_per_sample in (8, 16, 32, 64, 128):
if (bits_per_sample * runlen) % 8:
raise ValueError("data and sample size mismatch")
def unpack(x, typecode=typecode):
if self.predictor == 'float':
# the floating point horizontal differencing decoder
# needs the raw byte order
typecode = dtype
try:
return numpy.fromstring(x, typecode)
except ValueError as e:
# strips may be missing EOI
warnings.warn("unpack: %s" % e)
xlen = ((len(x) // (bits_per_sample // 8)) *
(bits_per_sample // 8))
return numpy.fromstring(x[:xlen], typecode)
elif isinstance(bits_per_sample, tuple):
def unpack(x):
return unpack_rgb(x, typecode, bits_per_sample)
else:
def unpack(x):
return unpack_ints(x, typecode, bits_per_sample, runlen)
decompress = TIFF_DECOMPESSORS[self.compression]
if self.compression == 'jpeg':
table = self.jpeg_tables if 'jpeg_tables' in self.tags else b''
def decompress(x):
return decode_jpeg(x, table, self.photometric)
if self.is_tiled:
result = numpy.empty(shape, dtype)
tw, tl, td, pl = 0, 0, 0, 0
for offset, bytecount in zip(offsets, byte_counts):
fh.seek(offset)
tile = fh.read(bytecount)
if lsb2msb:
tile = reverse_bitorder(tile)
tile = decompress(tile)
tile = unpack(tile)
try:
tile.shape = tile_shape
except ValueError:
# incomplete tiles; see gdal issue #1179
warnings.warn("invalid tile data")
t = numpy.zeros(tile_shape, dtype).reshape(-1)
s = min(tile.size, t.size)
t[:s] = tile[:s]
tile = t.reshape(tile_shape)
if self.predictor == 'horizontal':
numpy.cumsum(tile, axis=-2, dtype=dtype, out=tile)
elif self.predictor == 'float':
raise NotImplementedError()
result[0, pl, td:td+tile_depth,
tl:tl+tile_length, tw:tw+tile_width, :] = tile
del tile
tw += tile_width
if tw >= shape[4]:
tw, tl = 0, tl + tile_length
if tl >= shape[3]:
tl, td = 0, td + tile_depth
if td >= shape[2]:
td, pl = 0, pl + 1
result = result[...,
:image_depth, :image_length, :image_width, :]
else:
strip_size = (self.rows_per_strip * self.image_width *
self.samples_per_pixel)
result = numpy.empty(shape, dtype).reshape(-1)
index = 0
for offset, bytecount in zip(offsets, byte_counts):
fh.seek(offset)
strip = fh.read(bytecount)
if lsb2msb:
strip = reverse_bitorder(strip)
strip = decompress(strip)
strip = unpack(strip)
size = min(result.size, strip.size, strip_size,
result.size - index)
result[index:index+size] = strip[:size]
del strip
index += size
result.shape = self._shape
if self.predictor and not (self.is_tiled and not self.is_contiguous):
if self.parent.is_lsm and not self.compression:
pass # work around bug in LSM510 software
elif self.predictor == 'horizontal':
numpy.cumsum(result, axis=-2, dtype=dtype, out=result)
elif self.predictor == 'float':
result = decode_floats(result)
if colormapped and self.is_indexed:
if self.color_map.shape[1] >= 2**bits_per_sample:
# FluoView and LSM might fail here
result = apply_colormap(result[:, 0:1, :, :, :, 0:1],
self.color_map)
elif rgbonly and self.is_rgb and 'extra_samples' in self.tags:
# return only RGB and first alpha channel if exists
extra_samples = self.extra_samples
if self.tags['extra_samples'].count == 1:
extra_samples = (extra_samples,)
for i, exs in enumerate(extra_samples):
if exs in ('unassalpha', 'assocalpha', 'unspecified'):
if self.is_contig:
result = result[..., [0, 1, 2, 3+i]]
else:
result = result[:, [0, 1, 2, 3+i]]
break
else:
if self.is_contig:
result = result[..., :3]
else:
result = result[:, :3]
if squeeze:
try:
result.shape = self.shape
except ValueError:
warnings.warn("failed to reshape from %s to %s" % (
str(result.shape), str(self.shape)))
if scale_mdgel and self.parent.is_mdgel:
# MD Gel stores private metadata in the second page
tags = self.parent.pages[1]
if tags.md_file_tag in (2, 128):
scale = tags.md_scale_pixel
scale = scale[0] / scale[1] # rational
result = result.astype('float32')
if tags.md_file_tag == 2:
                    result **= 2  # data was stored as square root
result *= scale
if closed:
# TODO: file should remain open if an exception occurred above
fh.close()
return result
@lazyattr
def _byte_counts_offsets(self):
"""Return simplified byte_counts and offsets."""
if 'tile_offsets' in self.tags:
byte_counts = self.tile_byte_counts
offsets = self.tile_offsets
else:
byte_counts = self.strip_byte_counts
offsets = self.strip_offsets
j = 0
for i, (b, o) in enumerate(zip(byte_counts, offsets)):
if b > 0 and o > 0:
if i > j:
byte_counts[j] = b
offsets[j] = o
j += 1
elif b > 0 and o <= 0:
raise ValueError("invalid offset")
else:
warnings.warn("empty byte count")
if j == 0:
j = 1
return byte_counts[:j], offsets[:j]
def _is_memmappable(self, rgbonly, colormapped):
"""Return if page's image data in file can be memory-mapped."""
return (self.parent.filehandle.is_file and
self.is_contiguous and
(self.bits_per_sample == 8 or self.parent._is_native) and
self.fill_order == 'msb2lsb' and
not self.predictor and
not self.is_chroma_subsampled and
not (rgbonly and 'extra_samples' in self.tags) and
not (colormapped and self.is_indexed))
@lazyattr
def is_contiguous(self):
"""Return offset and size of contiguous data, else None.
Excludes prediction, fill_order, and colormapping.
"""
if self.compression or self.bits_per_sample not in (8, 16, 32, 64):
return
if self.is_tiled:
if (self.image_width != self.tile_width or
self.image_length % self.tile_length or
self.tile_width % 16 or self.tile_length % 16):
return
if ('image_depth' in self.tags and 'tile_depth' in self.tags and
(self.image_length != self.tile_length or
self.image_depth % self.tile_depth)):
return
offsets = self.tile_offsets
byte_counts = self.tile_byte_counts
else:
offsets = self.strip_offsets
byte_counts = self.strip_byte_counts
if len(offsets) == 1:
return offsets[0], byte_counts[0]
if self.is_stk or all(offsets[i] + byte_counts[i] == offsets[i+1] or
byte_counts[i+1] == 0 # no data/ignore offset
for i in range(len(offsets)-1)):
return offsets[0], sum(byte_counts)
def __str__(self):
"""Return string containing information about page."""
s = ', '.join(s for s in (
' x '.join(str(i) for i in self.shape),
str(numpy.dtype(self.dtype)),
'%s bit' % str(self.bits_per_sample),
self.photometric if 'photometric' in self.tags else '',
self.compression if self.compression else 'raw',
'|'.join(t[3:] for t in (
'is_stk', 'is_lsm', 'is_nih', 'is_ome', 'is_imagej',
'is_micromanager', 'is_fluoview', 'is_mdgel', 'is_mediacy',
'is_scn', 'is_sgi', 'is_reduced', 'is_tiled',
'is_contiguous') if getattr(self, t))) if s)
return "Page %i: %s" % (self.index, s)
def __getattr__(self, name):
"""Return tag value."""
if name in self.tags:
value = self.tags[name].value
setattr(self, name, value)
return value
raise AttributeError(name)
@lazyattr
def uic_tags(self):
"""Consolidate UIC tags."""
if not self.is_stk:
raise AttributeError("uic_tags")
tags = self.tags
result = Record()
result.number_planes = tags['uic2tag'].count
if 'image_description' in tags:
result.plane_descriptions = self.image_description.split(b'\x00')
if 'uic1tag' in tags:
result.update(tags['uic1tag'].value)
if 'uic3tag' in tags:
result.update(tags['uic3tag'].value) # wavelengths
if 'uic4tag' in tags:
result.update(tags['uic4tag'].value) # override uic1 tags
uic2tag = tags['uic2tag'].value
result.z_distance = uic2tag.z_distance
result.time_created = uic2tag.time_created
result.time_modified = uic2tag.time_modified
try:
result.datetime_created = [
julian_datetime(*dt) for dt in
zip(uic2tag.date_created, uic2tag.time_created)]
result.datetime_modified = [
julian_datetime(*dt) for dt in
zip(uic2tag.date_modified, uic2tag.time_modified)]
except ValueError as e:
warnings.warn("uic_tags: %s" % e)
return result
@lazyattr
def imagej_tags(self):
"""Consolidate ImageJ metadata."""
if not self.is_imagej:
raise AttributeError("imagej_tags")
result = imagej_description_dict(self.is_imagej)
if 'imagej_metadata' in self.tags:
try:
result.update(imagej_metadata(
self.tags['imagej_metadata'].value,
self.tags['imagej_byte_counts'].value,
self.parent.byteorder))
except Exception as e:
warnings.warn(str(e))
return Record(result)
@lazyattr
def is_rgb(self):
"""Page contains a RGB image."""
return ('photometric' in self.tags and
self.tags['photometric'].value == 2)
@lazyattr
def is_contig(self):
"""Page contains contiguous image."""
if 'planar_configuration' in self.tags:
return self.tags['planar_configuration'].value == 1
return True
@lazyattr
def is_indexed(self):
"""Page contains indexed, palette-colored image.
Disable color-mapping for OME, LSM, STK, and ImageJ hyperstacks.
"""
if (self.is_stk or self.is_lsm or self.parent.is_lsm or
self.is_ome or self.parent.is_ome):
return False
if self.is_imagej:
if b'mode' in self.is_imagej:
return False
elif self.parent.is_imagej:
return self.parent.is_indexed
return ('photometric' in self.tags and
self.tags['photometric'].value == 3)
@lazyattr
def is_tiled(self):
"""Page contains tiled image."""
return 'tile_width' in self.tags
@lazyattr
def is_reduced(self):
"""Page is reduced image of another image."""
return ('new_subfile_type' in self.tags and
self.tags['new_subfile_type'].value & 1)
@lazyattr
def is_chroma_subsampled(self):
"""Page contains chroma subsampled image."""
return ('ycbcr_subsampling' in self.tags and
self.tags['ycbcr_subsampling'].value != (1, 1))
@lazyattr
def is_mdgel(self):
"""Page contains md_file_tag tag."""
return 'md_file_tag' in self.tags
@lazyattr
def is_mediacy(self):
"""Page contains Media Cybernetics Id tag."""
return ('mc_id' in self.tags and
self.tags['mc_id'].value.startswith(b'MC TIFF'))
@lazyattr
def is_stk(self):
"""Page contains UIC2Tag tag."""
return 'uic2tag' in self.tags
@lazyattr
def is_lsm(self):
"""Page contains LSM CZ_LSM_INFO tag."""
return 'cz_lsm_info' in self.tags
@lazyattr
def is_fluoview(self):
"""Page contains FluoView MM_STAMP tag."""
return 'mm_stamp' in self.tags
@lazyattr
def is_nih(self):
"""Page contains NIH image header."""
return 'nih_image_header' in self.tags
@lazyattr
def is_sgi(self):
"""Page contains SGI image and tile depth tags."""
return 'image_depth' in self.tags and 'tile_depth' in self.tags
@lazyattr
def is_vista(self):
"""Software tag is 'ISS Vista'."""
return ('software' in self.tags and
self.tags['software'].value == b'ISS Vista')
@lazyattr
def is_ome(self):
"""Page contains OME-XML in image_description tag."""
if 'image_description' not in self.tags:
return False
d = self.tags['image_description'].value.strip()
return d.startswith(b'<?xml version=') and d.endswith(b'</OME>')
@lazyattr
def is_scn(self):
"""Page contains Leica SCN XML in image_description tag."""
if 'image_description' not in self.tags:
return False
d = self.tags['image_description'].value.strip()
return d.startswith(b'<?xml version=') and d.endswith(b'</scn>')
@lazyattr
def is_shaped(self):
"""Return description containing shape if exists, else None."""
if 'image_description' in self.tags:
description = self.tags['image_description'].value
if b'"shape":' in description or b'shape=(' in description:
return description
if 'image_description_1' in self.tags:
description = self.tags['image_description_1'].value
if b'"shape":' in description or b'shape=(' in description:
return description
@lazyattr
def is_imagej(self):
"""Return ImageJ description if exists, else None."""
if 'image_description' in self.tags:
description = self.tags['image_description'].value
if description.startswith(b'ImageJ='):
return description
if 'image_description_1' in self.tags:
# Micromanager
description = self.tags['image_description_1'].value
if description.startswith(b'ImageJ='):
return description
@lazyattr
def is_micromanager(self):
"""Page contains Micro-Manager metadata."""
return 'micromanager_metadata' in self.tags
class TiffTag(object):
"""A TIFF tag structure.
Attributes
----------
name : string
Attribute name of tag.
code : int
Decimal code of tag.
dtype : str
Datatype of tag data. One of TIFF_DATA_TYPES.
count : int
Number of values.
value : various types
Tag data as Python object.
value_offset : int
Location of value in file, if any.
All attributes are read-only.
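    Examples
    --------
    A minimal sketch (tags are normally obtained from TiffPage.tags):
    >>> tag = TiffFile('temp.tif').pages[0].tags['image_width']  # doctest: +SKIP
    >>> tag.name, tag.code, tag.count, tag.value  # doctest: +SKIP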
"""
__slots__ = ('code', 'name', 'count', 'dtype', 'value', 'value_offset',
'_offset', '_value', '_type')
class Error(Exception):
pass
def __init__(self, arg, **kwargs):
"""Initialize instance from file or arguments."""
self._offset = None
if hasattr(arg, '_fh'):
self._fromfile(arg, **kwargs)
else:
self._fromdata(arg, **kwargs)
def _fromdata(self, code, dtype, count, value, name=None):
"""Initialize instance from arguments."""
self.code = int(code)
self.name = name if name else str(code)
self.dtype = TIFF_DATA_TYPES[dtype]
self.count = int(count)
self.value = value
self._value = value
self._type = dtype
def _fromfile(self, parent):
"""Read tag structure from open file. Advance file cursor."""
fh = parent.filehandle
byteorder = parent.byteorder
self._offset = fh.tell()
self.value_offset = self._offset + parent.offset_size + 4
fmt, size = {4: ('HHI4s', 12), 8: ('HHQ8s', 20)}[parent.offset_size]
data = fh.read(size)
code, dtype = struct.unpack(byteorder + fmt[:2], data[:4])
count, value = struct.unpack(byteorder + fmt[2:], data[4:])
self._value = value
self._type = dtype
if code in TIFF_TAGS:
            name, _, _, count_, _ = TIFF_TAGS[code]
            if count_ and count_ != count:
                count = count_
warnings.warn("incorrect count for tag '%s'" % name)
elif code in CUSTOM_TAGS:
name = CUSTOM_TAGS[code][0]
else:
name = str(code)
try:
dtype = TIFF_DATA_TYPES[self._type]
except KeyError:
raise TiffTag.Error("unknown tag data type %i" % self._type)
fmt = '%s%i%s' % (byteorder, count*int(dtype[0]), dtype[1])
size = struct.calcsize(fmt)
if size > parent.offset_size or code in CUSTOM_TAGS:
pos = fh.tell()
tof = {4: 'I', 8: 'Q'}[parent.offset_size]
self.value_offset = offset = struct.unpack(byteorder+tof, value)[0]
if offset < 0 or offset > parent.filehandle.size:
raise TiffTag.Error("corrupt file - invalid tag value offset")
elif offset < 4:
raise TiffTag.Error("corrupt value offset for tag %i" % code)
fh.seek(offset)
if code in CUSTOM_TAGS:
readfunc = CUSTOM_TAGS[code][1]
value = readfunc(fh, byteorder, dtype, count)
if isinstance(value, dict): # numpy.core.records.record
value = Record(value)
elif code in TIFF_TAGS or dtype[-1] == 's':
value = struct.unpack(fmt, fh.read(size))
else:
value = read_numpy(fh, byteorder, dtype, count)
fh.seek(pos)
else:
value = struct.unpack(fmt, value[:size])
if code not in CUSTOM_TAGS and code not in (
273, 279, 324, 325, 530, 531):
# scalar value if not strip/tile offsets/byte_counts or subsampling
if len(value) == 1:
value = value[0]
if (dtype.endswith('s') and isinstance(value, bytes) and
self._type != 7):
# TIFF ASCII fields can contain multiple strings,
# each terminated with a NUL
value = stripascii(value)
self.code = code
self.name = name
self.dtype = dtype
self.count = count
self.value = value
def _fix_lsm_bitspersample(self, parent):
"""Correct LSM bitspersample tag.
Old LSM writers may use a separate region for two 16-bit values,
although they fit into the tag value element of the tag.
"""
if self.code == 258 and self.count == 2:
# TODO: test this case; need example file
warnings.warn("correcting LSM bitspersample tag")
fh = parent.filehandle
tof = {4: '<I', 8: '<Q'}[parent.offset_size]
self.value_offset = struct.unpack(tof, self._value)[0]
fh.seek(self.value_offset)
self.value = struct.unpack("<HH", fh.read(4))
def as_str(self):
"""Return value as human readable string."""
return ((str(self.value).split('\n', 1)[0]) if (self._type != 7)
else '<undefined>')
def __str__(self):
"""Return string containing information about tag."""
return ' '.join(str(getattr(self, s)) for s in self.__slots__)
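# A hedged usage sketch (illustrative only, not part of the original module;
# 'image.tif' is an assumed file name): list the tags of the first page.
def _example_print_tags(filename='image.tif'):
    """Print name, dtype and value of each tag of the first page (sketch)."""
    with TiffFile(filename) as tif:
        for tag in tif.pages[0].tags.values():
            print(tag.name, tag.dtype, tag.as_str())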
class TiffPageSeries(object):
"""Series of TIFF pages with compatible shape and data type.
Attributes
----------
pages : list of TiffPage
Sequence of TiffPages in series.
dtype : numpy.dtype or str
Data type of the image array in series.
shape : tuple
Dimensions of the image array in series.
axes : str
Labels of axes in shape. See TiffPage.axes.
offset : int or None
Position of image data in file if memory-mappable, else None.
"""
#__slots__ = 'pages', 'shape', 'dtype', 'axes', 'parent'
def __init__(self, pages, shape, dtype, axes, parent=None):
# TODO? sort pages by page number?
self.pages = pages
self.shape = tuple(shape)
self.axes = ''.join(axes)
self.dtype = numpy.dtype(dtype)
if parent:
self.parent = parent
elif len(pages):
self.parent = pages[0].parent
else:
self.parent = None
def asarray(self, memmap=False):
"""Return image data from series of TIFF pages as numpy array.
Parameters
----------
memmap : bool
If True, return an array stored in a binary file on disk
if possible.
"""
if self.parent:
return self.parent.asarray(series=self, memmap=memmap)
@lazyattr
def offset(self):
"""Return offset to memory-mappable data in page series."""
if len(self.pages) == 0:
return
rgbonly = False
colormapped = self.pages[0].is_indexed
pos = 0
for page in self.pages:
if page is None:
return
if not page._is_memmappable(rgbonly, colormapped):
return
if not pos:
pos = page.is_contiguous[0] + page.is_contiguous[1]
continue
if pos != page.is_contiguous[0]:
return
pos += page.is_contiguous[1]
offset = self.pages[0].is_contiguous[0]
if (pos != offset + product(self.shape) * self.dtype.itemsize and
not self.pages[0].is_imagej):
return
return offset
def __len__(self):
"""Return number of TiffPages in series."""
return len(self.pages)
def __getitem__(self, key):
"""Return specified TiffPage."""
return self.pages[key]
def __iter__(self):
"""Return iterator over TiffPages in series."""
return iter(self.pages)
def __str__(self):
"""Return string with information about series."""
return "\n".join("* %s: %s" % kv for kv in (
("pages", len(self.pages)),
("dtype", str(self.dtype)),
("shape", str(self.shape)),
("axes", self.axes),
("offset", self.offset)
))
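# A hedged usage sketch (illustrative; 'stack.tif' is an assumed file name):
# access the first page series of a file and load its image data.
def _example_read_series(filename='stack.tif'):
    """Return shape, axes and dtype of the first series (sketch)."""
    with TiffFile(filename) as tif:
        series = tif.series[0]
        data = series.asarray()
    return series.shape, series.axes, data.dtype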
class TiffSequence(object):
"""Sequence of image files.
The data shape and dtype of all files must match.
Attributes
----------
files : list
List of file names.
shape : tuple
Shape of image sequence.
axes : str
Labels of axes in shape.
Examples
--------
>>> tifs = TiffSequence("test.oif.files/*.tif") # doctest: +SKIP
>>> tifs.shape, tifs.axes # doctest: +SKIP
((2, 100), 'CT')
>>> data = tifs.asarray() # doctest: +SKIP
>>> data.shape # doctest: +SKIP
(2, 100, 256, 256)
"""
_patterns = {
'axes': r"""
# matches Olympus OIF and Leica TIFF series
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
"""}
class ParseError(Exception):
pass
def __init__(self, files, imread=TiffFile, pattern='axes',
*args, **kwargs):
"""Initialize instance from multiple files.
Parameters
----------
files : str, or sequence of str
Glob pattern or sequence of file names.
Binary streams are not supported.
imread : function or class
Image read function or class with asarray function returning numpy
array from single file.
pattern : str
Regular expression pattern that matches axes names and sequence
indices in file names.
By default this matches Olympus OIF and Leica TIFF series.
"""
if isinstance(files, basestring):
files = natural_sorted(glob.glob(files))
files = list(files)
if not files:
raise ValueError("no files found")
if not isinstance(files[0], basestring):
raise ValueError("not a file name")
#if not os.path.isfile(files[0]):
# raise ValueError("file not found")
self.files = files
if hasattr(imread, 'asarray'):
# redefine imread
_imread = imread
def imread(fname, *args, **kwargs):
with _imread(fname) as im:
return im.asarray(*args, **kwargs)
self.imread = imread
self.pattern = self._patterns.get(pattern, pattern)
try:
self._parse()
if not self.axes:
self.axes = 'I'
except self.ParseError:
self.axes = 'I'
self.shape = (len(files),)
self._start_index = (0,)
self._indices = tuple((i,) for i in range(len(files)))
def __str__(self):
"""Return string with information about image sequence."""
return "\n".join([
self.files[0],
'* files: %i' % len(self.files),
'* axes: %s' % self.axes,
'* shape: %s' % str(self.shape)])
def __len__(self):
return len(self.files)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
pass
def asarray(self, memmap=False, tempdir=None, *args, **kwargs):
"""Read image data from all files and return as single numpy array.
If memmap is True, return an array stored in a binary file on disk.
The args and kwargs parameters are passed to the imread function.
Raise IndexError or ValueError if image shapes do not match.
"""
im = self.imread(self.files[0], *args, **kwargs)
shape = self.shape + im.shape
if memmap:
with tempfile.NamedTemporaryFile(dir=tempdir) as fh:
result = numpy.memmap(fh, dtype=im.dtype, shape=shape)
else:
result = numpy.zeros(shape, dtype=im.dtype)
result = result.reshape(-1, *im.shape)
for index, fname in zip(self._indices, self.files):
index = [i-j for i, j in zip(index, self._start_index)]
index = numpy.ravel_multi_index(index, self.shape)
im = self.imread(fname, *args, **kwargs)
result[index] = im
result.shape = shape
return result
def _parse(self):
"""Get axes and shape from file names."""
if not self.pattern:
raise self.ParseError("invalid pattern")
pattern = re.compile(self.pattern, re.IGNORECASE | re.VERBOSE)
matches = pattern.findall(self.files[0])
if not matches:
raise self.ParseError("pattern does not match file names")
matches = matches[-1]
if len(matches) % 2:
raise self.ParseError("pattern does not match axis name and index")
axes = ''.join(m for m in matches[::2] if m)
if not axes:
raise self.ParseError("pattern does not match file names")
indices = []
for fname in self.files:
matches = pattern.findall(fname)[-1]
if axes != ''.join(m for m in matches[::2] if m):
raise ValueError("axes do not match within the image sequence")
indices.append([int(m) for m in matches[1::2] if m])
shape = tuple(numpy.max(indices, axis=0))
start_index = tuple(numpy.min(indices, axis=0))
shape = tuple(i-j+1 for i, j in zip(shape, start_index))
if product(shape) != len(self.files):
warnings.warn("files are missing. Missing data are zeroed")
self.axes = axes.upper()
self.shape = shape
self._indices = indices
self._start_index = start_index
class Record(dict):
"""Dictionary with attribute access.
Can also be initialized with numpy.core.records.record.
"""
__slots__ = ()
def __init__(self, arg=None, **kwargs):
if kwargs:
arg = kwargs
elif arg is None:
arg = {}
try:
dict.__init__(self, arg)
except (TypeError, ValueError):
for i, name in enumerate(arg.dtype.names):
v = arg[i]
self[name] = v if v.dtype.char != 'S' else stripnull(v)
def __getattr__(self, name):
return self[name]
def __setattr__(self, name, value):
self.__setitem__(name, value)
def __str__(self):
"""Pretty print Record."""
s = []
lists = []
for k in sorted(self):
try:
if k.startswith('_'): # does not work with byte
continue
except AttributeError:
pass
v = self[k]
if isinstance(v, (list, tuple)) and len(v):
if isinstance(v[0], Record):
lists.append((k, v))
continue
elif isinstance(v[0], TiffPage):
v = [i.index for i in v if i]
s.append(
("* %s: %s" % (k, str(v))).split("\n", 1)[0]
[:PRINT_LINE_LEN].rstrip())
for k, v in lists:
l = []
for i, w in enumerate(v):
l.append("* %s[%i]\n %s" % (k, i,
str(w).replace("\n", "\n ")))
s.append('\n'.join(l))
return '\n'.join(s)
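# A short illustrative sketch: Record exposes dictionary entries as attributes.
def _example_record():
    """Show attribute and item access on a Record (sketch)."""
    r = Record(width=256, height=192)
    return r.width, r['height']  # -> (256, 192)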
class TiffTags(Record):
"""Dictionary of TiffTag with attribute access."""
def __str__(self):
"""Return string with information about all tags."""
s = []
for tag in sorted(self.values(), key=lambda x: x.code):
typecode = "%i%s" % (tag.count * int(tag.dtype[0]), tag.dtype[1])
line = "* %i %s (%s) %s" % (
tag.code, tag.name, typecode, tag.as_str())
s.append(line[:PRINT_LINE_LEN].lstrip())
return '\n'.join(s)
class FileHandle(object):
"""Binary file handle.
A limited, special purpose file handler that:
* handles embedded files (for CZI within CZI files)
    * allows re-opening closed files (for multi-file formats such as OME-TIFF)
    * reads and writes numpy arrays and records from file-like objects
    Only 'rb' and 'wb' modes are supported. Concurrent reading and writing
    of the same stream is untested.
When initialized from another file handle, do not use it unless this
FileHandle is closed.
Attributes
----------
name : str
Name of the file.
path : str
Absolute path to file.
size : int
Size of file in bytes.
is_file : bool
        If True, file has a fileno and can be memory-mapped.
All attributes are read-only.
"""
__slots__ = ('_fh', '_file', '_mode', '_name', '_dir',
'_offset', '_size', '_close', 'is_file')
def __init__(self, file, mode='rb', name=None, offset=None, size=None):
"""Initialize file handle from file name or another file handle.
Parameters
----------
file : str, binary stream, or FileHandle
            File name or seekable binary stream, such as an open file
or BytesIO.
mode : str
File open mode in case 'file' is a file name. Must be 'rb' or 'wb'.
name : str
Optional name of file in case 'file' is a binary stream.
offset : int
Optional start position of embedded file. By default this is
the current file position.
size : int
Optional size of embedded file. By default this is the number
of bytes from the 'offset' to the end of the file.
"""
self._fh = None
self._file = file
self._mode = mode
self._name = name
self._dir = ''
self._offset = offset
self._size = size
self._close = True
self.is_file = False
self.open()
def open(self):
"""Open or re-open file."""
if self._fh:
return # file is open
if isinstance(self._file, basestring):
# file name
self._file = os.path.abspath(self._file)
self._dir, self._name = os.path.split(self._file)
self._fh = open(self._file, self._mode)
self._close = True
if self._offset is None:
self._offset = 0
elif isinstance(self._file, FileHandle):
# FileHandle
self._fh = self._file._fh
if self._offset is None:
self._offset = 0
self._offset += self._file._offset
self._close = False
if not self._name:
if self._offset:
name, ext = os.path.splitext(self._file._name)
self._name = "%s@%i%s" % (name, self._offset, ext)
else:
self._name = self._file._name
if self._mode and self._mode != self._file._mode:
raise ValueError('FileHandle has wrong mode')
self._mode = self._file._mode
self._dir = self._file._dir
elif hasattr(self._file, 'seek'):
# binary stream: open file, BytesIO
try:
self._file.tell()
except Exception:
raise ValueError("binary stream is not seekable")
self._fh = self._file
if self._offset is None:
self._offset = self._file.tell()
self._close = False
if not self._name:
try:
self._dir, self._name = os.path.split(self._fh.name)
except AttributeError:
self._name = "Unnamed binary stream"
try:
self._mode = self._fh.mode
except AttributeError:
pass
else:
raise ValueError("The first parameter must be a file name, "
"seekable binary stream, or FileHandle")
# if self._mode not in (None, 'rb', 'wb', 'r+b', 'rb+', 'w+b', 'wb+'):
# raise ValueError('file mode not supported: %s' % self._mode)
if self._offset:
self._fh.seek(self._offset)
if self._size is None:
pos = self._fh.tell()
self._fh.seek(self._offset, 2)
self._size = self._fh.tell()
self._fh.seek(pos)
try:
self._fh.fileno()
self.is_file = True
except Exception:
self.is_file = False
def read(self, size=-1):
"""Read 'size' bytes from file, or until EOF is reached."""
if size < 0 and self._offset:
size = self._size
return self._fh.read(size)
def write(self, bytestring):
"""Write bytestring to file."""
return self._fh.write(bytestring)
def flush(self):
"""Flush write buffers if applicable."""
return self._fh.flush()
def memmap_array(self, dtype, shape, offset=0, mode='r', order='C'):
"""Return numpy.memmap of data stored in file."""
if not self.is_file:
raise ValueError("Can not memory-map file without fileno.")
return numpy.memmap(self._fh, dtype=dtype, mode=mode,
offset=self._offset + offset,
shape=shape, order=order)
def read_array(self, dtype, count=-1, sep=""):
"""Return numpy array from file.
Work around numpy issue #2230, "numpy.fromfile does not accept
StringIO object" https://github.com/numpy/numpy/issues/2230.
"""
try:
return numpy.fromfile(self._fh, dtype, count, sep)
except IOError:
if count < 0:
size = self._size
else:
size = count * numpy.dtype(dtype).itemsize
data = self._fh.read(size)
return numpy.fromstring(data, dtype, count, sep)
def read_record(self, dtype, shape=1, byteorder=None):
"""Return numpy record from file."""
try:
rec = numpy.rec.fromfile(self._fh, dtype, shape,
byteorder=byteorder)
except Exception:
dtype = numpy.dtype(dtype)
if shape is None:
shape = self._size // dtype.itemsize
size = product(sequence(shape)) * dtype.itemsize
data = self._fh.read(size)
return numpy.rec.fromstring(data, dtype, shape,
byteorder=byteorder)
return rec[0] if shape == 1 else rec
def write_array(self, data):
"""Write numpy array to binary file."""
try:
data.tofile(self._fh)
except Exception:
# BytesIO
self._fh.write(data.tostring())
def tell(self):
"""Return file's current position."""
return self._fh.tell() - self._offset
def seek(self, offset, whence=0):
"""Set file's current position."""
if self._offset:
if whence == 0:
self._fh.seek(self._offset + offset, whence)
return
elif whence == 2 and self._size > 0:
self._fh.seek(self._offset + self._size + offset, 0)
return
self._fh.seek(offset, whence)
def close(self):
"""Close file."""
if self._close and self._fh:
self._fh.close()
self._fh = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def __getattr__(self, name):
"""Return attribute from underlying file object."""
if self._offset:
warnings.warn(
"FileHandle: '%s' not implemented for embedded files" % name)
return getattr(self._fh, name)
@property
def name(self):
return self._name
@property
def dirname(self):
return self._dir
@property
def path(self):
return os.path.join(self._dir, self._name)
@property
def size(self):
return self._size
@property
def closed(self):
return self._fh is None
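# A hedged usage sketch (illustrative only): wrap an in-memory stream in
# FileHandle and read a little-endian uint16 array from it.
def _example_filehandle():
    """Read four uint16 values from a BytesIO stream via FileHandle (sketch)."""
    from io import BytesIO
    stream = BytesIO(struct.pack('<4H', 1, 2, 3, 4))
    with FileHandle(stream, name='demo.bin') as fh:
        values = fh.read_array('<u2', count=4)
    return values  # -> array([1, 2, 3, 4], dtype=uint16)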
def read_bytes(fh, byteorder, dtype, count):
"""Read tag data from file and return as byte string."""
dtype = 'b' if dtype[-1] == 's' else byteorder+dtype[-1]
return fh.read_array(dtype, count).tostring()
def read_numpy(fh, byteorder, dtype, count):
"""Read tag data from file and return as numpy array."""
dtype = 'b' if dtype[-1] == 's' else byteorder+dtype[-1]
return fh.read_array(dtype, count)
def read_json(fh, byteorder, dtype, count):
"""Read JSON tag data from file and return as object."""
data = fh.read(count)
try:
return json.loads(unicode(stripnull(data), 'utf-8'))
except ValueError:
warnings.warn("invalid JSON '%s'" % data)
def read_mm_header(fh, byteorder, dtype, count):
"""Read MM_HEADER tag from file and return as numpy.rec.array."""
return fh.read_record(MM_HEADER, byteorder=byteorder)
def read_mm_stamp(fh, byteorder, dtype, count):
"""Read MM_STAMP tag from file and return as numpy.ndarray."""
return fh.read_array(byteorder+'f8', 8)
def read_uic1tag(fh, byteorder, dtype, count, plane_count=None):
"""Read MetaMorph STK UIC1Tag from file and return as dictionary.
Return empty dictionary if plane_count is unknown.
"""
assert dtype in ('2I', '1I') and byteorder == '<'
result = {}
if dtype == '2I':
# pre MetaMorph 2.5 (not tested)
values = fh.read_array('<u4', 2*count).reshape(count, 2)
result = {'z_distance': values[:, 0] / values[:, 1]}
elif plane_count:
for _ in range(count):
tagid = struct.unpack('<I', fh.read(4))[0]
if tagid in (28, 29, 37, 40, 41):
# silently skip unexpected tags
fh.read(4)
continue
name, value = read_uic_tag(fh, tagid, plane_count, offset=True)
result[name] = value
return result
def read_uic2tag(fh, byteorder, dtype, plane_count):
"""Read MetaMorph STK UIC2Tag from file and return as dictionary."""
assert dtype == '2I' and byteorder == '<'
values = fh.read_array('<u4', 6*plane_count).reshape(plane_count, 6)
return {
'z_distance': values[:, 0] / values[:, 1],
'date_created': values[:, 2], # julian days
'time_created': values[:, 3], # milliseconds
'date_modified': values[:, 4], # julian days
'time_modified': values[:, 5], # milliseconds
}
def read_uic3tag(fh, byteorder, dtype, plane_count):
"""Read MetaMorph STK UIC3Tag from file and return as dictionary."""
assert dtype == '2I' and byteorder == '<'
values = fh.read_array('<u4', 2*plane_count).reshape(plane_count, 2)
return {'wavelengths': values[:, 0] / values[:, 1]}
def read_uic4tag(fh, byteorder, dtype, plane_count):
"""Read MetaMorph STK UIC4Tag from file and return as dictionary."""
assert dtype == '1I' and byteorder == '<'
result = {}
while True:
tagid = struct.unpack('<H', fh.read(2))[0]
if tagid == 0:
break
name, value = read_uic_tag(fh, tagid, plane_count, offset=False)
result[name] = value
return result
def read_uic_tag(fh, tagid, plane_count, offset):
"""Read a single UIC tag value from file and return tag name and value.
UIC1Tags use an offset.
"""
def read_int(count=1):
value = struct.unpack('<%iI' % count, fh.read(4*count))
return value[0] if count == 1 else value
try:
name, dtype = UIC_TAGS[tagid]
except KeyError:
# unknown tag
return '_tagid_%i' % tagid, read_int()
if offset:
pos = fh.tell()
if dtype not in (int, None):
off = read_int()
if off < 8:
warnings.warn("invalid offset for uic tag '%s': %i"
% (name, off))
return name, off
fh.seek(off)
if dtype is None:
# skip
name = '_' + name
value = read_int()
elif dtype is int:
# int
value = read_int()
elif dtype is Fraction:
# fraction
value = read_int(2)
value = value[0] / value[1]
elif dtype is julian_datetime:
# datetime
value = julian_datetime(*read_int(2))
elif dtype is read_uic_image_property:
# ImagePropertyEx
value = read_uic_image_property(fh)
elif dtype is str:
# pascal string
size = read_int()
if 0 <= size < 2**10:
value = struct.unpack('%is' % size, fh.read(size))[0][:-1]
value = stripnull(value)
elif offset:
value = ''
warnings.warn("corrupt string in uic tag '%s'" % name)
else:
raise ValueError("invalid string size %i" % size)
elif dtype == '%ip':
# sequence of pascal strings
value = []
for _ in range(plane_count):
size = read_int()
if 0 <= size < 2**10:
string = struct.unpack('%is' % size, fh.read(size))[0][:-1]
string = stripnull(string)
value.append(string)
elif offset:
warnings.warn("corrupt string in uic tag '%s'" % name)
else:
raise ValueError("invalid string size %i" % size)
else:
# struct or numpy type
dtype = '<' + dtype
if '%i' in dtype:
dtype = dtype % plane_count
if '(' in dtype:
# numpy type
value = fh.read_array(dtype, 1)[0]
if value.shape[-1] == 2:
# assume fractions
value = value[..., 0] / value[..., 1]
else:
# struct format
value = struct.unpack(dtype, fh.read(struct.calcsize(dtype)))
if len(value) == 1:
value = value[0]
if offset:
fh.seek(pos + 4)
return name, value
def read_uic_image_property(fh):
"""Read UIC ImagePropertyEx tag from file and return as dict."""
# TODO: test this
size = struct.unpack('B', fh.read(1))[0]
name = struct.unpack('%is' % size, fh.read(size))[0][:-1]
flags, prop = struct.unpack('<IB', fh.read(5))
if prop == 1:
value = struct.unpack('II', fh.read(8))
value = value[0] / value[1]
else:
size = struct.unpack('B', fh.read(1))[0]
value = struct.unpack('%is' % size, fh.read(size))[0]
return dict(name=name, flags=flags, value=value)
def read_cz_lsm_info(fh, byteorder, dtype, count):
"""Read CS_LSM_INFO tag from file and return as numpy.rec.array."""
assert byteorder == '<'
magic_number, structure_size = struct.unpack('<II', fh.read(8))
if magic_number not in (50350412, 67127628):
raise ValueError("not a valid CS_LSM_INFO structure")
fh.seek(-8, 1)
if structure_size < numpy.dtype(CZ_LSM_INFO).itemsize:
# adjust structure according to structure_size
cz_lsm_info = []
size = 0
for name, dtype in CZ_LSM_INFO:
size += numpy.dtype(dtype).itemsize
if size > structure_size:
break
cz_lsm_info.append((name, dtype))
else:
cz_lsm_info = CZ_LSM_INFO
return fh.read_record(cz_lsm_info, byteorder=byteorder)
def read_cz_lsm_floatpairs(fh):
"""Read LSM sequence of float pairs from file and return as list."""
size = struct.unpack('<i', fh.read(4))[0]
return fh.read_array('<2f8', count=size)
def read_cz_lsm_positions(fh):
"""Read LSM positions from file and return as list."""
size = struct.unpack('<I', fh.read(4))[0]
return fh.read_array('<2f8', count=size)
def read_cz_lsm_time_stamps(fh):
"""Read LSM time stamps from file and return as list."""
size, count = struct.unpack('<ii', fh.read(8))
if size != (8 + 8 * count):
raise ValueError("lsm_time_stamps block is too short")
# return struct.unpack('<%dd' % count, fh.read(8*count))
return fh.read_array('<f8', count=count)
def read_cz_lsm_event_list(fh):
"""Read LSM events from file and return as list of (time, type, text)."""
count = struct.unpack('<II', fh.read(8))[1]
events = []
while count > 0:
esize, etime, etype = struct.unpack('<IdI', fh.read(16))
etext = stripnull(fh.read(esize - 16))
events.append((etime, etype, etext))
count -= 1
return events
def read_cz_lsm_scan_info(fh):
"""Read LSM scan information from file and return as Record."""
block = Record()
blocks = [block]
unpack = struct.unpack
if 0x10000000 != struct.unpack('<I', fh.read(4))[0]:
# not a Recording sub block
raise ValueError("not a lsm_scan_info structure")
fh.read(8)
while True:
entry, dtype, size = unpack('<III', fh.read(12))
if dtype == 2:
# ascii
value = stripnull(fh.read(size))
elif dtype == 4:
# long
value = unpack('<i', fh.read(4))[0]
elif dtype == 5:
# rational
value = unpack('<d', fh.read(8))[0]
else:
value = 0
if entry in CZ_LSM_SCAN_INFO_ARRAYS:
blocks.append(block)
name = CZ_LSM_SCAN_INFO_ARRAYS[entry]
newobj = []
setattr(block, name, newobj)
block = newobj
elif entry in CZ_LSM_SCAN_INFO_STRUCTS:
blocks.append(block)
newobj = Record()
block.append(newobj)
block = newobj
elif entry in CZ_LSM_SCAN_INFO_ATTRIBUTES:
name = CZ_LSM_SCAN_INFO_ATTRIBUTES[entry]
setattr(block, name, value)
elif entry == 0xffffffff:
# end sub block
block = blocks.pop()
else:
# unknown entry
setattr(block, "entry_0x%x" % entry, value)
if not blocks:
break
return block
def read_nih_image_header(fh, byteorder, dtype, count):
"""Read NIH_IMAGE_HEADER tag from file and return as numpy.rec.array."""
a = fh.read_record(NIH_IMAGE_HEADER, byteorder=byteorder)
a = a.newbyteorder(byteorder)
a.xunit = a.xunit[:a._xunit_len]
a.um = a.um[:a._um_len]
return a
def read_micromanager_metadata(fh):
"""Read MicroManager non-TIFF settings from open file and return as dict.
The settings can be used to read image data without parsing the TIFF file.
Raise ValueError if file does not contain valid MicroManager metadata.
"""
fh.seek(0)
try:
byteorder = {b'II': '<', b'MM': '>'}[fh.read(2)]
    except (KeyError, IndexError):
raise ValueError("not a MicroManager TIFF file")
results = {}
fh.seek(8)
(index_header, index_offset, display_header, display_offset,
comments_header, comments_offset, summary_header, summary_length
) = struct.unpack(byteorder + "IIIIIIII", fh.read(32))
if summary_header != 2355492:
raise ValueError("invalid MicroManager summary_header")
results['summary'] = read_json(fh, byteorder, None, summary_length)
if index_header != 54773648:
raise ValueError("invalid MicroManager index_header")
fh.seek(index_offset)
header, count = struct.unpack(byteorder + "II", fh.read(8))
if header != 3453623:
raise ValueError("invalid MicroManager index_header")
data = struct.unpack(byteorder + "IIIII"*count, fh.read(20*count))
results['index_map'] = {
'channel': data[::5], 'slice': data[1::5], 'frame': data[2::5],
'position': data[3::5], 'offset': data[4::5]}
if display_header != 483765892:
raise ValueError("invalid MicroManager display_header")
fh.seek(display_offset)
header, count = struct.unpack(byteorder + "II", fh.read(8))
if header != 347834724:
raise ValueError("invalid MicroManager display_header")
results['display_settings'] = read_json(fh, byteorder, None, count)
if comments_header != 99384722:
raise ValueError("invalid MicroManager comments_header")
fh.seek(comments_offset)
header, count = struct.unpack(byteorder + "II", fh.read(8))
if header != 84720485:
raise ValueError("invalid MicroManager comments_header")
results['comments'] = read_json(fh, byteorder, None, count)
return results
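# A hedged usage sketch ('mm_stack.ome.tif' is an assumed file name for an
# OME-TIFF written by Micro-Manager):
def _example_micromanager(filename='mm_stack.ome.tif'):
    """Return the top-level MicroManager metadata keys (sketch)."""
    with FileHandle(filename) as fh:
        meta = read_micromanager_metadata(fh)
    return sorted(meta)  # ['comments', 'display_settings', 'index_map', 'summary']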
def imagej_metadata(data, bytecounts, byteorder):
"""Return dictionary from ImageJ metadata tag value."""
_str = str if sys.version_info[0] < 3 else lambda x: str(x, 'cp1252')
def read_string(data, byteorder):
return _str(stripnull(data[0 if byteorder == '<' else 1::2]))
def read_double(data, byteorder):
return struct.unpack(byteorder+('d' * (len(data) // 8)), data)
def read_bytes(data, byteorder):
#return struct.unpack('b' * len(data), data)
return numpy.fromstring(data, 'uint8')
metadata_types = { # big endian
b'info': ('info', read_string),
b'labl': ('labels', read_string),
b'rang': ('ranges', read_double),
b'luts': ('luts', read_bytes),
b'roi ': ('roi', read_bytes),
b'over': ('overlays', read_bytes)}
metadata_types.update( # little endian
dict((k[::-1], v) for k, v in metadata_types.items()))
if not bytecounts:
raise ValueError("no ImageJ metadata")
    if data[:4] not in (b'IJIJ', b'JIJI'):
raise ValueError("invalid ImageJ metadata")
header_size = bytecounts[0]
if header_size < 12 or header_size > 804:
raise ValueError("invalid ImageJ metadata header size")
ntypes = (header_size - 4) // 8
header = struct.unpack(byteorder+'4sI'*ntypes, data[4:4+ntypes*8])
pos = 4 + ntypes * 8
counter = 0
result = {}
for mtype, count in zip(header[::2], header[1::2]):
values = []
name, func = metadata_types.get(mtype, (_str(mtype), read_bytes))
for _ in range(count):
counter += 1
pos1 = pos + bytecounts[counter]
values.append(func(data[pos:pos1], byteorder))
pos = pos1
result[name.strip()] = values[0] if count == 1 else values
return result
def imagej_description_dict(description):
"""Return dictionary from ImageJ image description byte string.
Raise ValueError if not a valid ImageJ description.
>>> description = b'ImageJ=1.11a\\nimages=510\\nhyperstack=true\\n'
>>> imagej_description_dict(description) # doctest: +SKIP
{'ImageJ': '1.11a', 'images': 510, 'hyperstack': True}
"""
def _bool(val):
return {b'true': True, b'false': False}[val.lower()]
_str = str if sys.version_info[0] < 3 else lambda x: str(x, 'cp1252')
result = {}
for line in description.splitlines():
try:
key, val = line.split(b'=')
except Exception:
continue
key = key.strip()
val = val.strip()
for dtype in (int, float, _bool, _str):
try:
val = dtype(val)
break
except Exception:
pass
result[_str(key)] = val
if 'ImageJ' not in result:
raise ValueError("not a ImageJ image description")
return result
def imagej_description(shape, rgb=None, colormaped=False, version='1.11a',
hyperstack=None, mode=None, loop=None, **kwargs):
"""Return ImageJ image decription from data shape as byte string.
ImageJ can handle up to 6 dimensions in order TZCYXS.
>>> imagej_description((51, 5, 2, 196, 171)) # doctest: +SKIP
ImageJ=1.11a
images=510
channels=2
slices=5
frames=51
hyperstack=true
mode=grayscale
loop=false
"""
if colormaped:
raise NotImplementedError("ImageJ colormapping not supported")
shape = imagej_shape(shape, rgb=rgb)
rgb = shape[-1] in (3, 4)
result = ['ImageJ=%s' % version]
append = []
result.append('images=%i' % product(shape[:-3]))
if hyperstack is None:
#if product(shape[:-3]) > 1:
hyperstack = True
append.append('hyperstack=true')
else:
append.append('hyperstack=%s' % bool(hyperstack))
if shape[2] > 1:
result.append('channels=%i' % shape[2])
if mode is None and not rgb:
mode = 'grayscale'
if hyperstack and mode:
append.append('mode=%s' % mode)
if shape[1] > 1:
result.append('slices=%i' % shape[1])
if shape[0] > 1:
result.append("frames=%i" % shape[0])
if loop is None:
append.append('loop=false')
if loop is not None:
append.append('loop=%s' % bool(loop))
for key, value in kwargs.items():
append.append('%s=%s' % (key.lower(), value))
return str2bytes('\n'.join(result + append + ['']))
def imagej_shape(shape, rgb=None):
"""Return shape normalized to 6D ImageJ hyperstack TZCYXS.
Raise ValueError if not a valid ImageJ hyperstack shape.
>>> imagej_shape((2, 3, 4, 5, 3), False)
(2, 3, 4, 5, 3, 1)
"""
shape = tuple(int(i) for i in shape)
ndim = len(shape)
    if not 2 <= ndim <= 6:
        raise ValueError("invalid ImageJ hyperstack: not 2 to 6 dimensional")
if rgb is None:
rgb = shape[-1] in (3, 4) and ndim > 2
if rgb and shape[-1] not in (3, 4):
raise ValueError("invalid ImageJ hyperstack: not a RGB image")
if not rgb and ndim == 6 and shape[-1] != 1:
raise ValueError("invalid ImageJ hyperstack: not a non-RGB image")
if rgb or shape[-1] == 1:
return (1, ) * (6 - ndim) + shape
else:
return (1, ) * (5 - ndim) + shape + (1,)
def image_description_dict(description):
"""Return dictionary from image description byte string.
    Raise ValueError if description is of unknown format.
>>> image_description_dict(b'shape=(256, 256, 3)')
{'shape': (256, 256, 3)}
>>> description = b'{"shape": [256, 256, 3], "axes": "YXS"}'
>>> image_description_dict(description) # doctest: +SKIP
{'shape': [256, 256, 3], 'axes': 'YXS'}
"""
if description.startswith(b'shape='):
# old style 'shaped' description
shape = tuple(int(i) for i in description[7:-1].split(b','))
return dict(shape=shape)
if description.startswith(b'{') and description.endswith(b'}'):
# JSON description
return json.loads(description.decode('utf-8'))
raise ValueError("unknown image description")
def image_description(shape, colormaped=False, **metadata):
"""Return image description from data shape and meta data.
Return UTF-8 encoded JSON.
>>> image_description((256, 256, 3), axes='YXS') # doctest: +SKIP
b'{"shape": [256, 256, 3], "axes": "YXS"}'
"""
if colormaped:
shape = shape + (3,)
metadata.update({'shape': shape})
return json.dumps(metadata).encode('utf-8')
def _replace_by(module_function, package=__package__, warn=False):
"""Try replace decorated function by module.function."""
try:
from importlib import import_module
except ImportError:
warnings.warn('could not import module importlib')
return lambda func: func
def decorate(func, module_function=module_function, warn=warn):
try:
module, function = module_function.split('.')
if package:
module = import_module('.' + module, package=package)
else:
module = import_module(module)
func, oldfunc = getattr(module, function), func
globals()['__old_' + func.__name__] = oldfunc
except Exception:
if warn:
warnings.warn("failed to import %s" % module_function)
return func
return decorate
def decode_floats(data):
"""Decode floating point horizontal differencing.
The TIFF predictor type 3 reorders the bytes of the image values and
applies horizontal byte differencing to improve compression of floating
point images. The ordering of interleaved color channels is preserved.
Parameters
----------
data : numpy.ndarray
The image to be decoded. The dtype must be a floating point.
The shape must include the number of contiguous samples per pixel
even if 1.
"""
shape = data.shape
dtype = data.dtype
if len(shape) < 3:
raise ValueError('invalid data shape')
if dtype.char not in 'dfe':
raise ValueError('not a floating point image')
littleendian = data.dtype.byteorder == '<' or (
sys.byteorder == 'little' and data.dtype.byteorder == '=')
# undo horizontal byte differencing
data = data.view('uint8')
data.shape = shape[:-2] + (-1,) + shape[-1:]
numpy.cumsum(data, axis=-2, dtype='uint8', out=data)
# reorder bytes
if littleendian:
data.shape = shape[:-2] + (-1,) + shape[-2:]
data = numpy.swapaxes(data, -3, -2)
data = numpy.swapaxes(data, -2, -1)
data = data[..., ::-1]
# back to float
data = numpy.ascontiguousarray(data)
data = data.view(dtype)
data.shape = shape
return data
def decode_jpeg(encoded, tables=b'', photometric=None,
ycbcr_subsampling=None, ycbcr_positioning=None):
"""Decode JPEG encoded byte string (using _czifile extension module)."""
from czifile import _czifile
image = _czifile.decode_jpeg(encoded, tables)
if photometric == 'rgb' and ycbcr_subsampling and ycbcr_positioning:
# TODO: convert YCbCr to RGB
pass
return image.tostring()
@_replace_by('_tifffile.decode_packbits')
def decode_packbits(encoded):
"""Decompress PackBits encoded byte string.
PackBits is a simple byte-oriented run-length compression scheme.
"""
func = ord if sys.version[0] == '2' else lambda x: x
result = []
result_extend = result.extend
i = 0
try:
while True:
n = func(encoded[i]) + 1
i += 1
if n < 129:
result_extend(encoded[i:i+n])
i += n
elif n > 129:
result_extend(encoded[i:i+1] * (258-n))
i += 1
except IndexError:
pass
return b''.join(result) if sys.version[0] == '2' else bytes(result)
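# A hedged check of decode_packbits against the sample strip from the TIFF 6.0
# specification (PackBits section); illustrative only and never called here.
def _example_decode_packbits():
    """Decode the PackBits example strip from the TIFF 6.0 specification."""
    encoded = b'\xfe\xaa\x02\x80\x00\x2a\xfd\xaa\x03\x80\x00\x2a\x22\xf7\xaa'
    expected = (b'\xaa' * 3 + b'\x80\x00\x2a' + b'\xaa' * 4 +
                b'\x80\x00\x2a\x22' + b'\xaa' * 10)
    assert decode_packbits(encoded) == expected
    return expected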
@_replace_by('_tifffile.decode_lzw')
def decode_lzw(encoded):
"""Decompress LZW (Lempel-Ziv-Welch) encoded TIFF strip (byte string).
The strip must begin with a CLEAR code and end with an EOI code.
This is an implementation of the LZW decoding algorithm described in (1).
It is not compatible with old style LZW compressed files like quad-lzw.tif.
"""
len_encoded = len(encoded)
bitcount_max = len_encoded * 8
unpack = struct.unpack
if sys.version[0] == '2':
newtable = [chr(i) for i in range(256)]
else:
newtable = [bytes([i]) for i in range(256)]
newtable.extend((0, 0))
def next_code():
"""Return integer of 'bitw' bits at 'bitcount' position in encoded."""
start = bitcount // 8
s = encoded[start:start+4]
try:
code = unpack('>I', s)[0]
except Exception:
code = unpack('>I', s + b'\x00'*(4-len(s)))[0]
code <<= bitcount % 8
code &= mask
return code >> shr
switchbitch = { # code: bit-width, shr-bits, bit-mask
255: (9, 23, int(9*'1'+'0'*23, 2)),
511: (10, 22, int(10*'1'+'0'*22, 2)),
1023: (11, 21, int(11*'1'+'0'*21, 2)),
2047: (12, 20, int(12*'1'+'0'*20, 2)), }
bitw, shr, mask = switchbitch[255]
bitcount = 0
if len_encoded < 4:
raise ValueError("strip must be at least 4 characters long")
if next_code() != 256:
raise ValueError("strip must begin with CLEAR code")
code = 0
oldcode = 0
result = []
result_append = result.append
while True:
code = next_code() # ~5% faster when inlining this function
bitcount += bitw
if code == 257 or bitcount >= bitcount_max: # EOI
break
if code == 256: # CLEAR
table = newtable[:]
table_append = table.append
lentable = 258
bitw, shr, mask = switchbitch[255]
code = next_code()
bitcount += bitw
if code == 257: # EOI
break
result_append(table[code])
else:
if code < lentable:
decoded = table[code]
newcode = table[oldcode] + decoded[:1]
else:
newcode = table[oldcode]
newcode += newcode[:1]
decoded = newcode
result_append(decoded)
table_append(newcode)
lentable += 1
oldcode = code
if lentable in switchbitch:
bitw, shr, mask = switchbitch[lentable]
if code != 257:
warnings.warn("unexpected end of lzw stream (code %i)" % code)
return b''.join(result)
@_replace_by('_tifffile.unpack_ints')
def unpack_ints(data, dtype, itemsize, runlen=0):
"""Decompress byte string to array of integers of any bit size <= 32.
Parameters
----------
data : byte str
Data to decompress.
dtype : numpy.dtype or str
A numpy boolean or integer type.
itemsize : int
Number of bits per integer.
runlen : int
Number of consecutive integers, after which to start at next byte.
"""
if itemsize == 1: # bitarray
data = numpy.fromstring(data, '|B')
data = numpy.unpackbits(data)
if runlen % 8:
data = data.reshape(-1, runlen + (8 - runlen % 8))
data = data[:, :runlen].reshape(-1)
return data.astype(dtype)
dtype = numpy.dtype(dtype)
if itemsize in (8, 16, 32, 64):
return numpy.fromstring(data, dtype)
if itemsize < 1 or itemsize > 32:
raise ValueError("itemsize out of range: %i" % itemsize)
if dtype.kind not in "biu":
raise ValueError("invalid dtype")
itembytes = next(i for i in (1, 2, 4, 8) if 8 * i >= itemsize)
if itembytes != dtype.itemsize:
raise ValueError("dtype.itemsize too small")
if runlen == 0:
runlen = len(data) // itembytes
skipbits = runlen*itemsize % 8
if skipbits:
skipbits = 8 - skipbits
shrbits = itembytes*8 - itemsize
bitmask = int(itemsize*'1'+'0'*shrbits, 2)
dtypestr = '>' + dtype.char # dtype always big endian?
unpack = struct.unpack
l = runlen * (len(data)*8 // (runlen*itemsize + skipbits))
result = numpy.empty((l,), dtype)
bitcount = 0
for i in range(len(result)):
start = bitcount // 8
s = data[start:start+itembytes]
try:
code = unpack(dtypestr, s)[0]
except Exception:
code = unpack(dtypestr, s + b'\x00'*(itembytes-len(s)))[0]
code <<= bitcount % 8
code &= bitmask
result[i] = code >> shrbits
bitcount += itemsize
if (i+1) % runlen == 0:
bitcount += skipbits
return result
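# A small illustrative sketch (the demo bytes are assumptions, not module
# data): unpack two bytes holding four 4-bit unsigned samples.
def _example_unpack_ints():
    """Unpack 4-bit samples packed MSB-first into bytes (sketch)."""
    return unpack_ints(b'\x12\x34', 'uint8', 4)  # -> array([1, 2, 3, 4], dtype=uint8)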
def unpack_rgb(data, dtype='<B', bitspersample=(5, 6, 5), rescale=True):
"""Return array from byte string containing packed samples.
Use to unpack RGB565 or RGB555 to RGB888 format.
Parameters
----------
data : byte str
The data to be decoded. Samples in each pixel are stored consecutively.
Pixels are aligned to 8, 16, or 32 bit boundaries.
dtype : numpy.dtype
The sample data type. The byteorder applies also to the data stream.
bitspersample : tuple
Number of bits for each sample in a pixel.
rescale : bool
Upscale samples to the number of bits in dtype.
Returns
-------
result : ndarray
Flattened array of unpacked samples of native dtype.
Examples
--------
>>> data = struct.pack('BBBB', 0x21, 0x08, 0xff, 0xff)
>>> print(unpack_rgb(data, '<B', (5, 6, 5), False))
[ 1 1 1 31 63 31]
>>> print(unpack_rgb(data, '<B', (5, 6, 5)))
[ 8 4 8 255 255 255]
>>> print(unpack_rgb(data, '<B', (5, 5, 5)))
[ 16 8 8 255 255 255]
"""
dtype = numpy.dtype(dtype)
bits = int(numpy.sum(bitspersample))
if not (bits <= 32 and all(i <= dtype.itemsize*8 for i in bitspersample)):
raise ValueError("sample size not supported %s" % str(bitspersample))
dt = next(i for i in 'BHI' if numpy.dtype(i).itemsize*8 >= bits)
data = numpy.fromstring(data, dtype.byteorder+dt)
result = numpy.empty((data.size, len(bitspersample)), dtype.char)
for i, bps in enumerate(bitspersample):
t = data >> int(numpy.sum(bitspersample[i+1:]))
t &= int('0b'+'1'*bps, 2)
if rescale:
o = ((dtype.itemsize * 8) // bps + 1) * bps
if o > data.dtype.itemsize * 8:
t = t.astype('I')
t *= (2**o - 1) // (2**bps - 1)
t //= 2**(o - (dtype.itemsize * 8))
result[:, i] = t
return result.reshape(-1)
@_replace_by('_tifffile.reverse_bitorder')
def reverse_bitorder(data):
"""Reverse bits in each byte of byte string or numpy array.
Decode data where pixels with lower column values are stored in the
lower-order bits of the bytes (fill_order == 'lsb2msb').
Parameters
----------
data : byte string or ndarray
The data to be bit reversed. If byte string, a new bit-reversed byte
string is returned. Numpy arrays are bit-reversed in-place.
Examples
--------
>>> reverse_bitorder(b'\x01\x64') # doctest: +SKIP
b'\x80&'
>>> data = numpy.array([1, 666], dtype='uint16')
>>> reverse_bitorder(data)
>>> data
array([ 128, 16473], dtype=uint16)
"""
try:
# numpy array
view = data.view('uint8')
table = numpy.array([
0x00, 0x80, 0x40, 0xC0, 0x20, 0xA0, 0x60, 0xE0,
0x10, 0x90, 0x50, 0xD0, 0x30, 0xB0, 0x70, 0xF0,
0x08, 0x88, 0x48, 0xC8, 0x28, 0xA8, 0x68, 0xE8,
0x18, 0x98, 0x58, 0xD8, 0x38, 0xB8, 0x78, 0xF8,
0x04, 0x84, 0x44, 0xC4, 0x24, 0xA4, 0x64, 0xE4,
0x14, 0x94, 0x54, 0xD4, 0x34, 0xB4, 0x74, 0xF4,
0x0C, 0x8C, 0x4C, 0xCC, 0x2C, 0xAC, 0x6C, 0xEC,
0x1C, 0x9C, 0x5C, 0xDC, 0x3C, 0xBC, 0x7C, 0xFC,
0x02, 0x82, 0x42, 0xC2, 0x22, 0xA2, 0x62, 0xE2,
0x12, 0x92, 0x52, 0xD2, 0x32, 0xB2, 0x72, 0xF2,
0x0A, 0x8A, 0x4A, 0xCA, 0x2A, 0xAA, 0x6A, 0xEA,
0x1A, 0x9A, 0x5A, 0xDA, 0x3A, 0xBA, 0x7A, 0xFA,
0x06, 0x86, 0x46, 0xC6, 0x26, 0xA6, 0x66, 0xE6,
0x16, 0x96, 0x56, 0xD6, 0x36, 0xB6, 0x76, 0xF6,
0x0E, 0x8E, 0x4E, 0xCE, 0x2E, 0xAE, 0x6E, 0xEE,
0x1E, 0x9E, 0x5E, 0xDE, 0x3E, 0xBE, 0x7E, 0xFE,
0x01, 0x81, 0x41, 0xC1, 0x21, 0xA1, 0x61, 0xE1,
0x11, 0x91, 0x51, 0xD1, 0x31, 0xB1, 0x71, 0xF1,
0x09, 0x89, 0x49, 0xC9, 0x29, 0xA9, 0x69, 0xE9,
0x19, 0x99, 0x59, 0xD9, 0x39, 0xB9, 0x79, 0xF9,
0x05, 0x85, 0x45, 0xC5, 0x25, 0xA5, 0x65, 0xE5,
0x15, 0x95, 0x55, 0xD5, 0x35, 0xB5, 0x75, 0xF5,
0x0D, 0x8D, 0x4D, 0xCD, 0x2D, 0xAD, 0x6D, 0xED,
0x1D, 0x9D, 0x5D, 0xDD, 0x3D, 0xBD, 0x7D, 0xFD,
0x03, 0x83, 0x43, 0xC3, 0x23, 0xA3, 0x63, 0xE3,
0x13, 0x93, 0x53, 0xD3, 0x33, 0xB3, 0x73, 0xF3,
0x0B, 0x8B, 0x4B, 0xCB, 0x2B, 0xAB, 0x6B, 0xEB,
0x1B, 0x9B, 0x5B, 0xDB, 0x3B, 0xBB, 0x7B, 0xFB,
0x07, 0x87, 0x47, 0xC7, 0x27, 0xA7, 0x67, 0xE7,
0x17, 0x97, 0x57, 0xD7, 0x37, 0xB7, 0x77, 0xF7,
0x0F, 0x8F, 0x4F, 0xCF, 0x2F, 0xAF, 0x6F, 0xEF,
0x1F, 0x9F, 0x5F, 0xDF, 0x3F, 0xBF, 0x7F, 0xFF], dtype='uint8')
numpy.take(table, view, out=view)
except AttributeError:
# byte string
# TODO: use string translate
data = numpy.fromstring(data, dtype='uint8')
reverse_bitorder(data)
return data.tostring()
def apply_colormap(image, colormap, contig=True):
"""Return palette-colored image.
The image values are used to index the colormap on axis 1. The returned
    image is of shape image.shape + (colormap.shape[0],) and dtype colormap.dtype.
Parameters
----------
image : numpy.ndarray
Indexes into the colormap.
colormap : numpy.ndarray
RGB lookup table aka palette of shape (3, 2**bits_per_sample).
contig : bool
If True, return a contiguous array.
Examples
--------
>>> image = numpy.arange(256, dtype='uint8')
>>> colormap = numpy.vstack([image, image, image]).astype('uint16') * 256
>>> apply_colormap(image, colormap)[-1]
array([65280, 65280, 65280], dtype=uint16)
"""
image = numpy.take(colormap, image, axis=1)
image = numpy.rollaxis(image, 0, image.ndim)
if contig:
image = numpy.ascontiguousarray(image)
return image
def reorient(image, orientation):
"""Return reoriented view of image array.
Parameters
----------
image : numpy.ndarray
Non-squeezed output of asarray() functions.
Axes -3 and -2 must be image length and width respectively.
orientation : int or str
One of TIFF_ORIENTATIONS keys or values.
"""
o = TIFF_ORIENTATIONS.get(orientation, orientation)
if o == 'top_left':
return image
elif o == 'top_right':
return image[..., ::-1, :]
elif o == 'bottom_left':
return image[..., ::-1, :, :]
elif o == 'bottom_right':
return image[..., ::-1, ::-1, :]
elif o == 'left_top':
return numpy.swapaxes(image, -3, -2)
elif o == 'right_top':
return numpy.swapaxes(image, -3, -2)[..., ::-1, :]
elif o == 'left_bottom':
return numpy.swapaxes(image, -3, -2)[..., ::-1, :, :]
elif o == 'right_bottom':
return numpy.swapaxes(image, -3, -2)[..., ::-1, ::-1, :]
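# A brief illustrative sketch: mirror a tiny image horizontally via the TIFF
# 'top_right' orientation (axes -3/-2 are length/width, -1 holds samples).
def _example_reorient():
    """Reorient a 2x3 single-sample image (sketch)."""
    image = numpy.arange(6, dtype='uint8').reshape(1, 2, 3, 1)
    return reorient(image, 'top_right')[0, ..., 0]
    # -> array([[2, 1, 0], [5, 4, 3]], dtype=uint8)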
def squeeze_axes(shape, axes, skip='XY'):
"""Return shape and axes with single-dimensional entries removed.
Remove unused dimensions unless their axes are listed in 'skip'.
>>> squeeze_axes((5, 1, 2, 1, 1), 'TZYXC')
((5, 2, 1), 'TYX')
"""
if len(shape) != len(axes):
raise ValueError("dimensions of axes and shape do not match")
shape, axes = zip(*(i for i in zip(shape, axes)
if i[0] > 1 or i[1] in skip))
return tuple(shape), ''.join(axes)
def transpose_axes(data, axes, asaxes='CTZYX'):
"""Return data with its axes permuted to match specified axes.
A view is returned if possible.
>>> transpose_axes(numpy.zeros((2, 3, 4, 5)), 'TYXC', asaxes='CTZYX').shape
(5, 2, 1, 3, 4)
"""
for ax in axes:
if ax not in asaxes:
raise ValueError("unknown axis %s" % ax)
# add missing axes to data
shape = data.shape
for ax in reversed(asaxes):
if ax not in axes:
axes = ax + axes
shape = (1,) + shape
data = data.reshape(shape)
# transpose axes
data = data.transpose([axes.index(ax) for ax in asaxes])
return data
def reshape_axes(axes, shape, newshape):
"""Return axes matching new shape.
Unknown dimensions are labelled 'Q'.
>>> reshape_axes('YXS', (219, 301, 1), (219, 301))
'YX'
>>> reshape_axes('IYX', (12, 219, 301), (3, 4, 219, 1, 301, 1))
'QQYQXQ'
"""
if len(axes) != len(shape):
raise ValueError("axes do not match shape")
if product(shape) != product(newshape):
raise ValueError("can not reshape %s to %s" % (shape, newshape))
if not axes or not newshape:
return ''
lendiff = max(0, len(shape) - len(newshape))
if lendiff:
newshape = newshape + (1,) * lendiff
i = len(shape)-1
prodns = 1
prods = 1
result = []
for ns in newshape[::-1]:
prodns *= ns
while i > 0 and shape[i] == 1 and ns != 1:
i -= 1
if ns == shape[i] and prodns == prods*shape[i]:
prods *= shape[i]
result.append(axes[i])
i -= 1
else:
result.append('Q')
return ''.join(reversed(result[lendiff:]))
def stack_pages(pages, memmap=False, tempdir=None, *args, **kwargs):
"""Read data from sequence of TiffPage and stack them vertically.
If memmap is True, return an array stored in a binary file on disk.
    Additional parameters are passed to the page asarray function.
"""
if len(pages) == 0:
raise ValueError("no pages")
if len(pages) == 1:
return pages[0].asarray(memmap=memmap, *args, **kwargs)
data0 = pages[0].asarray(*args, **kwargs)
shape = (len(pages),) + data0.shape
if memmap:
with tempfile.NamedTemporaryFile(dir=tempdir) as fh:
data = numpy.memmap(fh, dtype=data0.dtype, shape=shape)
else:
data = numpy.empty(shape, dtype=data0.dtype)
data[0] = data0
if memmap:
data.flush()
del data0
for i, page in enumerate(pages[1:]):
data[i+1] = page.asarray(*args, **kwargs)
if memmap:
data.flush()
return data
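# A hedged usage sketch ('multipage.tif' is an assumed file name):
def _example_stack_pages(filename='multipage.tif'):
    """Stack all pages of a TIFF file into one array (sketch)."""
    with TiffFile(filename) as tif:
        data = stack_pages(tif.pages)
    return data.shape  # -> (number of pages,) + shape of one page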
def stripnull(string):
"""Return string truncated at first null character.
Clean NULL terminated C strings.
>>> stripnull(b'string\\x00') # doctest: +SKIP
b'string'
"""
i = string.find(b'\x00')
return string if (i < 0) else string[:i]
def stripascii(string):
"""Return string truncated at last byte that is 7bit ASCII.
Clean NULL separated and terminated TIFF strings.
>>> stripascii(b'string\\x00string\\n\\x01\\x00') # doctest: +SKIP
b'string\\x00string\\n'
>>> stripascii(b'\\x00') # doctest: +SKIP
b''
"""
# TODO: pythonize this
ord_ = ord if sys.version_info[0] < 3 else lambda x: x
i = len(string)
while i:
i -= 1
if 8 < ord_(string[i]) < 127:
break
else:
i = -1
return string[:i+1]
def format_size(size):
"""Return file size as string from byte size."""
for unit in ('B', 'KB', 'MB', 'GB', 'TB'):
if size < 2048:
return "%.f %s" % (size, unit)
size /= 1024.0
def sequence(value):
"""Return tuple containing value if value is not a sequence.
>>> sequence(1)
(1,)
>>> sequence([1])
[1]
"""
try:
len(value)
return value
except TypeError:
return (value,)
def product(iterable):
"""Return product of sequence of numbers.
Equivalent of functools.reduce(operator.mul, iterable, 1).
>>> product([2**8, 2**30])
274877906944
>>> product([])
1
"""
prod = 1
for i in iterable:
prod *= i
return prod
def natural_sorted(iterable):
"""Return human sorted list of strings.
E.g. for sorting file names.
>>> natural_sorted(['f1', 'f2', 'f10'])
['f1', 'f2', 'f10']
"""
def sortkey(x):
return [(int(c) if c.isdigit() else c) for c in re.split(numbers, x)]
numbers = re.compile(r'(\d+)')
return sorted(iterable, key=sortkey)
def excel_datetime(timestamp, epoch=datetime.datetime.fromordinal(693594)):
"""Return datetime object from timestamp in Excel serial format.
Convert LSM time stamps.
>>> excel_datetime(40237.029999999795)
datetime.datetime(2010, 2, 28, 0, 43, 11, 999982)
"""
return epoch + datetime.timedelta(timestamp)
def julian_datetime(julianday, milisecond=0):
"""Return datetime from days since 1/1/4713 BC and ms since midnight.
Convert Julian dates according to MetaMorph.
>>> julian_datetime(2451576, 54362783)
datetime.datetime(2000, 2, 2, 15, 6, 2, 783)
"""
if julianday <= 1721423:
# no datetime before year 1
return None
a = julianday + 1
if a > 2299160:
alpha = math.trunc((a - 1867216.25) / 36524.25)
a += 1 + alpha - alpha // 4
b = a + (1524 if a > 1721423 else 1158)
c = math.trunc((b - 122.1) / 365.25)
d = math.trunc(365.25 * c)
e = math.trunc((b - d) / 30.6001)
day = b - d - math.trunc(30.6001 * e)
month = e - (1 if e < 13.5 else 13)
year = c - (4716 if month > 2.5 else 4715)
hour, milisecond = divmod(milisecond, 1000 * 60 * 60)
minute, milisecond = divmod(milisecond, 1000 * 60)
second, milisecond = divmod(milisecond, 1000)
return datetime.datetime(year, month, day,
hour, minute, second, milisecond)
def test_tifffile(directory='testimages', verbose=True):
"""Read all images in directory.
Print error message on failure.
>>> test_tifffile(verbose=False)
"""
successful = 0
failed = 0
start = time.time()
for f in glob.glob(os.path.join(directory, '*.*')):
if verbose:
print("\n%s>\n" % f.lower(), end='')
t0 = time.time()
try:
tif = TiffFile(f, multifile=True)
except Exception as e:
if not verbose:
print(f, end=' ')
print("ERROR:", e)
failed += 1
continue
try:
img = tif.asarray()
except ValueError:
try:
img = tif[0].asarray()
except Exception as e:
if not verbose:
print(f, end=' ')
print("ERROR:", e)
failed += 1
continue
finally:
tif.close()
successful += 1
if verbose:
print("%s, %s %s, %s, %.0f ms" % (
str(tif), str(img.shape), img.dtype, tif[0].compression,
(time.time()-t0) * 1e3))
if verbose:
print("\nSuccessfully read %i of %i files in %.3f s\n" % (
successful, successful+failed, time.time()-start))
class TIFF_SUBFILE_TYPES(object):
def __getitem__(self, key):
result = []
if key & 1:
result.append('reduced_image')
if key & 2:
result.append('page')
if key & 4:
result.append('mask')
return tuple(result)
TIFF_PHOTOMETRICS = {
0: 'miniswhite',
1: 'minisblack',
2: 'rgb',
3: 'palette',
4: 'mask',
5: 'separated', # CMYK
6: 'ycbcr',
8: 'cielab',
9: 'icclab',
10: 'itulab',
32803: 'cfa', # Color Filter Array
32844: 'logl',
32845: 'logluv',
34892: 'linear_raw'
}
TIFF_COMPESSIONS = {
1: None,
2: 'ccittrle',
3: 'ccittfax3',
4: 'ccittfax4',
5: 'lzw',
6: 'ojpeg',
7: 'jpeg',
8: 'adobe_deflate',
9: 't85',
10: 't43',
32766: 'next',
32771: 'ccittrlew',
32773: 'packbits',
32809: 'thunderscan',
32895: 'it8ctpad',
32896: 'it8lw',
32897: 'it8mp',
32898: 'it8bl',
32908: 'pixarfilm',
32909: 'pixarlog',
32946: 'deflate',
32947: 'dcs',
34661: 'jbig',
34676: 'sgilog',
34677: 'sgilog24',
34712: 'jp2000',
34713: 'nef',
34925: 'lzma',
}
TIFF_DECOMPESSORS = {
None: lambda x: x,
'adobe_deflate': zlib.decompress,
'deflate': zlib.decompress,
'packbits': decode_packbits,
'lzw': decode_lzw,
# 'jpeg': decode_jpeg
}
if lzma:
TIFF_DECOMPESSORS['lzma'] = lzma.decompress
TIFF_DATA_TYPES = {
1: '1B', # BYTE 8-bit unsigned integer.
2: '1s', # ASCII 8-bit byte that contains a 7-bit ASCII code;
# the last byte must be NULL (binary zero).
3: '1H', # SHORT 16-bit (2-byte) unsigned integer
4: '1I', # LONG 32-bit (4-byte) unsigned integer.
5: '2I', # RATIONAL Two LONGs: the first represents the numerator of
# a fraction; the second, the denominator.
6: '1b', # SBYTE An 8-bit signed (twos-complement) integer.
7: '1s', # UNDEFINED An 8-bit byte that may contain anything,
# depending on the definition of the field.
8: '1h', # SSHORT A 16-bit (2-byte) signed (twos-complement) integer.
9: '1i', # SLONG A 32-bit (4-byte) signed (twos-complement) integer.
10: '2i', # SRATIONAL Two SLONGs: the first represents the numerator
# of a fraction, the second the denominator.
11: '1f', # FLOAT Single precision (4-byte) IEEE format.
12: '1d', # DOUBLE Double precision (8-byte) IEEE format.
13: '1I', # IFD unsigned 4 byte IFD offset.
#14: '', # UNICODE
#15: '', # COMPLEX
16: '1Q', # LONG8 unsigned 8 byte integer (BigTiff)
17: '1q', # SLONG8 signed 8 byte integer (BigTiff)
18: '1Q', # IFD8 unsigned 8 byte IFD offset (BigTiff)
}
TIFF_SAMPLE_FORMATS = {
1: 'uint',
2: 'int',
3: 'float',
#4: 'void',
#5: 'complex_int',
6: 'complex',
}
TIFF_SAMPLE_DTYPES = {
('uint', 1): '?', # bitmap
('uint', 2): 'B',
('uint', 3): 'B',
('uint', 4): 'B',
('uint', 5): 'B',
('uint', 6): 'B',
('uint', 7): 'B',
('uint', 8): 'B',
('uint', 9): 'H',
('uint', 10): 'H',
('uint', 11): 'H',
('uint', 12): 'H',
('uint', 13): 'H',
('uint', 14): 'H',
('uint', 15): 'H',
('uint', 16): 'H',
('uint', 17): 'I',
('uint', 18): 'I',
('uint', 19): 'I',
('uint', 20): 'I',
('uint', 21): 'I',
('uint', 22): 'I',
('uint', 23): 'I',
('uint', 24): 'I',
('uint', 25): 'I',
('uint', 26): 'I',
('uint', 27): 'I',
('uint', 28): 'I',
('uint', 29): 'I',
('uint', 30): 'I',
('uint', 31): 'I',
('uint', 32): 'I',
('uint', 64): 'Q',
('int', 8): 'b',
('int', 16): 'h',
('int', 32): 'i',
('int', 64): 'q',
('float', 16): 'e',
('float', 32): 'f',
('float', 64): 'd',
('complex', 64): 'F',
('complex', 128): 'D',
('uint', (5, 6, 5)): 'B',
}
TIFF_ORIENTATIONS = {
1: 'top_left',
2: 'top_right',
3: 'bottom_right',
4: 'bottom_left',
5: 'left_top',
6: 'right_top',
7: 'right_bottom',
8: 'left_bottom',
}
# TODO: is there a standard for character axes labels?
AXES_LABELS = {
'X': 'width',
'Y': 'height',
'Z': 'depth',
'S': 'sample', # rgb(a)
'I': 'series', # general sequence, plane, page, IFD
'T': 'time',
'C': 'channel', # color, emission wavelength
'A': 'angle',
'P': 'phase', # formerly F # P is Position in LSM!
'R': 'tile', # region, point, mosaic
'H': 'lifetime', # histogram
'E': 'lambda', # excitation wavelength
'L': 'exposure', # lux
'V': 'event',
'Q': 'other',
'M': 'mosaic', # LSM 6
}
AXES_LABELS.update(dict((v, k) for k, v in AXES_LABELS.items()))
# Map OME pixel types to numpy dtype
OME_PIXEL_TYPES = {
'int8': 'i1',
'int16': 'i2',
'int32': 'i4',
'uint8': 'u1',
'uint16': 'u2',
'uint32': 'u4',
'float': 'f4',
# 'bit': 'bit',
'double': 'f8',
'complex': 'c8',
'double-complex': 'c16',
}
# NIH Image PicHeader v1.63
NIH_IMAGE_HEADER = [
('fileid', 'a8'),
('nlines', 'i2'),
('pixelsperline', 'i2'),
('version', 'i2'),
('oldlutmode', 'i2'),
('oldncolors', 'i2'),
('colors', 'u1', (3, 32)),
('oldcolorstart', 'i2'),
('colorwidth', 'i2'),
('extracolors', 'u2', (6, 3)),
('nextracolors', 'i2'),
('foregroundindex', 'i2'),
('backgroundindex', 'i2'),
('xscale', 'f8'),
('_x0', 'i2'),
('_x1', 'i2'),
('units_t', 'i2'), # NIH_UNITS_TYPE
('p1', [('x', 'i2'), ('y', 'i2')]),
('p2', [('x', 'i2'), ('y', 'i2')]),
('curvefit_t', 'i2'), # NIH_CURVEFIT_TYPE
('ncoefficients', 'i2'),
('coeff', 'f8', 6),
('_um_len', 'u1'),
('um', 'a15'),
('_x2', 'u1'),
('binarypic', 'b1'),
('slicestart', 'i2'),
('sliceend', 'i2'),
('scalemagnification', 'f4'),
('nslices', 'i2'),
('slicespacing', 'f4'),
('currentslice', 'i2'),
('frameinterval', 'f4'),
('pixelaspectratio', 'f4'),
('colorstart', 'i2'),
('colorend', 'i2'),
('ncolors', 'i2'),
('fill1', '3u2'),
('fill2', '3u2'),
('colortable_t', 'u1'), # NIH_COLORTABLE_TYPE
('lutmode_t', 'u1'), # NIH_LUTMODE_TYPE
('invertedtable', 'b1'),
('zeroclip', 'b1'),
('_xunit_len', 'u1'),
('xunit', 'a11'),
('stacktype_t', 'i2'), # NIH_STACKTYPE_TYPE
]
NIH_COLORTABLE_TYPE = (
'CustomTable', 'AppleDefault', 'Pseudo20', 'Pseudo32', 'Rainbow',
'Fire1', 'Fire2', 'Ice', 'Grays', 'Spectrum')
NIH_LUTMODE_TYPE = (
'PseudoColor', 'OldAppleDefault', 'OldSpectrum', 'GrayScale',
'ColorLut', 'CustomGrayscale')
NIH_CURVEFIT_TYPE = (
'StraightLine', 'Poly2', 'Poly3', 'Poly4', 'Poly5', 'ExpoFit',
'PowerFit', 'LogFit', 'RodbardFit', 'SpareFit1', 'Uncalibrated',
'UncalibratedOD')
NIH_UNITS_TYPE = (
'Nanometers', 'Micrometers', 'Millimeters', 'Centimeters', 'Meters',
'Kilometers', 'Inches', 'Feet', 'Miles', 'Pixels', 'OtherUnits')
NIH_STACKTYPE_TYPE = (
'VolumeStack', 'RGBStack', 'MovieStack', 'HSVStack')
# Map Universal Imaging Corporation MetaMorph internal tag ids to name and type
UIC_TAGS = {
0: ('auto_scale', int),
1: ('min_scale', int),
2: ('max_scale', int),
3: ('spatial_calibration', int),
4: ('x_calibration', Fraction),
5: ('y_calibration', Fraction),
6: ('calibration_units', str),
7: ('name', str),
8: ('thresh_state', int),
9: ('thresh_state_red', int),
10: ('tagid_10', None), # undefined
11: ('thresh_state_green', int),
12: ('thresh_state_blue', int),
13: ('thresh_state_lo', int),
14: ('thresh_state_hi', int),
15: ('zoom', int),
16: ('create_time', julian_datetime),
17: ('last_saved_time', julian_datetime),
18: ('current_buffer', int),
19: ('gray_fit', None),
20: ('gray_point_count', None),
21: ('gray_x', Fraction),
22: ('gray_y', Fraction),
23: ('gray_min', Fraction),
24: ('gray_max', Fraction),
25: ('gray_unit_name', str),
26: ('standard_lut', int),
27: ('wavelength', int),
28: ('stage_position', '(%i,2,2)u4'), # N xy positions as fractions
29: ('camera_chip_offset', '(%i,2,2)u4'), # N xy offsets as fractions
30: ('overlay_mask', None),
31: ('overlay_compress', None),
32: ('overlay', None),
33: ('special_overlay_mask', None),
34: ('special_overlay_compress', None),
35: ('special_overlay', None),
36: ('image_property', read_uic_image_property),
37: ('stage_label', '%ip'), # N str
38: ('autoscale_lo_info', Fraction),
39: ('autoscale_hi_info', Fraction),
40: ('absolute_z', '(%i,2)u4'), # N fractions
41: ('absolute_z_valid', '(%i,)u4'), # N long
42: ('gamma', int),
43: ('gamma_red', int),
44: ('gamma_green', int),
45: ('gamma_blue', int),
46: ('camera_bin', int),
47: ('new_lut', int),
48: ('image_property_ex', None),
49: ('plane_property', int),
50: ('user_lut_table', '(256,3)u1'),
51: ('red_autoscale_info', int),
52: ('red_autoscale_lo_info', Fraction),
53: ('red_autoscale_hi_info', Fraction),
54: ('red_minscale_info', int),
55: ('red_maxscale_info', int),
56: ('green_autoscale_info', int),
57: ('green_autoscale_lo_info', Fraction),
58: ('green_autoscale_hi_info', Fraction),
59: ('green_minscale_info', int),
60: ('green_maxscale_info', int),
61: ('blue_autoscale_info', int),
62: ('blue_autoscale_lo_info', Fraction),
63: ('blue_autoscale_hi_info', Fraction),
64: ('blue_min_scale_info', int),
65: ('blue_max_scale_info', int),
#66: ('overlay_plane_color', read_uic_overlay_plane_color),
}
# Olympus FluoView
MM_DIMENSION = [
('name', 'a16'),
('size', 'i4'),
('origin', 'f8'),
('resolution', 'f8'),
('unit', 'a64'),
]
MM_HEADER = [
('header_flag', 'i2'),
('image_type', 'u1'),
('image_name', 'a257'),
('offset_data', 'u4'),
('palette_size', 'i4'),
('offset_palette0', 'u4'),
('offset_palette1', 'u4'),
('comment_size', 'i4'),
('offset_comment', 'u4'),
('dimensions', MM_DIMENSION, 10),
('offset_position', 'u4'),
('map_type', 'i2'),
('map_min', 'f8'),
('map_max', 'f8'),
('min_value', 'f8'),
('max_value', 'f8'),
('offset_map', 'u4'),
('gamma', 'f8'),
('offset', 'f8'),
('gray_channel', MM_DIMENSION),
('offset_thumbnail', 'u4'),
('voice_field', 'i4'),
('offset_voice_field', 'u4'),
]
# Carl Zeiss LSM
CZ_LSM_INFO = [
('magic_number', 'u4'),
('structure_size', 'i4'),
('dimension_x', 'i4'),
('dimension_y', 'i4'),
('dimension_z', 'i4'),
('dimension_channels', 'i4'),
('dimension_time', 'i4'),
('data_type', 'i4'), # CZ_DATA_TYPES
('thumbnail_x', 'i4'),
('thumbnail_y', 'i4'),
('voxel_size_x', 'f8'),
('voxel_size_y', 'f8'),
('voxel_size_z', 'f8'),
('origin_x', 'f8'),
('origin_y', 'f8'),
('origin_z', 'f8'),
('scan_type', 'u2'),
('spectral_scan', 'u2'),
('type_of_data', 'u4'), # CZ_TYPE_OF_DATA
('offset_vector_overlay', 'u4'),
('offset_input_lut', 'u4'),
('offset_output_lut', 'u4'),
('offset_channel_colors', 'u4'),
('time_interval', 'f8'),
('offset_channel_data_types', 'u4'),
('offset_scan_info', 'u4'), # CZ_LSM_SCAN_INFO
('offset_ks_data', 'u4'),
('offset_time_stamps', 'u4'),
('offset_event_list', 'u4'),
('offset_roi', 'u4'),
('offset_bleach_roi', 'u4'),
('offset_next_recording', 'u4'),
# LSM 2.0 ends here
('display_aspect_x', 'f8'),
('display_aspect_y', 'f8'),
('display_aspect_z', 'f8'),
('display_aspect_time', 'f8'),
('offset_mean_of_roi_overlay', 'u4'),
('offset_topo_isoline_overlay', 'u4'),
('offset_topo_profile_overlay', 'u4'),
('offset_linescan_overlay', 'u4'),
('offset_toolbar_flags', 'u4'),
('offset_channel_wavelength', 'u4'),
('offset_channel_factors', 'u4'),
('objective_sphere_correction', 'f8'),
('offset_unmix_parameters', 'u4'),
# LSM 3.2, 4.0 end here
('offset_acquisition_parameters', 'u4'),
('offset_characteristics', 'u4'),
('offset_palette', 'u4'),
('time_difference_x', 'f8'),
('time_difference_y', 'f8'),
('time_difference_z', 'f8'),
('internal_use_1', 'u4'),
('dimension_p', 'i4'),
('dimension_m', 'i4'),
('dimensions_reserved', '16i4'),
('offset_tile_positions', 'u4'),
('reserved_1', '9u4'),
('offset_positions', 'u4'),
('reserved_2', '21u4'), # must be 0
]
# Import functions for LSM_INFO sub-records
CZ_LSM_INFO_READERS = {
'scan_info': read_cz_lsm_scan_info,
'time_stamps': read_cz_lsm_time_stamps,
'event_list': read_cz_lsm_event_list,
'channel_colors': read_cz_lsm_floatpairs,
'positions': read_cz_lsm_floatpairs,
'tile_positions': read_cz_lsm_floatpairs,
}
# Map cz_lsm_info.scan_type to dimension order
CZ_SCAN_TYPES = {
0: 'XYZCT', # x-y-z scan
1: 'XYZCT', # z scan (x-z plane)
2: 'XYZCT', # line scan
3: 'XYTCZ', # time series x-y
4: 'XYZTC', # time series x-z
5: 'XYTCZ', # time series 'Mean of ROIs'
6: 'XYZTC', # time series x-y-z
7: 'XYCTZ', # spline scan
8: 'XYCZT', # spline scan x-z
9: 'XYTCZ', # time series spline plane x-z
10: 'XYZCT', # point mode
}
# Map dimension codes to cz_lsm_info attribute
CZ_DIMENSIONS = {
'X': 'dimension_x',
'Y': 'dimension_y',
'Z': 'dimension_z',
'C': 'dimension_channels',
'T': 'dimension_time',
'P': 'dimension_p',
'M': 'dimension_m',
}
# Description of cz_lsm_info.data_type
CZ_DATA_TYPES = {
0: 'varying data types',
1: '8 bit unsigned integer',
2: '12 bit unsigned integer',
5: '32 bit float',
}
# Description of cz_lsm_info.type_of_data
CZ_TYPE_OF_DATA = {
0: 'Original scan data',
1: 'Calculated data',
2: '3D reconstruction',
3: 'Topography height map',
}
CZ_LSM_SCAN_INFO_ARRAYS = {
0x20000000: "tracks",
0x30000000: "lasers",
0x60000000: "detection_channels",
0x80000000: "illumination_channels",
0xa0000000: "beam_splitters",
0xc0000000: "data_channels",
0x11000000: "timers",
0x13000000: "markers",
}
CZ_LSM_SCAN_INFO_STRUCTS = {
# 0x10000000: "recording",
0x40000000: "track",
0x50000000: "laser",
0x70000000: "detection_channel",
0x90000000: "illumination_channel",
0xb0000000: "beam_splitter",
0xd0000000: "data_channel",
0x12000000: "timer",
0x14000000: "marker",
}
CZ_LSM_SCAN_INFO_ATTRIBUTES = {
# recording
0x10000001: "name",
0x10000002: "description",
0x10000003: "notes",
0x10000004: "objective",
0x10000005: "processing_summary",
0x10000006: "special_scan_mode",
0x10000007: "scan_type",
0x10000008: "scan_mode",
0x10000009: "number_of_stacks",
0x1000000a: "lines_per_plane",
0x1000000b: "samples_per_line",
0x1000000c: "planes_per_volume",
0x1000000d: "images_width",
0x1000000e: "images_height",
0x1000000f: "images_number_planes",
0x10000010: "images_number_stacks",
0x10000011: "images_number_channels",
0x10000012: "linscan_xy_size",
0x10000013: "scan_direction",
0x10000014: "time_series",
0x10000015: "original_scan_data",
0x10000016: "zoom_x",
0x10000017: "zoom_y",
0x10000018: "zoom_z",
0x10000019: "sample_0x",
0x1000001a: "sample_0y",
0x1000001b: "sample_0z",
0x1000001c: "sample_spacing",
0x1000001d: "line_spacing",
0x1000001e: "plane_spacing",
0x1000001f: "plane_width",
0x10000020: "plane_height",
0x10000021: "volume_depth",
0x10000023: "nutation",
0x10000034: "rotation",
0x10000035: "precession",
0x10000036: "sample_0time",
0x10000037: "start_scan_trigger_in",
0x10000038: "start_scan_trigger_out",
0x10000039: "start_scan_event",
0x10000040: "start_scan_time",
0x10000041: "stop_scan_trigger_in",
0x10000042: "stop_scan_trigger_out",
0x10000043: "stop_scan_event",
0x10000044: "stop_scan_time",
0x10000045: "use_rois",
0x10000046: "use_reduced_memory_rois",
0x10000047: "user",
0x10000048: "use_bc_correction",
0x10000049: "position_bc_correction1",
0x10000050: "position_bc_correction2",
0x10000051: "interpolation_y",
0x10000052: "camera_binning",
0x10000053: "camera_supersampling",
0x10000054: "camera_frame_width",
0x10000055: "camera_frame_height",
0x10000056: "camera_offset_x",
0x10000057: "camera_offset_y",
0x10000059: "rt_binning",
0x1000005a: "rt_frame_width",
0x1000005b: "rt_frame_height",
0x1000005c: "rt_region_width",
0x1000005d: "rt_region_height",
0x1000005e: "rt_offset_x",
0x1000005f: "rt_offset_y",
0x10000060: "rt_zoom",
0x10000061: "rt_line_period",
0x10000062: "prescan",
0x10000063: "scan_direction_z",
# track
0x40000001: "multiplex_type", # 0 after line; 1 after frame
0x40000002: "multiplex_order",
0x40000003: "sampling_mode", # 0 sample; 1 line average; 2 frame average
0x40000004: "sampling_method", # 1 mean; 2 sum
0x40000005: "sampling_number",
0x40000006: "acquire",
0x40000007: "sample_observation_time",
0x4000000b: "time_between_stacks",
0x4000000c: "name",
0x4000000d: "collimator1_name",
0x4000000e: "collimator1_position",
0x4000000f: "collimator2_name",
0x40000010: "collimator2_position",
0x40000011: "is_bleach_track",
0x40000012: "is_bleach_after_scan_number",
0x40000013: "bleach_scan_number",
0x40000014: "trigger_in",
0x40000015: "trigger_out",
0x40000016: "is_ratio_track",
0x40000017: "bleach_count",
0x40000018: "spi_center_wavelength",
0x40000019: "pixel_time",
0x40000021: "condensor_frontlens",
0x40000023: "field_stop_value",
0x40000024: "id_condensor_aperture",
0x40000025: "condensor_aperture",
0x40000026: "id_condensor_revolver",
0x40000027: "condensor_filter",
0x40000028: "id_transmission_filter1",
0x40000029: "id_transmission1",
0x40000030: "id_transmission_filter2",
0x40000031: "id_transmission2",
0x40000032: "repeat_bleach",
0x40000033: "enable_spot_bleach_pos",
0x40000034: "spot_bleach_posx",
0x40000035: "spot_bleach_posy",
0x40000036: "spot_bleach_posz",
0x40000037: "id_tubelens",
0x40000038: "id_tubelens_position",
0x40000039: "transmitted_light",
0x4000003a: "reflected_light",
0x4000003b: "simultan_grab_and_bleach",
0x4000003c: "bleach_pixel_time",
# laser
0x50000001: "name",
0x50000002: "acquire",
0x50000003: "power",
# detection_channel
0x70000001: "integration_mode",
0x70000002: "special_mode",
0x70000003: "detector_gain_first",
0x70000004: "detector_gain_last",
0x70000005: "amplifier_gain_first",
0x70000006: "amplifier_gain_last",
0x70000007: "amplifier_offs_first",
0x70000008: "amplifier_offs_last",
0x70000009: "pinhole_diameter",
0x7000000a: "counting_trigger",
0x7000000b: "acquire",
0x7000000c: "point_detector_name",
0x7000000d: "amplifier_name",
0x7000000e: "pinhole_name",
0x7000000f: "filter_set_name",
0x70000010: "filter_name",
0x70000013: "integrator_name",
0x70000014: "channel_name",
0x70000015: "detector_gain_bc1",
0x70000016: "detector_gain_bc2",
0x70000017: "amplifier_gain_bc1",
0x70000018: "amplifier_gain_bc2",
0x70000019: "amplifier_offset_bc1",
0x70000020: "amplifier_offset_bc2",
0x70000021: "spectral_scan_channels",
0x70000022: "spi_wavelength_start",
0x70000023: "spi_wavelength_stop",
0x70000026: "dye_name",
0x70000027: "dye_folder",
# illumination_channel
0x90000001: "name",
0x90000002: "power",
0x90000003: "wavelength",
0x90000004: "aquire",
0x90000005: "detchannel_name",
0x90000006: "power_bc1",
0x90000007: "power_bc2",
# beam_splitter
0xb0000001: "filter_set",
0xb0000002: "filter",
0xb0000003: "name",
# data_channel
0xd0000001: "name",
0xd0000003: "acquire",
0xd0000004: "color",
0xd0000005: "sample_type",
0xd0000006: "bits_per_sample",
0xd0000007: "ratio_type",
0xd0000008: "ratio_track1",
0xd0000009: "ratio_track2",
0xd000000a: "ratio_channel1",
0xd000000b: "ratio_channel2",
0xd000000c: "ratio_const1",
0xd000000d: "ratio_const2",
0xd000000e: "ratio_const3",
0xd000000f: "ratio_const4",
0xd0000010: "ratio_const5",
0xd0000011: "ratio_const6",
0xd0000012: "ratio_first_images1",
0xd0000013: "ratio_first_images2",
0xd0000014: "dye_name",
0xd0000015: "dye_folder",
0xd0000016: "spectrum",
0xd0000017: "acquire",
# timer
0x12000001: "name",
0x12000002: "description",
0x12000003: "interval",
0x12000004: "trigger_in",
0x12000005: "trigger_out",
0x12000006: "activation_time",
0x12000007: "activation_number",
# marker
0x14000001: "name",
0x14000002: "description",
0x14000003: "trigger_in",
0x14000004: "trigger_out",
}
# Map TIFF tag code to attribute name, default value, type, count, validator
TIFF_TAGS = {
254: ('new_subfile_type', 0, 4, 1, TIFF_SUBFILE_TYPES()),
255: ('subfile_type', None, 3, 1,
{0: 'undefined', 1: 'image', 2: 'reduced_image', 3: 'page'}),
256: ('image_width', None, 4, 1, None),
257: ('image_length', None, 4, 1, None),
258: ('bits_per_sample', 1, 3, None, None),
259: ('compression', 1, 3, 1, TIFF_COMPESSIONS),
262: ('photometric', None, 3, 1, TIFF_PHOTOMETRICS),
266: ('fill_order', 1, 3, 1, {1: 'msb2lsb', 2: 'lsb2msb'}),
269: ('document_name', None, 2, None, None),
270: ('image_description', None, 2, None, None),
271: ('make', None, 2, None, None),
272: ('model', None, 2, None, None),
273: ('strip_offsets', None, 4, None, None),
274: ('orientation', 1, 3, 1, TIFF_ORIENTATIONS),
277: ('samples_per_pixel', 1, 3, 1, None),
278: ('rows_per_strip', 2**32-1, 4, 1, None),
279: ('strip_byte_counts', None, 4, None, None),
280: ('min_sample_value', None, 3, None, None),
281: ('max_sample_value', None, 3, None, None), # 2**bits_per_sample
282: ('x_resolution', None, 5, 1, None),
283: ('y_resolution', None, 5, 1, None),
284: ('planar_configuration', 1, 3, 1, {1: 'contig', 2: 'separate'}),
285: ('page_name', None, 2, None, None),
286: ('x_position', None, 5, 1, None),
287: ('y_position', None, 5, 1, None),
296: ('resolution_unit', 2, 4, 1, {1: 'none', 2: 'inch', 3: 'centimeter'}),
297: ('page_number', None, 3, 2, None),
305: ('software', None, 2, None, None),
306: ('datetime', None, 2, None, None),
315: ('artist', None, 2, None, None),
316: ('host_computer', None, 2, None, None),
317: ('predictor', 1, 3, 1, {1: None, 2: 'horizontal', 3: 'float'}),
318: ('white_point', None, 5, 2, None),
319: ('primary_chromaticities', None, 5, 6, None),
320: ('color_map', None, 3, None, None),
322: ('tile_width', None, 4, 1, None),
323: ('tile_length', None, 4, 1, None),
324: ('tile_offsets', None, 4, None, None),
325: ('tile_byte_counts', None, 4, None, None),
338: ('extra_samples', None, 3, None,
{0: 'unspecified', 1: 'assocalpha', 2: 'unassalpha'}),
339: ('sample_format', 1, 3, None, TIFF_SAMPLE_FORMATS),
340: ('smin_sample_value', None, None, None, None),
341: ('smax_sample_value', None, None, None, None),
346: ('indexed', 0, 3, 1, None),
347: ('jpeg_tables', None, 7, None, None),
530: ('ycbcr_subsampling', (1, 1), 3, 2, None),
531: ('ycbcr_positioning', (1, 1), 3, 1, None),
532: ('reference_black_white', None, 5, 1, None),
    32995: ('sgi_matteing', None, None, 1, None),  # use extra_samples
32996: ('sgi_datatype', None, None, None, None), # use sample_format
32997: ('image_depth', 1, 4, 1, None),
32998: ('tile_depth', None, 4, 1, None),
33432: ('copyright', None, 1, None, None),
33445: ('md_file_tag', None, 4, 1, None),
33446: ('md_scale_pixel', None, 5, 1, None),
33447: ('md_color_table', None, 3, None, None),
33448: ('md_lab_name', None, 2, None, None),
33449: ('md_sample_info', None, 2, None, None),
33450: ('md_prep_date', None, 2, None, None),
33451: ('md_prep_time', None, 2, None, None),
33452: ('md_file_units', None, 2, None, None),
33550: ('model_pixel_scale', None, 12, 3, None),
33922: ('model_tie_point', None, 12, None, None),
34665: ('exif_ifd', None, None, 1, None),
34735: ('geo_key_directory', None, 3, None, None),
34736: ('geo_double_params', None, 12, None, None),
34737: ('geo_ascii_params', None, 2, None, None),
34853: ('gps_ifd', None, None, 1, None),
37510: ('user_comment', None, None, None, None),
42112: ('gdal_metadata', None, 2, None, None),
42113: ('gdal_nodata', None, 2, None, None),
50289: ('mc_xy_position', None, 12, 2, None),
50290: ('mc_z_position', None, 12, 1, None),
50291: ('mc_xy_calibration', None, 12, 3, None),
50292: ('mc_lens_lem_na_n', None, 12, 3, None),
50293: ('mc_channel_name', None, 1, None, None),
50294: ('mc_ex_wavelength', None, 12, 1, None),
50295: ('mc_time_stamp', None, 12, 1, None),
50838: ('imagej_byte_counts', None, None, None, None),
51023: ('fibics_xml', None, 2, None, None),
65200: ('flex_xml', None, 2, None, None),
# code: (attribute name, default value, type, count, validator)
}
# Map custom TIFF tag codes to attribute names and import functions
CUSTOM_TAGS = {
700: ('xmp', read_bytes),
34377: ('photoshop', read_numpy),
33723: ('iptc', read_bytes),
34675: ('icc_profile', read_bytes),
33628: ('uic1tag', read_uic1tag), # Universal Imaging Corporation STK
33629: ('uic2tag', read_uic2tag),
33630: ('uic3tag', read_uic3tag),
33631: ('uic4tag', read_uic4tag),
34361: ('mm_header', read_mm_header), # Olympus FluoView
34362: ('mm_stamp', read_mm_stamp),
34386: ('mm_user_block', read_bytes),
34412: ('cz_lsm_info', read_cz_lsm_info), # Carl Zeiss LSM
43314: ('nih_image_header', read_nih_image_header),
# 40001: ('mc_ipwinscal', read_bytes),
40100: ('mc_id_old', read_bytes),
50288: ('mc_id', read_bytes),
50296: ('mc_frame_properties', read_bytes),
50839: ('imagej_metadata', read_bytes),
51123: ('micromanager_metadata', read_json),
}
# Max line length of printed output
PRINT_LINE_LEN = 79
def imshow(data, title=None, vmin=0, vmax=None, cmap=None,
bitspersample=None, photometric='rgb', interpolation='nearest',
dpi=96, figure=None, subplot=111, maxdim=8192, **kwargs):
"""Plot n-dimensional images using matplotlib.pyplot.
Return figure, subplot and plot axis.
Requires pyplot already imported `from matplotlib import pyplot`.
Parameters
----------
bitspersample : int or None
Number of bits per channel in integer RGB images.
photometric : {'miniswhite', 'minisblack', 'rgb', or 'palette'}
The color space of the image data.
title : str
Window and subplot title.
figure : matplotlib.figure.Figure (optional).
        Matplotlib figure to use for plotting.
subplot : int
A matplotlib.pyplot.subplot axis.
maxdim : int
maximum image width and length.
kwargs : optional
Arguments for matplotlib.pyplot.imshow.
"""
#if photometric not in ('miniswhite', 'minisblack', 'rgb', 'palette'):
# raise ValueError("Can not handle %s photometrics" % photometric)
# TODO: handle photometric == 'separated' (CMYK)
isrgb = photometric in ('rgb', 'palette')
data = numpy.atleast_2d(data.squeeze())
dims = data.ndim
if dims < 2:
raise ValueError("not an image")
elif dims == 2:
dims = 0
isrgb = False
else:
if isrgb and data.shape[-3] in (3, 4):
data = numpy.swapaxes(data, -3, -2)
data = numpy.swapaxes(data, -2, -1)
elif not isrgb and (data.shape[-1] < data.shape[-2] // 8 and
data.shape[-1] < data.shape[-3] // 8 and
data.shape[-1] < 5):
data = numpy.swapaxes(data, -3, -1)
data = numpy.swapaxes(data, -2, -1)
isrgb = isrgb and data.shape[-1] in (3, 4)
dims -= 3 if isrgb else 2
if isrgb:
data = data[..., :maxdim, :maxdim, :maxdim]
else:
data = data[..., :maxdim, :maxdim]
if photometric == 'palette' and isrgb:
datamax = data.max()
if datamax > 255:
data >>= 8 # possible precision loss
data = data.astype('B')
elif data.dtype.kind in 'ui':
if not (isrgb and data.dtype.itemsize <= 1) or bitspersample is None:
try:
bitspersample = int(math.ceil(math.log(data.max(), 2)))
except Exception:
bitspersample = data.dtype.itemsize * 8
elif not isinstance(bitspersample, int):
# bitspersample can be tuple, e.g. (5, 6, 5)
bitspersample = data.dtype.itemsize * 8
datamax = 2**bitspersample
if isrgb:
if bitspersample < 8:
data <<= 8 - bitspersample
elif bitspersample > 8:
data >>= bitspersample - 8 # precision loss
data = data.astype('B')
elif data.dtype.kind == 'f':
datamax = data.max()
if isrgb and datamax > 1.0:
if data.dtype.char == 'd':
data = data.astype('f')
data /= datamax
elif data.dtype.kind == 'b':
datamax = 1
elif data.dtype.kind == 'c':
# TODO: handle complex types
raise NotImplementedError("complex type")
if not isrgb:
if vmax is None:
vmax = datamax
if vmin is None:
            if data.dtype.kind == 'i':
                dtmin = numpy.iinfo(data.dtype).min
                vmin = numpy.min(data)
                if vmin == dtmin:
                    vmin = numpy.min(data[data > dtmin])
            elif data.dtype.kind == 'f':
                dtmin = numpy.finfo(data.dtype).min
                vmin = numpy.min(data)
                if vmin == dtmin:
                    vmin = numpy.min(data[data > dtmin])
            else:
                vmin = 0
pyplot = sys.modules['matplotlib.pyplot']
if figure is None:
pyplot.rc('font', family='sans-serif', weight='normal', size=8)
figure = pyplot.figure(dpi=dpi, figsize=(10.3, 6.3), frameon=True,
facecolor='1.0', edgecolor='w')
try:
figure.canvas.manager.window.title(title)
except Exception:
pass
pyplot.subplots_adjust(bottom=0.03*(dims+2), top=0.9,
left=0.1, right=0.95, hspace=0.05, wspace=0.0)
subplot = pyplot.subplot(subplot)
if title:
try:
title = unicode(title, 'Windows-1252')
except TypeError:
pass
pyplot.title(title, size=11)
if cmap is None:
if data.dtype.kind in 'ubf' or vmin == 0:
cmap = 'cubehelix'
else:
cmap = 'coolwarm'
if photometric == 'miniswhite':
cmap += '_r'
image = pyplot.imshow(data[(0,) * dims].squeeze(), vmin=vmin, vmax=vmax,
cmap=cmap, interpolation=interpolation, **kwargs)
if not isrgb:
pyplot.colorbar() # panchor=(0.55, 0.5), fraction=0.05
def format_coord(x, y):
# callback function to format coordinate display in toolbar
x = int(x + 0.5)
y = int(y + 0.5)
try:
if dims:
return "%s @ %s [%4i, %4i]" % (cur_ax_dat[1][y, x],
current, x, y)
else:
return "%s @ [%4i, %4i]" % (data[y, x], x, y)
except IndexError:
return ""
pyplot.gca().format_coord = format_coord
if dims:
current = list((0,) * dims)
cur_ax_dat = [0, data[tuple(current)].squeeze()]
sliders = [pyplot.Slider(
pyplot.axes([0.125, 0.03*(axis+1), 0.725, 0.025]),
'Dimension %i' % axis, 0, data.shape[axis]-1, 0, facecolor='0.5',
valfmt='%%.0f [%i]' % data.shape[axis]) for axis in range(dims)]
for slider in sliders:
slider.drawon = False
def set_image(current, sliders=sliders, data=data):
# change image and redraw canvas
cur_ax_dat[1] = data[tuple(current)].squeeze()
image.set_data(cur_ax_dat[1])
for ctrl, index in zip(sliders, current):
ctrl.eventson = False
ctrl.set_val(index)
ctrl.eventson = True
figure.canvas.draw()
def on_changed(index, axis, data=data, current=current):
# callback function for slider change event
index = int(round(index))
cur_ax_dat[0] = axis
if index == current[axis]:
return
if index >= data.shape[axis]:
index = 0
elif index < 0:
index = data.shape[axis] - 1
current[axis] = index
set_image(current)
def on_keypressed(event, data=data, current=current):
# callback function for key press event
key = event.key
axis = cur_ax_dat[0]
if str(key) in '0123456789':
on_changed(key, axis)
elif key == 'right':
on_changed(current[axis] + 1, axis)
elif key == 'left':
on_changed(current[axis] - 1, axis)
elif key == 'up':
cur_ax_dat[0] = 0 if axis == len(data.shape)-1 else axis + 1
elif key == 'down':
cur_ax_dat[0] = len(data.shape)-1 if axis == 0 else axis - 1
elif key == 'end':
on_changed(data.shape[axis] - 1, axis)
elif key == 'home':
on_changed(0, axis)
figure.canvas.mpl_connect('key_press_event', on_keypressed)
for axis, ctrl in enumerate(sliders):
ctrl.on_changed(lambda k, a=axis: on_changed(k, a))
return figure, subplot, image
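# Illustrative usage sketch for the imshow helper above. The random stack and
# parameter choices are placeholders, not taken from this module; only the
# imshow signature documented in its docstring is assumed.
def _example_imshow():
    """Display a random multi-dimensional grayscale stack with sliders."""
    import numpy
    from matplotlib import pyplot  # imshow expects pyplot to be imported
    data = (numpy.random.rand(4, 3, 64, 64) * 255).astype('uint8')
    imshow(data, title="random test stack", photometric='minisblack',
           bitspersample=8)
    pyplot.show()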
def _app_show():
"""Block the GUI. For use as skimage plugin."""
pyplot = sys.modules['matplotlib.pyplot']
pyplot.show()
def askopenfilename(**kwargs):
"""Return file name(s) from Tkinter's file open dialog."""
try:
from Tkinter import Tk
import tkFileDialog as filedialog
except ImportError:
from tkinter import Tk, filedialog
root = Tk()
root.withdraw()
root.update()
filenames = filedialog.askopenfilename(**kwargs)
root.destroy()
return filenames
def main(argv=None):
"""Command line usage main function."""
if float(sys.version[0:3]) < 2.6:
print("This script requires Python version 2.6 or better.")
print("This is Python version %s" % sys.version)
return 0
if argv is None:
argv = sys.argv
import optparse
parser = optparse.OptionParser(
usage="usage: %prog [options] path",
description="Display image data in TIFF files.",
version="%%prog %s" % __version__)
opt = parser.add_option
opt('-p', '--page', dest='page', type='int', default=-1,
help="display single page")
opt('-s', '--series', dest='series', type='int', default=-1,
help="display series of pages of same shape")
opt('--nomultifile', dest='nomultifile', action='store_true',
default=False, help="do not read OME series from multiple files")
opt('--noplot', dest='noplot', action='store_true', default=False,
help="do not display images")
opt('--interpol', dest='interpol', metavar='INTERPOL', default='bilinear',
help="image interpolation method")
opt('--dpi', dest='dpi', type='int', default=96,
help="set plot resolution")
opt('--vmin', dest='vmin', type='int', default=None,
help="set minimum value for colormapping")
opt('--vmax', dest='vmax', type='int', default=None,
help="set maximum value for colormapping")
opt('--debug', dest='debug', action='store_true', default=False,
help="raise exception on failures")
opt('--test', dest='test', action='store_true', default=False,
help="try read all images in path")
opt('--doctest', dest='doctest', action='store_true', default=False,
help="runs the docstring examples")
opt('-v', '--verbose', dest='verbose', action='store_true', default=True)
opt('-q', '--quiet', dest='verbose', action='store_false')
settings, path = parser.parse_args()
path = ' '.join(path)
if settings.doctest:
import doctest
doctest.testmod()
return 0
if not path:
path = askopenfilename(
title="Select a TIFF file",
filetypes=[("TIF files", "*.tif"), ("LSM files", "*.lsm"),
("STK files", "*.stk"), ("allfiles", "*")])
if not path:
parser.error("No file specified")
if settings.test:
test_tifffile(path, settings.verbose)
return 0
if any(i in path for i in '?*'):
path = glob.glob(path)
if not path:
print('no files match the pattern')
return 0
# TODO: handle image sequences
#if len(path) == 1:
path = path[0]
print("Reading file structure...", end=' ')
start = time.time()
try:
tif = TiffFile(path, multifile=not settings.nomultifile)
except Exception as e:
if settings.debug:
raise
else:
print("\n", e)
sys.exit(0)
print("%.3f ms" % ((time.time()-start) * 1e3))
if tif.is_ome:
settings.norgb = True
images = [(None, tif[0 if settings.page < 0 else settings.page])]
if not settings.noplot:
print("Reading image data... ", end=' ')
def notnone(x):
return next(i for i in x if i is not None)
start = time.time()
try:
if settings.page >= 0:
images = [(tif.asarray(key=settings.page),
tif[settings.page])]
elif settings.series >= 0:
images = [(tif.asarray(series=settings.series),
notnone(tif.series[settings.series].pages))]
else:
images = []
for i, s in enumerate(tif.series):
try:
images.append(
(tif.asarray(series=i), notnone(s.pages)))
except ValueError as e:
images.append((None, notnone(s.pages)))
if settings.debug:
raise
else:
print("\n* series %i failed: %s... " % (i, e),
end='')
print("%.3f ms" % ((time.time()-start) * 1e3))
except Exception as e:
if settings.debug:
raise
else:
print(e)
tif.close()
print("\nTIFF file:", tif)
print()
for i, s in enumerate(tif.series):
        print("Series %i" % i)
print(s)
print()
for i, page in images:
print(page)
print(page.tags)
if page.is_indexed:
print("\nColor Map:", page.color_map.shape, page.color_map.dtype)
for attr in ('cz_lsm_info', 'cz_lsm_scan_info', 'uic_tags',
'mm_header', 'imagej_tags', 'micromanager_metadata',
'nih_image_header'):
if hasattr(page, attr):
print("", attr.upper(), Record(getattr(page, attr)), sep="\n")
print()
if page.is_micromanager:
print('MICROMANAGER_FILE_METADATA')
print(Record(tif.micromanager_metadata))
if images and not settings.noplot:
try:
import matplotlib
matplotlib.use('TkAgg')
from matplotlib import pyplot
except ImportError as e:
warnings.warn("failed to import matplotlib.\n%s" % e)
else:
for img, page in images:
if img is None:
continue
vmin, vmax = settings.vmin, settings.vmax
if 'gdal_nodata' in page.tags:
try:
vmin = numpy.min(img[img > float(page.gdal_nodata)])
except ValueError:
pass
if page.is_stk:
try:
vmin = page.uic_tags['min_scale']
vmax = page.uic_tags['max_scale']
except KeyError:
pass
else:
if vmax <= vmin:
vmin, vmax = settings.vmin, settings.vmax
title = "%s\n %s" % (str(tif), str(page))
imshow(img, title=title, vmin=vmin, vmax=vmax,
bitspersample=page.bits_per_sample,
photometric=page.photometric,
interpolation=settings.interpol,
dpi=settings.dpi)
pyplot.show()
TIFFfile = TiffFile # backwards compatibility
if sys.version_info[0] > 2:
basestring = str, bytes
unicode = str
def str2bytes(s, encoding="latin-1"):
return s.encode(encoding)
else:
def str2bytes(s):
return s
if __name__ == "__main__":
sys.exit(main())
| bsd-3-clause |
mitschabaude/nanopores | scripts/numerics/implicit_vs_explicit.py | 1 | 2013 | "compare implicit and explicit molecule model visually for 2D Howorka pore"
import os, numpy, Howorka
from dolfin import *
from nanopores import *
#from matplotlib.pyplot import figure, plot, legend, show, title, xlabel, ylabel, savefig
add_params(
himp = .2,
hexp = .2,
Nimp = 1e4,
Nexp = 1e4,
z0 = 2.,
Qmol = -1.,
)
# simulate explicit molecule
def explicit():
geo, phys = Howorka.setup2D(z0=z0, h=hexp, Qmol=Qmol)
#plot(geo.pwconst("permittivity"))
pb, pnps = Howorka.solve2D(geo, phys, Nmax=Nexp, cheapest=True)
(v, cp, cm, u, p) = pnps.solutions()
pnps.visualize("fluid")
return geo, phys, v, u
# simulate implicit molecule
def implicit():
geo, phys = Howorka.setup2D(z0=None, h=himp)
pb, pnps = Howorka.solve2D(geo, phys, Nmax=Nimp, cheapest=True)
(v, cp, cm, u, p) = pnps.solutions()
#pnps.visualize("fluid")
return v, u
geo, phys, vexp, uexp = explicit()
vimp, uimp = implicit()
# analytical potential from molecule in homogeneous dielectric fluid
eps = phys.eperm*phys.rpermw
q = phys.qq
lscale = phys.lscale
x0 = [0., z0]
class vAna(Expression):
def eval(self, value, x):
r = sqrt(sum((t - t0)**2 for t, t0 in zip(x, x0)))
value[0] = -q*lscale/(4.*pi*eps*r)
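# vAna above is the Coulomb potential of the point charge in a homogeneous
# dielectric, V(r) = -q*lscale/(4*pi*eps*r). A standalone numpy version of the
# same expression (an added convenience sketch, not used by the comparison):
def v_point_charge(r):
    "analytic potential at distance r from the molecule (same scaling as vAna)"
    return -q*lscale/(4.*numpy.pi*eps*r)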
synonyme = dict(
notmol = {"fluid", "dna", "membrane"}
)
geo.import_synonymes(synonyme)
mesh = geo.submesh("notmol")
V = FunctionSpace(mesh, "CG", 1)
v = Function(V)
va = Function(V)
v0 = Function(V)
dv = Function(V)
v.interpolate(vexp)
v0.interpolate(vimp)
plot(v, title="explicit")
plot(v0, title="implicit")
va.interpolate(vAna())
plot(va, title="analytical")
dv.vector()[:] = v.vector()[:] - v0.vector()[:] #- va.vector()[:]
plot(dv, title="difference")
U = VectorFunctionSpace(mesh, "CG", 1)
u = Function(U)
u0 = Function(U)
du = Function(U)
u.interpolate(uexp)
u0.interpolate(uimp)
plot(u, title="explicit")
plot(u0, title="implicit")
du.vector()[:] = u.vector()[:] - u0.vector()[:]
plot(du, title="difference")
interactive()
| mit |
davidgardenier/frbpoppy | tests/monte_carlo/run_mc.py | 1 | 4393 | """Link together all classes to run a full Monte Carlo."""
import pandas as pd
import numpy as np
import frbpoppy.paths
import os
from simulations import MonteCarlo, POP_SIZE
from goodness_of_fit import GoodnessOfFit
from plot import Plot
GENERATE = True
CALC_GOFS = True
RUNS = [10]
class RunOverview:
"""Gather input for each run."""
def __init__(self, load_csv=True):
p = frbpoppy.paths.populations()
self.filename = f'{p}mc/run_overview.csv'
if load_csv and os.path.isfile(self.filename):
self.df = self.load()
else:
self.df = self.gen_runs()
def gen_run(self):
return {'alpha': None,
'si': None,
'li': None,
'lum_min': None,
'lum_max': None,
'w_mean': None,
'w_std': None,
'dm_igm_slope': None,
'dm_host': None,
'execute': True,
'par_set': 0,
'run': 0}
def gen_runs(self):
runs = []
for i in range(10):
r = self.gen_run()
r['run_number'] = i + 1
r['execute'] = True
r['par_set'] = i % 4 + 1
if i == 9: # Holder for best values
r['execute'] = False
runs.append(r)
df = pd.DataFrame(runs)
df.set_index('run', inplace=True)
return df
def load(self):
df = pd.read_csv(self.filename)
df.run = df.run.astype(int)
df.par_set = df.par_set.astype(int)
df = df.loc[:, ~df.columns.str.contains('^Unnamed')]
return df
def save(self, df=None):
if df is None:
df = self.df
df.to_csv(self.filename)
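# Minimal usage sketch for RunOverview (parameter values here are arbitrary
# placeholders; only the class interface defined above is relied on):
def _example_overview():
    overview = RunOverview(load_csv=False)  # start from the generated defaults
    overview.df['alpha'] = -1.5             # pencil in a trial value for all runs
    overview.save()                         # writes <populations>/mc/run_overview.csv
    return overview.load()                  # read it back as a DataFrame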
if __name__ == '__main__':
print('Commencing')
runs = RunOverview(load_csv=True)
mc = MonteCarlo(pop_size=POP_SIZE)
for i, run in runs.df.iterrows():
run = runs.df.iloc[i]
print('='*50)
print(f'On Run {run.run} with par_set {run.par_set}')
print('='*50)
print(run)
if run.run not in RUNS:
continue
# Generate parameter sets
if GENERATE:
if run.par_set == 1:
mc.gen_par_set_1(lum_min=run.lum_min,
lum_max=run.lum_max,
w_mean=run.w_mean,
w_std=run.w_std,
dm_igm_slope=run.dm_igm_slope,
dm_host=run.dm_host,
run=run.run)
if run.par_set == 2:
mc.gen_par_set_2(alpha=run.alpha,
si=run.si,
w_mean=run.w_mean,
w_std=run.w_std,
dm_igm_slope=run.dm_igm_slope,
dm_host=run.dm_host,
run=run.run)
if run.par_set == 3:
mc.gen_par_set_3(alpha=run.alpha,
si=run.si,
li=run.li,
lum_min=run.lum_min,
lum_max=run.lum_max,
dm_igm_slope=run.dm_igm_slope,
dm_host=run.dm_host,
run=run.run)
if run.par_set == 4:
mc.gen_par_set_4(alpha=run.alpha,
si=run.si,
li=run.li,
lum_min=run.lum_min,
lum_max=run.lum_max,
w_mean=run.w_mean,
w_std=run.w_std,
run=run.run)
# Determine the goodness of fit
gf = GoodnessOfFit()
if CALC_GOFS:
gf.calc_gofs(run.run)
# Find global maximums
gf = GoodnessOfFit()
gms = gf.calc_global_max(run.run)
print('\n')
print(f' Best fits from run {run.run}-> {gms}')
print('\n')
# Adapt the input for future runs
for j in range(i+1, len(runs.df)):
for par in gms:
runs.df.at[j, par] = gms[par][0]
runs.save()
Plot()
| mit |
pv/scikit-learn | sklearn/cluster/mean_shift_.py | 106 | 14056 | """Mean shift clustering algorithm.
Mean shift clustering aims to discover *blobs* in a smooth density of
samples. It is a centroid based algorithm, which works by updating candidates
for centroids to be the mean of the points within a given region. These
candidates are then filtered in a post-processing stage to eliminate
near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
"""
# Authors: Conrad Lee <[email protected]>
# Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
import numpy as np
import warnings
from collections import defaultdict
from ..externals import six
from ..utils.validation import check_is_fitted
from ..utils import extmath, check_random_state, gen_batches, check_array
from ..base import BaseEstimator, ClusterMixin
from ..neighbors import NearestNeighbors
from ..metrics.pairwise import pairwise_distances_argmin
def estimate_bandwidth(X, quantile=0.3, n_samples=None, random_state=0):
"""Estimate the bandwidth to use with the mean-shift algorithm.
    Note that this function takes time at least quadratic in n_samples. For
    large datasets, it is wise to set the n_samples parameter to a small value.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points.
quantile : float, default 0.3
should be between [0, 1]
0.5 means that the median of all pairwise distances is used.
n_samples : int, optional
The number of samples to use. If not given, all samples are used.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Returns
-------
bandwidth : float
The bandwidth parameter.
"""
random_state = check_random_state(random_state)
if n_samples is not None:
idx = random_state.permutation(X.shape[0])[:n_samples]
X = X[idx]
nbrs = NearestNeighbors(n_neighbors=int(X.shape[0] * quantile))
nbrs.fit(X)
bandwidth = 0.
for batch in gen_batches(len(X), 500):
d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)
bandwidth += np.max(d, axis=1).sum()
return bandwidth / X.shape[0]
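# Usage sketch: estimate_bandwidth is typically called once on (a subsample of)
# the data and the result passed to MeanShift/mean_shift. The random data below
# is an arbitrary placeholder for illustration.
def _example_estimate_bandwidth():
    rng = check_random_state(0)
    X = rng.randn(500, 2)
    # subsample to keep the quadratic pairwise-distance cost manageable
    return estimate_bandwidth(X, quantile=0.2, n_samples=200, random_state=0)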
def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, max_iter=300,
max_iterations=None):
"""Perform mean shift clustering of data using a flat kernel.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input data.
bandwidth : float, optional
Kernel bandwidth.
If bandwidth is not given, it is determined using a heuristic based on
the median of all pairwise distances. This will take quadratic time in
the number of samples. The sklearn.cluster.estimate_bandwidth function
can be used to do this more efficiently.
seeds : array-like, shape=[n_seeds, n_features] or None
Point used as initial kernel locations. If None and bin_seeding=False,
each data point is used as a seed. If None and bin_seeding=True,
see bin_seeding.
bin_seeding : boolean, default=False
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
Ignored if seeds argument is not None.
min_bin_freq : int, default=1
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
max_iter : int, default 300
Maximum number of iterations, per seed point before the clustering
operation terminates (for that seed point), if has not converged yet.
Returns
-------
cluster_centers : array, shape=[n_clusters, n_features]
Coordinates of cluster centers.
labels : array, shape=[n_samples]
Cluster labels for each point.
Notes
-----
See examples/cluster/plot_meanshift.py for an example.
"""
# FIXME To be removed in 0.18
if max_iterations is not None:
warnings.warn("The `max_iterations` parameter has been renamed to "
"`max_iter` from version 0.16. The `max_iterations` "
"parameter will be removed in 0.18", DeprecationWarning)
max_iter = max_iterations
if bandwidth is None:
bandwidth = estimate_bandwidth(X)
elif bandwidth <= 0:
raise ValueError("bandwidth needs to be greater than zero or None, got %f" %
bandwidth)
if seeds is None:
if bin_seeding:
seeds = get_bin_seeds(X, bandwidth, min_bin_freq)
else:
seeds = X
n_samples, n_features = X.shape
stop_thresh = 1e-3 * bandwidth # when mean has converged
center_intensity_dict = {}
nbrs = NearestNeighbors(radius=bandwidth).fit(X)
# For each seed, climb gradient until convergence or max_iter
for my_mean in seeds:
completed_iterations = 0
while True:
# Find mean of points within bandwidth
i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth,
return_distance=False)[0]
points_within = X[i_nbrs]
if len(points_within) == 0:
break # Depending on seeding strategy this condition may occur
my_old_mean = my_mean # save the old mean
my_mean = np.mean(points_within, axis=0)
# If converged or at max_iter, adds the cluster
if (extmath.norm(my_mean - my_old_mean) < stop_thresh or
completed_iterations == max_iter):
center_intensity_dict[tuple(my_mean)] = len(points_within)
break
completed_iterations += 1
if not center_intensity_dict:
# nothing near seeds
raise ValueError("No point was within bandwidth=%f of any seed."
" Try a different seeding strategy or increase the bandwidth."
% bandwidth)
# POST PROCESSING: remove near duplicate points
# If the distance between two kernels is less than the bandwidth,
# then we have to remove one because it is a duplicate. Remove the
# one with fewer points.
sorted_by_intensity = sorted(center_intensity_dict.items(),
key=lambda tup: tup[1], reverse=True)
sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
unique = np.ones(len(sorted_centers), dtype=np.bool)
nbrs = NearestNeighbors(radius=bandwidth).fit(sorted_centers)
for i, center in enumerate(sorted_centers):
if unique[i]:
neighbor_idxs = nbrs.radius_neighbors([center],
return_distance=False)[0]
unique[neighbor_idxs] = 0
unique[i] = 1 # leave the current point as unique
cluster_centers = sorted_centers[unique]
# ASSIGN LABELS: a point belongs to the cluster that it is closest to
nbrs = NearestNeighbors(n_neighbors=1).fit(cluster_centers)
labels = np.zeros(n_samples, dtype=np.int)
distances, idxs = nbrs.kneighbors(X)
if cluster_all:
labels = idxs.flatten()
else:
labels.fill(-1)
bool_selector = distances.flatten() <= bandwidth
labels[bool_selector] = idxs.flatten()[bool_selector]
return cluster_centers, labels
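# Usage sketch for the functional interface above; the two synthetic blobs are
# placeholders chosen only to make the clustering outcome obvious.
def _example_mean_shift():
    rng = check_random_state(0)
    X = np.vstack([rng.randn(100, 2), rng.randn(100, 2) + 5])
    cluster_centers, labels = mean_shift(X, bandwidth=2.0, bin_seeding=True)
    return cluster_centers, labels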
def get_bin_seeds(X, bin_size, min_bin_freq=1):
"""Finds seeds for mean_shift.
Finds seeds by first binning data onto a grid whose lines are
spaced bin_size apart, and then choosing those bins with at least
min_bin_freq points.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points, the same points that will be used in mean_shift.
bin_size : float
Controls the coarseness of the binning. Smaller values lead
to more seeding (which is computationally more expensive). If you're
not sure how to set this, set it to the value of the bandwidth used
in clustering.mean_shift.
min_bin_freq : integer, optional
Only bins with at least min_bin_freq will be selected as seeds.
Raising this value decreases the number of seeds found, which
makes mean_shift computationally cheaper.
Returns
-------
bin_seeds : array-like, shape=[n_samples, n_features]
Points used as initial kernel positions in clustering.mean_shift.
"""
# Bin points
bin_sizes = defaultdict(int)
for point in X:
binned_point = np.round(point / bin_size)
bin_sizes[tuple(binned_point)] += 1
# Select only those bins as seeds which have enough members
bin_seeds = np.array([point for point, freq in six.iteritems(bin_sizes) if
freq >= min_bin_freq], dtype=np.float32)
if len(bin_seeds) == len(X):
warnings.warn("Binning data failed with provided bin_size=%f, using data"
" points as seeds." % bin_size)
return X
bin_seeds = bin_seeds * bin_size
return bin_seeds
class MeanShift(BaseEstimator, ClusterMixin):
"""Mean shift clustering using a flat kernel.
Mean shift clustering aims to discover "blobs" in a smooth density of
samples. It is a centroid-based algorithm, which works by updating
candidates for centroids to be the mean of the points within a given
region. These candidates are then filtered in a post-processing stage to
eliminate near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
bandwidth : float, optional
Bandwidth used in the RBF kernel.
If not given, the bandwidth is estimated using
sklearn.cluster.estimate_bandwidth; see the documentation for that
function for hints on scalability (see also the Notes, below).
seeds : array, shape=[n_samples, n_features], optional
Seeds used to initialize kernels. If not set,
the seeds are calculated by clustering.get_bin_seeds
with bandwidth as the grid size and default values for
other parameters.
bin_seeding : boolean, optional
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
default value: False
Ignored if seeds argument is not None.
min_bin_freq : int, optional
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds. If not defined, set to 1.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers.
labels_ :
Labels of each point.
Notes
-----
Scalability:
Because this implementation uses a flat kernel and
    a Ball Tree to look up members of each kernel, the complexity is
    O(T*n*log(n)) in lower dimensions, with n the number of samples
    and T the number of points. In higher dimensions the complexity will
tend towards O(T*n^2).
Scalability can be boosted by using fewer seeds, for example by using
a higher value of min_bin_freq in the get_bin_seeds function.
Note that the estimate_bandwidth function is much less scalable than the
mean shift algorithm and will be the bottleneck if it is used.
References
----------
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
def __init__(self, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True):
self.bandwidth = bandwidth
self.seeds = seeds
self.bin_seeding = bin_seeding
self.cluster_all = cluster_all
self.min_bin_freq = min_bin_freq
def fit(self, X, y=None):
"""Perform clustering.
Parameters
-----------
X : array-like, shape=[n_samples, n_features]
Samples to cluster.
"""
X = check_array(X)
self.cluster_centers_, self.labels_ = \
mean_shift(X, bandwidth=self.bandwidth, seeds=self.seeds,
min_bin_freq=self.min_bin_freq,
bin_seeding=self.bin_seeding,
cluster_all=self.cluster_all)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, "cluster_centers_")
return pairwise_distances_argmin(X, self.cluster_centers_)
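# Usage sketch for the estimator API above (synthetic data, arbitrary bandwidth):
# fit, then read labels_/cluster_centers_ or call predict on new samples.
def _example_meanshift_estimator():
    rng = check_random_state(1)
    X = np.vstack([rng.randn(100, 2), rng.randn(100, 2) + 5])
    ms = MeanShift(bandwidth=2.0, bin_seeding=True).fit(X)
    return ms.cluster_centers_, ms.predict([[0., 0.], [5., 5.]])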
| bsd-3-clause |
kelseyoo14/Wander | venv_2_7/lib/python2.7/site-packages/pandas/computation/ops.py | 9 | 15234 | """Operator classes for eval.
"""
import operator as op
from functools import partial
from datetime import datetime
import numpy as np
import pandas as pd
from pandas.compat import PY3, string_types, text_type
import pandas.core.common as com
from pandas.core.base import StringMixin
from pandas.computation.common import _ensure_decoded, _result_type_many
from pandas.computation.scope import _DEFAULT_GLOBALS
_reductions = 'sum', 'prod'
_unary_math_ops = ('sin', 'cos', 'exp', 'log', 'expm1', 'log1p',
'sqrt', 'sinh', 'cosh', 'tanh', 'arcsin', 'arccos',
'arctan', 'arccosh', 'arcsinh', 'arctanh', 'abs')
_binary_math_ops = ('arctan2',)
_mathops = _unary_math_ops + _binary_math_ops
_LOCAL_TAG = '__pd_eval_local_'
class UndefinedVariableError(NameError):
"""NameError subclass for local variables."""
def __init__(self, name, is_local):
if is_local:
msg = 'local variable {0!r} is not defined'
else:
msg = 'name {0!r} is not defined'
super(UndefinedVariableError, self).__init__(msg.format(name))
class Term(StringMixin):
def __new__(cls, name, env, side=None, encoding=None):
klass = Constant if not isinstance(name, string_types) else cls
supr_new = super(Term, klass).__new__
return supr_new(klass)
def __init__(self, name, env, side=None, encoding=None):
self._name = name
self.env = env
self.side = side
tname = text_type(name)
self.is_local = (tname.startswith(_LOCAL_TAG) or
tname in _DEFAULT_GLOBALS)
self._value = self._resolve_name()
self.encoding = encoding
@property
def local_name(self):
return self.name.replace(_LOCAL_TAG, '')
def __unicode__(self):
return com.pprint_thing(self.name)
def __call__(self, *args, **kwargs):
return self.value
def evaluate(self, *args, **kwargs):
return self
def _resolve_name(self):
res = self.env.resolve(self.local_name, is_local=self.is_local)
self.update(res)
if hasattr(res, 'ndim') and res.ndim > 2:
raise NotImplementedError("N-dimensional objects, where N > 2,"
" are not supported with eval")
return res
def update(self, value):
"""
search order for local (i.e., @variable) variables:
scope, key_variable
[('locals', 'local_name'),
('globals', 'local_name'),
('locals', 'key'),
('globals', 'key')]
"""
key = self.name
# if it's a variable name (otherwise a constant)
if isinstance(key, string_types):
self.env.swapkey(self.local_name, key, new_value=value)
self.value = value
@property
def isscalar(self):
return np.isscalar(self._value)
@property
def type(self):
try:
# potentially very slow for large, mixed dtype frames
return self._value.values.dtype
except AttributeError:
try:
# ndarray
return self._value.dtype
except AttributeError:
# scalar
return type(self._value)
return_type = type
@property
def raw(self):
return com.pprint_thing('{0}(name={1!r}, type={2})'
''.format(self.__class__.__name__, self.name,
self.type))
@property
def is_datetime(self):
try:
t = self.type.type
except AttributeError:
t = self.type
return issubclass(t, (datetime, np.datetime64))
@property
def value(self):
return self._value
@value.setter
def value(self, new_value):
self._value = new_value
@property
def name(self):
return self._name
@name.setter
def name(self, new_name):
self._name = new_name
@property
def ndim(self):
return self._value.ndim
class Constant(Term):
def __init__(self, value, env, side=None, encoding=None):
super(Constant, self).__init__(value, env, side=side,
encoding=encoding)
def _resolve_name(self):
return self._name
@property
def name(self):
return self.value
_bool_op_map = {'not': '~', 'and': '&', 'or': '|'}
class Op(StringMixin):
"""Hold an operator of arbitrary arity
"""
def __init__(self, op, operands, *args, **kwargs):
self.op = _bool_op_map.get(op, op)
self.operands = operands
self.encoding = kwargs.get('encoding', None)
def __iter__(self):
return iter(self.operands)
def __unicode__(self):
"""Print a generic n-ary operator and its operands using infix
notation"""
# recurse over the operands
parened = ('({0})'.format(com.pprint_thing(opr))
for opr in self.operands)
return com.pprint_thing(' {0} '.format(self.op).join(parened))
@property
def return_type(self):
# clobber types to bool if the op is a boolean operator
if self.op in (_cmp_ops_syms + _bool_ops_syms):
return np.bool_
return _result_type_many(*(term.type for term in com.flatten(self)))
@property
def has_invalid_return_type(self):
types = self.operand_types
obj_dtype_set = frozenset([np.dtype('object')])
return self.return_type == object and types - obj_dtype_set
@property
def operand_types(self):
return frozenset(term.type for term in com.flatten(self))
@property
def isscalar(self):
return all(operand.isscalar for operand in self.operands)
@property
def is_datetime(self):
try:
t = self.return_type.type
except AttributeError:
t = self.return_type
return issubclass(t, (datetime, np.datetime64))
def _in(x, y):
"""Compute the vectorized membership of ``x in y`` if possible, otherwise
use Python.
"""
try:
return x.isin(y)
except AttributeError:
if com.is_list_like(x):
try:
return y.isin(x)
except AttributeError:
pass
return x in y
def _not_in(x, y):
"""Compute the vectorized membership of ``x not in y`` if possible,
otherwise use Python.
"""
try:
return ~x.isin(y)
except AttributeError:
if com.is_list_like(x):
try:
return ~y.isin(x)
except AttributeError:
pass
return x not in y
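# Behaviour sketch for _in/_not_in above: the vectorized Series.isin path is
# preferred, with plain Python membership as the fallback (values are arbitrary).
def _example_membership():
    s = pd.Series([1, 2, 3])
    vectorized = _in(s, [2, 3])   # boolean Series: [False, True, True]
    scalar = _in(2, [2, 3])       # falls back to plain `in`: True
    return vectorized, scalar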
_cmp_ops_syms = '>', '<', '>=', '<=', '==', '!=', 'in', 'not in'
_cmp_ops_funcs = op.gt, op.lt, op.ge, op.le, op.eq, op.ne, _in, _not_in
_cmp_ops_dict = dict(zip(_cmp_ops_syms, _cmp_ops_funcs))
_bool_ops_syms = '&', '|', 'and', 'or'
_bool_ops_funcs = op.and_, op.or_, op.and_, op.or_
_bool_ops_dict = dict(zip(_bool_ops_syms, _bool_ops_funcs))
_arith_ops_syms = '+', '-', '*', '/', '**', '//', '%'
_arith_ops_funcs = (op.add, op.sub, op.mul, op.truediv if PY3 else op.div,
op.pow, op.floordiv, op.mod)
_arith_ops_dict = dict(zip(_arith_ops_syms, _arith_ops_funcs))
_special_case_arith_ops_syms = '**', '//', '%'
_special_case_arith_ops_funcs = op.pow, op.floordiv, op.mod
_special_case_arith_ops_dict = dict(zip(_special_case_arith_ops_syms,
_special_case_arith_ops_funcs))
_binary_ops_dict = {}
for d in (_cmp_ops_dict, _bool_ops_dict, _arith_ops_dict):
_binary_ops_dict.update(d)
def _cast_inplace(terms, dtype):
"""Cast an expression inplace.
Parameters
----------
terms : Op
The expression that should cast.
dtype : str or numpy.dtype
The dtype to cast to.
"""
dt = np.dtype(dtype)
for term in terms:
try:
new_value = term.value.astype(dt)
except AttributeError:
new_value = dt.type(term.value)
term.update(new_value)
def is_term(obj):
return isinstance(obj, Term)
class BinOp(Op):
"""Hold a binary operator and its operands
Parameters
----------
op : str
left : Term or Op
right : Term or Op
"""
def __init__(self, op, lhs, rhs, **kwargs):
super(BinOp, self).__init__(op, (lhs, rhs))
self.lhs = lhs
self.rhs = rhs
self._disallow_scalar_only_bool_ops()
self.convert_values()
try:
self.func = _binary_ops_dict[op]
except KeyError:
# has to be made a list for python3
keys = list(_binary_ops_dict.keys())
raise ValueError('Invalid binary operator {0!r}, valid'
' operators are {1}'.format(op, keys))
def __call__(self, env):
"""Recursively evaluate an expression in Python space.
Parameters
----------
env : Scope
Returns
-------
object
The result of an evaluated expression.
"""
# handle truediv
if self.op == '/' and env.scope['truediv']:
self.func = op.truediv
# recurse over the left/right nodes
left = self.lhs(env)
right = self.rhs(env)
return self.func(left, right)
def evaluate(self, env, engine, parser, term_type, eval_in_python):
"""Evaluate a binary operation *before* being passed to the engine.
Parameters
----------
env : Scope
engine : str
parser : str
term_type : type
eval_in_python : list
Returns
-------
term_type
The "pre-evaluated" expression as an instance of ``term_type``
"""
if engine == 'python':
res = self(env)
else:
# recurse over the left/right nodes
left = self.lhs.evaluate(env, engine=engine, parser=parser,
term_type=term_type,
eval_in_python=eval_in_python)
right = self.rhs.evaluate(env, engine=engine, parser=parser,
term_type=term_type,
eval_in_python=eval_in_python)
# base cases
if self.op in eval_in_python:
res = self.func(left.value, right.value)
else:
res = pd.eval(self, local_dict=env, engine=engine,
parser=parser)
name = env.add_tmp(res)
return term_type(name, env=env)
def convert_values(self):
"""Convert datetimes to a comparable value in an expression.
"""
def stringify(value):
if self.encoding is not None:
encoder = partial(com.pprint_thing_encoded,
encoding=self.encoding)
else:
encoder = com.pprint_thing
return encoder(value)
lhs, rhs = self.lhs, self.rhs
if is_term(lhs) and lhs.is_datetime and is_term(rhs) and rhs.isscalar:
v = rhs.value
if isinstance(v, (int, float)):
v = stringify(v)
v = pd.Timestamp(_ensure_decoded(v))
if v.tz is not None:
v = v.tz_convert('UTC')
self.rhs.update(v)
if is_term(rhs) and rhs.is_datetime and is_term(lhs) and lhs.isscalar:
v = lhs.value
if isinstance(v, (int, float)):
v = stringify(v)
v = pd.Timestamp(_ensure_decoded(v))
if v.tz is not None:
v = v.tz_convert('UTC')
self.lhs.update(v)
def _disallow_scalar_only_bool_ops(self):
if ((self.lhs.isscalar or self.rhs.isscalar) and
self.op in _bool_ops_dict and
(not (issubclass(self.rhs.return_type, (bool, np.bool_)) and
issubclass(self.lhs.return_type, (bool, np.bool_))))):
raise NotImplementedError("cannot evaluate scalar only bool ops")
def isnumeric(dtype):
return issubclass(np.dtype(dtype).type, np.number)
class Div(BinOp):
"""Div operator to special case casting.
Parameters
----------
lhs, rhs : Term or Op
The Terms or Ops in the ``/`` expression.
truediv : bool
Whether or not to use true division. With Python 3 this happens
regardless of the value of ``truediv``.
"""
def __init__(self, lhs, rhs, truediv, *args, **kwargs):
super(Div, self).__init__('/', lhs, rhs, *args, **kwargs)
if not isnumeric(lhs.return_type) or not isnumeric(rhs.return_type):
raise TypeError("unsupported operand type(s) for {0}:"
" '{1}' and '{2}'".format(self.op,
lhs.return_type,
rhs.return_type))
if truediv or PY3:
_cast_inplace(com.flatten(self), np.float_)
_unary_ops_syms = '+', '-', '~', 'not'
_unary_ops_funcs = op.pos, op.neg, op.invert, op.invert
_unary_ops_dict = dict(zip(_unary_ops_syms, _unary_ops_funcs))
class UnaryOp(Op):
"""Hold a unary operator and its operands
Parameters
----------
op : str
The token used to represent the operator.
operand : Term or Op
The Term or Op operand to the operator.
Raises
------
ValueError
* If no function associated with the passed operator token is found.
"""
def __init__(self, op, operand):
super(UnaryOp, self).__init__(op, (operand,))
self.operand = operand
try:
self.func = _unary_ops_dict[op]
except KeyError:
raise ValueError('Invalid unary operator {0!r}, valid operators '
'are {1}'.format(op, _unary_ops_syms))
def __call__(self, env):
operand = self.operand(env)
return self.func(operand)
def __unicode__(self):
return com.pprint_thing('{0}({1})'.format(self.op, self.operand))
@property
def return_type(self):
operand = self.operand
if operand.return_type == np.dtype('bool'):
return np.dtype('bool')
if (isinstance(operand, Op) and
(operand.op in _cmp_ops_dict or operand.op in _bool_ops_dict)):
return np.dtype('bool')
return np.dtype('int')
class MathCall(Op):
def __init__(self, func, args):
super(MathCall, self).__init__(func.name, args)
self.func = func
def __call__(self, env):
operands = [op(env) for op in self.operands]
return self.func.func(*operands)
def __unicode__(self):
operands = map(str, self.operands)
return com.pprint_thing('{0}({1})'.format(self.op, ','.join(operands)))
class FuncNode(object):
def __init__(self, name):
if name not in _mathops:
raise ValueError("\"{0}\" is not a supported function".format(name))
self.name = name
self.func = getattr(np, name)
def __call__(self, *args):
return MathCall(self, args)
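# Sketch of how these operator classes are exercised through the public API
# (the frame and expressions are arbitrary examples):
def _example_eval():
    df = pd.DataFrame({'a': [1., 2., 3.], 'b': [3., 2., 1.]})
    mask = pd.eval("df.a > df.b")   # parsed into BinOp('>') over two Terms
    total = pd.eval("df.a + df.b")  # BinOp('+'), computed by the active engine
    return mask, total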
| artistic-2.0 |
RPGOne/scikit-learn | examples/linear_model/plot_bayesian_ridge.py | 50 | 2733 | """
=========================
Bayesian Ridge Regression
=========================
Computes a Bayesian Ridge Regression on a synthetic dataset.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
As the prior on the weights is a Gaussian prior, the histogram of the
estimated weights is Gaussian.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import BayesianRidge, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features) # Create Gaussian data
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the Bayesian Ridge Regression and an OLS for comparison
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
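# Optional check (not part of the original example): BayesianRidge exposes the
# estimated precisions of the noise (alpha_) and of the weights (lambda_),
# which can be compared against the values used to simulate the data above.
print("Estimated alpha_ (noise precision): %.2f, simulated: %.0f"
      % (clf.alpha_, alpha_))
print("Estimated lambda_ (weight precision): %.2f, simulated: %.0f"
      % (clf.lambda_, lambda_))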
###############################################################################
# Plot true weights, estimated weights and histogram of the weights
lw = 2
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, color='lightgreen', linewidth=lw,
label="Bayesian Ridge estimate")
plt.plot(w, color='gold', linewidth=lw, label="Ground truth")
plt.plot(ols.coef_, color='navy', linestyle='--', label="OLS estimate")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc="best", prop=dict(size=12))
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, color='gold', log=True)
plt.scatter(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
color='navy', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc="upper left")
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_, color='navy', linewidth=lw)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
griffincalme/MicroDeconvolution | TestingScripts/RetinexFilter.py | 1 | 2673 | #multiscale retinex color restoration?
#adapted from https://gist.github.com/shunsukeaihara/4603234
import numpy as np
from skimage.io import imread
import matplotlib.pyplot as plt
#imgArray = imread('/home/griffin/Desktop/MicroDeconvolution/TestingScripts/SamplePics/SK107 1960 B11_2_CD3_IFN G_Set 2_20X_Take 2.jpg')
#imgArray = imread(r'/home/griffin/Desktop/Screenshot at 2016-07-25 19:46:26.png')
imgArray = imread(r'/home/griffin/Desktop/house.png')
print(type(imgArray))
#plt.imshow(imgArray)
def retinex(nimg):
#rgb array -> splits into three 2d color arrays (one for each of R, G, and B)
nimg = nimg.transpose(2, 0, 1).astype(np.uint32)
max_intens_green = nimg[1].max()
nimg[0] = np.minimum(nimg[0]*(max_intens_green/float(nimg[0].max())),255)
nimg[2] = np.minimum(nimg[2]*(max_intens_green/float(nimg[2].max())),255)
return nimg.transpose(1, 2, 0).astype(np.uint8)
def retinex_adjust(nimg):
"""
from 'Combining Gray World and Retinex Theory for Automatic White Balance in Digital Photography'
"""
nimg = nimg.transpose(2, 0, 1).astype(np.uint32)
sum_r = np.sum(nimg[0])
sum_r2 = np.sum(nimg[0]**2)
max_r = nimg[0].max()
max_r2 = max_r**2
sum_g = np.sum(nimg[1])
max_g = nimg[1].max()
coefficient = np.linalg.solve(np.array([[sum_r2,sum_r],[max_r2,max_r]]),
np.array([sum_g,max_g]))
nimg[0] = np.minimum((nimg[0]**2)*coefficient[0] + nimg[0]*coefficient[1],255)
    # blue channel: the original gist indexed the green channel (nimg[1]) here
    # and squared max_r instead of max_b; fixed to operate on the blue channel (nimg[2])
    sum_b = np.sum(nimg[2])
    sum_b2 = np.sum(nimg[2]**2)
    max_b = nimg[2].max()
    max_b2 = max_b**2
    coefficient = np.linalg.solve(np.array([[sum_b2,sum_b],[max_b2,max_b]]),
                                  np.array([sum_g,max_g]))
    nimg[2] = np.minimum((nimg[2]**2)*coefficient[0] + nimg[2]*coefficient[1],255)
return nimg.transpose(1, 2, 0).astype(np.uint8)
retinexImage = retinex(imgArray)
retinexAdjustedImage = retinex_adjust(retinexImage)
r_adj_w_out = retinex_adjust(imgArray)
"""PLOTTING IMAGES"""
#Plot images
fig, axes = plt.subplots(2, 2, figsize=(12, 11))
#ax0 = axes.ravel()
ax0, ax1, ax2, ax3 = axes.ravel()
ax0.imshow(imgArray, cmap=plt.cm.gray, interpolation='nearest')
ax0.set_title("Original")
ax1.imshow(retinexImage, cmap=plt.cm.gray, interpolation='nearest')
ax1.set_title("retinex image")
ax2.imshow(retinexAdjustedImage, cmap=plt.cm.gray)
ax2.set_title("retinex image adjusted")
ax3.imshow(r_adj_w_out, cmap=plt.cm.gray)
ax3.set_title("retinex adjust without retinex function")
for ax in axes.ravel():
ax.axis('off')
fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=None)
plt.show() | apache-2.0 |
farizrahman4u/keras-contrib | examples/conll2000_chunking_crf.py | 2 | 3642 | """Train CRF and BiLSTM-CRF on CONLL2000 chunking data,
similar to https://arxiv.org/pdf/1508.01991v1.pdf.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy
from collections import Counter
from keras.models import Sequential
from keras.layers import Embedding, Bidirectional, LSTM
from keras_contrib.layers import CRF
from keras_contrib.losses import crf_loss
from keras_contrib.metrics import crf_viterbi_accuracy
from keras_contrib.datasets import conll2000
EPOCHS = 10
EMBED_DIM = 200
BiRNN_UNITS = 200
def classification_report(y_true, y_pred, labels):
    '''Similar to the one in sklearn.metrics,
    reports per-class recall, precision and F1 score'''
y_true = numpy.asarray(y_true).ravel()
y_pred = numpy.asarray(y_pred).ravel()
corrects = Counter(yt for yt, yp in zip(y_true, y_pred) if yt == yp)
y_true_counts = Counter(y_true)
y_pred_counts = Counter(y_pred)
report = ((lab, # label
corrects[i] / max(1, y_true_counts[i]), # recall
corrects[i] / max(1, y_pred_counts[i]), # precision
y_true_counts[i] # support
) for i, lab in enumerate(labels))
report = [(l, r, p, 2 * r * p / max(1e-9, r + p), s) for l, r, p, s in report]
print('{:<15}{:>10}{:>10}{:>10}{:>10}\n'.format('',
'recall',
'precision',
'f1-score',
'support'))
formatter = '{:<15}{:>10.2f}{:>10.2f}{:>10.2f}{:>10d}'.format
for r in report:
print(formatter(*r))
print('')
report2 = list(zip(*[(r * s, p * s, f1 * s) for l, r, p, f1, s in report]))
N = len(y_true)
print(formatter('avg / total',
sum(report2[0]) / N,
sum(report2[1]) / N,
sum(report2[2]) / N, N) + '\n')
# ------
# Data
# -----
# conll2000 has two different targets; here we will only use
# IOB-like chunking as an example
train, test, voc = conll2000.load_data()
(train_x, _, train_y) = train
(test_x, _, test_y) = test
(vocab, _, class_labels) = voc
# --------------
# 1. Regular CRF
# --------------
print('==== training CRF ====')
model = Sequential()
model.add(Embedding(len(vocab), EMBED_DIM, mask_zero=True)) # Random embedding
crf = CRF(len(class_labels), sparse_target=True)
model.add(crf)
model.summary()
# The default `crf_loss` for `learn_mode='join'` is negative log likelihood.
model.compile('adam', loss=crf_loss, metrics=[crf_viterbi_accuracy])
model.fit(train_x, train_y, epochs=EPOCHS, validation_data=[test_x, test_y])
test_y_pred = model.predict(test_x).argmax(-1)[test_x > 0]
test_y_true = test_y[test_x > 0]
print('\n---- Result of CRF ----\n')
classification_report(test_y_true, test_y_pred, class_labels)
# -------------
# 2. BiLSTM-CRF
# -------------
print('==== training BiLSTM-CRF ====')
model = Sequential()
model.add(Embedding(len(vocab), EMBED_DIM, mask_zero=True)) # Random embedding
model.add(Bidirectional(LSTM(BiRNN_UNITS // 2, return_sequences=True)))
crf = CRF(len(class_labels), sparse_target=True)
model.add(crf)
model.summary()
model.compile('adam', loss=crf_loss, metrics=[crf_viterbi_accuracy])
model.fit(train_x, train_y, epochs=EPOCHS, validation_data=[test_x, test_y])
test_y_pred = model.predict(test_x).argmax(-1)[test_x > 0]
test_y_true = test_y[test_x > 0]
print('\n---- Result of BiLSTM-CRF ----\n')
classification_report(test_y_true, test_y_pred, class_labels)
| mit |
abinashpanda/pgmpy | docs/sphinxext/numpy_ext/docscrape_sphinx.py | 408 | 8061 | import re
import inspect
import textwrap
import pydoc
from .docscrape import NumpyDocString
from .docscrape import FunctionDoc
from .docscrape import ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config=None):
config = {} if config is None else config
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' ' * indent + line]
return out
def _str_signature(self):
        # Signature output is intentionally disabled; the branch below is unreachable.
        return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
# GAEL: Toctree commented out below because it creates
# hundreds of sphinx warnings
# out += ['.. autosummary::', ' :toctree:', '']
out += ['.. autosummary::', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.iteritems():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
import sphinx # local import to avoid test dependency
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Raises', 'Attributes'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Methods',):
out += self._str_member_list(param_list)
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config=None):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
| mit |
edublancas/python-ds-tools | examples/pipeline/basic/.ipynb_checkpoints/pipeline-checkpoint.py | 2 | 4339 | # This example shows the most basic usage of the `dstools.pipeline` module.
#
# Note: run this using `ipython` or in a Jupyter notebook (it won't run using `python`).
# +
from pathlib import Path
import tempfile
import pandas as pd
from IPython.display import Image, display
from dstools.pipeline import DAG
from dstools.pipeline.tasks import PythonCallable
from dstools.pipeline.products import File
from dstools.pipeline import tasks
# -
# A `DAG` is a workflow representation: it is a collection of `Tasks` that are executed in a given order. Each task is associated with a `Product`, which is a persistent change in a system (i.e. a table in a remote database or a file in the local filesystem). A task can use products from other tasks as inputs; these are known as upstream dependencies. Finally, a task can have extra parameters, but it is recommended to keep these as simple as possible.
# ## Building a simple DAG
dag = DAG(name='my pipeline')
# Let's now build the first tasks. They will just download some data. This pipeline is entirely declared in a single file to simplify things; a real pipeline will likely be split among several files.
#
# Tasks can be a lot of things (bash scripts, SQL scripts, etc), for this example they will be Python functions.
# +
# these functions pull the data and save it; the product
# parameter is required in every Task
def get_red_wine_data(product):
"""Get red wine data
"""
df = pd.read_csv('http://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv',
sep=';', index_col=False)
    # product is a File type so you have to cast it to a str
df.to_csv(str(product))
def get_white_wine_data(product):
"""Get white wine data
"""
df = pd.read_csv('http://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-white.csv',
sep=';', index_col=False)
df.to_csv(str(product))
# if the task has any dependencies, an upstream parameter is required
def concat_data(upstream, product):
"""Concatenate red and white wine data
"""
red = pd.read_csv(str(upstream['red']))
white = pd.read_csv(str(upstream['white']))
df = pd.concat([red, white])
df.to_csv(str(product))
# +
# create a temporary directory to store data
tmp_dir = Path(tempfile.mkdtemp())
# convert our functions to Task objects, note
# that the product is a File object, which means
# these functions will create files in the local filesystem
red_task = PythonCallable(get_red_wine_data,
product=File(tmp_dir / 'red.csv'),
dag=dag)
white_task = PythonCallable(get_white_wine_data,
product=File(tmp_dir / 'white.csv'),
dag=dag)
concat_task = PythonCallable(concat_data,
product=File(tmp_dir / 'all.csv'),
dag=dag)
# now we declare how our tasks relate to each other
red_task >> concat_task
white_task >> concat_task
# -
# we can plot our dag
path_to_image = dag.plot(open_image=False)
display(Image(filename=path_to_image))
# build the dag (execute all the tasks)
dag.build()
# Each time the DAG is run it will save the current timestamp and the source code of each task; the next time we run it, it will only run the tasks necessary to get everything up-to-date. There is a simple rule for that: a task will run if its code (or the code from any dependency) has changed since the last time it ran.
# if we build it again, nothing will run
dag.build()
# the plot will show which tasks are up-to-date
# in green
path_to_image = dag.plot(open_image=False)
display(Image(filename=path_to_image))
# status returns a summary of each task status
dag.status()
# ## Inspecting the `DAG` object
#
# The DAG object has utilities to debug and use the pipeline.
# list all tasks in the dag
list(dag)
# get a task
task = dag['red']
task
# task plan returns the source code to be executed along with the input parameters and product
task.plan()
# avoid hardcoding paths to files by loading them directly
# from the DAG; casting a Task to a str will cause it
# to return a valid representation; in this case, our
# product is a File, so it will return a path to it
df = pd.read_csv(str(dag['red']))
df.head()
| mit |
Garrett-R/scikit-learn | examples/text/mlcomp_sparse_document_classification.py | 292 | 4498 | """
========================================================
Classification of text documents: using a MLComp dataset
========================================================
This is an example showing how scikit-learn can be used to classify
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
The dataset used in this example is the 20 newsgroups dataset and should be
downloaded from the http://mlcomp.org (free registration required):
http://mlcomp.org/datasets/379
Once downloaded unzip the archive somewhere on your filesystem.
For instance in::
% mkdir -p ~/data/mlcomp
% cd ~/data/mlcomp
% unzip /path/to/dataset-379-20news-18828_XXXXX.zip
You should get a folder ``~/data/mlcomp/379`` with a file named ``metadata``
and subfolders ``raw``, ``train`` and ``test`` holding the text documents
organized by newsgroups.
Then set the ``MLCOMP_DATASETS_HOME`` environment variable pointing to
the root folder holding the uncompressed archive::
% export MLCOMP_DATASETS_HOME="~/data/mlcomp"
Then you are ready to run this example using your favorite python shell::
% ipython examples/mlcomp_sparse_document_classification.py
"""
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
import sys
import os
import numpy as np
import scipy.sparse as sp
import pylab as pl
from sklearn.datasets import load_mlcomp
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.naive_bayes import MultinomialNB
print(__doc__)
if 'MLCOMP_DATASETS_HOME' not in os.environ:
print("MLCOMP_DATASETS_HOME not set; please follow the above instructions")
sys.exit(0)
# Load the training set
print("Loading 20 newsgroups training set... ")
news_train = load_mlcomp('20news-18828', 'train')
print(news_train.DESCR)
print("%d documents" % len(news_train.filenames))
print("%d categories" % len(news_train.target_names))
print("Extracting features from the dataset using a sparse vectorizer")
t0 = time()
vectorizer = TfidfVectorizer(encoding='latin1')
X_train = vectorizer.fit_transform((open(f).read()
for f in news_train.filenames))
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_train.shape)
assert sp.issparse(X_train)
y_train = news_train.target
print("Loading 20 newsgroups test set... ")
news_test = load_mlcomp('20news-18828', 'test')
t0 = time()
print("done in %fs" % (time() - t0))
print("Predicting the labels of the test set...")
print("%d documents" % len(news_test.filenames))
print("%d categories" % len(news_test.target_names))
print("Extracting features from the dataset using the same vectorizer")
t0 = time()
X_test = vectorizer.transform((open(f).read() for f in news_test.filenames))
y_test = news_test.target
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_test.shape)
###############################################################################
# Benchmark classifiers
def benchmark(clf_class, params, name):
print("parameters:", params)
t0 = time()
clf = clf_class(**params).fit(X_train, y_train)
print("done in %fs" % (time() - t0))
if hasattr(clf, 'coef_'):
print("Percentage of non zeros coef: %f"
% (np.mean(clf.coef_ != 0) * 100))
print("Predicting the outcomes of the testing set")
t0 = time()
pred = clf.predict(X_test)
print("done in %fs" % (time() - t0))
print("Classification report on test set for classifier:")
print(clf)
print()
print(classification_report(y_test, pred,
target_names=news_test.target_names))
cm = confusion_matrix(y_test, pred)
print("Confusion matrix:")
print(cm)
# Show confusion matrix
pl.matshow(cm)
pl.title('Confusion matrix of the %s classifier' % name)
pl.colorbar()
print("Testbenching a linear classifier...")
parameters = {
'loss': 'hinge',
'penalty': 'l2',
'n_iter': 50,
'alpha': 0.00001,
'fit_intercept': True,
}
benchmark(SGDClassifier, parameters, 'SGD')
print("Testbenching a MultinomialNB classifier...")
parameters = {'alpha': 0.01}
benchmark(MultinomialNB, parameters, 'MultinomialNB')
pl.show()
| bsd-3-clause |
WillArmentrout/galSims | plotting/VeloPlot.py | 1 | 2033 | import pylab as p
import math
from matplotlib.pyplot import Rectangle # Used to make dummy legend
# Open CSV File
datafile = open('3DHiiRegions.csv', 'r')
csvFile = []
for row in datafile:
csvFile.append(row.strip().split(','))
# Save Galactic Radius Info from CSV to new list
ldata = list()
vdata = list()
galRad = list()
regNum = list()
index = 0
while index < len(csvFile) :
galRad.append(float(csvFile[index][0]))
index += 1
# map region codes 1-8 to plot colors (same mapping as the legend below)
colorByRegion = {'1': '#FF0000', '2': 'black', '3': 'y', '4': 'g',
                 '5': 'b', '6': '#FF9900', '7': '#00FF00', '8': '#00CCFF'}
index = 0
while index < len(csvFile) :
    if galRad[index] > 3 :
        ldata.append(float(csvFile[index][8]))
        vdata.append(float(csvFile[index][9]))
        regNum.append(colorByRegion[csvFile[index][10]])
    index += 1
p.scatter(ldata,vdata,s=3,facecolor=regNum, lw = 0)
p.xlabel('Galactic Longitude (deg)')
p.ylabel('VLSR (km/s)')
p.title('Longitude-Velocity Plot for Simulated HII Regions')
prox1 = Rectangle((0, 0), 1, 1, fc="#FF0000")
prox2 = Rectangle((0, 0), 1, 1, fc="black")
prox3 = Rectangle((0, 0), 1, 1, fc="y")
prox4 = Rectangle((0, 0), 1, 1, fc="g")
prox5 = Rectangle((0, 0), 1, 1, fc="b")
prox6 = Rectangle((0, 0), 1, 1, fc="#FF9900")
prox7 = Rectangle((0, 0), 1, 1, fc="#00FF00")
prox8 = Rectangle((0, 0), 1, 1, fc="#00CCFF")
p.legend([prox1, prox2,prox3,prox4,prox5,prox6,prox7,prox8], ["Diffuse", "Bar", "3kpc Arm","Ring","Outer Arm", "Outer Scutum-Centaurus Arm","Perseus Arm","Scutum-Centaurus Arm"])
p.savefig('Longitude-Velocity Plot')
p.show()
| gpl-2.0 |
381426068/MissionPlanner | Lib/site-packages/numpy/lib/polynomial.py | 58 | 35930 | """
Functions to operate on polynomials.
"""
__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
'polyfit', 'RankWarning']
import re
import warnings
import numpy.core.numeric as NX
from numpy.core import isscalar, abs, finfo, atleast_1d, hstack
from numpy.lib.twodim_base import diag, vander
from numpy.lib.function_base import trim_zeros, sort_complex
from numpy.lib.type_check import iscomplex, real, imag
from numpy.linalg import eigvals, lstsq
class RankWarning(UserWarning):
"""
Issued by `polyfit` when the Vandermonde matrix is rank deficient.
For more information, a way to suppress the warning, and an example of
`RankWarning` being issued, see `polyfit`.
"""
pass
def poly(seq_of_zeros):
"""
Find the coefficients of a polynomial with the given sequence of roots.
Returns the coefficients of the polynomial whose leading coefficient
is one for the given sequence of zeros (multiple roots must be included
in the sequence as many times as their multiplicity; see Examples).
A square matrix (or array, which will be treated as a matrix) can also
be given, in which case the coefficients of the characteristic polynomial
of the matrix are returned.
Parameters
----------
seq_of_zeros : array_like, shape (N,) or (N, N)
A sequence of polynomial roots, or a square array or matrix object.
Returns
-------
c : ndarray
1D array of polynomial coefficients from highest to lowest degree:
``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
where c[0] always equals 1.
Raises
------
ValueError
If input is the wrong shape (the input must be a 1-D or square
2-D array).
See Also
--------
polyval : Evaluate a polynomial at a point.
roots : Return the roots of a polynomial.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
Specifying the roots of a polynomial still leaves one degree of
freedom, typically represented by an undetermined leading
coefficient. [1]_ In the case of this function, that coefficient -
the first one in the returned array - is always taken as one. (If
for some reason you have one other point, the only automatic way
presently to leverage that information is to use ``polyfit``.)
The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`
matrix **A** is given by
:math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`,
where **I** is the `n`-by-`n` identity matrix. [2]_
References
----------
    .. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trigonometry,
Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.
.. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"
Academic Press, pg. 182, 1980.
Examples
--------
Given a sequence of a polynomial's zeros:
>>> np.poly((0, 0, 0)) # Multiple root example
array([1, 0, 0, 0])
The line above represents z**3 + 0*z**2 + 0*z + 0.
>>> np.poly((-1./2, 0, 1./2))
array([ 1. , 0. , -0.25, 0. ])
The line above represents z**3 - z/4
>>> np.poly((np.random.random(1.)[0], 0, np.random.random(1.)[0]))
array([ 1. , -0.77086955, 0.08618131, 0. ]) #random
Given a square array object:
>>> P = np.array([[0, 1./3], [-1./2, 0]])
>>> np.poly(P)
array([ 1. , 0. , 0.16666667])
Or a square matrix object:
>>> np.poly(np.matrix(P))
array([ 1. , 0. , 0.16666667])
Note how in all cases the leading coefficient is always 1.
"""
seq_of_zeros = atleast_1d(seq_of_zeros)
sh = seq_of_zeros.shape
if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:
seq_of_zeros = eigvals(seq_of_zeros)
elif len(sh) == 1:
pass
else:
raise ValueError, "input must be 1d or square 2d array."
if len(seq_of_zeros) == 0:
return 1.0
a = [1]
for k in range(len(seq_of_zeros)):
a = NX.convolve(a, [1, -seq_of_zeros[k]], mode='full')
if issubclass(a.dtype.type, NX.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = NX.asarray(seq_of_zeros, complex)
pos_roots = sort_complex(NX.compress(roots.imag > 0, roots))
neg_roots = NX.conjugate(sort_complex(
NX.compress(roots.imag < 0,roots)))
if (len(pos_roots) == len(neg_roots) and
NX.alltrue(neg_roots == pos_roots)):
a = a.real.copy()
return a
def roots(p):
"""
Return the roots of a polynomial with coefficients given in p.
The values in the rank-1 array `p` are coefficients of a polynomial.
If the length of `p` is n+1 then the polynomial is described by
p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
Parameters
----------
p : array_like of shape(M,)
Rank-1 array of polynomial co-efficients.
Returns
-------
out : ndarray
An array containing the complex roots of the polynomial.
Raises
------
ValueError:
When `p` cannot be converted to a rank-1 array.
See also
--------
poly : Find the coefficients of a polynomial with
a given sequence of roots.
polyval : Evaluate a polynomial at a point.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
The algorithm relies on computing the eigenvalues of the
companion matrix [1]_.
References
----------
.. [1] Wikipedia, "Companion matrix",
http://en.wikipedia.org/wiki/Companion_matrix
Examples
--------
>>> coeff = [3.2, 2, 1]
>>> np.roots(coeff)
array([-0.3125+0.46351241j, -0.3125-0.46351241j])
"""
# If input is scalar, this makes it an array
p = atleast_1d(p)
if len(p.shape) != 1:
raise ValueError,"Input must be a rank-1 array."
# find non-zero array entries
non_zero = NX.nonzero(NX.ravel(p))[0]
# Return an empty array if polynomial is all zeros
if len(non_zero) == 0:
return NX.array([])
# find the number of trailing zeros -- this is the number of roots at 0.
trailing_zeros = len(p) - non_zero[-1] - 1
# strip leading and trailing zeros
p = p[int(non_zero[0]):int(non_zero[-1])+1]
# casting: if incoming array isn't floating point, make it floating point.
if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
p = p.astype(float)
N = len(p)
if N > 1:
# build companion matrix and find its eigenvalues (the roots)
A = diag(NX.ones((N-2,), p.dtype), -1)
A[0, :] = -p[1:] / p[0]
roots = eigvals(A)
else:
roots = NX.array([])
# tack any zeros onto the back of the array
roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
return roots
def polyint(p, m=1, k=None):
"""
Return an antiderivative (indefinite integral) of a polynomial.
The returned order `m` antiderivative `P` of polynomial `p` satisfies
:math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1`
integration constants `k`. The constants determine the low-order
polynomial part
.. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1}
of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`.
Parameters
----------
p : {array_like, poly1d}
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of the antiderivative. (Default: 1)
k : {None, list of `m` scalars, scalar}, optional
Integration constants. They are given in the order of integration:
those corresponding to highest-order terms come first.
If ``None`` (default), all constants are assumed to be zero.
If `m = 1`, a single scalar can be given instead of a list.
See Also
--------
polyder : derivative of a polynomial
poly1d.integ : equivalent method
Examples
--------
The defining property of the antiderivative:
>>> p = np.poly1d([1,1,1])
>>> P = np.polyint(p)
>>> P
poly1d([ 0.33333333, 0.5 , 1. , 0. ])
>>> np.polyder(P) == p
True
The integration constants default to zero, but can be specified:
>>> P = np.polyint(p, 3)
>>> P(0)
0.0
>>> np.polyder(P)(0)
0.0
>>> np.polyder(P, 2)(0)
0.0
>>> P = np.polyint(p, 3, k=[6,5,3])
>>> P
poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ])
Note that 3 = 6 / 2!, and that the constants are given in the order of
integrations. Constant of the highest-order polynomial term comes first:
>>> np.polyder(P, 2)(0)
6.0
>>> np.polyder(P, 1)(0)
5.0
>>> P(0)
3.0
"""
m = int(m)
if m < 0:
raise ValueError, "Order of integral must be positive (see polyder)"
if k is None:
k = NX.zeros(m, float)
k = atleast_1d(k)
if len(k) == 1 and m > 1:
k = k[0]*NX.ones(m, float)
if len(k) < m:
raise ValueError, \
"k must be a scalar or a rank-1 array of length 1 or >m."
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
if m == 0:
if truepoly:
return poly1d(p)
return p
else:
# Note: this must work also with object and integer arrays
y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
val = polyint(y, m - 1, k=k[1:])
if truepoly:
return poly1d(val)
return val
def polyder(p, m=1):
"""
Return the derivative of the specified order of a polynomial.
Parameters
----------
p : poly1d or sequence
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of differentiation (default: 1)
Returns
-------
der : poly1d
A new polynomial representing the derivative.
See Also
--------
polyint : Anti-derivative of a polynomial.
poly1d : Class for one-dimensional polynomials.
Examples
--------
The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is:
>>> p = np.poly1d([1,1,1,1])
>>> p2 = np.polyder(p)
>>> p2
poly1d([3, 2, 1])
which evaluates to:
>>> p2(2.)
17.0
We can verify this, approximating the derivative with
``(f(x + h) - f(x))/h``:
>>> (p(2. + 0.001) - p(2.)) / 0.001
17.007000999997857
The fourth-order derivative of a 3rd-order polynomial is zero:
>>> np.polyder(p, 2)
poly1d([6, 2])
>>> np.polyder(p, 3)
poly1d([6])
>>> np.polyder(p, 4)
poly1d([ 0.])
"""
m = int(m)
if m < 0:
raise ValueError, "Order of derivative must be positive (see polyint)"
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
n = len(p) - 1
y = p[:-1] * NX.arange(n, 0, -1)
if m == 0:
val = p
else:
val = polyder(y, m - 1)
if truepoly:
val = poly1d(val)
return val
def polyfit(x, y, deg, rcond=None, full=False):
"""
Least squares polynomial fit.
Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
to points `(x, y)`. Returns a vector of coefficients `p` that minimises
the squared error.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than this
relative to the largest singular value will be ignored. The default
value is len(x)*eps, where eps is the relative precision of the float
type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is
False (the default) just the coefficients are returned, when True
diagnostic information from the singular value decomposition is also
returned.
Returns
-------
p : ndarray, shape (M,) or (M, K)
Polynomial coefficients, highest power first.
If `y` was 2-D, the coefficients for `k`-th data set are in ``p[:,k]``.
residuals, rank, singular_values, rcond : present only if `full` = True
Residuals of the least-squares fit, the effective rank of the scaled
Vandermonde coefficient matrix, its singular values, and the specified
value of `rcond`. For more details, see `linalg.lstsq`.
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False.
The warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', np.RankWarning)
See Also
--------
polyval : Computes polynomial values.
linalg.lstsq : Computes a least-squares fit.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution minimizes the squared error
.. math ::
E = \\sum_{j=0}^k |p(x_j) - y_j|^2
in the equations::
x[0]**n * p[n] + ... + x[0] * p[1] + p[0] = y[0]
x[1]**n * p[n] + ... + x[1] * p[1] + p[0] = y[1]
...
x[k]**n * p[n] + ... + x[k] * p[1] + p[0] = y[k]
The coefficient matrix of the coefficients `p` is a Vandermonde matrix.
`polyfit` issues a `RankWarning` when the least-squares fit is badly
conditioned. This implies that the best fit is not well-defined due
to numerical error. The results may be improved by lowering the polynomial
degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter
can also be set to a value smaller than its default, but the resulting
fit may be spurious: including contributions from the small singular
values can add numerical noise to the result.
Note that fitting polynomial coefficients is inherently badly conditioned
when the degree of the polynomial is large or the interval of sample points
is badly centered. The quality of the fit should always be checked in these
cases. When polynomial fits are not satisfactory, splines may be a good
alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
.. [2] Wikipedia, "Polynomial interpolation",
http://en.wikipedia.org/wiki/Polynomial_interpolation
Examples
--------
>>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
>>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
>>> z = np.polyfit(x, y, 3)
>>> z
array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254])
It is convenient to use `poly1d` objects for dealing with polynomials:
>>> p = np.poly1d(z)
>>> p(0.5)
0.6143849206349179
>>> p(3.5)
-0.34732142857143039
>>> p(10)
22.579365079365115
High-order polynomials may oscillate wildly:
>>> p30 = np.poly1d(np.polyfit(x, y, 30))
/... RankWarning: Polyfit may be poorly conditioned...
>>> p30(4)
-0.80000000000000204
>>> p30(5)
-0.99999999999999445
>>> p30(4.5)
-0.10547061179440398
Illustration:
>>> import matplotlib.pyplot as plt
>>> xp = np.linspace(-2, 6, 100)
>>> plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim(-2,2)
(-2, 2)
>>> plt.show()
"""
order = int(deg) + 1
x = NX.asarray(x) + 0.0
y = NX.asarray(y) + 0.0
# check arguments.
if deg < 0 :
raise ValueError, "expected deg >= 0"
if x.ndim != 1:
raise TypeError, "expected 1D vector for x"
if x.size == 0:
raise TypeError, "expected non-empty vector for x"
if y.ndim < 1 or y.ndim > 2 :
raise TypeError, "expected 1D or 2D array for y"
if x.shape[0] != y.shape[0] :
raise TypeError, "expected x and y to have same length"
# set rcond
if rcond is None :
rcond = len(x)*finfo(x.dtype).eps
# scale x to improve condition number
scale = abs(x).max()
if scale != 0 :
x /= scale
# solve least squares equation for powers of x
v = vander(x, order)
c, resids, rank, s = lstsq(v, y, rcond)
# warn on rank reduction, which indicates an ill conditioned matrix
if rank != order and not full:
msg = "Polyfit may be poorly conditioned"
warnings.warn(msg, RankWarning)
# scale returned coefficients
if scale != 0 :
if c.ndim == 1 :
c /= vander([scale], order)[0]
else :
c /= vander([scale], order).T
if full :
return c, resids, rank, s, rcond
else :
return c
def polyval(p, x):
"""
Evaluate a polynomial at specific values.
If `p` is of length N, this function returns the value:
``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``
If `x` is a sequence, then `p(x)` is returned for each element of `x`.
If `x` is another polynomial then the composite polynomial `p(x(t))`
is returned.
Parameters
----------
p : array_like or poly1d object
1D array of polynomial coefficients (including coefficients equal
to zero) from highest degree to the constant term, or an
instance of poly1d.
x : array_like or poly1d object
A number, a 1D array of numbers, or an instance of poly1d, "at"
which to evaluate `p`.
Returns
-------
values : ndarray or poly1d
If `x` is a poly1d instance, the result is the composition of the two
polynomials, i.e., `x` is "substituted" in `p` and the simplified
result is returned. In addition, the type of `x` - array_like or
poly1d - governs the type of the output: `x` array_like => `values`
array_like, `x` a poly1d object => `values` is also.
See Also
--------
poly1d: A polynomial class.
Notes
-----
Horner's scheme [1]_ is used to evaluate the polynomial. Even so,
for polynomials of high degree the values may be inaccurate due to
rounding errors. Use carefully.
References
----------
.. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng.
trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand
Reinhold Co., 1985, pg. 720.
Examples
--------
>>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1
76
>>> np.polyval([3,0,1], np.poly1d(5))
poly1d([ 76.])
>>> np.polyval(np.poly1d([3,0,1]), 5)
76
>>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5))
poly1d([ 76.])
"""
p = NX.asarray(p)
if isinstance(x, poly1d):
y = 0
else:
x = NX.asarray(x)
y = NX.zeros_like(x)
for i in range(len(p)):
y = x * y + p[i]
return y
def polyadd(a1, a2):
"""
Find the sum of two polynomials.
Returns the polynomial resulting from the sum of two input polynomials.
Each input must be either a poly1d object or a 1D sequence of polynomial
coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The sum of the inputs. If either input is a poly1d object, then the
output is also a poly1d object. Otherwise, it is a 1D array of
polynomial coefficients from highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
Examples
--------
>>> np.polyadd([1, 2], [9, 5, 4])
array([9, 6, 6])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2])
>>> p2 = np.poly1d([9, 5, 4])
>>> print p1
1 x + 2
>>> print p2
2
9 x + 5 x + 4
>>> print np.polyadd(p1, p2)
2
9 x + 6 x + 6
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 + a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) + a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 + NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polysub(a1, a2):
"""
Difference (subtraction) of two polynomials.
Given two polynomials `a1` and `a2`, returns ``a1 - a2``.
`a1` and `a2` can be either array_like sequences of the polynomials'
coefficients (including coefficients equal to zero), or `poly1d` objects.
Parameters
----------
a1, a2 : array_like or poly1d
Minuend and subtrahend polynomials, respectively.
Returns
-------
out : ndarray or poly1d
Array or `poly1d` object of the difference polynomial's coefficients.
See Also
--------
polyval, polydiv, polymul, polyadd
Examples
--------
.. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)
>>> np.polysub([2, 10, -2], [3, 10, -4])
array([-1, 0, 2])
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 - a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) - a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 - NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polymul(a1, a2):
"""
Find the product of two polynomials.
Finds the polynomial resulting from the multiplication of the two input
polynomials. Each input must be either a poly1d object or a 1D sequence
of polynomial coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The polynomial resulting from the multiplication of the inputs. If
either inputs is a poly1d object, then the output is also a poly1d
object. Otherwise, it is a 1D array of polynomial coefficients from
highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub,
polyval
Examples
--------
>>> np.polymul([1, 2, 3], [9, 5, 1])
array([ 9, 23, 38, 17, 3])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2, 3])
>>> p2 = np.poly1d([9, 5, 1])
>>> print p1
2
1 x + 2 x + 3
>>> print p2
2
9 x + 5 x + 1
>>> print np.polymul(p1, p2)
4 3 2
9 x + 23 x + 38 x + 17 x + 3
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1,a2 = poly1d(a1),poly1d(a2)
val = NX.convolve(a1, a2)
if truepoly:
val = poly1d(val)
return val
def polydiv(u, v):
"""
Returns the quotient and remainder of polynomial division.
The input arrays are the coefficients (including any coefficients
equal to zero) of the "numerator" (dividend) and "denominator"
(divisor) polynomials, respectively.
Parameters
----------
u : array_like or poly1d
Dividend polynomial's coefficients.
v : array_like or poly1d
Divisor polynomial's coefficients.
Returns
-------
q : ndarray
Coefficients, including those equal to zero, of the quotient.
r : ndarray
Coefficients, including those equal to zero, of the remainder.
See Also
--------
poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub,
polyval
Notes
-----
Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
not equal `v.ndim`. In other words, all four possible combinations -
``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.
Examples
--------
.. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25
>>> x = np.array([3.0, 5.0, 2.0])
>>> y = np.array([2.0, 1.0])
>>> np.polydiv(x, y)
(array([ 1.5 , 1.75]), array([ 0.25]))
"""
    truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
u = atleast_1d(u) + 0.0
v = atleast_1d(v) + 0.0
# w has the common type
w = u[0] + v[0]
m = len(u) - 1
n = len(v) - 1
scale = 1. / v[0]
q = NX.zeros((max(m - n + 1, 1),), w.dtype)
r = u.copy()
for k in range(0, m-n+1):
d = scale * r[k]
q[k] = d
r[k:k+n+1] -= d*v
while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
r = r[1:]
if truepoly:
return poly1d(q), poly1d(r)
return q, r
_poly_mat = re.compile(r"[*][*]([0-9]*)")
def _raise_power(astr, wrap=70):
n = 0
line1 = ''
line2 = ''
output = ' '
while 1:
mat = _poly_mat.search(astr, n)
if mat is None:
break
span = mat.span()
power = mat.groups()[0]
partstr = astr[n:span[0]]
n = span[1]
toadd2 = partstr + ' '*(len(power)-1)
toadd1 = ' '*(len(partstr)-1) + power
if ((len(line2)+len(toadd2) > wrap) or \
(len(line1)+len(toadd1) > wrap)):
output += line1 + "\n" + line2 + "\n "
line1 = toadd1
line2 = toadd2
else:
line2 += partstr + ' '*(len(power)-1)
line1 += ' '*(len(partstr)-1) + power
output += line1 + "\n" + line2
return output + astr[n:]
class poly1d(object):
"""
A one-dimensional polynomial class.
A convenience class, used to encapsulate "natural" operations on
polynomials so that said operations may take on their customary
form in code (see Examples).
Parameters
----------
c_or_r : array_like
The polynomial's coefficients, in decreasing powers, or if
the value of the second parameter is True, the polynomial's
roots (values where the polynomial evaluates to 0). For example,
``poly1d([1, 2, 3])`` returns an object that represents
:math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
r : bool, optional
If True, `c_or_r` specifies the polynomial's roots; the default
is False.
variable : str, optional
Changes the variable used when printing `p` from `x` to `variable`
(see Examples).
Examples
--------
Construct the polynomial :math:`x^2 + 2x + 3`:
>>> p = np.poly1d([1, 2, 3])
>>> print np.poly1d(p)
2
1 x + 2 x + 3
Evaluate the polynomial at :math:`x = 0.5`:
>>> p(0.5)
4.25
Find the roots:
>>> p.r
array([-1.+1.41421356j, -1.-1.41421356j])
>>> p(p.r)
array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j])
These numbers in the previous line represent (0, 0) to machine precision
Show the coefficients:
>>> p.c
array([1, 2, 3])
Display the order (the leading zero-coefficients are removed):
>>> p.order
2
Show the coefficient of the k-th power in the polynomial
(which is equivalent to ``p.c[-(i+1)]``):
>>> p[1]
2
Polynomials can be added, subtracted, multiplied, and divided
(returns quotient and remainder):
>>> p * p
poly1d([ 1, 4, 10, 12, 9])
>>> (p**3 + 4) / p
(poly1d([ 1., 4., 10., 12., 9.]), poly1d([ 4.]))
``asarray(p)`` gives the coefficient array, so polynomials can be
used in all functions that accept arrays:
>>> p**2 # square of polynomial
poly1d([ 1, 4, 10, 12, 9])
>>> np.square(p) # square of individual coefficients
array([1, 4, 9])
The variable used in the string representation of `p` can be modified,
using the `variable` parameter:
>>> p = np.poly1d([1,2,3], variable='z')
>>> print p
2
1 z + 2 z + 3
Construct a polynomial from its roots:
>>> np.poly1d([1, 2], True)
poly1d([ 1, -3, 2])
This is the same polynomial as obtained by:
>>> np.poly1d([1, -1]) * np.poly1d([1, -2])
poly1d([ 1, -3, 2])
"""
coeffs = None
order = None
variable = None
def __init__(self, c_or_r, r=0, variable=None):
if isinstance(c_or_r, poly1d):
for key in c_or_r.__dict__.keys():
self.__dict__[key] = c_or_r.__dict__[key]
if variable is not None:
self.__dict__['variable'] = variable
return
if r:
c_or_r = poly(c_or_r)
c_or_r = atleast_1d(c_or_r)
if len(c_or_r.shape) > 1:
raise ValueError, "Polynomial must be 1d only."
c_or_r = trim_zeros(c_or_r, trim='f')
if len(c_or_r) == 0:
c_or_r = NX.array([0.])
self.__dict__['coeffs'] = c_or_r
self.__dict__['order'] = len(c_or_r) - 1
if variable is None:
variable = 'x'
self.__dict__['variable'] = variable
def __array__(self, t=None):
if t:
return NX.asarray(self.coeffs, t)
else:
return NX.asarray(self.coeffs)
def __repr__(self):
vals = repr(self.coeffs)
vals = vals[6:-1]
return "poly1d(%s)" % vals
def __len__(self):
return self.order
def __str__(self):
thestr = "0"
var = self.variable
# Remove leading zeros
coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
N = len(coeffs)-1
def fmt_float(q):
s = '%.4g' % q
if s.endswith('.0000'):
s = s[:-5]
return s
for k in range(len(coeffs)):
if not iscomplex(coeffs[k]):
coefstr = fmt_float(real(coeffs[k]))
elif real(coeffs[k]) == 0:
coefstr = '%sj' % fmt_float(imag(coeffs[k]))
else:
coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])),
fmt_float(imag(coeffs[k])))
power = (N-k)
if power == 0:
if coefstr != '0':
newstr = '%s' % (coefstr,)
else:
if k == 0:
newstr = '0'
else:
newstr = ''
elif power == 1:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = var
else:
newstr = '%s %s' % (coefstr, var)
else:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = '%s**%d' % (var, power,)
else:
newstr = '%s %s**%d' % (coefstr, var, power)
if k > 0:
if newstr != '':
if newstr.startswith('-'):
thestr = "%s - %s" % (thestr, newstr[1:])
else:
thestr = "%s + %s" % (thestr, newstr)
else:
thestr = newstr
return _raise_power(thestr)
def __call__(self, val):
return polyval(self.coeffs, val)
def __neg__(self):
return poly1d(-self.coeffs)
def __pos__(self):
return self
def __mul__(self, other):
if isscalar(other):
return poly1d(self.coeffs * other)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __rmul__(self, other):
if isscalar(other):
return poly1d(other * self.coeffs)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __add__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __radd__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __pow__(self, val):
if not isscalar(val) or int(val) != val or val < 0:
raise ValueError, "Power to non-negative integers only."
res = [1]
for _ in range(val):
res = polymul(self.coeffs, res)
return poly1d(res)
def __sub__(self, other):
other = poly1d(other)
return poly1d(polysub(self.coeffs, other.coeffs))
def __rsub__(self, other):
other = poly1d(other)
return poly1d(polysub(other.coeffs, self.coeffs))
def __div__(self, other):
if isscalar(other):
return poly1d(self.coeffs/other)
else:
other = poly1d(other)
return polydiv(self, other)
__truediv__ = __div__
def __rdiv__(self, other):
if isscalar(other):
return poly1d(other/self.coeffs)
else:
other = poly1d(other)
return polydiv(other, self)
__rtruediv__ = __rdiv__
def __eq__(self, other):
return NX.alltrue(self.coeffs == other.coeffs)
def __ne__(self, other):
return NX.any(self.coeffs != other.coeffs)
def __setattr__(self, key, val):
raise ValueError, "Attributes cannot be changed this way."
def __getattr__(self, key):
if key in ['r', 'roots']:
return roots(self.coeffs)
elif key in ['c','coef','coefficients']:
return self.coeffs
elif key in ['o']:
return self.order
else:
try:
return self.__dict__[key]
except KeyError:
raise AttributeError("'%s' has no attribute '%s'" % (self.__class__, key))
def __getitem__(self, val):
ind = self.order - val
if val > self.order:
return 0
if val < 0:
return 0
return self.coeffs[ind]
def __setitem__(self, key, val):
ind = self.order - key
if key < 0:
raise ValueError, "Does not support negative powers."
if key > self.order:
zr = NX.zeros(key-self.order, self.coeffs.dtype)
self.__dict__['coeffs'] = NX.concatenate((zr, self.coeffs))
self.__dict__['order'] = key
ind = 0
self.__dict__['coeffs'][ind] = val
return
def __iter__(self):
return iter(self.coeffs)
def integ(self, m=1, k=0):
"""
Return an antiderivative (indefinite integral) of this polynomial.
Refer to `polyint` for full documentation.
See Also
--------
polyint : equivalent function
"""
return poly1d(polyint(self.coeffs, m=m, k=k))
def deriv(self, m=1):
"""
Return a derivative of this polynomial.
Refer to `polyder` for full documentation.
See Also
--------
polyder : equivalent function
"""
return poly1d(polyder(self.coeffs, m=m))
# Stuff to do on module import
warnings.simplefilter('always',RankWarning)
| gpl-3.0 |
scikit-learn-contrib/py-earth | pyearth/export.py | 3 | 8244 | def export_python_function(earth_model):
"""
Exports model as a pure python function, with no numpy/scipy/sklearn dependencies.
:param earth_model: Trained pyearth model
:return: A function that accepts an iterator over examples, and returns an iterator over transformed examples
"""
i = 0
accessors = []
for bf in earth_model.basis_:
if not bf.is_pruned():
accessors.append(bf.func_factory(earth_model.coef_[0, i]))
i += 1
def func(example_iterator):
return [sum(accessor(row) for accessor in accessors) for row in example_iterator]
return func
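# Minimal usage sketch (not part of the original module): `earth_model` is
# assumed to be an already-fitted pyearth Earth instance and `X` an iterable
# of feature rows.
#
#     model_func = export_python_function(earth_model)
#     y_hat = model_func(X)   # pure-python predictions, no numpy required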
def export_python_string(earth_model, function_name="model"):
"""
Exports model as a string that evaluates as python code, with no numpy/scipy/sklearn dependencies.
:param earth_model: Trained pyearth model
:param function_name: string, optional, will be the name of the function in the returned string
:return: string, when executed (either by writing to a file, or using `exec`, will define a python
function that accepts an iterator over examples, and returns an iterator over transformed examples
"""
i = 0
accessors = []
for bf in earth_model.basis_:
if not bf.is_pruned():
accessors.append(bf.func_string_factory(earth_model.coef_[0, i]))
i += 1
return """def {:s}(example_iterator):
accessors = [{:s}]
for x in example_iterator:
yield sum(accessor(x) for accessor in accessors)
""".format(function_name, ",\n\t\t".join(accessors))
def export_sympy_term_expressions(earth_model):
"""
Construct a list of sympy expressions for all non-pruned terms in the model.
:param earth_model: Trained pyearth model
:return: a list of sympy expressions representing terms in the model. These
expressions are the symbolic equivalent of the Earth.transform method.
"""
from sympy import Symbol, Add, Mul, Max, RealNumber, Piecewise, Pow, And, nan, Function, Not
from ._basis import LinearBasisFunction, HingeBasisFunction, SmoothedHingeBasisFunction, \
MissingnessBasisFunction, ConstantBasisFunction, VariableBasisFunction
Missing = Function('Missing')
NaNProtect = Function('NaNProtect')
def linear_bf_to_factor(bf, bf_var):
return bf_var
def smoothed_hinge_bf_to_factor(bf, bf_var):
knot = RealNumber(bf.get_knot())
knot_minus = RealNumber(bf.get_knot_minus())
knot_plus = RealNumber(bf.get_knot_plus())
r = RealNumber(bf.get_r())
p = RealNumber(bf.get_p())
if bf.get_reverse():
lower_p = (-(bf_var - knot)), (bf_var <= knot_minus)
upper_p = (0, bf_var >= knot_plus)
left_exp = Mul(p, Pow((bf_var - knot_plus), 2))
right_exp = Mul(r, Pow((bf_var - knot_plus), 3))
middle_b = And(knot_minus < bf_var, bf_var < knot_plus)
middle_exp = (Add(left_exp, right_exp), middle_b)
piecewise = Piecewise(lower_p, upper_p, middle_exp)
factor = piecewise
else:
lower_p = (0, bf_var <= knot_minus)
upper_p = (bf_var - knot, bf_var >= knot_plus)
left_exp = Mul(p, Pow((bf_var - knot_minus), 2))
right_exp = Mul(r, Pow((bf_var - knot_minus), 3))
middle_b = And(knot_minus < bf_var, bf_var < knot_plus)
middle_exp = (Add(left_exp, right_exp), middle_b)
piecewise = Piecewise(lower_p, upper_p, middle_exp)
factor = piecewise
return factor
def hinge_bf_to_factor(bf, bf_var):
knot = bf.get_knot()
if bf.get_reverse():
factor = Max(0, RealNumber(knot) - bf_var)
else:
factor = Max(0, bf_var - RealNumber(knot))
return factor
def missingness_bf_to_factor(bf, bf_var):
        # Missingness basis functions become a Missing(...) indicator on the variable,
        # negated when the basis function is the complement.
if bf.complement:
return Not(Missing(bf_var))
else:
return Missing(bf_var)
def constant_bf_to_factor(bf, bf_var):
return RealNumber(1)
def protect_from_nan(label, missables):
return NaNProtect(Symbol(label)) if label in missables else Symbol(label)
def dont_protect_from_nan(label, missables):
return Symbol(label)
bf_to_factor_dispatcher = {LinearBasisFunction: linear_bf_to_factor,
SmoothedHingeBasisFunction: smoothed_hinge_bf_to_factor,
HingeBasisFunction: hinge_bf_to_factor,
MissingnessBasisFunction: missingness_bf_to_factor,
ConstantBasisFunction: constant_bf_to_factor}
nan_protect_dispatch = {LinearBasisFunction: protect_from_nan,
SmoothedHingeBasisFunction: protect_from_nan,
HingeBasisFunction: protect_from_nan,
MissingnessBasisFunction: dont_protect_from_nan,
ConstantBasisFunction: protect_from_nan}
def bf_to_factor(bf, missables):
'''
Convert a BasisFunction to a factor of a term.
'''
if isinstance(bf, VariableBasisFunction):
bf_var = nan_protect_dispatch[bf.__class__](bf.label, missables)
else:
bf_var = None
return bf_to_factor_dispatcher[bf.__class__](bf, bf_var)
def missingness_bf_get_missables(bf):
bf_var = bf.label
return set([bf_var])
def non_missable(bf):
return set()
bf_get_missables_dispatcher = {LinearBasisFunction: non_missable,
SmoothedHingeBasisFunction: non_missable,
HingeBasisFunction: non_missable,
MissingnessBasisFunction: missingness_bf_get_missables,
ConstantBasisFunction: non_missable}
def get_missables(bf):
missables = bf_get_missables_dispatcher[bf.__class__](bf)
parent = bf.get_parent()
if parent is None:
return missables
else:
missables.update(get_missables(parent))
return missables
def bf_to_term(bf, missables):
'''
Convert a BasisFunction to a term (without coefficient).
'''
term = bf_to_factor(bf, missables)
parent = bf.get_parent()
if parent is None:
return term
else:
return Mul(term, bf_to_term(parent, missables))
return [bf_to_term(bf, get_missables(bf)) for bf in earth_model.basis_.piter()]
def export_sympy(earth_model, columns=None):
"""
    Constructs a sympy expression or list of sympy expressions from a trained earth model.
:param earth_model: Trained pyearth model
:param columns: The index or indices of the output columns for which expressions are to
be constructed. If an integer is used, a sympy expression is returned. If indices
are given then a list of sympy expressions is returned. If columns is None, it is treated
as if columns=0 for models with only one output column or as columns=slice(None) for more than
one output column.
:return: a sympy expression or list of sympy expressions equivalent to the Earth.predict method for
the selected output columns.
"""
# Set a sane default for columns
if columns is None:
if earth_model.coef_.shape[0] == 1:
columns = 0
else:
columns = slice(None)
# Get basis function terms
terms = export_sympy_term_expressions(earth_model)
# Handle column choice
coefs = earth_model.coef_[columns]
if len(coefs.shape) == 1:
unwrap = True
coefs = [coefs]
n_cols = 1
else:
unwrap = False
n_cols = coefs.shape[0]
# Combine coefficients with terms for each output column
result = [sum([coefs[i][j] * term for j, term in enumerate(terms)]) for i in range(n_cols)]
if unwrap:
# Result should be an expression rather than a list of expressions.
result = result[0]
return result
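# --- Editor's note: a minimal sketch of evaluating the exported expression (illustrative;
# it assumes a fitted single-output Earth model named `model` with no missingness terms,
# since Missing/NaNProtect are left as undefined sympy Functions). ---
#
#     expr = export_sympy(model)                               # one sympy expression
#     columns = sorted(expr.free_symbols, key=str)             # symbols named after input columns
#     y_hat = float(expr.evalf(subs=dict(zip(columns, row))))  # row ordered like `columns`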
| bsd-3-clause |
elkingtonmcb/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_emf.py | 69 | 22336 | """
Enhanced Metafile backend. See http://pyemf.sourceforge.net for the EMF
driver library.
"""
from __future__ import division
try:
import pyemf
except ImportError:
raise ImportError('You must first install pyemf from http://pyemf.sf.net')
import os,sys,math,re
from matplotlib import verbose, __version__, rcParams
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.figure import Figure
from matplotlib.transforms import Bbox
from matplotlib.font_manager import findfont, FontProperties
from matplotlib.ft2font import FT2Font, KERNING_UNFITTED, KERNING_DEFAULT, KERNING_UNSCALED
# Font handling stuff snarfed from backend_ps, but only using TTF fonts
_fontd = {}
# Debug print stuff
debugHandle = False
debugPrint = False
debugText = False
# Hashable font properties class. In EMF, angle of rotation is a part
# of the font properties, so a handle to a new font must be obtained
# if the rotation changes.
class EMFFontProperties(FontProperties):
def __init__(self,other,angle):
FontProperties.__init__(self,other.get_family(),
other.get_style(),
other.get_variant(),
other.get_weight(),
other.get_stretch(),
other.get_size())
self.__angle=angle
def __hash__(self):
return hash( (FontProperties.__hash__(self), self.__angle))
def __str__(self):
return str( (FontProperties.__str__(self), self.__angle))
def set_angle(self,angle):
self.__angle=angle
# Hashable pen (line style) properties.
class EMFPen:
def __init__(self,emf,gc):
self.emf=emf
self.gc=gc
r,g,b=gc.get_rgb()
self.r=int(r*255)
self.g=int(g*255)
self.b=int(b*255)
self.width=int(gc.get_linewidth())
self.style=0
self.set_linestyle()
if debugHandle: print "EMFPen: style=%d width=%d rgb=(%d,%d,%d)" % (self.style,self.width,self.r,self.g,self.b)
def __hash__(self):
return hash((self.style,self.width,self.r,self.g,self.b))
def set_linestyle(self):
# Hack. Negative width lines will not get drawn.
if self.width<0:
self.style=pyemf.PS_NULL
else:
styles={'solid':pyemf.PS_SOLID, 'dashed':pyemf.PS_DASH,
'dashdot':pyemf.PS_DASHDOT, 'dotted':pyemf.PS_DOT}
#style=styles.get(self.gc.get_linestyle('solid'))
style=self.gc.get_linestyle('solid')
if debugHandle: print "EMFPen: style=%d" % style
if style in styles:
self.style=styles[style]
else:
self.style=pyemf.PS_SOLID
def get_handle(self):
handle=self.emf.CreatePen(self.style,self.width,(self.r,self.g,self.b))
return handle
# Hashable brush (fill style) properties.
class EMFBrush:
def __init__(self,emf,rgb):
self.emf=emf
r,g,b=rgb
self.r=int(r*255)
self.g=int(g*255)
self.b=int(b*255)
if debugHandle: print "EMFBrush: rgb=(%d,%d,%d)" % (self.r,self.g,self.b)
def __hash__(self):
return hash((self.r,self.g,self.b))
def get_handle(self):
handle=self.emf.CreateSolidBrush((self.r,self.g,self.b))
return handle
class RendererEMF(RendererBase):
"""
The renderer handles drawing/rendering operations through a
pyemf.EMF instance.
"""
def __init__(self, outfile, width, height, dpi):
"Initialize the renderer with a gd image instance"
self.outfile = outfile
# a map from get_color args to colors
self._cached = {}
# dict of hashed properties to already created font handles
self._fontHandle = {}
self.lastHandle = {'font':-1, 'pen':-1, 'brush':-1}
self.emf=pyemf.EMF(width,height,dpi,'in')
self.width=int(width*dpi)
self.height=int(height*dpi)
self.dpi = dpi
self.pointstodpi = dpi/72.0
self.hackPointsForMathExponent = 2.0
# set background transparent for text
self.emf.SetBkMode(pyemf.TRANSPARENT)
# set baseline for text to be bottom left corner
self.emf.SetTextAlign( pyemf.TA_BOTTOM|pyemf.TA_LEFT)
if debugPrint: print "RendererEMF: (%f,%f) %s dpi=%f" % (self.width,self.height,outfile,dpi)
def save(self):
self.emf.save(self.outfile)
def draw_arc(self, gcEdge, rgbFace, x, y, width, height, angle1, angle2, rotation):
"""
Draw an arc using GraphicsContext instance gcEdge, centered at x,y,
with width and height and angles from 0.0 to 360.0
0 degrees is at 3-o'clock
positive angles are anti-clockwise
If the color rgbFace is not None, fill the arc with it.
"""
if debugPrint: print "draw_arc: (%f,%f) angles=(%f,%f) w,h=(%f,%f)" % (x,y,angle1,angle2,width,height)
pen=self.select_pen(gcEdge)
brush=self.select_brush(rgbFace)
# This algorithm doesn't work very well on small circles
# because of rounding error. This shows up most obviously on
# legends where the circles are small anyway, and it is
# compounded by the fact that it puts several circles right
# next to each other so the differences are obvious.
hw=width/2
hh=height/2
x1=int(x-width/2)
y1=int(y-height/2)
if brush:
self.emf.Pie(int(x-hw),int(self.height-(y-hh)),int(x+hw),int(self.height-(y+hh)),int(x+math.cos(angle1*math.pi/180.0)*hw),int(self.height-(y+math.sin(angle1*math.pi/180.0)*hh)),int(x+math.cos(angle2*math.pi/180.0)*hw),int(self.height-(y+math.sin(angle2*math.pi/180.0)*hh)))
else:
self.emf.Arc(int(x-hw),int(self.height-(y-hh)),int(x+hw),int(self.height-(y+hh)),int(x+math.cos(angle1*math.pi/180.0)*hw),int(self.height-(y+math.sin(angle1*math.pi/180.0)*hh)),int(x+math.cos(angle2*math.pi/180.0)*hw),int(self.height-(y+math.sin(angle2*math.pi/180.0)*hh)))
def draw_image(self, x, y, im, bbox):
"""
Draw the Image instance into the current axes; x is the
distance in pixels from the left hand side of the canvas. y is
the distance from the origin. That is, if origin is upper, y
is the distance from top. If origin is lower, y is the
distance from bottom
bbox is a matplotlib.transforms.BBox instance for clipping, or
None
"""
# pyemf2 currently doesn't support bitmaps.
pass
def draw_line(self, gc, x1, y1, x2, y2):
"""
Draw a single line from x1,y1 to x2,y2
"""
if debugPrint: print "draw_line: (%f,%f) - (%f,%f)" % (x1,y1,x2,y2)
if self.select_pen(gc):
self.emf.Polyline([(long(x1),long(self.height-y1)),(long(x2),long(self.height-y2))])
else:
if debugPrint: print "draw_line: optimizing away (%f,%f) - (%f,%f)" % (x1,y1,x2,y2)
def draw_lines(self, gc, x, y):
"""
x and y are equal length arrays, draw lines connecting each
point in x, y
"""
if debugPrint: print "draw_lines: %d points" % len(str(x))
# optimize away anything that won't actually be drawn. Edge
# style must not be PS_NULL for it to appear on screen.
if self.select_pen(gc):
points = [(long(x[i]), long(self.height-y[i])) for i in range(len(x))]
self.emf.Polyline(points)
def draw_point(self, gc, x, y):
"""
Draw a single point at x,y
Where 'point' is a device-unit point (or pixel), not a matplotlib point
"""
if debugPrint: print "draw_point: (%f,%f)" % (x,y)
# don't cache this pen
pen=EMFPen(self.emf,gc)
self.emf.SetPixel(long(x),long(self.height-y),(pen.r,pen.g,pen.b))
def draw_polygon(self, gcEdge, rgbFace, points):
"""
Draw a polygon using the GraphicsContext instance gc.
        points is a sequence of vertices, each element
        giving the x,y coords of a vertex
If the color rgbFace is not None, fill the polygon with it
"""
if debugPrint: print "draw_polygon: %d points" % len(points)
# optimize away anything that won't actually draw. Either a
# face color or edge style must be defined
pen=self.select_pen(gcEdge)
brush=self.select_brush(rgbFace)
if pen or brush:
points = [(long(x), long(self.height-y)) for x,y in points]
self.emf.Polygon(points)
else:
points = [(long(x), long(self.height-y)) for x,y in points]
if debugPrint: print "draw_polygon: optimizing away polygon: %d points = %s" % (len(points),str(points))
def draw_rectangle(self, gcEdge, rgbFace, x, y, width, height):
"""
Draw a non-filled rectangle using the GraphicsContext instance gcEdge,
with lower left at x,y with width and height.
If rgbFace is not None, fill the rectangle with it.
"""
if debugPrint: print "draw_rectangle: (%f,%f) w=%f,h=%f" % (x,y,width,height)
# optimize away anything that won't actually draw. Either a
# face color or edge style must be defined
pen=self.select_pen(gcEdge)
brush=self.select_brush(rgbFace)
if pen or brush:
self.emf.Rectangle(int(x),int(self.height-y),int(x)+int(width),int(self.height-y)-int(height))
else:
if debugPrint: print "draw_rectangle: optimizing away (%f,%f) w=%f,h=%f" % (x,y,width,height)
def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
"""
Draw the text.Text instance s at x,y (display coords) with font
properties instance prop at angle in degrees, using GraphicsContext gc
**backend implementers note**
When you are trying to determine if you have gotten your bounding box
right (which is what enables the text layout/alignment to work
properly), it helps to change the line in text.py
if 0: bbox_artist(self, renderer)
to if 1, and then the actual bounding box will be blotted along with
your text.
"""
if debugText: print "draw_text: (%f,%f) %d degrees: '%s'" % (x,y,angle,s)
if ismath:
self.draw_math_text(gc,x,y,s,prop,angle)
else:
self.draw_plain_text(gc,x,y,s,prop,angle)
def draw_plain_text(self, gc, x, y, s, prop, angle):
"""
Draw a text string verbatim; no conversion is done.
"""
if debugText: print "draw_plain_text: (%f,%f) %d degrees: '%s'" % (x,y,angle,s)
if debugText: print " properties:\n"+str(prop)
self.select_font(prop,angle)
# haxor follows! The subtleties of text placement in EMF
# still elude me a bit. It always seems to be too high on the
# page, about 10 pixels too high on a 300dpi resolution image.
# So, I'm adding this hack for the moment:
hackoffsetper300dpi=10
xhack=math.sin(angle*math.pi/180.0)*hackoffsetper300dpi*self.dpi/300.0
yhack=math.cos(angle*math.pi/180.0)*hackoffsetper300dpi*self.dpi/300.0
self.emf.TextOut(long(x+xhack),long(y+yhack),s)
def draw_math_text(self, gc, x, y, s, prop, angle):
"""
Draw a subset of TeX, currently handles exponents only. Since
pyemf doesn't have any raster functionality yet, the
texmanager.get_rgba won't help.
"""
if debugText: print "draw_math_text: (%f,%f) %d degrees: '%s'" % (x,y,angle,s)
s = s[1:-1] # strip the $ from front and back
match=re.match("10\^\{(.+)\}",s)
if match:
exp=match.group(1)
if debugText: print " exponent=%s" % exp
font = self._get_font_ttf(prop)
font.set_text("10", 0.0)
w, h = font.get_width_height()
w /= 64.0 # convert from subpixels
h /= 64.0
self.draw_plain_text(gc,x,y,"10",prop,angle)
propexp=prop.copy()
propexp.set_size(prop.get_size_in_points()*.8)
self.draw_plain_text(gc,x+w+self.points_to_pixels(self.hackPointsForMathExponent),y-(h/2),exp,propexp,angle)
else:
# if it isn't an exponent, then render the raw TeX string.
self.draw_plain_text(gc,x,y,s,prop,angle)
def get_math_text_width_height(self, s, prop):
"""
get the width and height in display coords of the string s
        with FontProperties prop, ripped right out of backend_ps. This
method must be kept in sync with draw_math_text.
"""
if debugText: print "get_math_text_width_height:"
s = s[1:-1] # strip the $ from front and back
match=re.match("10\^\{(.+)\}",s)
if match:
exp=match.group(1)
if debugText: print " exponent=%s" % exp
font = self._get_font_ttf(prop)
font.set_text("10", 0.0)
w1, h1 = font.get_width_height()
propexp=prop.copy()
propexp.set_size(prop.get_size_in_points()*.8)
fontexp=self._get_font_ttf(propexp)
fontexp.set_text(exp, 0.0)
w2, h2 = fontexp.get_width_height()
w=w1+w2
h=h1+(h2/2)
w /= 64.0 # convert from subpixels
h /= 64.0
w+=self.points_to_pixels(self.hackPointsForMathExponent)
if debugText: print " math string=%s w,h=(%f,%f)" % (s, w, h)
else:
w,h=self.get_text_width_height(s,prop,False)
return w, h
def flipy(self):
"""return true if y small numbers are top for renderer
Is used for drawing text (text.py) and images (image.py) only
"""
return True
def get_canvas_width_height(self):
"""
return the canvas width and height in display coords
"""
return self.width,self.height
def set_handle(self,type,handle):
"""
Update the EMF file with the current handle, but only if it
isn't the same as the last one. Don't want to flood the file
with duplicate info.
"""
if self.lastHandle[type] != handle:
self.emf.SelectObject(handle)
self.lastHandle[type]=handle
def get_font_handle(self, prop, angle):
"""
Look up the handle for the font based on the dict of
properties *and* the rotation angle, since in EMF the font
rotation is a part of the font definition.
"""
prop=EMFFontProperties(prop,angle)
size=int(prop.get_size_in_points()*self.pointstodpi)
face=prop.get_name()
key = hash(prop)
handle = self._fontHandle.get(key)
if handle is None:
handle=self.emf.CreateFont(-size, 0, int(angle)*10, int(angle)*10,
pyemf.FW_NORMAL, 0, 0, 0,
pyemf.ANSI_CHARSET, pyemf.OUT_DEFAULT_PRECIS,
pyemf.CLIP_DEFAULT_PRECIS, pyemf.DEFAULT_QUALITY,
pyemf.DEFAULT_PITCH | pyemf.FF_DONTCARE, face);
if debugHandle: print "get_font_handle: creating handle=%d for face=%s size=%d" % (handle,face,size)
self._fontHandle[key]=handle
if debugHandle: print " found font handle %d for face=%s size=%d" % (handle,face,size)
self.set_handle("font",handle)
return handle
def select_font(self,prop,angle):
handle=self.get_font_handle(prop,angle)
self.set_handle("font",handle)
def select_pen(self, gc):
"""
Select a pen that includes the color, line width and line
style. Return the pen if it will draw a line, or None if the
pen won't produce any output (i.e. the style is PS_NULL)
"""
pen=EMFPen(self.emf,gc)
key=hash(pen)
handle=self._fontHandle.get(key)
if handle is None:
handle=pen.get_handle()
self._fontHandle[key]=handle
if debugHandle: print " found pen handle %d" % handle
self.set_handle("pen",handle)
if pen.style != pyemf.PS_NULL:
return pen
else:
return None
def select_brush(self, rgb):
"""
Select a fill color, and return the brush if the color is
valid or None if this won't produce a fill operation.
"""
if rgb is not None:
brush=EMFBrush(self.emf,rgb)
key=hash(brush)
handle=self._fontHandle.get(key)
if handle is None:
handle=brush.get_handle()
self._fontHandle[key]=handle
if debugHandle: print " found brush handle %d" % handle
self.set_handle("brush",handle)
return brush
else:
return None
def _get_font_ttf(self, prop):
"""
get the true type font properties, used because EMFs on
windows will use true type fonts.
"""
key = hash(prop)
font = _fontd.get(key)
if font is None:
fname = findfont(prop)
if debugText: print "_get_font_ttf: name=%s" % fname
font = FT2Font(str(fname))
_fontd[key] = font
font.clear()
size = prop.get_size_in_points()
font.set_size(size, self.dpi)
return font
def get_text_width_height(self, s, prop, ismath):
"""
get the width and height in display coords of the string s
        with FontProperties prop, ripped right out of backend_ps
"""
if debugText: print "get_text_width_height: ismath=%s properties: %s" % (str(ismath),str(prop))
if ismath:
if debugText: print " MATH TEXT! = %s" % str(ismath)
w,h = self.get_math_text_width_height(s, prop)
return w,h
font = self._get_font_ttf(prop)
font.set_text(s, 0.0)
w, h = font.get_width_height()
w /= 64.0 # convert from subpixels
h /= 64.0
if debugText: print " text string=%s w,h=(%f,%f)" % (s, w, h)
return w, h
def new_gc(self):
return GraphicsContextEMF()
def points_to_pixels(self, points):
# if backend doesn't have dpi, eg, postscript or svg
#return points
# elif backend assumes a value for pixels_per_inch
#return points/72.0 * self.dpi.get() * pixels_per_inch/72.0
# else
return points/72.0 * self.dpi
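    # --- Editor's note: worked example (illustrative). At self.dpi == 300,
    # a 12-point length maps to 12 / 72.0 * 300 = 50 pixels. ---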
class GraphicsContextEMF(GraphicsContextBase):
"""
The graphics context provides the color, line styles, etc... See the gtk
and postscript backends for examples of mapping the graphics context
attributes (cap styles, join styles, line widths, colors) to a particular
backend. In GTK this is done by wrapping a gtk.gdk.GC object and
forwarding the appropriate calls to it using a dictionary mapping styles
to gdk constants. In Postscript, all the work is done by the renderer,
mapping line styles to postscript calls.
If it's more appropriate to do the mapping at the renderer level (as in
the postscript backend), you don't need to override any of the GC methods.
If it's more appropriate to wrap an instance (as in the GTK backend) and
do the mapping here, you'll need to override several of the setter
methods.
    The base GraphicsContext stores colors as an RGB tuple on the unit
interval, eg, (0.5, 0.0, 1.0). You may need to map this to colors
appropriate for your backend.
"""
pass
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
def draw_if_interactive():
"""
For image backends - is not required
    For GUI backends - this should be overridden if drawing should be done in
interactive python mode
"""
pass
def show():
"""
For image backends - is not required
For GUI backends - show() is usually the last line of a pylab script and
tells the backend that it is time to draw. In interactive mode, this may
be a do nothing func. See the GTK backend for an example of how to handle
interactive versus batch mode
"""
for manager in Gcf.get_all_fig_managers():
# do something to display the GUI
pass
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# if a main-level app must be created, this is the usual place to
# do it -- see backend_wx, backend_wxagg and backend_tkagg for
# examples. Not all GUIs require explicit instantiation of a
    # main-level app (e.g. backend_gtk, backend_gtkagg) for pylab
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasEMF(thisFig)
manager = FigureManagerEMF(canvas, num)
return manager
class FigureCanvasEMF(FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
"""
def draw(self):
"""
Draw the figure using the renderer
"""
pass
filetypes = {'emf': 'Enhanced Metafile'}
def print_emf(self, filename, dpi=300, **kwargs):
width, height = self.figure.get_size_inches()
renderer = RendererEMF(filename,width,height,dpi)
self.figure.draw(renderer)
renderer.save()
def get_default_filetype(self):
return 'emf'
class FigureManagerEMF(FigureManagerBase):
"""
Wrap everything up into a window for the pylab interface
For non interactive backends, the base class does all the work
"""
pass
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################
FigureManager = FigureManagerEMF
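# --- Editor's note: a hedged usage sketch (not part of the original backend). It assumes
# pyemf is installed and that this matplotlib build registers the backend as 'EMF'. ---
#
#     import matplotlib
#     matplotlib.use('EMF')               # must be selected before importing pyplot
#     import matplotlib.pyplot as plt
#     plt.plot([0, 1, 2], [0, 1, 4])
#     plt.savefig('figure.emf', dpi=300)  # dispatched to FigureCanvasEMF.print_emf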
| agpl-3.0 |
sanketloke/scikit-learn | examples/linear_model/plot_omp.py | 385 | 2263 | """
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_coded_signal
n_components, n_features = 512, 100
n_nonzero_coefs = 17
# generate the data
###################
# y = Xw
# |x|_0 = n_nonzero_coefs
y, X, w = make_sparse_coded_signal(n_samples=1,
n_components=n_components,
n_features=n_features,
n_nonzero_coefs=n_nonzero_coefs,
random_state=0)
idx, = w.nonzero()
# distort the clean signal
##########################
y_noisy = y + 0.05 * np.random.randn(len(y))
# plot the sparse signal
########################
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
plt.stem(idx, w[idx])
# plot the noise-free reconstruction
####################################
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction
###############################
omp.fit(X, y_noisy)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction with number of non-zeros set by CV
##################################################################
omp_cv = OrthogonalMatchingPursuitCV()
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r])
plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',
fontsize=16)
plt.show()
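# --- Editor's note: an optional check (illustrative). The CV estimator exposes the
# sparsity level it selected:
#     print("CV-selected number of non-zero coefficients:", omp_cv.n_nonzero_coefs_)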
| bsd-3-clause |
victor-prado/broker-manager | environment/lib/python3.5/site-packages/pandas/tests/series/test_alter_axes.py | 7 | 6881 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime
import numpy as np
import pandas as pd
from pandas import Index, Series
from pandas.core.index import MultiIndex, RangeIndex
from pandas.compat import lrange, range, zip
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesAlterAxes(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_setindex(self):
# wrong type
series = self.series.copy()
self.assertRaises(TypeError, setattr, series, 'index', None)
# wrong length
series = self.series.copy()
self.assertRaises(Exception, setattr, series, 'index',
np.arange(len(series) - 1))
# works
series = self.series.copy()
series.index = np.arange(len(series))
tm.assertIsInstance(series.index, Index)
def test_rename(self):
renamer = lambda x: x.strftime('%Y%m%d')
renamed = self.ts.rename(renamer)
self.assertEqual(renamed.index[0], renamer(self.ts.index[0]))
# dict
rename_dict = dict(zip(self.ts.index, renamed.index))
renamed2 = self.ts.rename(rename_dict)
assert_series_equal(renamed, renamed2)
# partial dict
s = Series(np.arange(4), index=['a', 'b', 'c', 'd'], dtype='int64')
renamed = s.rename({'b': 'foo', 'd': 'bar'})
self.assert_index_equal(renamed.index, Index(['a', 'foo', 'c', 'bar']))
# index with name
renamer = Series(np.arange(4),
index=Index(['a', 'b', 'c', 'd'], name='name'),
dtype='int64')
renamed = renamer.rename({})
self.assertEqual(renamed.index.name, renamer.index.name)
def test_rename_by_series(self):
s = Series(range(5), name='foo')
renamer = Series({1: 10, 2: 20})
result = s.rename(renamer)
expected = Series(range(5), index=[0, 10, 20, 3, 4], name='foo')
tm.assert_series_equal(result, expected)
def test_rename_set_name(self):
s = Series(range(4), index=list('abcd'))
for name in ['foo', 123, 123., datetime(2001, 11, 11), ('foo',)]:
result = s.rename(name)
self.assertEqual(result.name, name)
self.assert_numpy_array_equal(result.index.values, s.index.values)
self.assertTrue(s.name is None)
def test_rename_set_name_inplace(self):
s = Series(range(3), index=list('abc'))
for name in ['foo', 123, 123., datetime(2001, 11, 11), ('foo',)]:
s.rename(name, inplace=True)
self.assertEqual(s.name, name)
exp = np.array(['a', 'b', 'c'], dtype=np.object_)
self.assert_numpy_array_equal(s.index.values, exp)
def test_set_name_attribute(self):
s = Series([1, 2, 3])
s2 = Series([1, 2, 3], name='bar')
for name in [7, 7., 'name', datetime(2001, 1, 1), (1,), u"\u05D0"]:
s.name = name
self.assertEqual(s.name, name)
s2.name = name
self.assertEqual(s2.name, name)
def test_set_name(self):
s = Series([1, 2, 3])
s2 = s._set_name('foo')
self.assertEqual(s2.name, 'foo')
self.assertTrue(s.name is None)
self.assertTrue(s is not s2)
def test_rename_inplace(self):
renamer = lambda x: x.strftime('%Y%m%d')
expected = renamer(self.ts.index[0])
self.ts.rename(renamer, inplace=True)
self.assertEqual(self.ts.index[0], expected)
def test_set_index_makes_timeseries(self):
idx = tm.makeDateIndex(10)
s = Series(lrange(10))
s.index = idx
with tm.assert_produces_warning(FutureWarning):
self.assertTrue(s.is_time_series)
self.assertTrue(s.index.is_all_dates)
def test_reset_index(self):
df = tm.makeDataFrame()[:5]
ser = df.stack()
ser.index.names = ['hash', 'category']
ser.name = 'value'
df = ser.reset_index()
self.assertIn('value', df)
df = ser.reset_index(name='value2')
self.assertIn('value2', df)
# check inplace
s = ser.reset_index(drop=True)
s2 = ser
s2.reset_index(drop=True, inplace=True)
assert_series_equal(s, s2)
# level
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
s = Series(np.random.randn(6), index=index)
rs = s.reset_index(level=1)
self.assertEqual(len(rs.columns), 2)
rs = s.reset_index(level=[0, 2], drop=True)
self.assert_index_equal(rs.index, Index(index.get_level_values(1)))
tm.assertIsInstance(rs, Series)
def test_reset_index_range(self):
# GH 12071
s = pd.Series(range(2), name='A', dtype='int64')
series_result = s.reset_index()
tm.assertIsInstance(series_result.index, RangeIndex)
series_expected = pd.DataFrame([[0, 0], [1, 1]],
columns=['index', 'A'],
index=RangeIndex(stop=2))
assert_frame_equal(series_result, series_expected)
def test_reorder_levels(self):
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]],
names=['L0', 'L1', 'L2'])
s = Series(np.arange(6), index=index)
# no change, position
result = s.reorder_levels([0, 1, 2])
assert_series_equal(s, result)
# no change, labels
result = s.reorder_levels(['L0', 'L1', 'L2'])
assert_series_equal(s, result)
# rotate, position
result = s.reorder_levels([1, 2, 0])
e_idx = MultiIndex(levels=[['one', 'two', 'three'], [0, 1], ['bar']],
labels=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0]],
names=['L1', 'L2', 'L0'])
expected = Series(np.arange(6), index=e_idx)
assert_series_equal(result, expected)
result = s.reorder_levels([0, 0, 0])
e_idx = MultiIndex(levels=[['bar'], ['bar'], ['bar']],
labels=[[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]],
names=['L0', 'L0', 'L0'])
expected = Series(range(6), index=e_idx)
assert_series_equal(result, expected)
result = s.reorder_levels(['L0', 'L0', 'L0'])
assert_series_equal(result, expected)
| mit |
DBernardes/ProjetoECC | Eficiência_Quântica/Codigo/QE_calcFluxo.py | 1 | 6677 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
Created on November 08, 2016
Laboratorio Nacional de Astrofisica, Brazil.
@author: Denis Varise Bernardes & Eder Martioli
Description: this library provides the following functions:
GeraVetorFluxoCamera: takes the header of an image, the number of images taken at the same wavelength and the previously
measured CCD gain; it passes the image, exposure time, median background value and gain of each image to the calcFluxo
function, which returns the CCD flux value, appending it to a vector.
criaArqFluxoCamera: given the camera-flux and flux-standard-deviation vectors, this function creates the "Fluxo camera.dat"
file, writing the contents of these two vectors in two columns.
calcFluxo: given the parameters image, exposure time, medianBackground, stdBackground and gain, this function computes the
photon flux within a box of pixels. To do so, it calls the caixaPixels function.
caixaPixels: given the image, the central coordinates and the box size, returns an array of the pixels inside that region.
getVetorEtime: given the list of images acquired at the same wavelength, returns the exposure time of each image in the
list.
getDadosBackground: reads the data of the previously generated dadosBackground.dat file, returning two vectors: the median
and the standard deviation of the background.
CalcErroDetector: computes the fractional error of the detector for a given wavelength. This function is based on the
description in the operating manual of the device (OL-750-HSD-301C, Optronic Laboratories, Inc.).
FluxoRelativo: computes the relative flux between the CCD and the detector; in addition, it corrects the obtained value
with the detector calibration curve (read from the calibration file through the LeArq_curvaCalibDetector function).
It adds the variance of the images to the variance of the detector (obtained through the CalcErroDetector function),
appending the resulting value to a vector.
example: ./BackgroundCompleto.py --list=list
"""
__version__ = "1.0"
__copyright__ = """
Copyright (c) ... All rights reserved.
"""
import astropy.io.fits as fits
import numpy as np
import os
from math import sqrt, cos
from scipy.interpolate import interp1d
from QE_reduceImgs_readArq import LeArquivoReturnLista, LeArq_curvaCalibFiltroDensidade
from QE_GraphLib import returnMax
import matplotlib.pyplot as plt
def GeraVetorFluxoCamera(header,ganho, tagDado, tagRef, lenPixel, Dfotometro, images_path):
    print('\nCalculating the total camera flux \n')
chdir = images_path + '\\' + 'Imagens_reduzidas'
coordx = int(header['naxis1']/2)
coordy = int(header['naxis2']/2)
    Cdimension = int(Dfotometro/(lenPixel*1e-3)) # photometer diameter divided by the CCD pixel size = number of pixels needed for the pixel-box dimension.
etime2 = getVetorEtime(tagDado, images_path)
etime1 = getVetorEtime(tagRef, images_path)
VetorStdDiff = getStdDiffImages(images_path)
vetorFluxoCamera, i = [], 0
with open(chdir + '\\' + 'listaImagensReduzidas') as f:
lista = f.read().splitlines()
for img in lista:
data = fits.getdata(chdir + '\\' + img)
data = caixaPixels(data,(coordx,coordy,Cdimension))
fluxo = calcFluxo(data, etime2[i]-etime1[i], ganho)
vetorFluxoCamera.append(fluxo)
i+=1
f.close()
criaArqFluxoCamera(vetorFluxoCamera, VetorStdDiff, images_path)
def caixaPixels(imagem, tupla):
    # extracts a single box of pixels, given the (x,y) coordinates and its size
xcoord = tupla[0]
ycoord = tupla[1]
dimension = int(tupla[2]/2)
d = dimension
imagem = imagem[xcoord-d:xcoord+d,ycoord-d:ycoord+d]
return imagem
def calcFluxo(data, etime, ganho):
    Somapixels = sum(sum(data))*ganho # sum of the pixel values, median background already subtracted
    fluxoImagem = Somapixels/etime # total counts divided by the exposure time
return fluxoImagem
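# --- Editor's note: a minimal sketch of how these helpers combine (file name and values
# are illustrative, not taken from the original pipeline). ---
#
#     data = fits.getdata('reduced_image.fits')       # hypothetical reduced frame
#     box = caixaPixels(data, (512, 512, 200))        # 200x200 pixel box centred at (512, 512)
#     flux = calcFluxo(box, etime=10.0, ganho=3.3)    # electrons per second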
def FluxoRelativo(Fluxocamera,Fluxodetector, Stdcamera, Strespectro, nomeArq_CalibFiltroDensidade, images_path):
vetorEQ, vetorSigmaTotal = [], []
Split_Str_espectro = Strespectro.split(',')
Einicial = int(Split_Str_espectro[0])
Efinal = int(Split_Str_espectro[1])
step = int(Split_Str_espectro[2])
n = int((Efinal - Einicial)/step) + 1
espectro = np.linspace(Einicial, Efinal, n)
VetorFiltroDensidade = LeArq_curvaCalibFiltroDensidade(nomeArq_CalibFiltroDensidade, n, images_path)
for i in range(len(Fluxocamera)):
h = 6.62607004e-34
c = 299792458 #m/s
ErroPorcentDetector = CalcErroDetector(espectro[i])
        # if the density-filter file name is not provided, the function returns a vector containing only the value 1.
A = Fluxocamera[i]*100
B = VetorFiltroDensidade[i]*Fluxodetector[i]*espectro[i]*1e-9/(h*c)
sigmaDetector = ErroPorcentDetector*B
EQ = A/B
#print(A/(VetorFiltroDensidade[i]*espectro[i]*1e-9/(h*c))/0.27)
varianceTotal = EQ**2*((Stdcamera[i]/A)**2+(sigmaDetector/B)**2)
vetorEQ.append(EQ)
vetorSigmaTotal.append(sqrt(varianceTotal))
i+=1
return vetorEQ, vetorSigmaTotal
def getVetorEtime(tagDado, images_path):
arquivoListaImagens = tagDado+'List.txt'
vetorEtime = []
listaImagens = LeArquivoReturnLista(arquivoListaImagens, images_path)
for i in range(len(listaImagens)):
header = fits.getheader(listaImagens[i])
vetorEtime.append(header['exposure'])
return vetorEtime
def getStdDiffImages(images_path):
StdDiff = []
with open(images_path + '\\' + 'StdDiffImages') as arq:
listaValues = arq.read().splitlines()
for linha in listaValues[1:]:
StdDiff.append(float(linha))
arq.close()
return StdDiff
def CalcErroDetector(Comp_onda):
photodiodeError = 0
NIST_OLSD = 0.005
if 250< Comp_onda < 400:
photodiodeError = 0.010
if 400< Comp_onda < 900:
photodiodeError = 0.005
if 900< Comp_onda < 1000:
photodiodeError = 0.022
if 1000< Comp_onda < 1100:
photodiodeError = 0.022
ErroDetector = NIST_OLSD**2 + photodiodeError**2
return sqrt(ErroDetector)
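# --- Editor's note: worked example (illustrative). For a wavelength of 600 nm the
# photodiode error is 0.005, so CalcErroDetector(600) = sqrt(0.005**2 + 0.005**2),
# which is about 0.0071, i.e. roughly 0.7 %. ---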
def criaArqFluxoCamera(VetorF, vetorSigma, images_path):
nome = images_path + '\\' + 'Fluxo camera.dat'
try:
arq = open(nome, 'w')
except:
        os.remove(nome)
arq = open(nome, 'w')
arq.write(' Fluxo (e/s) Sigma (e/s)\n')
for i in range(len(VetorF)):
arq.write('%e \t\t\t %f\n' %(VetorF[i], vetorSigma[i]))
arq.close()
| mit |
jmargeta/scikit-learn | sklearn/utils/arpack.py | 4 | 64317 | """
This contains a copy of the future version of
scipy.sparse.linalg.eigen.arpack.eigsh
It's an upgraded wrapper of the ARPACK library which
allows the use of shift-invert mode for symmetric matrices.
Find a few eigenvectors and eigenvalues of a matrix.
Uses ARPACK: http://www.caam.rice.edu/software/ARPACK/
"""
# Wrapper implementation notes
#
# ARPACK Entry Points
# -------------------
# The entry points to ARPACK are
# - (s,d)seupd : single and double precision symmetric matrix
# - (s,d,c,z)neupd: single,double,complex,double complex general matrix
# This wrapper puts the *neupd (general matrix) interfaces in eigs()
# and the *seupd (symmetric matrix) in eigsh().
# There is no Hermitian complex/double complex interface.
# To find eigenvalues of a Hermitian matrix you
# must use eigs() and not eigsh()
# It might be desirable to handle the Hermitian case differently
# and, for example, return real eigenvalues.
# Number of eigenvalues returned and complex eigenvalues
# ------------------------------------------------------
# The ARPACK nonsymmetric real and double interface (s,d)naupd return
# eigenvalues and eigenvectors in real (float,double) arrays.
# Since the eigenvalues and eigenvectors are, in general, complex
# ARPACK puts the real and imaginary parts in consecutive entries
# in real-valued arrays. This wrapper puts the real entries
# into complex data types and attempts to return the requested eigenvalues
# and eigenvectors.
# Solver modes
# ------------
# ARPACK can handle shifted and shift-inverse computations
# for eigenvalues by providing a shift (sigma) and a solver.
__docformat__ = "restructuredtext en"
__all__ = ['eigs', 'eigsh', 'svds', 'ArpackError', 'ArpackNoConvergence']
from scipy.sparse.linalg.eigen.arpack import _arpack
import numpy as np
from scipy.sparse.linalg.interface import aslinearoperator, LinearOperator
from scipy.sparse import identity, isspmatrix, isspmatrix_csr
from scipy.linalg import lu_factor, lu_solve
from scipy.sparse.sputils import isdense
from scipy.sparse.linalg import gmres, splu
import scipy
from distutils.version import LooseVersion
_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z'}
_ndigits = {'f': 5, 'd': 12, 'F': 5, 'D': 12}
DNAUPD_ERRORS = {
0: "Normal exit.",
1: "Maximum number of iterations taken. "
"All possible eigenvalues of OP has been found. IPARAM(5) "
"returns the number of wanted converged Ritz values.",
2: "No longer an informational error. Deprecated starting "
"with release 2 of ARPACK.",
3: "No shifts could be applied during a cycle of the "
"Implicitly restarted Arnoldi iteration. One possibility "
"is to increase the size of NCV relative to NEV. ",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 2 and less than or equal to N.",
-4: "The maximum number of Arnoldi update iterations allowed "
"must be greater than zero.",
-5: " WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work array WORKL is not sufficient.",
-8: "Error return from LAPACK eigenvalue calculation;",
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatable.",
-12: "IPARAM(1) must be equal to 0 or 1.",
-13: "NEV and WHICH = 'BE' are incompatable.",
-9999: "Could not build an Arnoldi factorization. "
"IPARAM(5) returns the size of the current Arnoldi "
"factorization. The user is advised to check that "
"enough workspace and array storage has been allocated."
}
SNAUPD_ERRORS = DNAUPD_ERRORS
ZNAUPD_ERRORS = DNAUPD_ERRORS.copy()
ZNAUPD_ERRORS[-10] = "IPARAM(7) must be 1,2,3."
CNAUPD_ERRORS = ZNAUPD_ERRORS
DSAUPD_ERRORS = {
0: "Normal exit.",
1: "Maximum number of iterations taken. "
"All possible eigenvalues of OP has been found.",
2: "No longer an informational error. Deprecated starting with "
"release 2 of ARPACK.",
3: "No shifts could be applied during a cycle of the Implicitly "
"restarted Arnoldi iteration. One possibility is to increase "
"the size of NCV relative to NEV. ",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV must be greater than NEV and less than or equal to N.",
-4: "The maximum number of Arnoldi update iterations allowed "
"must be greater than zero.",
-5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work array WORKL is not sufficient.",
-8: "Error return from trid. eigenvalue calculation; "
"Informational error from LAPACK routine dsteqr .",
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4,5.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatable.",
-12: "IPARAM(1) must be equal to 0 or 1.",
-13: "NEV and WHICH = 'BE' are incompatable. ",
-9999: "Could not build an Arnoldi factorization. "
"IPARAM(5) returns the size of the current Arnoldi "
"factorization. The user is advised to check that "
"enough workspace and array storage has been allocated.",
}
SSAUPD_ERRORS = DSAUPD_ERRORS
DNEUPD_ERRORS = {
0: "Normal exit.",
1: "The Schur form computed by LAPACK routine dlahqr "
"could not be reordered by LAPACK routine dtrsen. "
"Re-enter subroutine dneupd with IPARAM(5)NCV and "
"increase the size of the arrays DR and DI to have "
"dimension at least dimension NCV and allocate at least NCV "
"columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error"
"occurs.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 2 and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: "Error return from calculation of a real Schur form. "
"Informational error from LAPACK routine dlahqr .",
-9: "Error return from calculation of eigenvectors. "
"Informational error from LAPACK routine dtrevc.",
-10: "IPARAM(7) must be 1,2,3,4.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "HOWMNY = 'S' not yet implemented",
-13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
-14: "DNAUPD did not find any eigenvalues to sufficient "
"accuracy.",
-15: "DNEUPD got a different count of the number of converged "
"Ritz values than DNAUPD got. This indicates the user "
"probably made an error in passing data from DNAUPD to "
"DNEUPD or that the data was modified before entering "
"DNEUPD",
}
SNEUPD_ERRORS = DNEUPD_ERRORS.copy()
SNEUPD_ERRORS[1] = ("The Schur form computed by LAPACK routine slahqr "
"could not be reordered by LAPACK routine strsen . "
"Re-enter subroutine dneupd with IPARAM(5)=NCV and "
"increase the size of the arrays DR and DI to have "
"dimension at least dimension NCV and allocate at least "
"NCV columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.")
SNEUPD_ERRORS[-14] = ("SNAUPD did not find any eigenvalues to sufficient "
"accuracy.")
SNEUPD_ERRORS[-15] = ("SNEUPD got a different count of the number of "
"converged Ritz values than SNAUPD got. This indicates "
"the user probably made an error in passing data from "
"SNAUPD to SNEUPD or that the data was modified before "
"entering SNEUPD")
ZNEUPD_ERRORS = {0: "Normal exit.",
1: "The Schur form computed by LAPACK routine csheqr "
"could not be reordered by LAPACK routine ztrsen. "
"Re-enter subroutine zneupd with IPARAM(5)=NCV and "
"increase the size of the array D to have "
"dimension at least dimension NCV and allocate at least "
"NCV columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 1 and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: "Error return from LAPACK eigenvalue calculation. "
"This should never happened.",
-9: "Error return from calculation of eigenvectors. "
"Informational error from LAPACK routine ztrevc.",
-10: "IPARAM(7) must be 1,2,3",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "HOWMNY = 'S' not yet implemented",
-13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
-14: "ZNAUPD did not find any eigenvalues to sufficient "
"accuracy.",
-15: "ZNEUPD got a different count of the number of "
"converged Ritz values than ZNAUPD got. This "
"indicates the user probably made an error in passing "
"data from ZNAUPD to ZNEUPD or that the data was "
"modified before entering ZNEUPD"}
CNEUPD_ERRORS = ZNEUPD_ERRORS.copy()
CNEUPD_ERRORS[-14] = ("CNAUPD did not find any eigenvalues to sufficient "
"accuracy.")
CNEUPD_ERRORS[-15] = ("CNEUPD got a different count of the number of "
"converged Ritz values than CNAUPD got. This indicates "
"the user probably made an error in passing data from "
"CNAUPD to CNEUPD or that the data was modified before "
"entering CNEUPD")
DSEUPD_ERRORS = {
0: "Normal exit.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV must be greater than NEV and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: ("Error return from trid. eigenvalue calculation; "
"Information error from LAPACK routine dsteqr."),
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4,5.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "NEV and WHICH = 'BE' are incompatible.",
-14: "DSAUPD did not find any eigenvalues to sufficient accuracy.",
-15: "HOWMNY must be one of 'A' or 'S' if RVEC = .true.",
-16: "HOWMNY = 'S' not yet implemented",
-17: ("DSEUPD got a different count of the number of converged "
"Ritz values than DSAUPD got. This indicates the user "
"probably made an error in passing data from DSAUPD to "
"DSEUPD or that the data was modified before entering "
"DSEUPD.")
}
SSEUPD_ERRORS = DSEUPD_ERRORS.copy()
SSEUPD_ERRORS[-14] = ("SSAUPD did not find any eigenvalues "
"to sufficient accuracy.")
SSEUPD_ERRORS[-17] = ("SSEUPD got a different count of the number of "
"converged "
"Ritz values than SSAUPD got. This indicates the user "
"probably made an error in passing data from SSAUPD to "
"SSEUPD or that the data was modified before entering "
"SSEUPD.")
_SAUPD_ERRORS = {'d': DSAUPD_ERRORS,
's': SSAUPD_ERRORS}
_NAUPD_ERRORS = {'d': DNAUPD_ERRORS,
's': SNAUPD_ERRORS,
'z': ZNAUPD_ERRORS,
'c': CNAUPD_ERRORS}
_SEUPD_ERRORS = {'d': DSEUPD_ERRORS,
's': SSEUPD_ERRORS}
_NEUPD_ERRORS = {'d': DNEUPD_ERRORS,
's': SNEUPD_ERRORS,
'z': ZNEUPD_ERRORS,
'c': CNEUPD_ERRORS}
# accepted values of parameter WHICH in _SEUPD
_SEUPD_WHICH = ['LM', 'SM', 'LA', 'SA', 'BE']
# accepted values of parameter WHICH in _NAUPD
_NEUPD_WHICH = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI']
class ArpackError(RuntimeError):
"""
ARPACK error
"""
def __init__(self, info, infodict=_NAUPD_ERRORS):
msg = infodict.get(info, "Unknown error")
RuntimeError.__init__(self, "ARPACK error %d: %s" % (info, msg))
class ArpackNoConvergence(ArpackError):
"""
ARPACK iteration did not converge
Attributes
----------
eigenvalues : ndarray
Partial result. Converged eigenvalues.
eigenvectors : ndarray
Partial result. Converged eigenvectors.
"""
def __init__(self, msg, eigenvalues, eigenvectors):
ArpackError.__init__(self, -1, {-1: msg})
self.eigenvalues = eigenvalues
self.eigenvectors = eigenvectors
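# --- Editor's note: a hedged sketch of handling the convergence error (illustrative;
# `A` is assumed to be a large sparse symmetric matrix and `eigsh` is the solver
# exported by this module). ---
#
#     try:
#         vals, vecs = eigsh(A, k=6, maxiter=50)
#     except ArpackNoConvergence as err:
#         vals, vecs = err.eigenvalues, err.eigenvectors   # keep the converged subset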
class _ArpackParams(object):
def __init__(self, n, k, tp, mode=1, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
if k <= 0:
raise ValueError("k must be positive, k=%d" % k)
if maxiter is None:
maxiter = n * 10
if maxiter <= 0:
raise ValueError("maxiter must be positive, maxiter=%d" % maxiter)
if tp not in 'fdFD':
raise ValueError("matrix type must be 'f', 'd', 'F', or 'D'")
if v0 is not None:
# ARPACK overwrites its initial resid, make a copy
self.resid = np.array(v0, copy=True)
info = 1
else:
self.resid = np.zeros(n, tp)
info = 0
if sigma is None:
#sigma not used
self.sigma = 0
else:
self.sigma = sigma
if ncv is None:
ncv = 2 * k + 1
ncv = min(ncv, n)
self.v = np.zeros((n, ncv), tp) # holds Ritz vectors
self.iparam = np.zeros(11, "int")
# set solver mode and parameters
ishfts = 1
self.mode = mode
self.iparam[0] = ishfts
self.iparam[2] = maxiter
self.iparam[3] = 1
self.iparam[6] = mode
self.n = n
self.tol = tol
self.k = k
self.maxiter = maxiter
self.ncv = ncv
self.which = which
self.tp = tp
self.info = info
self.converged = False
self.ido = 0
def _raise_no_convergence(self):
msg = "No convergence (%d iterations, %d/%d eigenvectors converged)"
k_ok = self.iparam[4]
num_iter = self.iparam[2]
try:
ev, vec = self.extract(True)
except ArpackError as err:
msg = "%s [%s]" % (msg, err)
ev = np.zeros((0,))
vec = np.zeros((self.n, 0))
k_ok = 0
raise ArpackNoConvergence(msg % (num_iter, k_ok, self.k), ev, vec)
class _SymmetricArpackParams(_ArpackParams):
def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
Minv_matvec=None, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
# The following modes are supported:
# mode = 1:
# Solve the standard eigenvalue problem:
# A*x = lambda*x :
# A - symmetric
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = None [not used]
#
# mode = 2:
# Solve the general eigenvalue problem:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# Minv_matvec = left multiplication by M^-1
#
# mode = 3:
# Solve the general eigenvalue problem in shift-invert mode:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive semi-definite
# Arguments should be
# matvec = None [not used]
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
#
# mode = 4:
# Solve the general eigenvalue problem in Buckling mode:
# A*x = lambda*AG*x
# A - symmetric positive semi-definite
# AG - symmetric indefinite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = left multiplication by [A-sigma*AG]^-1
#
# mode = 5:
# Solve the general eigenvalue problem in Cayley-transformed mode:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive semi-definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
if mode == 1:
if matvec is None:
raise ValueError("matvec must be specified for mode=1")
if M_matvec is not None:
raise ValueError("M_matvec cannot be specified for mode=1")
if Minv_matvec is not None:
raise ValueError("Minv_matvec cannot be specified for mode=1")
self.OP = matvec
self.B = lambda x: x
self.bmat = 'I'
elif mode == 2:
if matvec is None:
raise ValueError("matvec must be specified for mode=2")
if M_matvec is None:
raise ValueError("M_matvec must be specified for mode=2")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=2")
self.OP = lambda x: Minv_matvec(matvec(x))
self.OPa = Minv_matvec
self.OPb = matvec
self.B = M_matvec
self.bmat = 'G'
elif mode == 3:
if matvec is not None:
raise ValueError("matvec must not be specified for mode=3")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=3")
if M_matvec is None:
self.OP = Minv_matvec
self.OPa = Minv_matvec
self.B = lambda x: x
self.bmat = 'I'
else:
self.OP = lambda x: Minv_matvec(M_matvec(x))
self.OPa = Minv_matvec
self.B = M_matvec
self.bmat = 'G'
elif mode == 4:
if matvec is None:
raise ValueError("matvec must be specified for mode=4")
if M_matvec is not None:
raise ValueError("M_matvec must not be specified for mode=4")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=4")
self.OPa = Minv_matvec
self.OP = lambda x: self.OPa(matvec(x))
self.B = matvec
self.bmat = 'G'
elif mode == 5:
if matvec is None:
raise ValueError("matvec must be specified for mode=5")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=5")
self.OPa = Minv_matvec
self.A_matvec = matvec
if M_matvec is None:
self.OP = lambda x: Minv_matvec(matvec(x) + sigma * x)
self.B = lambda x: x
self.bmat = 'I'
else:
self.OP = lambda x: Minv_matvec(matvec(x)
+ sigma * M_matvec(x))
self.B = M_matvec
self.bmat = 'G'
else:
raise ValueError("mode=%i not implemented" % mode)
if which not in _SEUPD_WHICH:
raise ValueError("which must be one of %s"
% ' '.join(_SEUPD_WHICH))
if k >= n:
raise ValueError("k must be less than rank(A), k=%d" % k)
_ArpackParams.__init__(self, n, k, tp, mode, sigma,
ncv, v0, maxiter, which, tol)
if self.ncv > n or self.ncv <= k:
raise ValueError("ncv must be k<ncv<=n, ncv=%s" % self.ncv)
self.workd = np.zeros(3 * n, self.tp)
self.workl = np.zeros(self.ncv * (self.ncv + 8), self.tp)
ltr = _type_conv[self.tp]
if ltr not in ["s", "d"]:
raise ValueError("Input matrix is not real-valued.")
self._arpack_solver = _arpack.__dict__[ltr + 'saupd']
self._arpack_extract = _arpack.__dict__[ltr + 'seupd']
self.iterate_infodict = _SAUPD_ERRORS[ltr]
self.extract_infodict = _SEUPD_ERRORS[ltr]
self.ipntr = np.zeros(11, "int")
def iterate(self):
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info = \
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl, self.info)
xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
if self.ido == -1:
# initialization
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.ido == 1:
# compute y = Op*x
if self.mode == 1:
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.mode == 2:
self.workd[xslice] = self.OPb(self.workd[xslice])
self.workd[yslice] = self.OPa(self.workd[xslice])
elif self.mode == 5:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
Ax = self.A_matvec(self.workd[xslice])
self.workd[yslice] = self.OPa(Ax + (self.sigma *
self.workd[Bxslice]))
else:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
self.workd[yslice] = self.OPa(self.workd[Bxslice])
elif self.ido == 2:
self.workd[yslice] = self.B(self.workd[xslice])
elif self.ido == 3:
raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
else:
self.converged = True
if self.info == 0:
pass
elif self.info == 1:
self._raise_no_convergence()
else:
raise ArpackError(self.info, infodict=self.iterate_infodict)
def extract(self, return_eigenvectors):
rvec = return_eigenvectors
ierr = 0
howmny = 'A' # return all eigenvectors
sselect = np.zeros(self.ncv, 'int') # unused
d, z, ierr = self._arpack_extract(rvec, howmny, sselect, self.sigma,
self.bmat, self.which, self.k,
self.tol, self.resid, self.v,
self.iparam[0:7], self.ipntr,
self.workd[0:2 * self.n],
self.workl, ierr)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
k_ok = self.iparam[4]
d = d[:k_ok]
z = z[:, :k_ok]
if return_eigenvectors:
return d, z
else:
return d
class _UnsymmetricArpackParams(_ArpackParams):
def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
Minv_matvec=None, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
# The following modes are supported:
# mode = 1:
# Solve the standard eigenvalue problem:
# A*x = lambda*x
# A - square matrix
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = None [not used]
#
# mode = 2:
# Solve the generalized eigenvalue problem:
# A*x = lambda*M*x
# A - square matrix
# M - symmetric, positive semi-definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# Minv_matvec = left multiplication by M^-1
#
# mode = 3,4:
# Solve the general eigenvalue problem in shift-invert mode:
# A*x = lambda*M*x
# A - square matrix
# M - symmetric, positive semi-definite
# Arguments should be
# matvec = None [not used]
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
# if A is real and mode==3, use the real part of Minv_matvec
# if A is real and mode==4, use the imag part of Minv_matvec
# if A is complex and mode==3,
# use real and imag parts of Minv_matvec
if mode == 1:
if matvec is None:
raise ValueError("matvec must be specified for mode=1")
if M_matvec is not None:
raise ValueError("M_matvec cannot be specified for mode=1")
if Minv_matvec is not None:
raise ValueError("Minv_matvec cannot be specified for mode=1")
self.OP = matvec
self.B = lambda x: x
self.bmat = 'I'
elif mode == 2:
if matvec is None:
raise ValueError("matvec must be specified for mode=2")
if M_matvec is None:
raise ValueError("M_matvec must be specified for mode=2")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=2")
self.OP = lambda x: Minv_matvec(matvec(x))
self.OPa = Minv_matvec
self.OPb = matvec
self.B = M_matvec
self.bmat = 'G'
elif mode in (3, 4):
if matvec is None:
raise ValueError("matvec must be specified "
"for mode in (3,4)")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified "
"for mode in (3,4)")
self.matvec = matvec
if tp in 'DF': # complex type
if mode == 3:
self.OPa = Minv_matvec
else:
raise ValueError("mode=4 invalid for complex A")
else: # real type
if mode == 3:
self.OPa = lambda x: np.real(Minv_matvec(x))
else:
self.OPa = lambda x: np.imag(Minv_matvec(x))
if M_matvec is None:
self.B = lambda x: x
self.bmat = 'I'
self.OP = self.OPa
else:
self.B = M_matvec
self.bmat = 'G'
self.OP = lambda x: self.OPa(M_matvec(x))
else:
raise ValueError("mode=%i not implemented" % mode)
if which not in _NEUPD_WHICH:
raise ValueError("Parameter which must be one of %s"
% ' '.join(_NEUPD_WHICH))
if k >= n - 1:
raise ValueError("k must be less than rank(A)-1, k=%d" % k)
_ArpackParams.__init__(self, n, k, tp, mode, sigma,
ncv, v0, maxiter, which, tol)
if self.ncv > n or self.ncv <= k + 1:
raise ValueError("ncv must be k+1<ncv<=n, ncv=%s" % self.ncv)
self.workd = np.zeros(3 * n, self.tp)
self.workl = np.zeros(3 * self.ncv * (self.ncv + 2), self.tp)
ltr = _type_conv[self.tp]
self._arpack_solver = _arpack.__dict__[ltr + 'naupd']
self._arpack_extract = _arpack.__dict__[ltr + 'neupd']
self.iterate_infodict = _NAUPD_ERRORS[ltr]
self.extract_infodict = _NEUPD_ERRORS[ltr]
self.ipntr = np.zeros(14, "int")
if self.tp in 'FD':
self.rwork = np.zeros(self.ncv, self.tp.lower())
else:
self.rwork = None
def iterate(self):
if self.tp in 'fd':
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl,
self.info)
else:
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl,
self.rwork, self.info)
xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
if self.ido == -1:
# initialization
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.ido == 1:
# compute y = Op*x
if self.mode in (1, 2):
self.workd[yslice] = self.OP(self.workd[xslice])
else:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
self.workd[yslice] = self.OPa(self.workd[Bxslice])
elif self.ido == 2:
self.workd[yslice] = self.B(self.workd[xslice])
elif self.ido == 3:
raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
else:
self.converged = True
if self.info == 0:
pass
elif self.info == 1:
self._raise_no_convergence()
else:
raise ArpackError(self.info, infodict=self.iterate_infodict)
def extract(self, return_eigenvectors):
k, n = self.k, self.n
ierr = 0
howmny = 'A' # return all eigenvectors
sselect = np.zeros(self.ncv, 'int') # unused
sigmar = np.real(self.sigma)
sigmai = np.imag(self.sigma)
workev = np.zeros(3 * self.ncv, self.tp)
if self.tp in 'fd':
dr = np.zeros(k + 1, self.tp)
di = np.zeros(k + 1, self.tp)
zr = np.zeros((n, k + 1), self.tp)
dr, di, zr, ierr = \
self._arpack_extract(
return_eigenvectors, howmny, sselect, sigmar, sigmai,
workev, self.bmat, self.which, k, self.tol, self.resid,
self.v, self.iparam, self.ipntr, self.workd, self.workl,
self.info)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
nreturned = self.iparam[4] # number of good eigenvalues returned
# Build complex eigenvalues from real and imaginary parts
d = dr + 1.0j * di
# Arrange the eigenvectors: complex eigenvectors are stored as
# real,imaginary in consecutive columns
z = zr.astype(self.tp.upper())
# The ARPACK nonsymmetric real and double interface (s,d)naupd
# return eigenvalues and eigenvectors in real (float,double)
# arrays.
# Efficiency: this should check that return_eigenvectors == True
# before going through this construction.
if sigmai == 0:
i = 0
while i <= k:
# check if complex
if abs(d[i].imag) != 0:
# this is a complex conjugate pair with eigenvalues
# in consecutive columns
if i < k:
z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
z[:, i + 1] = z[:, i].conjugate()
i += 1
else:
#last eigenvalue is complex: the imaginary part of
# the eigenvector has not been returned
#this can only happen if nreturned > k, so we'll
# throw out this case.
nreturned -= 1
i += 1
else:
# real matrix, mode 3 or 4, imag(sigma) is nonzero:
# see remark 3 in <s,d>neupd.f
# Build complex eigenvalues from real and imaginary parts
i = 0
while i <= k:
if abs(d[i].imag) == 0:
d[i] = np.dot(zr[:, i], self.matvec(zr[:, i]))
else:
if i < k:
z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
z[:, i + 1] = z[:, i].conjugate()
d[i] = ((np.dot(zr[:, i],
self.matvec(zr[:, i]))
+ np.dot(zr[:, i + 1],
self.matvec(zr[:, i + 1])))
+ 1j * (np.dot(zr[:, i],
self.matvec(zr[:, i + 1]))
- np.dot(zr[:, i + 1],
self.matvec(zr[:, i]))))
d[i + 1] = d[i].conj()
i += 1
else:
#last eigenvalue is complex: the imaginary part of
# the eigenvector has not been returned
#this can only happen if nreturned > k, so we'll
# throw out this case.
nreturned -= 1
i += 1
# Now we have k+1 possible eigenvalues and eigenvectors
# Return the ones specified by the keyword "which"
if nreturned <= k:
                # we got at most as many eigenvalues as we asked for
d = d[:nreturned]
z = z[:, :nreturned]
else:
# we got one extra eigenvalue (likely a cc pair, but which?)
# cut at approx precision for sorting
rd = np.round(d, decimals=_ndigits[self.tp])
if self.which in ['LR', 'SR']:
ind = np.argsort(rd.real)
elif self.which in ['LI', 'SI']:
# for LI,SI ARPACK returns largest,smallest
# abs(imaginary) why?
ind = np.argsort(abs(rd.imag))
else:
ind = np.argsort(abs(rd))
if self.which in ['LR', 'LM', 'LI']:
d = d[ind[-k:]]
z = z[:, ind[-k:]]
if self.which in ['SR', 'SM', 'SI']:
d = d[ind[:k]]
z = z[:, ind[:k]]
else:
# complex is so much simpler...
d, z, ierr =\
self._arpack_extract(
return_eigenvectors, howmny, sselect, self.sigma, workev,
self.bmat, self.which, k, self.tol, self.resid, self.v,
self.iparam, self.ipntr, self.workd, self.workl,
self.rwork, ierr)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
k_ok = self.iparam[4]
d = d[:k_ok]
z = z[:, :k_ok]
if return_eigenvectors:
return d, z
else:
return d
def _aslinearoperator_with_dtype(m):
m = aslinearoperator(m)
if not hasattr(m, 'dtype'):
x = np.zeros(m.shape[1])
m.dtype = (m * x).dtype
return m
class SpLuInv(LinearOperator):
"""
SpLuInv:
helper class to repeatedly solve M*x=b
    using a sparse LU-decomposition of M
"""
def __init__(self, M):
self.M_lu = splu(M)
LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype)
self.isreal = not np.issubdtype(self.dtype, np.complexfloating)
def _matvec(self, x):
# careful here: splu.solve will throw away imaginary
# part of x if M is real
if self.isreal and np.issubdtype(x.dtype, np.complexfloating):
return (self.M_lu.solve(np.real(x))
+ 1j * self.M_lu.solve(np.imag(x)))
else:
return self.M_lu.solve(x)
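# Illustrative sketch (added, not part of the original backport): why
# SpLuInv._matvec splits a complex right-hand side when M is real.  splu of a
# real matrix would silently drop the imaginary part of b, so the real and
# imaginary systems are solved separately.  The 3x3 matrix and right-hand side
# below are arbitrary example values.
def _demo_spluinv_complex_rhs():
    import numpy as np
    from scipy.sparse import csc_matrix
    from scipy.sparse.linalg import splu
    M = csc_matrix(np.array([[4.0, 1.0, 0.0],
                             [1.0, 3.0, 1.0],
                             [0.0, 1.0, 2.0]]))
    lu = splu(M)
    b = np.array([1.0 + 2.0j, 0.0, 1.0j])
    # Solve for the real and imaginary parts separately, as SpLuInv does.
    x = lu.solve(b.real) + 1j * lu.solve(b.imag)
    return x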
class LuInv(LinearOperator):
"""
LuInv:
helper class to repeatedly solve M*x=b
using an LU-decomposition of M
"""
def __init__(self, M):
self.M_lu = lu_factor(M)
LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype)
def _matvec(self, x):
return lu_solve(self.M_lu, x)
class IterInv(LinearOperator):
"""
IterInv:
helper class to repeatedly solve M*x=b
using an iterative method.
"""
def __init__(self, M, ifunc=gmres, tol=0):
if tol <= 0:
# when tol=0, ARPACK uses machine tolerance as calculated
# by LAPACK's _LAMCH function. We should match this
tol = np.finfo(M.dtype).eps
self.M = M
self.ifunc = ifunc
self.tol = tol
if hasattr(M, 'dtype'):
dtype = M.dtype
else:
x = np.zeros(M.shape[1])
dtype = (M * x).dtype
LinearOperator.__init__(self, M.shape, self._matvec, dtype=dtype)
def _matvec(self, x):
b, info = self.ifunc(self.M, x, tol=self.tol)
if info != 0:
raise ValueError("Error in inverting M: function "
"%s did not converge (info = %i)."
% (self.ifunc.__name__, info))
return b
class IterOpInv(LinearOperator):
"""
IterOpInv:
helper class to repeatedly solve [A-sigma*M]*x = b
using an iterative method
"""
def __init__(self, A, M, sigma, ifunc=gmres, tol=0):
if tol <= 0:
# when tol=0, ARPACK uses machine tolerance as calculated
# by LAPACK's _LAMCH function. We should match this
tol = np.finfo(A.dtype).eps
self.A = A
self.M = M
self.sigma = sigma
self.ifunc = ifunc
self.tol = tol
x = np.zeros(A.shape[1])
if M is None:
dtype = self.mult_func_M_None(x).dtype
self.OP = LinearOperator(self.A.shape,
self.mult_func_M_None,
dtype=dtype)
else:
dtype = self.mult_func(x).dtype
self.OP = LinearOperator(self.A.shape,
self.mult_func,
dtype=dtype)
LinearOperator.__init__(self, A.shape, self._matvec, dtype=dtype)
def mult_func(self, x):
return self.A.matvec(x) - self.sigma * self.M.matvec(x)
def mult_func_M_None(self, x):
return self.A.matvec(x) - self.sigma * x
def _matvec(self, x):
b, info = self.ifunc(self.OP, x, tol=self.tol)
if info != 0:
raise ValueError("Error in inverting [A-sigma*M]: function "
"%s did not converge (info = %i)."
% (self.ifunc.__name__, info))
return b
def get_inv_matvec(M, symmetric=False, tol=0):
if isdense(M):
return LuInv(M).matvec
elif isspmatrix(M):
if isspmatrix_csr(M) and symmetric:
M = M.T
return SpLuInv(M).matvec
else:
return IterInv(M, tol=tol).matvec
def get_OPinv_matvec(A, M, sigma, symmetric=False, tol=0):
if sigma == 0:
return get_inv_matvec(A, symmetric=symmetric, tol=tol)
if M is None:
#M is the identity matrix
if isdense(A):
if (np.issubdtype(A.dtype, np.complexfloating)
or np.imag(sigma) == 0):
A = np.copy(A)
else:
A = A + 0j
A.flat[::A.shape[1] + 1] -= sigma
return LuInv(A).matvec
elif isspmatrix(A):
A = A - sigma * identity(A.shape[0])
if symmetric and isspmatrix_csr(A):
A = A.T
return SpLuInv(A.tocsc()).matvec
else:
return IterOpInv(_aslinearoperator_with_dtype(A), M, sigma,
tol=tol).matvec
else:
if ((not isdense(A) and not isspmatrix(A)) or
(not isdense(M) and not isspmatrix(M))):
return IterOpInv(_aslinearoperator_with_dtype(A),
_aslinearoperator_with_dtype(M), sigma,
tol=tol).matvec
elif isdense(A) or isdense(M):
return LuInv(A - sigma * M).matvec
else:
OP = A - sigma * M
if symmetric and isspmatrix_csr(OP):
OP = OP.T
return SpLuInv(OP.tocsc()).matvec
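# Illustrative sketch (added, not part of the original backport): the callable
# returned by get_OPinv_matvec applies [A - sigma*M]^-1 to a vector, which is
# what the shift-invert modes feed to ARPACK.  The 2x2 matrix, sigma and test
# vector are arbitrary example values.
def _demo_get_OPinv_matvec():
    import numpy as np
    A = np.array([[2.0, 1.0],
                  [1.0, 3.0]])
    sigma = 0.5
    opinv = get_OPinv_matvec(A, None, sigma)
    x = np.array([1.0, -1.0])
    # Should match a direct dense solve with [A - sigma*I].
    direct = np.linalg.solve(A - sigma * np.eye(2), x)
    return opinv(x), direct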
def _eigs(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None,
maxiter=None, tol=0, return_eigenvectors=True, Minv=None, OPinv=None,
OPpart=None):
"""
Find k eigenvalues and eigenvectors of the square matrix A.
Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem
for w[i] eigenvalues with corresponding eigenvectors x[i].
If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the
generalized eigenvalue problem for w[i] eigenvalues
with corresponding eigenvectors x[i]
Parameters
----------
A : An N x N matrix, array, sparse matrix, or LinearOperator representing
the operation A * x, where A is a real or complex square matrix.
k : integer
The number of eigenvalues and eigenvectors desired.
`k` must be smaller than N. It is not possible to compute all
eigenvectors of a matrix.
Returns
-------
w : array
Array of k eigenvalues.
v : array
An array of `k` eigenvectors.
``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i].
Other Parameters
----------------
M : An N x N matrix, array, sparse matrix, or LinearOperator representing
the operation M*x for the generalized eigenvalue problem
``A * x = w * M * x``
M must represent a real symmetric matrix. For best results, M should
be of the same type as A. Additionally:
* If sigma==None, M is positive definite
* If sigma is specified, M is positive semi-definite
If sigma==None, eigs requires an operator to compute the solution
of the linear equation `M * x = b`. This is done internally via a
(sparse) LU decomposition for an explicit matrix M, or via an
iterative solver for a general linear operator. Alternatively,
the user can supply the matrix or operator Minv, which gives
x = Minv * b = M^-1 * b
sigma : real or complex
Find eigenvalues near sigma using shift-invert mode. This requires
an operator to compute the solution of the linear system
`[A - sigma * M] * x = b`, where M is the identity matrix if
unspecified. This is computed internally via a (sparse) LU
decomposition for explicit matrices A & M, or via an iterative
solver if either A or M is a general linear operator.
Alternatively, the user can supply the matrix or operator OPinv,
which gives x = OPinv * b = [A - sigma * M]^-1 * b.
For a real matrix A, shift-invert can either be done in imaginary
mode or real mode, specified by the parameter OPpart ('r' or 'i').
Note that when sigma is specified, the keyword 'which' (below)
refers to the shifted eigenvalues w'[i] where:
* If A is real and OPpart == 'r' (default),
w'[i] = 1/2 * [ 1/(w[i]-sigma) + 1/(w[i]-conj(sigma)) ]
* If A is real and OPpart == 'i',
w'[i] = 1/2i * [ 1/(w[i]-sigma) - 1/(w[i]-conj(sigma)) ]
* If A is complex,
w'[i] = 1/(w[i]-sigma)
v0 : array
Starting vector for iteration.
ncv : integer
The number of Lanczos vectors generated
`ncv` must be greater than `k`; it is recommended that ``ncv > 2*k``.
which : string ['LM' | 'SM' | 'LR' | 'SR' | 'LI' | 'SI']
Which `k` eigenvectors and eigenvalues to find:
- 'LM' : largest magnitude
- 'SM' : smallest magnitude
- 'LR' : largest real part
- 'SR' : smallest real part
- 'LI' : largest imaginary part
- 'SI' : smallest imaginary part
When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
(see discussion in 'sigma', above). ARPACK is generally better
at finding large values than small values. If small eigenvalues are
desired, consider using shift-invert mode for better performance.
maxiter : integer
Maximum number of Arnoldi update iterations allowed
tol : float
Relative accuracy for eigenvalues (stopping criterion)
The default value of 0 implies machine precision.
return_eigenvectors : boolean
Return eigenvectors (True) in addition to eigenvalues
Minv : N x N matrix, array, sparse matrix, or linear operator
See notes in M, above.
OPinv : N x N matrix, array, sparse matrix, or linear operator
See notes in sigma, above.
OPpart : 'r' or 'i'.
See notes in sigma, above
Raises
------
ArpackNoConvergence
When the requested convergence is not obtained.
The currently converged eigenvalues and eigenvectors can be found
as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
object.
See Also
--------
eigsh : eigenvalues and eigenvectors for symmetric matrix A
svds : singular value decomposition for a matrix A
Examples
--------
Find 6 eigenvectors of the identity matrix:
>>> from sklearn.utils.arpack import eigs
>>> id = np.identity(13)
>>> vals, vecs = eigs(id, k=6)
>>> vals
array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j])
>>> vecs.shape
(13, 6)
Notes
-----
This function is a wrapper to the ARPACK [1]_ SNEUPD, DNEUPD, CNEUPD,
ZNEUPD, functions which use the Implicitly Restarted Arnoldi Method to
find the eigenvalues and eigenvectors [2]_.
References
----------
.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
"""
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
if M is not None:
if M.shape != A.shape:
raise ValueError('wrong M dimensions %s, should be %s'
% (M.shape, A.shape))
if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
import warnings
warnings.warn('M does not have the same type precision as A. '
'This may adversely affect ARPACK convergence')
n = A.shape[0]
if k <= 0 or k >= n:
raise ValueError("k must be between 1 and rank(A)-1")
if sigma is None:
matvec = _aslinearoperator_with_dtype(A).matvec
if OPinv is not None:
raise ValueError("OPinv should not be specified "
"with sigma = None.")
if OPpart is not None:
raise ValueError("OPpart should not be specified with "
"sigma = None or complex A")
if M is None:
#standard eigenvalue problem
mode = 1
M_matvec = None
Minv_matvec = None
if Minv is not None:
raise ValueError("Minv should not be "
"specified with M = None.")
else:
#general eigenvalue problem
mode = 2
if Minv is None:
Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)
else:
Minv = _aslinearoperator_with_dtype(Minv)
Minv_matvec = Minv.matvec
M_matvec = _aslinearoperator_with_dtype(M).matvec
else:
#sigma is not None: shift-invert mode
if np.issubdtype(A.dtype, np.complexfloating):
if OPpart is not None:
raise ValueError("OPpart should not be specified "
"with sigma=None or complex A")
mode = 3
elif OPpart is None or OPpart.lower() == 'r':
mode = 3
elif OPpart.lower() == 'i':
if np.imag(sigma) == 0:
raise ValueError("OPpart cannot be 'i' if sigma is real")
mode = 4
else:
raise ValueError("OPpart must be one of ('r','i')")
matvec = _aslinearoperator_with_dtype(A).matvec
if Minv is not None:
raise ValueError("Minv should not be specified when sigma is")
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=False, tol=tol)
else:
OPinv = _aslinearoperator_with_dtype(OPinv)
Minv_matvec = OPinv.matvec
if M is None:
M_matvec = None
else:
M_matvec = _aslinearoperator_with_dtype(M).matvec
params = _UnsymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
M_matvec, Minv_matvec, sigma,
ncv, v0, maxiter, which, tol)
while not params.converged:
params.iterate()
return params.extract(return_eigenvectors)
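# Illustrative sketch (added, not part of the original backport): shift-invert
# with the internal _eigs wrapper.  The diagonal test matrix and sigma=4.1 are
# arbitrary example values; user code would normally call the public `eigs`
# name selected at the bottom of this module.
def _demo_eigs_shift_invert():
    import numpy as np
    A = np.diag(np.arange(1.0, 11.0))      # eigenvalues 1..10
    # which='LM' with sigma set returns the eigenvalues closest to sigma.
    vals, vecs = _eigs(A, k=3, sigma=4.1, which='LM')
    return np.sort(vals.real)              # expected to be close to [3, 4, 5]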
def _eigsh(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None,
maxiter=None, tol=0, return_eigenvectors=True, Minv=None,
OPinv=None, mode='normal'):
"""
Find k eigenvalues and eigenvectors of the real symmetric square matrix
or complex hermitian matrix A.
Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem for
w[i] eigenvalues with corresponding eigenvectors x[i].
If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the
generalized eigenvalue problem for w[i] eigenvalues
with corresponding eigenvectors x[i]
Parameters
----------
A : An N x N matrix, array, sparse matrix, or LinearOperator representing
the operation A * x, where A is a real symmetric matrix
For buckling mode (see below) A must additionally be positive-definite
k : integer
The number of eigenvalues and eigenvectors desired.
`k` must be smaller than N. It is not possible to compute all
eigenvectors of a matrix.
Returns
-------
w : array
Array of k eigenvalues
v : array
        An array of k eigenvectors.
        ``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i].
Other Parameters
----------------
M : An N x N matrix, array, sparse matrix, or linear operator representing
the operation M * x for the generalized eigenvalue problem
``A * x = w * M * x``.
M must represent a real, symmetric matrix. For best results, M should
be of the same type as A. Additionally:
* If sigma == None, M is symmetric positive definite
* If sigma is specified, M is symmetric positive semi-definite
* In buckling mode, M is symmetric indefinite.
If sigma == None, eigsh requires an operator to compute the solution
of the linear equation `M * x = b`. This is done internally via a
(sparse) LU decomposition for an explicit matrix M, or via an
iterative solver for a general linear operator. Alternatively,
the user can supply the matrix or operator Minv, which gives
x = Minv * b = M^-1 * b
sigma : real
Find eigenvalues near sigma using shift-invert mode. This requires
an operator to compute the solution of the linear system
`[A - sigma * M] x = b`, where M is the identity matrix if
unspecified. This is computed internally via a (sparse) LU
decomposition for explicit matrices A & M, or via an iterative
solver if either A or M is a general linear operator.
Alternatively, the user can supply the matrix or operator OPinv,
which gives x = OPinv * b = [A - sigma * M]^-1 * b.
Note that when sigma is specified, the keyword 'which' refers to
the shifted eigenvalues w'[i] where:
- if mode == 'normal',
w'[i] = 1 / (w[i] - sigma)
- if mode == 'cayley',
w'[i] = (w[i] + sigma) / (w[i] - sigma)
- if mode == 'buckling',
w'[i] = w[i] / (w[i] - sigma)
(see further discussion in 'mode' below)
v0 : array
Starting vector for iteration.
ncv : integer
The number of Lanczos vectors generated
ncv must be greater than k and smaller than n;
it is recommended that ncv > 2*k
which : string ['LM' | 'SM' | 'LA' | 'SA' | 'BE']
If A is a complex hermitian matrix, 'BE' is invalid.
Which `k` eigenvectors and eigenvalues to find:
- 'LM' : Largest (in magnitude) eigenvalues
- 'SM' : Smallest (in magnitude) eigenvalues
- 'LA' : Largest (algebraic) eigenvalues
- 'SA' : Smallest (algebraic) eigenvalues
- 'BE' : Half (k/2) from each end of the spectrum
When k is odd, return one more (k/2+1) from the high end
When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
(see discussion in 'sigma', above). ARPACK is generally better
at finding large values than small values. If small eigenvalues are
desired, consider using shift-invert mode for better performance.
maxiter : integer
Maximum number of Arnoldi update iterations allowed
tol : float
Relative accuracy for eigenvalues (stopping criterion).
The default value of 0 implies machine precision.
Minv : N x N matrix, array, sparse matrix, or LinearOperator
See notes in M, above
OPinv : N x N matrix, array, sparse matrix, or LinearOperator
See notes in sigma, above.
return_eigenvectors : boolean
Return eigenvectors (True) in addition to eigenvalues
mode : string ['normal' | 'buckling' | 'cayley']
Specify strategy to use for shift-invert mode. This argument applies
only for real-valued A and sigma != None. For shift-invert mode,
ARPACK internally solves the eigenvalue problem
``OP * x'[i] = w'[i] * B * x'[i]``
and transforms the resulting Ritz vectors x'[i] and Ritz values w'[i]
into the desired eigenvectors and eigenvalues of the problem
``A * x[i] = w[i] * M * x[i]``.
The modes are as follows:
- 'normal' : OP = [A - sigma * M]^-1 * M
B = M
w'[i] = 1 / (w[i] - sigma)
- 'buckling' : OP = [A - sigma * M]^-1 * A
B = A
w'[i] = w[i] / (w[i] - sigma)
- 'cayley' : OP = [A - sigma * M]^-1 * [A + sigma * M]
B = M
w'[i] = (w[i] + sigma) / (w[i] - sigma)
The choice of mode will affect which eigenvalues are selected by
the keyword 'which', and can also impact the stability of
convergence (see [2] for a discussion)
Raises
------
ArpackNoConvergence
When the requested convergence is not obtained.
The currently converged eigenvalues and eigenvectors can be found
as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
object.
See Also
--------
eigs : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A
svds : singular value decomposition for a matrix A
Notes
-----
This function is a wrapper to the ARPACK [1]_ SSEUPD and DSEUPD
functions which use the Implicitly Restarted Lanczos Method to
find the eigenvalues and eigenvectors [2]_.
Examples
--------
>>> from sklearn.utils.arpack import eigsh
>>> id = np.identity(13)
>>> vals, vecs = eigsh(id, k=6)
>>> vals # doctest: +SKIP
array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j])
>>> print(vecs.shape)
(13, 6)
References
----------
.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
"""
# complex hermitian matrices should be solved with eigs
if np.issubdtype(A.dtype, np.complexfloating):
if mode != 'normal':
raise ValueError("mode=%s cannot be used with "
"complex matrix A" % mode)
if which == 'BE':
raise ValueError("which='BE' cannot be used with complex matrix A")
elif which == 'LA':
which = 'LR'
elif which == 'SA':
which = 'SR'
ret = eigs(A, k, M=M, sigma=sigma, which=which, v0=v0,
ncv=ncv, maxiter=maxiter, tol=tol,
return_eigenvectors=return_eigenvectors, Minv=Minv,
OPinv=OPinv)
if return_eigenvectors:
return ret[0].real, ret[1]
else:
return ret.real
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
if M is not None:
if M.shape != A.shape:
raise ValueError('wrong M dimensions %s, should be %s'
% (M.shape, A.shape))
if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
import warnings
warnings.warn('M does not have the same type precision as A. '
'This may adversely affect ARPACK convergence')
n = A.shape[0]
if k <= 0 or k >= n:
raise ValueError("k must be between 1 and rank(A)-1")
if sigma is None:
A = _aslinearoperator_with_dtype(A)
matvec = A.matvec
if OPinv is not None:
raise ValueError("OPinv should not be specified "
"with sigma = None.")
if M is None:
#standard eigenvalue problem
mode = 1
M_matvec = None
Minv_matvec = None
if Minv is not None:
raise ValueError("Minv should not be "
"specified with M = None.")
else:
#general eigenvalue problem
mode = 2
if Minv is None:
Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)
else:
Minv = _aslinearoperator_with_dtype(Minv)
Minv_matvec = Minv.matvec
M_matvec = _aslinearoperator_with_dtype(M).matvec
else:
# sigma is not None: shift-invert mode
if Minv is not None:
raise ValueError("Minv should not be specified when sigma is")
# normal mode
if mode == 'normal':
mode = 3
matvec = None
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
OPinv = _aslinearoperator_with_dtype(OPinv)
Minv_matvec = OPinv.matvec
if M is None:
M_matvec = None
else:
M = _aslinearoperator_with_dtype(M)
M_matvec = M.matvec
# buckling mode
elif mode == 'buckling':
mode = 4
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
matvec = _aslinearoperator_with_dtype(A).matvec
M_matvec = None
# cayley-transform mode
elif mode == 'cayley':
mode = 5
matvec = _aslinearoperator_with_dtype(A).matvec
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
if M is None:
M_matvec = None
else:
M_matvec = _aslinearoperator_with_dtype(M).matvec
# unrecognized mode
else:
raise ValueError("unrecognized mode '%s'" % mode)
params = _SymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
M_matvec, Minv_matvec, sigma,
ncv, v0, maxiter, which, tol)
while not params.converged:
params.iterate()
return params.extract(return_eigenvectors)
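# Illustrative sketch (added, not part of the original backport): the internal
# _eigsh wrapper in shift-invert mode.  With sigma=0 the shifted eigenvalues
# 1/(w - 0) are largest for the smallest w, so which='LM' targets the smallest
# eigenvalues of a positive definite matrix.  The tridiagonal test matrix is
# an arbitrary example.
def _demo_eigsh_shift_invert():
    import numpy as np
    n = 20
    A = (np.diag(2.0 * np.ones(n))
         + np.diag(-1.0 * np.ones(n - 1), 1)
         + np.diag(-1.0 * np.ones(n - 1), -1))
    vals, vecs = _eigsh(A, k=3, sigma=0, which='LM')
    return np.sort(vals)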
def _svds(A, k=6, ncv=None, tol=0):
"""Compute k singular values/vectors for a sparse matrix using ARPACK.
Parameters
----------
A : sparse matrix
Array to compute the SVD on
k : int, optional
Number of singular values and vectors to compute.
ncv : integer
The number of Lanczos vectors generated
ncv must be greater than k+1 and smaller than n;
it is recommended that ncv > 2*k
tol : float, optional
Tolerance for singular values. Zero (default) means machine precision.
Notes
-----
This is a naive implementation using an eigensolver on A.H * A or
A * A.H, depending on which one is more efficient.
"""
if not (isinstance(A, np.ndarray) or isspmatrix(A)):
A = np.asarray(A)
n, m = A.shape
if np.issubdtype(A.dtype, np.complexfloating):
herm = lambda x: x.T.conjugate()
eigensolver = eigs
else:
herm = lambda x: x.T
eigensolver = eigsh
if n > m:
X = A
XH = herm(A)
else:
XH = A
X = herm(A)
def matvec_XH_X(x):
return XH.dot(X.dot(x))
XH_X = LinearOperator(matvec=matvec_XH_X, dtype=X.dtype,
shape=(X.shape[1], X.shape[1]))
eigvals, eigvec = eigensolver(XH_X, k=k, tol=tol ** 2)
s = np.sqrt(eigvals)
if n > m:
v = eigvec
u = X.dot(v) / s
vh = herm(v)
else:
u = eigvec
vh = herm(X.dot(u) / s)
return u, s, vh
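# Illustrative sketch (added, not part of the original backport): _svds builds
# the singular values from an eigendecomposition of A.H*A (or A*A.H).  The 5x3
# matrix below is an arbitrary example with known singular values.
def _demo_svds():
    import numpy as np
    A = np.zeros((5, 3))
    A[0, 0], A[1, 1], A[2, 2] = 5.0, 3.0, 1.0
    u, s, vh = _svds(A, k=2)
    return np.sort(s)                      # expected to be close to [3, 5]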
# check if backport is actually needed:
if scipy.version.version >= LooseVersion('0.10'):
from scipy.sparse.linalg import eigs, eigsh, svds
else:
eigs, eigsh, svds = _eigs, _eigsh, _svds
| bsd-3-clause |
DTOcean/dtocean-core | dtocean_core/interfaces/plots_array_layout.py | 1 | 19809 | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2018 Mathew Topper
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Created on Wed Apr 06 15:59:04 2016
.. moduleauthor:: Mathew Topper <[email protected]>
"""
import numpy as np
import matplotlib.pyplot as plt
from descartes import PolygonPatch
from shapely.geometry import Point, Polygon
from . import PlotInterface
BLUE = '#6699cc'
GREEN = '#32CD32'
RED = '#B20000'
GREY = '#999999'
class ArrayLeasePlot(PlotInterface):
@classmethod
def get_name(cls):
'''A class method for the common name of the interface.
Returns:
str: A unique string
'''
return "Lease Area Array Layout"
@classmethod
def declare_inputs(cls):
'''A class method to declare all the variables required as inputs by
this interface.
Returns:
list: List of inputs identifiers
Example:
The returned value can be None or a list of identifier strings which
appear in the data descriptions. For example::
inputs = ["My:first:variable",
"My:second:variable",
]
'''
input_list = ["site.lease_boundary",
"project.layout",
"options.boundary_padding"
]
return input_list
@classmethod
def declare_optional(cls):
option_list = ["options.boundary_padding"]
return option_list
@classmethod
def declare_id_map(self):
'''Declare the mapping for variable identifiers in the data description
to local names for use in the interface. This helps isolate changes in
        the data description or interface from affecting the other.
Returns:
dict: Mapping of local to data description variable identifiers
Example:
The returned value must be a dictionary containing all the inputs and
outputs from the data description and a local alias string. For
example::
id_map = {"var1": "My:first:variable",
"var2": "My:second:variable",
"var3": "My:third:variable"
}
'''
id_map = {"lease_poly": "site.lease_boundary",
"layout": "project.layout",
"padding": "options.boundary_padding"
}
return id_map
def connect(self):
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1, aspect='equal')
short_layout = {key.replace("device", ""): value
for key, value in self.data.layout.items()}
plot_point_dict(ax1, short_layout, "k+")
plot_lease_boundary(ax1, self.data.lease_poly, self.data.padding)
ax1.margins(0.1, 0.1)
ax1.autoscale_view()
xlabel = 'UTM x [$m$]'
ylabel = 'UTM y [$m$]'
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.ticklabel_format(useOffset=False)
plt.xticks(rotation=30, ha='right')
plt.title("Array Layout in Lease Area")
plt.tight_layout()
self.fig_handle = plt.gcf()
return
class ArrayLeasePlotNumbers(PlotInterface):
@classmethod
def get_name(cls):
'''A class method for the common name of the interface.
Returns:
str: A unique string
'''
return "Lease Area Array Layout (With Device Numbers)"
@classmethod
def declare_inputs(cls):
'''A class method to declare all the variables required as inputs by
this interface.
Returns:
list: List of inputs identifiers
Example:
The returned value can be None or a list of identifier strings which
appear in the data descriptions. For example::
inputs = ["My:first:variable",
"My:second:variable",
]
'''
input_list = ["site.lease_boundary",
"project.layout",
"options.boundary_padding"
]
return input_list
@classmethod
def declare_optional(cls):
option_list = ["options.boundary_padding"]
return option_list
@classmethod
def declare_id_map(self):
'''Declare the mapping for variable identifiers in the data description
to local names for use in the interface. This helps isolate changes in
        the data description or interface from affecting the other.
Returns:
dict: Mapping of local to data description variable identifiers
Example:
The returned value must be a dictionary containing all the inputs and
outputs from the data description and a local alias string. For
example::
id_map = {"var1": "My:first:variable",
"var2": "My:second:variable",
"var3": "My:third:variable"
}
'''
id_map = {"lease_poly": "site.lease_boundary",
"layout": "project.layout",
"padding": "options.boundary_padding"
}
return id_map
def connect(self):
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1, aspect='equal')
short_layout = {key.replace("device", ""): value
for key, value in self.data.layout.items()}
plot_point_dict(ax1, short_layout, "k+", annotate=True)
plot_lease_boundary(ax1, self.data.lease_poly, self.data.padding)
ax1.margins(0.1, 0.1)
ax1.autoscale_view()
xlabel = 'UTM x [$m$]'
ylabel = 'UTM y [$m$]'
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.ticklabel_format(useOffset=False)
plt.xticks(rotation=30, ha='right')
plt.title("Array Layout in Lease Area")
plt.tight_layout()
self.fig_handle = plt.gcf()
return
class ArrayCablesPlot(PlotInterface):
@classmethod
def get_name(cls):
'''A class method for the common name of the interface.
Returns:
str: A unique string
'''
return "Array Cable Layout"
@classmethod
def declare_inputs(cls):
'''A class method to declare all the variables required as inputs by
this interface.
Returns:
list: List of inputs identifiers
Example:
The returned value can be None or a list of identifier strings which
appear in the data descriptions. For example::
inputs = ["My:first:variable",
"My:second:variable",
]
'''
input_list = ["site.lease_boundary",
"corridor.landing_point",
"project.layout",
"project.cable_routes",
"project.substation_layout"
]
return input_list
@classmethod
def declare_optional(cls):
option_list = ["site.lease_boundary"]
return option_list
@classmethod
def declare_id_map(self):
'''Declare the mapping for variable identifiers in the data description
to local names for use in the interface. This helps isolate changes in
        the data description or interface from affecting the other.
Returns:
dict: Mapping of local to data description variable identifiers
Example:
The returned value must be a dictionary containing all the inputs and
outputs from the data description and a local alias string. For
example::
id_map = {"var1": "My:first:variable",
"var2": "My:second:variable",
"var3": "My:third:variable"
}
'''
id_map = {"lease_poly": "site.lease_boundary",
"landing_point": "corridor.landing_point",
"layout": "project.layout",
"cable_routes": "project.cable_routes",
'substation_layout': 'project.substation_layout',
}
return id_map
def connect(self):
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1, aspect='equal')
short_layout = {key.replace("device", ""): value
for key, value in self.data.layout.items()}
landing_dict = {"Export Cable Landing": self.data.landing_point}
lplot = plot_point_dict(ax1,
landing_dict,
'or',
"Export Cable Landing")
dplot = plot_point_dict(ax1, short_layout, "k+", "Devices")
splot = plot_point_dict(ax1,
self.data.substation_layout,
"gs",
"Collection Points")
plot_cables(ax1, self.data.cable_routes)
if self.data.lease_poly is not None:
plot_lease_boundary(ax1, self.data.lease_poly)
ax1.margins(0.1, 0.1)
ax1.autoscale_view()
xlabel = 'UTM x [$m$]'
ylabel = 'UTM y [$m$]'
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.ticklabel_format(useOffset=False)
plt.xticks(rotation=30, ha='right')
plt.legend(handles=[dplot, splot, lplot],
bbox_to_anchor=(1.05, 1),
loc=2,
borderaxespad=0.,
numpoints=1)
plt.title("Electrical Cable Layout")
plt.tight_layout(rect=[0, 0.04, 0.84, 0.96])
self.fig_handle = plt.gcf()
return
class ArrayFoundationsPlot(PlotInterface):
@classmethod
def get_name(cls):
'''A class method for the common name of the interface.
Returns:
str: A unique string
'''
return "Array Foundations Layout"
@classmethod
def declare_inputs(cls):
'''A class method to declare all the variables required as inputs by
this interface.
Returns:
list: List of inputs identifiers
Example:
The returned value can be None or a list of identifier strings which
appear in the data descriptions. For example::
inputs = ["My:first:variable",
"My:second:variable",
]
'''
input_list = ["site.lease_boundary",
"project.layout",
"project.substation_layout",
"project.foundations_component_data"
]
return input_list
@classmethod
def declare_optional(cls):
option_list = ["site.lease_boundary"]
return option_list
@classmethod
def declare_id_map(self):
'''Declare the mapping for variable identifiers in the data description
to local names for use in the interface. This helps isolate changes in
        the data description or interface from affecting the other.
Returns:
dict: Mapping of local to data description variable identifiers
Example:
The returned value must be a dictionary containing all the inputs and
outputs from the data description and a local alias string. For
example::
id_map = {"var1": "My:first:variable",
"var2": "My:second:variable",
"var3": "My:third:variable"
}
'''
id_map = {"lease_poly": "site.lease_boundary",
"layout": "project.layout",
'substation_layout': 'project.substation_layout',
"foundations_components":
"project.foundations_component_data",
}
return id_map
def connect(self):
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1, aspect='equal')
short_layout = {key.replace("device", ""): value
for key, value in self.data.layout.items()}
dplot = plot_point_dict(ax1,
short_layout,
"k+",
"Devices")
splot = plot_point_dict(ax1,
self.data.substation_layout,
"gs",
"Collection Points")
foundation_marker = {'shallowfoundation': "b_",
'gravity': "r^",
'pile': "co",
'suctioncaisson': "mp",
'directembedment': "y|",
'drag': "kv"}
foundation_name = {'shallowfoundation': "Shallow",
'gravity': "Gravity",
'pile': "Pile",
'suctioncaisson': "Suction Caisson",
'directembedment': "Direct Embedment",
'drag': "Drag"}
foundations_locations = self.data.foundations_components[
["Type", "UTM X", "UTM Y"]]
locations_groups = foundations_locations.groupby("Type")
foundations_handles = []
for name, group in locations_groups:
plot_marker = foundation_marker[name]
plot_name = foundation_name[name]
coords = group[["UTM X", "UTM Y"]].values
plot_dict = {i: Point(xy) for i, xy in enumerate(coords)}
fplot = plot_point_dict(ax1,
plot_dict,
plot_marker,
plot_name,
markersize=8)
foundations_handles.append(fplot)
if self.data.lease_poly is not None:
plot_lease_boundary(ax1, self.data.lease_poly)
ax1.margins(0.1, 0.1)
ax1.autoscale_view()
xlabel = 'UTM x [$m$]'
ylabel = 'UTM y [$m$]'
all_handles = [dplot, splot] + foundations_handles
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.ticklabel_format(useOffset=False)
plt.xticks(rotation=30, ha='right')
plt.legend(handles=all_handles,
bbox_to_anchor=(1.05, 1),
loc=2,
borderaxespad=0.,
numpoints=1)
plt.title("Foundations Layout")
plt.tight_layout()
self.fig_handle = plt.gcf()
return
def plot_point_dict(ax,
layout,
marker,
label=None,
annotate=False,
markersize=None,
text_weight=None,
text_size=None):
if text_weight is None: text_weight = 'normal'
if text_size is None: text_size = 'medium'
x = []
y = []
for coords in layout.itervalues():
x.append(coords.x)
y.append(coords.y)
kwargs = {"mew": 2,
"markersize": 10}
if label is not None: kwargs["label"] = label
if markersize is not None: kwargs["markersize"] = markersize
pplot = ax.plot(x, y, marker, **kwargs)
if not annotate: return pplot[0]
for key, point in layout.iteritems():
coords = list(point.coords)[0]
ax.annotate(str(key),
xy=coords[:2],
xytext=(0, 10),
xycoords='data',
textcoords='offset pixels',
horizontalalignment='center',
weight=text_weight,
size=text_size)
return pplot[0]
def plot_lease_boundary(ax, lease_boundary, padding=None):
if padding is not None:
outer_coords = list(lease_boundary.exterior.coords)
inner_boundary = lease_boundary.buffer(-padding)
inner_coords = list(inner_boundary.exterior.coords)
        # Check if the orientations of the two polygons are the same
if clockwise(*zip(*inner_coords)) == clockwise(*zip(*outer_coords)):
inner_coords = inner_coords[::-1]
lease_boundary = Polygon(outer_coords, [inner_coords])
patch = PolygonPatch(lease_boundary,
fc=RED,
fill=True,
alpha=0.3,
ls=None)
ax.add_patch(patch)
patch = PolygonPatch(lease_boundary,
ec=BLUE,
fill=False,
linewidth=2)
ax.add_patch(patch)
maxy = lease_boundary.bounds[3] + 50.
centroid = np.array(lease_boundary.centroid)
ax.annotate("Lease Area",
xy=(centroid[0], maxy),
horizontalalignment='center',
verticalalignment='bottom',
weight="bold",
size='large')
return
def plot_cables(ax, cable_routes):
cables = cable_routes.groupby("Marker")
xmax = -np.inf
ymax = -np.inf
xmin = np.inf
ymin = np.inf
for name, cable in cables:
x = cable["UTM X"]
y = cable["UTM Y"]
xmax = max(list(x) + [xmax])
ymax = max(list(y) + [ymax])
xmin = min(list(x) + [xmin])
ymin = min(list(y) + [ymin])
line = plt.Line2D(x, y)
ax.add_line(line)
def clockwise(x, y):
""" Use the shoelace formula to determine whether the polygon points are
defined in a clockwise direction"""
# https://stackoverflow.com/a/1165943/3215152
# https://stackoverflow.com/a/19875560/3215152
if sum(x[i] * (y[i + 1] - y[i - 1]) for i in xrange(-1, len(x) - 1)) < 0:
return True
return False
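# Illustrative sketch (added, not part of the original module): the shoelace
# test above applied to a unit square.  The coordinate lists are arbitrary
# example values.
def _demo_clockwise():
    ccw_x = [0.0, 1.0, 1.0, 0.0]
    ccw_y = [0.0, 0.0, 1.0, 1.0]
    # Counter-clockwise ordering -> False; the reversed ordering -> True.
    return clockwise(ccw_x, ccw_y), clockwise(ccw_x[::-1], ccw_y[::-1])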
| gpl-3.0 |
fedhere/SESNCfAlib | gps.py | 1 | 6537 |
# Added note: this fragment relies on several module-level names that are not
# defined in the snippet itself.  The standard imports below are safe
# assumptions (np, pl, os and glob are used throughout); snstuff and
# templutils are project-specific SESNCfAlib modules and must be importable,
# and hostebmv is expected to be set by the caller.
import os
import glob
import numpy as np
import pylab as pl
OPT = False  # assumed default: skip the GP hyperparameter optimisation step
def readin(f):
thissn=snstuff.mysn(f, addlit=True)
################read supernova data and check metadata
lc, flux, dflux, snname = thissn.loadsn(f, fnir=True, verbose=True,
lit=True, addlit=True)
#thissn.printsn()
#raw_input()
thissn.readinfofileall(verbose=False, earliest=False, loose=True)
#thissn.printsn()
Dl = float(thissn.metadata['luminosity distance Mpc'])
su = templutils.setupvars()
thissn.setsn(thissn.metadata['Type'], thissn.Vmax)
myebmv=su.ebmvs[thissn.snnameshort]
print ("E(B-V)", myebmv)
myebmv+=hostebmv#su.ebmvcfa[thissn.snnameshort]
print ("E(B-V) total", myebmv)
Vmax = thissn.Vmax
thissn.setphot()
thissn.getphot(myebmv)
thissn.setphase()
thissn.printsn(photometry=False)
#thissn.printsn()
fig = pl.figure(figsize=(5,3))
thissn.plotsn(photometry=True, show=True, fig = fig)
return thissn
def skgp (x, y, yerr, phases, t0):
from sklearn.gaussian_process import GaussianProcess
XX = np.atleast_2d(np.log(x-min(x)+1)).T
#XX = np.atleast_2d(x).T
gphere = GaussianProcess(corr='squared_exponential',
theta0=t0,
thetaL=t0*0.1,
thetaU=t0*10,
nugget=(yerr / y) ** 2,
random_start=100)
gphere.fit(XX, y)
    xx = np.atleast_2d(np.log(phases-min(x)+1)).T
#xx = np.atleast_2d(phases).T
y_pred, MSE = gphere.predict(xx, eval_MSE=True)
sigma = np.sqrt(MSE)
return (y_pred, sigma)
def georgegp (x, y, yerr, phases, kc, kc1):
import george
# Set up the Gaussian process.
kernel = kc1 * 10 * kernelfct(kc)#ExpSquaredKernel(1.0)
gp = george.GP(kernel)
#print ("wtf", gp.kernel)
# adding a small random offset to the phase so that i never have
# 2 measurements at the same time which would break the GP
# Pre-compute the factorization of the matrix.
XX = x
XX = np.log(XX-XX.min()+1)
# You need to compute the GP once before starting the optimization.
gp.compute(XX, yerr)
# Print the initial ln-likelihood.
#print("here", gp.lnlikelihood(y))
#print("here", gp.grad_lnlikelihood(y))
# Run the optimization routine.
if OPT:
p0 = gp.kernel.vector
results = op.minimize(nll, p0, jac=grad_nll, args=(gp))
print results.x
# Update the kernel and print the final log-likelihood.
gp.kernel[:] = results.x
#print(gp.lnlikelihood(y))
# gp.compute(XX, yerr)
# Compute the log likelihood.
#print(gp.lnlikelihood(y))
#t = np.linspace(0, 10, 500)
##xx = np.log(phases-min(X)+1)
xx = phases
xx = np.log(xx-x.min()+1)
mu, cov = gp.predict(y, xx)
std = np.sqrt(np.diag(cov))
return (mu, std)
import scipy.optimize as op
# Define the objective function (negative log-likelihood in this case).
def nll(p, gp):
# Update the kernel parameters and compute the likelihood.
gp.kernel[:] = p
ll = gp.lnlikelihood(y, quiet=True)
# The scipy optimizer doesn't play well with infinities.
return -ll if np.isfinite(ll) else 1e25
# And the gradient of the objective function.
def grad_nll(p, gp):
# Update the kernel parameters and compute the likelihood.
print ("wtf2", gp.kernel)
gp.kernel[:] = p
print (gp.kernel[:])
print -gp.grad_lnlikelihood(y, quiet=True)
return -gp.grad_lnlikelihood(y, quiet=True)
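# Note (added comment): nll and grad_nll rely on a module-level array `y`
# rather than taking the data as an argument, so the OPT optimisation branch
# in georgegp only works when such a global is defined (e.g. in an interactive
# session); no global `y` is set in this fragment.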
def getskgpreds(ts, x, y, yerr, phases, fig = None):
t0, t1 = ts
if t0 ==0 or t1==0:
return 1e9
#print (t0,t1)
gp1, gp2 = georgegp(x, y, yerr, x, t0, t1)
s1= sum(((gp1-y)/yerr)**2)/len(y)
#pl.figure(figsize=(1,3))
#pl.plot(x, gp1,'*')
gp1, gp2 = georgegp(x, y, yerr, phases, t0, t1)
s2= sum(np.abs((gp1[2:]+gp1[:-2]-2*gp1[1:-1])/\
                   (np.diff(phases)[1:]+np.diff(phases)[:-1])))
print ("%.3f"%t0, "%.3f"%t1, "%.1f"%s1, "%.3f"%s2, s1*s2)
if fig:
pl.errorbar(x,y,yerr=yerr,fmt='.')
pl.plot(phases, gp1,'-')
pl.fill_between(phases, gp1-gp2, gp1+gp2, color='k')
pl.title("%.3f %.3f %.3f"%(t0, t1, (s1*s2)), fontsize=15)
pl.ylim(pl.ylim()[1], pl.ylim()[0])
    if np.isfinite(s1*s2) and not np.isnan(s1*s2):
return s1*s2
return 1e9
def kernelfct(kc):
from george.kernels import ExpSquaredKernel, WhiteKernel, ExpKernel, Matern32Kernel
return ExpSquaredKernel(kc)# Matern32Kernel(kc)
from scipy import stats
sn = '08D'
b='V'
def findgp(sn, b):
fall = glob.glob(os.getenv('SESNPATH')+'/finalphot/*'+sn+'*[cf]')
if len(fall)>0:
fall[-1] = [fall[-1]] + \
[ff for ff in glob.glob(os.environ['SESNPATH']+\
"/literaturedata/phot/*"+sn+".*[cf]")]
else: fall = [[ff for ff in glob.glob(os.environ['SESNPATH']+"/literaturedata/phot/*"+sn+".*[cf]")]]
f = fall[0]
if not isinstance (f, basestring):
f=f[0]
    # Added fix: thissn is otherwise undefined in this function; readin
    # (defined above but never called in this fragment) presumably loads the
    # supernova object first.
    thissn = readin(f)
    x = thissn.photometry[b]['phase']
x+=0.01*np.random.randn(len(x))
y= thissn.photometry[b]['mag']
yerr= thissn.photometry[b]['dmag']
phases = np.arange(x.min(),x.max(),0.1)
if x.max()<=30:
if x.min()<=-15:
x15 = np.where(np.abs(x+15)==np.abs(x+15).min())[0]
print x15, y[x15[0]]+0.5
x = np.concatenate([x,[30]])
y = np.concatenate([y,[y[x15[0]]+0.5]])
yerr = np.concatenate([yerr,[0.5]])
print (x,y,yerr)
elif (x>=15).sum()>1:
slope, intercept, r_value, p_value, std_err = stats.linregress(x[x>=15],y[x>=15])
x = np.concatenate([x,[30]])
y = np.concatenate([y,[slope*30.+intercept]])
yerr = np.concatenate([yerr,[yerr.max()*2]])
print (x,y,yerr)
else:
return -1
#fig = pl.figure(figsize=(10,3))
results = op.minimize(getskgpreds, (0.4,1.0), args = (x,y,yerr,phases), bounds=((0,None),(0,None)), tol=1e-8)
print (results.x)
#t1s = np.exp(np.arange(-2,2,0.5))
#for tt in np.exp(np.arange(-2,2,0.5)):
# fig = pl.figure(figsize=(10,3))
# for i,ttt in enumerate(t1s):
# ax = fig.add_subplot(len(t1s),1,i+1)
# getskgpreds(x,y,yerr,phases,tt,ttt)
fig = pl.figure(figsize=(10,3))
getskgpreds(results.x,x,y,yerr,phases, fig)
pl.ylabel(b+" magnitude")
| mit |
YuxingZhang/prescription | visualize/tsne.py | 1 | 5462 | #
# tsne.py
#
# Implementation of t-SNE in Python. The implementation was tested on Python 2.7.10, and it requires a working
# installation of NumPy. The implementation comes with an example on the MNIST dataset. In order to plot the
# results of this example, a working installation of matplotlib is required.
#
# The example can be run by executing: `ipython tsne.py`
#
#
# Created by Laurens van der Maaten on 20-12-08.
# Copyright (c) 2008 Tilburg University. All rights reserved.
import numpy as Math
import pylab as Plot
import pickle as pk
import sys
def Hbeta(D = Math.array([]), beta = 1.0):
"""Compute the perplexity and the P-row for a specific value of the precision of a Gaussian distribution."""
# Compute P-row and corresponding perplexity
P = Math.exp(-D.copy() * beta);
sumP = sum(P);
H = Math.log(sumP) + beta * Math.sum(D * P) / sumP;
P = P / sumP;
return H, P;
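# Illustrative sketch (added, not part of the original script): Hbeta turns a
# row of squared distances and a precision beta into an entropy H and a
# normalised Gaussian affinity row P; exp(H) is the effective number of
# neighbours that x2p below matches to the requested perplexity.  The distance
# values are arbitrary.
def _demo_Hbeta():
    D = Math.array([0.5, 1.0, 2.0])
    H, P = Hbeta(D, beta=1.0)
    return Math.exp(H), P.sum()            # P.sum() should be 1.0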
def x2p(X = Math.array([]), tol = 1e-5, perplexity = 30.0):
"""Performs a binary search to get P-values in such a way that each conditional Gaussian has the same perplexity."""
# Initialize some variables
print "Computing pairwise distances..."
(n, d) = X.shape;
sum_X = Math.sum(Math.square(X), 1);
D = Math.add(Math.add(-2 * Math.dot(X, X.T), sum_X).T, sum_X);
P = Math.zeros((n, n));
beta = Math.ones((n, 1));
logU = Math.log(perplexity);
# Loop over all datapoints
for i in range(n):
# Print progress
if i % 500 == 0:
print "Computing P-values for point ", i, " of ", n, "..."
# Compute the Gaussian kernel and entropy for the current precision
betamin = -Math.inf;
betamax = Math.inf;
Di = D[i, Math.concatenate((Math.r_[0:i], Math.r_[i+1:n]))];
(H, thisP) = Hbeta(Di, beta[i]);
# Evaluate whether the perplexity is within tolerance
Hdiff = H - logU;
tries = 0;
while Math.abs(Hdiff) > tol and tries < 50:
# If not, increase or decrease precision
if Hdiff > 0:
betamin = beta[i].copy();
if betamax == Math.inf or betamax == -Math.inf:
beta[i] = beta[i] * 2;
else:
beta[i] = (beta[i] + betamax) / 2;
else:
betamax = beta[i].copy();
if betamin == Math.inf or betamin == -Math.inf:
beta[i] = beta[i] / 2;
else:
beta[i] = (beta[i] + betamin) / 2;
# Recompute the values
(H, thisP) = Hbeta(Di, beta[i]);
Hdiff = H - logU;
tries = tries + 1;
# Set the final row of P
P[i, Math.concatenate((Math.r_[0:i], Math.r_[i+1:n]))] = thisP;
# Return final P-matrix
print "Mean value of sigma: ", Math.mean(Math.sqrt(1 / beta));
return P;
def pca(X = Math.array([]), no_dims = 50):
"""Runs PCA on the NxD array X in order to reduce its dimensionality to no_dims dimensions."""
print "Preprocessing the data using PCA..."
(n, d) = X.shape;
X = X - Math.tile(Math.mean(X, 0), (n, 1));
(l, M) = Math.linalg.eig(Math.dot(X.T, X));
Y = Math.dot(X, M[:,0:no_dims]);
return Y;
def tsne(X = Math.array([]), no_dims = 2, initial_dims = 50, perplexity = 30.0):
"""Runs t-SNE on the dataset in the NxD array X to reduce its dimensionality to no_dims dimensions.
The syntaxis of the function is Y = tsne.tsne(X, no_dims, perplexity), where X is an NxD NumPy array."""
# Check inputs
if isinstance(no_dims, float):
print "Error: array X should have type float.";
return -1;
if round(no_dims) != no_dims:
print "Error: number of dimensions should be an integer.";
return -1;
# Initialize variables
X = pca(X, initial_dims).real;
(n, d) = X.shape;
max_iter = 1000;
initial_momentum = 0.5;
final_momentum = 0.8;
eta = 500;
min_gain = 0.01;
Y = Math.random.randn(n, no_dims);
dY = Math.zeros((n, no_dims));
iY = Math.zeros((n, no_dims));
gains = Math.ones((n, no_dims));
# Compute P-values
P = x2p(X, 1e-5, perplexity);
P = P + Math.transpose(P);
P = P / Math.sum(P);
P = P * 4; # early exaggeration
P = Math.maximum(P, 1e-12);
# Run iterations
for iter in range(max_iter):
# Compute pairwise affinities
sum_Y = Math.sum(Math.square(Y), 1);
num = 1 / (1 + Math.add(Math.add(-2 * Math.dot(Y, Y.T), sum_Y).T, sum_Y));
num[range(n), range(n)] = 0;
Q = num / Math.sum(num);
Q = Math.maximum(Q, 1e-12);
# Compute gradient
PQ = P - Q;
for i in range(n):
dY[i,:] = Math.sum(Math.tile(PQ[:,i] * num[:,i], (no_dims, 1)).T * (Y[i,:] - Y), 0);
# Perform the update
if iter < 20:
momentum = initial_momentum
else:
momentum = final_momentum
gains = (gains + 0.2) * ((dY > 0) != (iY > 0)) + (gains * 0.8) * ((dY > 0) == (iY > 0));
gains[gains < min_gain] = min_gain;
iY = momentum * iY - eta * (gains * dY);
Y = Y + iY;
Y = Y - Math.tile(Math.mean(Y, 0), (n, 1));
# Compute current value of cost function
if (iter + 1) % 10 == 0:
C = Math.sum(P * Math.log(P / Q));
print "Iteration ", (iter + 1), ": error is ", C
# Stop lying about P-values
if iter == 100:
P = P / 4;
# Return solution
return Y;
if __name__ == "__main__":
print "Run Y = tsne.tsne(X, no_dims, perplexity) to perform t-SNE on your dataset."
print "Running example on 2,500 MNIST digits..."
''' usage: python tsne.py <x_file_name> <y_file_name> <output_file_name>
'''
x_file = sys.argv[1]
y_file = sys.argv[2]
output_file = sys.argv[3]
X = Math.loadtxt(x_file);
labels = Math.loadtxt(y_file);
Y = tsne(X, 2, 50, 20.0);
f = open(output_file, 'w')
pk.dump(Y, f)
f.close()
#Plot.scatter(Y[:,0], Y[:,1], 20, labels);
#Plot.show();
| bsd-3-clause |
Og192/Python | machine-learning-algorithms/rnn/vanillaRnn.py | 4 | 4077 | from __future__ import print_function, division
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
num_epoches = 100
total_series_length = 50000
truncated_backprop_length = 15
state_size = 4
num_classes = 2
echo_step = 3
batch_size = 5
num_batches = total_series_length // batch_size // truncated_backprop_length
def generateData():
x = np.array(np.random.choice(2, total_series_length, p = [0.5, 0.5]))
y = np.roll(x, echo_step)
y[0:echo_step] = 0
x = x.reshape((batch_size, -1))
y = y.reshape((batch_size, -1))
return (x, y)
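# Illustrative sketch (added, not part of the original script): the labels are
# the inputs delayed by echo_step, with the first echo_step labels zeroed, so
# the RNN has to remember its input for 3 steps.  The toy sequence below is an
# arbitrary example.
def _demo_echo_shift():
    x_demo = np.array([1, 0, 1, 1, 0, 0, 1, 0])
    y_demo = np.roll(x_demo, echo_step)
    y_demo[0:echo_step] = 0
    return x_demo, y_demo                  # y_demo == [0, 0, 0, 1, 0, 1, 1, 0]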
def plot(loss_list, predictions_series, batchX, batchY):
plt.subplot(2, 3, 1)
plt.cla()
plt.plot(loss_list)
for batch_series_idx in range(5):
one_hot_output_series = np.array(predictions_series)[:, batch_series_idx, :]
single_output_series = np.array([(1 if out[0] < 0.5 else 0) for out in one_hot_output_series])
plt.subplot(2, 3, batch_series_idx + 2)
plt.cla()
plt.axis([0, truncated_backprop_length, 0, 2])
left_offset = range(truncated_backprop_length)
plt.bar(left_offset, batchX[batch_series_idx, :], width = 1, color = "blue")
plt.bar(left_offset, batchY[batch_series_idx, :] * 5, width = 1, color = "red")
plt.bar(left_offset, single_output_series * 0.3, width = 1, color = "green")
plt.draw()
plt.pause(0.0001)
batchX_placeholder = tf.placeholder(tf.float32, [batch_size, truncated_backprop_length])
batchY_placeholder = tf.placeholder(tf.int32, [batch_size, truncated_backprop_length])
init_state = tf.placeholder(tf.float32, [batch_size, state_size])
W = tf.Variable(np.random.rand(state_size + 1, state_size), dtype=tf.float32)
b = tf.Variable(np.zeros((1, state_size)), dtype=tf.float32)
W2 = tf.Variable(np.random.rand(state_size, num_classes), dtype = tf.float32)
b2 = tf.Variable(np.zeros((1, num_classes)), dtype=tf.float32)
inputs_series = tf.unpack(batchX_placeholder, axis = 1)
labels_series = tf.unpack(batchY_placeholder, axis = 1)
#rnn layer
current_state = init_state
states_series = []
for current_input in inputs_series:
current_input = tf.reshape(current_input, [batch_size, 1])
input_and_state_concatenated = tf.concat(1, [current_input, current_state])
next_state = tf.tanh(tf.matmul(input_and_state_concatenated, W) + b)
states_series.append(next_state)
current_state = next_state
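# Added note: the loop above unrolls the vanilla RNN for
# truncated_backprop_length steps.  Concatenating the input and the previous
# state before the single matmul is equivalent to
#     s_t = tanh(x_t * W_x + s_{t-1} * W_s + b)
# with W stacked as [W_x; W_s], which is why W has state_size + 1 rows.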
# softmax layer
logits_series = [tf.matmul(state, W2) + b2 for state in states_series]
predictions_series = [tf.nn.softmax(logits) for logits in logits_series]
losses = [tf.nn.sparse_softmax_cross_entropy_with_logits(logits, labels) for logits, labels in zip(logits_series, labels_series)]
total_loss = tf.reduce_mean(losses)
train_step = tf.train.AdagradOptimizer(0.3).minimize(total_loss)
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
plt.ion()
plt.figure()
plt.show()
loss_list = []
for epoch_idx in range(num_epoches):
#generate data, input, labels, and initial state vector.
x, y = generateData()
_current_state = np.zeros((batch_size, state_size))
print("New data, epoch", epoch_idx)
for batch_idx in range(num_batches):
start_idx = batch_idx * truncated_backprop_length
end_idx = start_idx + truncated_backprop_length
batchX = x[:, start_idx:end_idx]
batchY = y[:, start_idx:end_idx]
_total_loss, _train_step, _current_state, _predictions_series = sess.run(
[total_loss, train_step, current_state, predictions_series],
feed_dict={
batchX_placeholder:batchX,
batchY_placeholder:batchY,
init_state:_current_state
}
)
loss_list.append(_total_loss)
if batch_idx % 100 == 0:
print("Step", batch_idx, "Loss", _total_loss)
plot(loss_list, _predictions_series, batchX, batchY)
plt.ioff()
plt.show() | gpl-2.0 |
nmabhi/Webface | demos/web/websocket-server-persist.py | 1 | 17183 | #!/usr/bin/env python2
#
# Copyright 2015-2016 Carnegie Mellon University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
fileDir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(fileDir, "..", ".."))
import txaio
txaio.use_twisted()
from autobahn.twisted.websocket import WebSocketServerProtocol, \
WebSocketServerFactory
from twisted.internet import task, defer
from twisted.internet.ssl import DefaultOpenSSLContextFactory
from twisted.python import log
import argparse
import cv2
import imagehash
import json
from PIL import Image
import numpy as np
import os
import StringIO
import urllib
import base64
from sklearn.decomposition import PCA
from sklearn.grid_search import GridSearchCV
from sklearn.manifold import TSNE
from sklearn.svm import SVC
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import openface
import pickle, pprint
import json
from numpy import genfromtxt
modelDir = os.path.join(fileDir, '..', '..', 'models')
dlibModelDir = os.path.join(modelDir, 'dlib')
openfaceModelDir = os.path.join(modelDir, 'openface')
# For TLS connections
tls_crt = os.path.join(fileDir, 'tls', 'server.crt')
tls_key = os.path.join(fileDir, 'tls', 'server.key')
parser = argparse.ArgumentParser()
parser.add_argument('--dlibFacePredictor', type=str, help="Path to dlib's face predictor.",
default=os.path.join(dlibModelDir, "shape_predictor_68_face_landmarks.dat"))
parser.add_argument('--networkModel', type=str, help="Path to Torch network model.",
default=os.path.join(openfaceModelDir, 'nn4.small2.v1.t7'))
parser.add_argument('--imgDim', type=int,
help="Default image dimension.", default=96)
parser.add_argument('--cuda', action='store_true')
parser.add_argument('--unknown', type=bool, default=False,
help='Try to predict unknown people')
parser.add_argument('--port', type=int, default=9000,
help='WebSocket Port')
args = parser.parse_args()
align = openface.AlignDlib(args.dlibFacePredictor)
net = openface.TorchNeuralNet(args.networkModel, imgDim=args.imgDim,
cuda=args.cuda)
def loadImages():
try:
with open('images.pkl', 'rb') as f:
# if sys.version_info[0] < 3:
images = pickle.load(f)
return images
except Exception as e:
return {}
#my_data = genfromtxt('people.csv', delimiter=',')
def loadModel():
# model = open('model.pkl', 'r')
# svm_persisted = pickle.load('model.pkl')
# output.close()
# return svm_persisted
# return True
try:
with open('model.pkl', 'rb') as f:
# if sys.version_info[0] < 3:
mod = pickle.load(f)
return mod
except Exception as e:
return None
def loadPeople():
try:
with open('people.pkl', 'rb') as f:
mod = pickle.load(f)
return mod
except Exception as e:
return []
# def calculateNumberOfImagesPerPerson(data_images):
# images_count = {}
# # print data_images
# for image in data_images:
# # print image, data_images[image]
# # print data_images[image].identity
# if(data_images[image].identity in images_count):
# images_count[data_images[image].identity] += 1
# else:
# images_count[data_images[image].identity] = 1
# # print images_count
# sorted_images_count = {}
# for key in sorted(images_count.iterkeys(), reverse=True):
# sorted_images_count[key] = images_count[key]
# # print sorted_images_count
# return sorted_images_count
class Face:
def __init__(self, rep, identity):
self.rep = rep
self.identity = identity
def __repr__(self):
return "{{id: {}, rep[0:5]: {}}}".format(
str(self.identity),
self.rep[0:5]
)
class OpenFaceServerProtocol(WebSocketServerProtocol):
def __init__(self):
super(OpenFaceServerProtocol, self).__init__()
self.images = loadImages()
self.training = True
self.people = loadPeople()
self.svm = loadModel()
print self.images,self.people
# self.images_count = calculateNumberOfImagesPerPerson(self.images)
# print self.people
# print self.images
if args.unknown:
self.unknownImgs = np.load("./examples/web/unknown.npy")
def onConnect(self, request):
print("Client connecting: {0}".format(request.peer))
self.training = True
def onOpen(self):
print("WebSocket connection open.")
# calculateNumberOfImagesPerPerson(self.images)
# print images_count
# msg = {
# "type": "INITIALIZE",
# "people": self.people,
# "images": calculateNumberOfImagesPerPerson(self.images)
# }
# self.sendMessage(json.dumps(msg))
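# onMessage dispatches on the JSON 'type' field sent by the browser client:
# ALL_STATE (restore images/people), NULL (echoed back), FRAME (process one
# video frame), TRAINING (toggle training mode), ADD_PERSON, UPDATE_IDENTITY,
# REMOVE_IMAGE, and REQ_TSNE (return a t-SNE plot of the stored embeddings).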
def onMessage(self, payload, isBinary):
raw = payload.decode('utf8')
msg = json.loads(raw)
print("Received {} message of length {}.".format(
msg['type'], len(raw)))
if msg['type'] == "ALL_STATE":
self.loadState(msg['images'], msg['training'], msg['people'])
print msg['images']
elif msg['type'] == "NULL":
self.sendMessage('{"type": "NULL"}')
elif msg['type'] == "FRAME":
print "Frame message Identity", msg['identity']
#print "Frame message Url",msg['dataURL']
f = open( 'url.py', 'w' )
f.write( 'url = ' + repr(msg['dataURL']) + '\n' )
f.close()
self.processFrame(msg['dataURL'], msg['identity'])
# print msg['identity']
self.sendMessage('{"type": "PROCESSED"}')
elif msg['type'] == "TRAINING":
self.training = msg['val']
if not self.training:
self.trainSVM()
elif msg['type'] == "ADD_PERSON":
if msg['val'].encode('ascii','ignore') not in self.people:
self.people.append(msg['val'].encode('ascii', 'ignore'))
self.people=self.people
#np.savetxt("people.csv", self.people, delimiter=",")
with open('people.pkl', 'w') as f:
pickle.dump(self.people, f)
print(self.people)
elif msg['type'] == "UPDATE_IDENTITY":
h = msg['hash'].encode('ascii', 'ignore')
if h in self.images:
self.images[h].identity = msg['idx']
if not self.training:
self.trainSVM()
else:
print("Image not found.")
elif msg['type'] == "REMOVE_IMAGE":
h = msg['hash'].encode('ascii', 'ignore')
if h in self.images:
del self.images[h]
if not self.training:
self.trainSVM()
else:
print("Image not found.")
elif msg['type'] == 'REQ_TSNE':
self.sendTSNE(msg['people'])
else:
print("Warning: Unknown message type: {}".format(msg['type']))
def onClose(self, wasClean, code, reason):
print("WebSocket connection closed: {0}".format(reason))
def loadState(self, jsImages, training, jsPeople):
self.training = training
print jsImages
for jsImage in jsImages:
h = jsImage['hash'].encode('ascii', 'ignore')
self.images[h] = Face(np.array(jsImage['representation']),
jsImage['identity'])
for jsPerson in jsPeople:
self.people.append(jsPerson.encode('ascii', 'ignore'))
if not training:
self.trainSVM()
def getData(self):
X = []
y = []
for img in self.images.values():
X.append(img.rep)
y.append(img.identity)
numIdentities = len(set(y + [-1])) - 1
if numIdentities == 0:
return None
if args.unknown:
numUnknown = y.count(-1)
numIdentified = len(y) - numUnknown
numUnknownAdd = (numIdentified / numIdentities) - numUnknown
if numUnknownAdd > 0:
print("+ Augmenting with {} unknown images.".format(numUnknownAdd))
for rep in self.unknownImgs[:numUnknownAdd]:
# print(rep)
X.append(rep)
y.append(-1)
X = np.vstack(X)
y = np.array(y)
return (X, y)
def sendTSNE(self, people):
d = self.getData()
if d is None:
return
else:
(X, y) = d
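# Compress the face embeddings with PCA (50 components) before running
# t-SNE; reducing dimensionality first is the usual speed/noise trade-off.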
X_pca = PCA(n_components=50).fit_transform(X, X)
tsne = TSNE(n_components=2, init='random', random_state=0)
X_r = tsne.fit_transform(X_pca)
yVals = list(np.unique(y))
colors = cm.rainbow(np.linspace(0, 1, len(yVals)))
# print(yVals)
plt.figure()
for c, i in zip(colors, yVals):
name = "Unknown" if i == -1 else people[i]
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], c=c, label=name)
plt.legend()
imgdata = StringIO.StringIO()
plt.savefig(imgdata, format='png')
imgdata.seek(0)
content = 'data:image/png;base64,' + \
urllib.quote(base64.b64encode(imgdata.buf))
msg = {
"type": "TSNE_DATA",
"content": content
}
self.sendMessage(json.dumps(msg))
def trainSVM(self):
print("+ Training SVM on {} labeled images.".format(len(self.images)))
d = self.getData()
print "Step 1"
if d is None:
self.svm = None
print "d none"
return
else:
print "d not none"
(X, y) = d
print "identity from getData()",y
numIdentities = len(set(y + [-1]))
print numIdentities,set(y+[-1])
if numIdentities <= 1:
print "numIdentities <=1"
return
param_grid = [
{'C': [1, 10, 100, 1000],
'kernel': ['linear']},
{'C': [1, 10, 100, 1000],
'gamma': [0.001, 0.0001],
'kernel': ['rbf']}
]
self.svm = GridSearchCV(SVC(C=1), param_grid, cv=5).fit(X, y)
print "Persisting Model", self.svm
self.persistModel(self.svm)
print "Loading Model"
#s = self.loadModel()
#pprint.pprint(s)
#self.svm=s
# svm_persisted = pickle.dumps(self.svm)
# self.svm = pickle.loads(svm_persisted)
def loadModel(self):
# model = open('model.pkl', 'r')
# svm_persisted = pickle.load('model.pkl')
# output.close()
# return svm_persisted
# return True
with open('model.pkl', 'rb') as f:
# if sys.version_info[0] < 3:
mod = pickle.load(f)
return mod
def persistModel(self, mod):
# output = open('model.pkl', 'w')
with open('model.pkl', 'wb') as f:
pickle.dump(mod, f)
# svm_persisted = pickle.dump(mod, 'model.pkl', protocol=2)
# output.close()
return True
def processFrame(self, dataURL, identity):
head = "data:image/jpeg;base64,"
assert(dataURL.startswith(head))
imgdata = base64.b64decode(dataURL[len(head):])
imgF = StringIO.StringIO()
imgF.write(imgdata)
imgF.seek(0)
img = Image.open(imgF)
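# Mirror the decoded frame horizontally and reorder its colour channels
# into the layout expected by the dlib/OpenFace pipeline below.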
buf = np.fliplr(np.asarray(img))
rgbFrame = np.zeros((300, 400, 3), dtype=np.uint8)
rgbFrame[:, :, 0] = buf[:, :, 2]
rgbFrame[:, :, 1] = buf[:, :, 1]
rgbFrame[:, :, 2] = buf[:, :, 0]
if not self.training:
annotatedFrame = np.copy(buf)
# cv2.imshow('frame', rgbFrame)
# if cv2.waitKey(1) & 0xFF == ord('q'):
# return
identities = []
if not self.training:
bbs = align.getAllFaceBoundingBoxes(rgbFrame)
else:
bb = align.getLargestFaceBoundingBox(rgbFrame)
bbs = [bb] if bb is not None else []
for bb in bbs:
# print(len(bbs))
landmarks = align.findLandmarks(rgbFrame, bb)
alignedFace = align.align(args.imgDim, rgbFrame, bb,
landmarks=landmarks,
landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
if alignedFace is None:
continue
phash = str(imagehash.phash(Image.fromarray(alignedFace)))
if phash in self.images:
identity = self.images[phash].identity
else:
rep = net.forward(alignedFace)
# print(rep)
if self.training:
self.images[phash] = Face(rep, identity)
# TODO: Transferring as a string is suboptimal.
# content = [str(x) for x in cv2.resize(alignedFace, (0,0),
# fx=0.5, fy=0.5).flatten()]
content = [str(x) for x in alignedFace.flatten()]
msg = {
"type": "NEW_IMAGE",
"hash": phash,
"content": content,
"identity": identity,
"representation": rep.tolist()
}
print "training",identity
self.sendMessage(json.dumps(msg))
#print "training",self.images
# with open('images.json', 'w') as fp:
# json.dump(self.images, fp)
with open('images.pkl', 'w') as f:
pickle.dump(self.images, f)
else:
if len(self.people) == 0:
identity = -1
elif len(self.people) == 1:
identity = 0
elif self.svm:
print self.svm.predict
identity = self.svm.predict(rep)[0]
print "predicted",identity
else:
print("hhh")
identity = -1
if identity not in identities:
identities.append(identity)
print identities
if not self.training:
bl = (bb.left(), bb.bottom())
tr = (bb.right(), bb.top())
cv2.rectangle(annotatedFrame, bl, tr, color=(153, 255, 204),
thickness=3)
for p in openface.AlignDlib.OUTER_EYES_AND_NOSE:
cv2.circle(annotatedFrame, center=landmarks[p], radius=3,
color=(102, 204, 255), thickness=-1)
if identity == -1:
if len(self.people) == 1:
name = self.people[0]
else:
name = "Unknown"
else:
name = self.people[identity]
print name
cv2.putText(annotatedFrame, name, (bb.left(), bb.top() - 10),
cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.75,
color=(152, 255, 204), thickness=2)
if not self.training:
msg = {
"type": "IDENTITIES",
"identities": identities
}
self.sendMessage(json.dumps(msg))
print identities
plt.figure()
plt.imshow(annotatedFrame)
plt.xticks([])
plt.yticks([])
imgdata = StringIO.StringIO()
plt.savefig(imgdata, format='png')
imgdata.seek(0)
content = 'data:image/png;base64,' + \
urllib.quote(base64.b64encode(imgdata.buf))
msg = {
"type": "ANNOTATED",
"content": content
}
plt.close()
self.sendMessage(json.dumps(msg))
def main(reactor):
log.startLogging(sys.stdout)
factory = WebSocketServerFactory()
factory.protocol = OpenFaceServerProtocol
ctx_factory = DefaultOpenSSLContextFactory(tls_key, tls_crt)
reactor.listenSSL(args.port, factory, ctx_factory)
return defer.Deferred()
if __name__ == '__main__':
task.react(main)
| apache-2.0 |
JeffAbrahamson/gtd | cluster_example.py | 1 | 6522 | #!/usr/bin/env python3
"""Cluster data by tf-idf.
"""
from __future__ import print_function
from sklearn import metrics
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn.cluster import KMeans, MiniBatchKMeans
from optparse import OptionParser
from time import time
import logging
import sys
import numpy as np
def load_data():
"""Load data.
"""
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
dataset = fetch_20newsgroups(subset='all', categories=categories,
shuffle=True, random_state=42)
print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()
return dataset
def do_cluster(opts, args):
"""
"""
dataset = load_data()
labels = dataset.target
true_k = np.unique(labels).shape[0]
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
if opts.use_idf:
# Perform an IDF normalization on the output of HashingVectorizer
hasher = HashingVectorizer(n_features=opts.n_features,
stop_words='english', non_negative=True,
norm=None, binary=False)
vectorizer = make_pipeline(hasher, TfidfTransformer())
else:
vectorizer = HashingVectorizer(n_features=opts.n_features,
stop_words='english',
non_negative=False, norm='l2',
binary=False)
else:
vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
min_df=2, stop_words='english',
use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
print("Performing dimensionality reduction using LSA")
t0 = time()
# Vectorizer results are normalized, which makes KMeans behave as
# spherical k-means for better results. Since LSA/SVD results are
# not normalized, we have to redo the normalization.
svd = TruncatedSVD(opts.n_components)
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
X = lsa.fit_transform(X)
print("done in %fs" % (time() - t0))
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
print()
###############################################################################
# Do the actual clustering
if opts.minibatch:
km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
verbose=opts.verbose)
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
% metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, km.labels_, sample_size=1000))
print()
if not opts.use_hashing:
print("Top terms per cluster:")
if opts.n_components:
original_space_centroids = svd.inverse_transform(km.cluster_centers_)
order_centroids = original_space_centroids.argsort()[:, ::-1]
else:
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(true_k):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='')
print()
def main():
"""Parse args and then go cluster.
"""
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
op = OptionParser()
op.add_option("--lsa",
dest="n_components", type="int",
help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
action="store_false", dest="minibatch", default=True,
help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
action="store_false", dest="use_idf", default=True,
help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
action="store_true", default=False,
help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
help="Maximum number of features (dimensions)"
" to extract from text.")
op.add_option("--verbose",
action="store_true", dest="verbose", default=False,
help="Print progress reports inside k-means algorithm.")
print(__doc__)
op.print_help()
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
do_cluster(opts, args)
if __name__ == '__main__':
main()
| gpl-2.0 |
YinongLong/scikit-learn | sklearn/datasets/tests/test_rcv1.py | 322 | 2414 | """Test the rcv1 loader.
Skipped if rcv1 is not already downloaded to data_home.
"""
import errno
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import fetch_rcv1
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
def test_fetch_rcv1():
try:
data1 = fetch_rcv1(shuffle=False, download_if_missing=False)
except IOError as e:
if e.errno == errno.ENOENT:
raise SkipTest("Download RCV1 dataset to run this test.")
X1, Y1 = data1.data, data1.target
cat_list, s1 = data1.target_names.tolist(), data1.sample_id
# test sparsity
assert_true(sp.issparse(X1))
assert_true(sp.issparse(Y1))
assert_equal(60915113, X1.data.size)
assert_equal(2606875, Y1.data.size)
# test shapes
assert_equal((804414, 47236), X1.shape)
assert_equal((804414, 103), Y1.shape)
assert_equal((804414,), s1.shape)
assert_equal(103, len(cat_list))
# test ordering of categories
first_categories = [u'C11', u'C12', u'C13', u'C14', u'C15', u'C151']
assert_array_equal(first_categories, cat_list[:6])
# test number of sample for some categories
some_categories = ('GMIL', 'E143', 'CCAT')
number_non_zero_in_cat = (5, 1206, 381327)
for num, cat in zip(number_non_zero_in_cat, some_categories):
j = cat_list.index(cat)
assert_equal(num, Y1[:, j].data.size)
# test shuffling and subset
data2 = fetch_rcv1(shuffle=True, subset='train', random_state=77,
download_if_missing=False)
X2, Y2 = data2.data, data2.target
s2 = data2.sample_id
# The first 23149 samples are the training samples
assert_array_equal(np.sort(s1[:23149]), np.sort(s2))
# test some precise values
some_sample_ids = (2286, 3274, 14042)
for sample_id in some_sample_ids:
idx1 = s1.tolist().index(sample_id)
idx2 = s2.tolist().index(sample_id)
feature_values_1 = X1[idx1, :].toarray()
feature_values_2 = X2[idx2, :].toarray()
assert_almost_equal(feature_values_1, feature_values_2)
target_values_1 = Y1[idx1, :].toarray()
target_values_2 = Y2[idx2, :].toarray()
assert_almost_equal(target_values_1, target_values_2)
| bsd-3-clause |
harisbal/pandas | pandas/io/msgpack/__init__.py | 26 | 1233 | # coding: utf-8
from collections import namedtuple
from pandas.io.msgpack.exceptions import * # noqa
from pandas.io.msgpack._version import version # noqa
class ExtType(namedtuple('ExtType', 'code data')):
"""ExtType represents ext type in msgpack."""
def __new__(cls, code, data):
if not isinstance(code, int):
raise TypeError("code must be int")
if not isinstance(data, bytes):
raise TypeError("data must be bytes")
if not 0 <= code <= 127:
raise ValueError("code must be 0~127")
return super(ExtType, cls).__new__(cls, code, data)
import os # noqa
from pandas.io.msgpack._packer import Packer # noqa
from pandas.io.msgpack._unpacker import unpack, unpackb, Unpacker # noqa
def pack(o, stream, **kwargs):
"""
Pack object `o` and write it to `stream`
See :class:`Packer` for options.
"""
packer = Packer(**kwargs)
stream.write(packer.pack(o))
def packb(o, **kwargs):
"""
Pack object `o` and return packed bytes
See :class:`Packer` for options.
"""
return Packer(**kwargs).pack(o)
# alias for compatibility to simplejson/marshal/pickle.
load = unpack
loads = unpackb
dump = pack
dumps = packb
| bsd-3-clause |
mgupta011235/TweetSafe | doc2vec/gridsearch_doc2vec.py | 1 | 8087 | import gensim
import pandas as pd
import random
import numpy as np
import matplotlib.pyplot as plt
import cPickle as pickle
from scipy.spatial.distance import cosine
from nltk.tokenize import PunktSentenceTokenizer
import time
###########################################################################
# tokenization code
def seperatePunct(incomingString):
'''
Input: str
Output: str with all punctuation separated by spaces
'''
outstr = ''
characters = set(['!','@','#','$',"%","^","&","*",":","\\",
"(",")","+","=","?","\'","\"",";","/",
"{","}","[","]","<",">","~","`","|"])
for char in incomingString:
if char in characters:
outstr = outstr + ' ' + char + ' '
else:
outstr = outstr + char
return outstr
def hasNumbers(inputString):
'''
Input: str
Output: returns a 1 if the string contains a number
'''
return any(char.isdigit() for char in inputString)
def text_cleaner(wordList):
'''
INPUT: List of words to be tokenized
OUTPUT: List of tokenized words
'''
tokenziedList = []
for word in wordList:
#remove these substrings from the word
word = word.replace('[deleted]','')
word = word.replace('>','')
#if link, replace with linktag
if 'http' in word:
tokenziedList.append('LINK_TAG')
continue
#if reference to subreddit, replace with reddittag
if '/r/' in word:
tokenziedList.append('SUBREDDIT_TAG')
continue
#if reference to reddit user, replace with usertag
if '/u/' in word:
tokenziedList.append('USER_TAG')
continue
#if reference to twitter user, replace with usertag
if '@' in word:
tokenziedList.append('USER_TAG')
continue
#if number, replace with numtag
#m8 is a word, 5'10" and 54-59, 56:48 are numbers
if hasNumbers(word) and not any(char.isalpha() for char in word):
tokenziedList.append('NUM_TAG')
continue
#seperate puncuations and add to tokenizedList
newwords = seperatePunct(word).split(" ")
tokenziedList.extend(newwords)
return tokenziedList
def mytokenizer(comment):
'''
Input: takes in a reddit comment as a str or unicode and tokenizes it
Output: a tokenized list
'''
tokenizer = PunktSentenceTokenizer()
sentenceList = tokenizer.tokenize(comment)
wordList = []
for sentence in sentenceList:
wordList.extend(sentence.split(" "))
return text_cleaner(wordList)
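# Illustrative example:
# mytokenizer("loving /r/aww @bob http://t.co 42!")
# -> ['loving', 'SUBREDDIT_TAG', 'USER_TAG', 'LINK_TAG', 'NUM_TAG']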
#############################################################################
#similarity code
def mostSimilarDoc(model,comment,k,threshold):
'''
Input: doc2vec model, comment is a str, k = number of similar doc vecs
Output: an int indicating hate (1) or not hate (0)
'''
docvecs = model.docvecs
numdocvec = len(docvecs)
simVals = np.zeros((numdocvec, ))
#tokenize comment
wordTokens = mytokenizer(comment)
#create vector of tokenized comment
#avg over 100 vectors
finalVec = np.zeros((300, ))
for i in xrange(100):
finalVec = finalVec + model.infer_vector(wordTokens)
commentVec = finalVec/100.0
#compute similarity of comment to each subreddit
for vec_ind in xrange(len(docvecs)):
simVals[vec_ind] = 1 - cosine(commentVec,docvecs[vec_ind])
mostSimVecInd = np.argsort(simVals)[-k:]
hatecount = 0
#count how many hates there are
for index in mostSimVecInd:
hatecount += ishateful(docvecs.index_to_doctag(index))
#majority vote to determine hateful/nothateful
if hatecount>=threshold*len(mostSimVecInd):
prediction = 1
else:
prediction = 0
#find most similar subreddit
# mostSimSubreddit = docvecs.index_to_doctag(mostSimVecInd[0])
return prediction
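# Example: with k=10 and threshold=0.5, a comment is flagged as hateful only
# if at least 5 of its 10 most similar subreddit vectors belong to hateful
# subreddits.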
##############################################################################
#hate/NotHate code
def ishateful(subreddit):
'''
Input: str subreddit
Output: int 1 if hateful subreddit, 0 otherwise
'''
# List of not hateful subreddits
final_nothate_srs = ['politics', 'worldnews', 'history', 'blackladies', 'lgbt',
'TransSpace', 'women', 'TwoXChromosomes', 'DebateReligion',
'religion', 'islam', 'Judaism', 'BodyAcceptance', 'fatlogic',
'gaybros', 'AskMen', 'AskWomen']
# List of hateful subreddits
final_hateful_srs = ['CoonTown', 'WhiteRights', 'Trans_fags', 'SlutJustice',
'TheRedPill', 'KotakuInAction', 'IslamUnveiled', 'GasTheKikes',
'AntiPOZi', 'fatpeoplehate', 'TalesofFatHate','hamplanethatred',
'shitniggerssay','neofag','altright']
if subreddit in final_hateful_srs:
return 1
else:
return 0
#############################################################################
#scoring code
def test_score(model,path,k,threshold):
'''Input: doc2vec model, path to test data, k val, threshold value
Output: the following list [k,threshold,accu,recall,precision,TP,TN,FN,FP]'''
# print "loading data..."
df = pd.read_csv(path)
tweets = df['tweet_text'].values
labels = df['label'].values
predict = np.zeros((len(labels),))
# print "scoring..."
for row in xrange(len(labels)):
tweet = tweets[row]
prediction = mostSimilarDoc(model,tweet,k,threshold)
predict[row] = prediction
TP = sum(predict+labels == 2)
TN = sum(predict+labels == 0)
FP = sum(predict-labels == 1)
FN = sum(predict-labels == -1)
accu = (TP+TN)/float(len(labels))
recall = TP/float(TP+FN)
precision = TP/float(TP+FP)
print ""
print "k: {}".format(k)
print "threshold: {}".format(threshold)
print "accuracy: {}".format(accu)
print "recall: {}".format(recall)
print "precision: {}".format(precision)
print ""
print "TP: {}".format(TP)
print "TN: {}".format(TN)
print ""
print "FN: {}".format(FN)
print "FP: {}".format(FP)
#output data to be saved in a pd dataframe
return [k,threshold,accu,recall,precision,TP,TN,FN,FP]
##############################################################################
#Main
if __name__ == '__main__':
'''This script runs gridsearch on a doc2vec model to determine the
optimal k and threshold values on the cross val set'''
print "starting..."
#dataset paths
trainpath = '../../data/labeledRedditComments.p'
trainpath2 = '../../data/labeledRedditComments2.p'
cvpath = '../../data/twitter_cross_val.csv'
testpath = '../../data/twitter_test.csv'
sqlpath = '../../data/RedditMay2015Comments.sqlite'
#model paths
# modelPath = '../../doc2vec_models/basemodel2/basemodel2.doc2vec'
# modelPath = '../../doc2vec_models/basemodel3/basemodel3.doc2vec'
# modelPath = '../../doc2vec_models/basemodel4/basemodel4.doc2vec'
modelPath = '../../doc2vec_models/basemodel5/basemodel5.doc2vec'
# modelPath = '../../doc2vec_models/modellower/modellower.doc2vec'
# modelPath = '../../doc2vec_models/model_split/model_split.doc2vec'
print "loading model..."
model = gensim.models.Doc2Vec.load(modelPath)
tstart = time.time()
print "gridsearch..."
results = []
count = 0
for k in xrange(1,15):
for threshold in [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1]:
print "count: {}".format(count)
results.append(test_score(model,cvpath,k,threshold))
count+=1
print ""
labels = ['k','threshold','accuracy','recall','precision','TP','TN','FN','FP']
df = pd.DataFrame(data=results,columns=labels)
tstop = time.time()
dt = tstop-tstart
print "total time: {}".format(dt)
print "time per gridpoint: {}".format(dt/float(count))
df.to_csv('../../data/gridsearch_modelbase5_on_cross_val.csv')
| gpl-3.0 |
fabioticconi/scikit-learn | examples/tree/plot_tree_regression.py | 95 | 1516 | """
===================================================================
Decision Tree Regression
===================================================================
A 1D regression with decision tree.
The :ref:`decision tree <tree>` is
used to fit a sine curve with additional noisy observations. As a result, it
learns local linear regressions approximating the sine curve.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn too fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
# Import the necessary modules and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(X, y, c="darkorange", label="data")
plt.plot(X_test, y_1, color="cornflowerblue", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, color="yellowgreen", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
soulmachine/scikit-learn | sklearn/metrics/pairwise.py | 4 | 43059 | # -*- coding: utf-8 -*-
"""
The :mod:`sklearn.metrics.pairwise` submodule implements utilities to evaluate
pairwise distances, paired distances or affinity of sets of samples.
This module contains both distance metrics and kernels. A brief summary is
given on the two here.
Distance metrics are functions d(a, b) such that d(a, b) < d(a, c) if objects
a and b are considered "more similar" than objects a and c. Two objects exactly
alike would have a distance of zero.
One of the most popular examples is Euclidean distance.
To be a 'true' metric, it must obey the following four conditions::
1. d(a, b) >= 0, for all a and b
2. d(a, b) == 0, if and only if a = b, positive definiteness
3. d(a, b) == d(b, a), symmetry
4. d(a, c) <= d(a, b) + d(b, c), the triangle inequality
Kernels are measures of similarity, i.e. ``s(a, b) > s(a, c)``
if objects ``a`` and ``b`` are considered "more similar" than objects
``a`` and ``c``. A kernel must also be positive semi-definite.
There are a number of ways to convert between a distance metric and a
similarity measure, such as a kernel. Let D be the distance, and S be the
kernel:
1. ``S = np.exp(-D * gamma)``, where one heuristic for choosing
``gamma`` is ``1 / num_features``
2. ``S = 1. / (D / np.max(D))``
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# Andreas Mueller <[email protected]>
# Philippe Gervais <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def check_pairwise_arrays(X, Y):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr')
else:
X = check_array(X, accept_sparse='csr')
Y = check_array(Y, accept_sparse='csr')
if X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
if not (X.dtype == Y.dtype == np.float32):
if Y is X:
X = Y = check_array(X, ['csr', 'csc', 'coo'], dtype=np.float)
else:
X = check_array(X, ['csr', 'csc', 'coo'], dtype=np.float)
Y = check_array(Y, ['csr', 'csc', 'coo'], dtype=np.float)
return X, Y
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vectors x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if x varies but y remains unchanged, then the right-most dot
product `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
paired_distances : distances betweens pairs of elements of X and Y.
"""
# should not need X_norm_squared because if you could precompute that as
# well as Y, then you should just pre-compute the output and not even
# call this function.
X, Y = check_pairwise_arrays(X, Y)
if Y_norm_squared is not None:
YY = check_array(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
XX = YY.T
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
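# Illustrative sketch (not part of the public API): when Y is fixed, its
# squared row norms can be precomputed once and reused across calls, e.g.
#
#     YY = row_norms(Y, squared=True)[np.newaxis, :]
#     D = euclidean_distances(X_chunk, Y, Y_norm_squared=YY)  # X_chunk: hypothetical batch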
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x] = np.where(
flags, min_indices + chunk_y.start, indices[chunk_x])
values[chunk_x] = np.where(
flags, min_values, values[chunk_x])
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs={}):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
X, Y : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Avoid creating temporary matrices bigger than size_threshold (in
bytes). If the problem size gets too big, the implementation then
breaks it down in smaller problems.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances(3, 3)#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances(3, 2)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances(2, 3)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
temporary_size = X.size * Y.shape[-1]
# Convert to bytes
temporary_size *= X.itemsize
if temporary_size > size_threshold and sum_over_features:
# Broadcasting the full thing would be too big: it's on the order
# of magnitude of the gigabyte
D = np.empty((X.shape[0], Y.shape[0]), dtype=X.dtype)
index = 0
increment = 1 + int(size_threshold / float(temporary_size) *
X.shape[0])
while index < X.shape[0]:
this_slice = slice(index, index + increment)
tmp = X[this_slice, np.newaxis, :] - Y[np.newaxis, :, :]
tmp = np.abs(tmp, tmp)
tmp = np.sum(tmp, axis=2)
D[this_slice] = tmp
index += increment
else:
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
if sum_over_features:
D = np.sum(D, axis=2)
else:
D = D.reshape((-1, X.shape[1]))
return D
def cosine_distances(X, Y=None):
"""
Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
return S
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return np.sqrt(((X - Y) ** 2).sum(axis=-1))
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return np.abs(X - Y).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
------
The cosine distance is equivalent to half the squared
euclidean distance if each sample is normalized to unit norm
"""
X, Y = check_paired_arrays(X, Y)
X_normalized = normalize(X, copy=True)
X_normalized -= normalize(Y, copy=True)
return .5 * (X_normalized ** 2).sum(axis=-1)
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances,
}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Parameters
----------
X, Y : ndarray (n_samples, n_features)
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
degree : int, default 3
gamma : float, default None
    if None, defaults to 1.0 / n_features
coef0 : int, default 1
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = linear_kernel(X, Y)
K *= gamma
K += coef0
K **= degree
return K
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
gamma : float, default None
    if None, defaults to 1.0 / n_features
coef0 : int, default 1
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = linear_kernel(X, Y)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
def cosine_similarity(X, Y=None):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = linear_kernel(X_normalized, Y_normalized)
return K
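# Sanity-check sketch of the docstring's claim (illustrative only):
#
#     Xn = normalize(X)
#     np.allclose(cosine_similarity(X, X), linear_kernel(Xn, Xn))  # -> True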
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
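# Illustrative usage sketch (rows are non-negative histograms):
#
#     X = [[.3, .7], [.5, .5]]
#     K = chi2_kernel(X, gamma=.5)  # (2, 2) similarity matrix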
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances, }
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
ret = Parallel(n_jobs=n_jobs, verbose=0)(
delayed(func)(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.metrics.pairwise.distance_metrics
function.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
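Examples
--------
A minimal sketch on toy data (array values are illustrative only):
>>> import numpy as np
>>> from sklearn.metrics.pairwise import pairwise_distances
>>> X = np.array([[0., 0.], [1., 1.]])
>>> Y = np.array([[0., 1.]])
>>> pairwise_distances(X, Y, metric='euclidean').shape
(2, 1)
>>> pairwise_distances(X, metric='manhattan').shape
(2, 2)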
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
if n_jobs == 1:
return func(X, Y, **kwds)
else:
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
elif callable(metric):
# Check matrices first (this is usually done by the metric).
X, Y = check_pairwise_arrays(X, Y)
n_x, n_y = X.shape[0], Y.shape[0]
# Calculate distance for each element in X and Y.
# FIXME: can use n_jobs here too
# FIXME: np.zeros can be replaced by np.empty
D = np.zeros((n_x, n_y), dtype='float')
for i in range(n_x):
start = 0
if X is Y:
start = i
for j in range(start, n_y):
# distance assumed to be symmetric.
D[i][j] = metric(X[i], Y[j], **kwds)
if X is Y:
D[j][i] = D[i][j]
return D
else:
# Note: the distance module doesn't support sparse matrices!
if type(X) is csr_matrix:
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
if Y is None:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
else:
if type(Y) is csr_matrix:
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
return distance.cdist(X, Y, metric=metric, **kwds)
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
# If updating this dictionary, update the doc in both kernel_metrics()
# and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
This function simply returns the valid pairwise kernel metrics.
It exists, however, to allow for a verbose description of the mapping for
each of the valid strings.
The valid kernel metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
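Examples
--------
A short sketch querying the returned mapping (consistent with the table
above):
>>> from sklearn.metrics.pairwise import kernel_metrics
>>> 'rbf' in kernel_metrics()
True
>>> kernel_metrics()['poly'] is kernel_metrics()['polynomial']
True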
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the kernel value between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
filter_params: boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
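Examples
--------
A minimal sketch on toy data (values illustrative only; the gamma passed
through ``**kwds`` is forwarded to the rbf kernel):
>>> import numpy as np
>>> from sklearn.metrics.pairwise import pairwise_kernels
>>> X = np.array([[0., 1.], [1., 0.]])
>>> pairwise_kernels(X, metric='linear').shape
(2, 2)
>>> pairwise_kernels(X, metric='rbf', gamma=0.5).shape
(2, 2)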
"""
if metric == "precomputed":
return X
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
if n_jobs == 1:
return func(X, Y, **kwds)
else:
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
elif callable(metric):
# Check matrices first (this is usually done by the metric).
X, Y = check_pairwise_arrays(X, Y)
n_x, n_y = X.shape[0], Y.shape[0]
# Calculate kernel for each element in X and Y.
K = np.zeros((n_x, n_y), dtype='float')
for i in range(n_x):
start = 0
if X is Y:
start = i
for j in range(start, n_y):
# Kernel assumed to be symmetric.
K[i][j] = metric(X[i], Y[j], **kwds)
if X is Y:
K[j][i] = K[i][j]
return K
else:
raise ValueError("Unknown kernel %r" % metric)
| bsd-3-clause |
joernhees/scikit-learn | sklearn/datasets/lfw.py | 18 | 18714 | """Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification, is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Jones and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
from os import listdir, makedirs, remove, rename
from os.path import join, exists, isdir
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home
from ..utils import Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
"""Scale back to 0-1 range in case of normalization for plotting"""
scaled = face - face.min()
scaled /= scaled.max()
return scaled
#
# Common private utilities for data fetching from the original LFW website,
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
"""Helper function to download any missing LFW data"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, "lfw_home")
if funneled:
archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw_funneled")
archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
else:
archive_path = join(lfw_home, ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw")
archive_url = BASE_URL + ARCHIVE_NAME
if not exists(lfw_home):
makedirs(lfw_home)
for target_filename in TARGET_FILENAMES:
target_filepath = join(lfw_home, target_filename)
if not exists(target_filepath):
if download_if_missing:
url = BASE_URL + target_filename
logger.warning("Downloading LFW metadata: %s", url)
urllib.urlretrieve(url, target_filepath)
else:
raise IOError("%s is missing" % target_filepath)
if not exists(data_folder_path):
if not exists(archive_path):
if download_if_missing:
archive_path_temp = archive_path + ".tmp"
logger.warning("Downloading LFW data (~200MB): %s",
archive_url)
urllib.urlretrieve(archive_url, archive_path_temp)
rename(archive_path_temp, archive_path)
else:
raise IOError("%s is missing" % target_filepath)
import tarfile
logger.info("Decompressing the data archive to %s", data_folder_path)
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
"""Internally used to load images"""
# Try to import imread and imresize from PIL. We do this here to prevent
# the whole sklearn.datasets module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
from scipy.misc import imresize
except ImportError:
raise ImportError("The Python Imaging Library (PIL)"
" is required to load data from jpeg files")
# compute the portion of the images to load to respect the slice_ parameter
# given by the caller
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
h_slice, w_slice = slice_
h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
# allocate some contiguous memory to host the decoded image slices
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
# iterate over the collected file path to load the jpeg files as numpy
# arrays
for i, file_path in enumerate(file_paths):
if i % 1000 == 0:
logger.info("Loading face #%05d / %05d", i + 1, n_faces)
# Checks if jpeg reading worked. Refer to issue #3594 for more
# details.
img = imread(file_path)
if img.ndim == 0:
raise RuntimeError("Failed to read the image file %s, "
"Please make sure that libjpeg is installed"
% file_path)
face = np.asarray(img[slice_], dtype=np.float32)
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
if resize is not None:
face = imresize(face, resize)
if not color:
# average the color channels to compute a gray levels
# representation
face = face.mean(axis=2)
faces[i, ...] = face
return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
# scan the data folder content to retain people with more than
# `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in sorted(listdir(folder_path))]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
min_faces_per_person=0, color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) people dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Recognition (or Identification): given the
picture of a face, find the name of the person given a training set
(gallery).
The original images are 250 x 250 pixels, but the default slice and resize
arguments reduce them to 62 x 47.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize each face picture.
min_faces_per_person : int, optional, default 0
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
'interesting' part of the jpeg files and avoid using statistical
correlations from the background.
download_if_missing : optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (13233, 2914)
Each row corresponds to a ravelled face image of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the
shape of the output.
dataset.images : numpy array of shape (13233, 62, 47)
Each row is a face image corresponding to one of the 5749 people in
the dataset. Changing the ``slice_`` or resize parameters will change
the shape of the output.
dataset.target : numpy array of shape (13233,)
Labels associated with each face image. Those labels range from 0 to 5748
and correspond to the person IDs.
dataset.DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
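Examples
--------
A usage sketch (the first call downloads roughly 200MB and the shapes
below assume the default funneled data, so this is illustrative rather
than a doctest to run blindly):
>>> from sklearn.datasets import fetch_lfw_people  # doctest: +SKIP
>>> lfw = fetch_lfw_people(min_faces_per_person=70, resize=0.4)  # doctest: +SKIP
>>> lfw.images.shape  # doctest: +SKIP
(1288, 50, 37)
>>> lfw.target_names.shape  # doctest: +SKIP
(7,)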
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading LFW people faces from %s', lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
# load and memoize the pairs as np arrays
faces, target, target_names = load_func(
data_folder_path, resize=resize,
min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=faces.reshape(len(faces), -1), images=faces,
target=target, target_names=target_names,
DESCR="LFW faces dataset")
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.strip().split(b('\t')) for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
# iterating over the metadata lines for each pair to find the filename to
# decode and load in memory
target = np.zeros(n_pairs, dtype=np.int)
file_paths = list()
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
try:
person_folder = join(data_folder_path, name)
except TypeError:
person_folder = join(data_folder_path, str(name, 'UTF-8'))
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
return pairs, target, np.array(['Different persons', 'Same person'])
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
color=False, slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) pairs dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Verification: given a pair of two pictures,
a binary classifier must predict whether the two images are from
the same person.
In the official `README.txt`_ this task is described as the
"Restricted" task. As I am not sure as to implement the
"Unrestricted" variant correctly, I left it as unsupported for now.
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
The original images are 250 x 250 pixels, but the default slice and resize
arguments reduce them to 62 x 47.
Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.
Parameters
----------
subset : optional, default: 'train'
Select the dataset to load: 'train' for the development training
set, 'test' for the development test set, and '10_folds' for the
official evaluation set that is meant to be used with a 10-folds
cross validation.
data_home : optional, default: None
Specify another download and cache folder for the datasets. By
default all scikit-learn data is stored in '~/scikit_learn_data'
subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize each face picture.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
'interesting' part of the jpeg files and avoid using statistical
correlations from the background.
download_if_missing : optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
The data is returned as a Bunch object with the following attributes:
data : numpy array of shape (2200, 5828). Shape depends on ``subset``.
Each row corresponds to 2 ravelled face images of original size 62 x 47
pixels. Changing the ``slice_``, ``resize`` or ``subset`` parameters
will change the shape of the output.
pairs : numpy array of shape (2200, 2, 62, 47). Shape depends on
``subset``.
Each row has 2 face images corresponding to same or different person
from the dataset containing 5749 people. Changing the ``slice_``,
``resize`` or ``subset`` parameters will change the shape of the
output.
target : numpy array of shape (2200,). Shape depends on ``subset``.
Labels associated with each pair of images. The two label values
correspond to different persons or the same person.
DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
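Examples
--------
A usage sketch (the archive is downloaded on first use; the shapes match
the 'train' subset described above):
>>> from sklearn.datasets import fetch_lfw_pairs  # doctest: +SKIP
>>> pairs_train = fetch_lfw_pairs(subset='train')  # doctest: +SKIP
>>> pairs_train.pairs.shape  # doctest: +SKIP
(2200, 2, 62, 47)
>>> pairs_train.target.shape  # doctest: +SKIP
(2200,)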
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)
# select the right metadata file according to the requested subset
label_filenames = {
'train': 'pairsDevTrain.txt',
'test': 'pairsDevTest.txt',
'10_folds': 'pairs.txt',
}
if subset not in label_filenames:
raise ValueError("subset='%s' is invalid: should be one of %r" % (
subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
# load and memoize the pairs as np arrays
pairs, target, target_names = load_func(
index_file_path, data_folder_path, resize=resize, color=color,
slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
target=target, target_names=target_names,
DESCR="'%s' segment of the LFW pairs dataset" % subset)
| bsd-3-clause |
nikitasingh981/scikit-learn | sklearn/covariance/tests/test_graph_lasso.py | 33 | 6157 | """ Test the graph_lasso module.
"""
import sys
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_warns_message
from sklearn.covariance import (graph_lasso, GraphLasso, GraphLassoCV,
empirical_covariance)
from sklearn.datasets.samples_generator import make_sparse_spd_matrix
from sklearn.externals.six.moves import StringIO
from sklearn.utils import check_random_state
from sklearn import datasets
from numpy.testing import assert_equal
def test_graph_lasso(random_state=0):
# Sample data from a sparse multivariate normal
dim = 20
n_samples = 100
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.95,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
emp_cov = empirical_covariance(X)
for alpha in (0., .1, .25):
covs = dict()
icovs = dict()
for method in ('cd', 'lars'):
cov_, icov_, costs = graph_lasso(emp_cov, alpha=alpha, mode=method,
return_costs=True)
covs[method] = cov_
icovs[method] = icov_
costs, dual_gap = np.array(costs).T
# Check that the costs always decrease (doesn't hold if alpha == 0)
if not alpha == 0:
assert_array_less(np.diff(costs), 0)
# Check that the 2 approaches give similar results
assert_array_almost_equal(covs['cd'], covs['lars'], decimal=4)
assert_array_almost_equal(icovs['cd'], icovs['lars'], decimal=4)
# Smoke test the estimator
model = GraphLasso(alpha=.25).fit(X)
model.score(X)
assert_array_almost_equal(model.covariance_, covs['cd'], decimal=4)
assert_array_almost_equal(model.covariance_, covs['lars'], decimal=4)
# For a centered matrix, assume_centered could be chosen True or False
# Check that this indeed returns the same result for centered data
Z = X - X.mean(0)
precs = list()
for assume_centered in (False, True):
prec_ = GraphLasso(assume_centered=assume_centered).fit(Z).precision_
precs.append(prec_)
assert_array_almost_equal(precs[0], precs[1])
def test_graph_lasso_iris():
# Hard-coded solution from R glasso package for alpha=1.0
# The iris datasets in R and scikit-learn do not match in a few places,
# these values are for the scikit-learn version.
cov_R = np.array([
[0.68112222, 0.0, 0.2651911, 0.02467558],
[0.00, 0.1867507, 0.0, 0.00],
[0.26519111, 0.0, 3.0924249, 0.28774489],
[0.02467558, 0.0, 0.2877449, 0.57853156]
])
icov_R = np.array([
[1.5188780, 0.0, -0.1302515, 0.0],
[0.0, 5.354733, 0.0, 0.0],
[-0.1302515, 0.0, 0.3502322, -0.1686399],
[0.0, 0.0, -0.1686399, 1.8123908]
])
X = datasets.load_iris().data
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=1.0, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R)
assert_array_almost_equal(icov, icov_R)
def test_graph_lasso_iris_singular():
# Small subset of rows to test the rank-deficient case
# Need to choose samples such that none of the variances are zero
indices = np.arange(10, 13)
# Hard-coded solution from R glasso package for alpha=0.01
cov_R = np.array([
[0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
[0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
[0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
[0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222]
])
icov_R = np.array([
[24.42244057, -16.831679593, 0.0, 0.0],
[-16.83168201, 24.351841681, -6.206896552, -12.5],
[0.0, -6.206896171, 153.103448276, 0.0],
[0.0, -12.499999143, 0.0, 462.5]
])
X = datasets.load_iris().data[indices, :]
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=0.01, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R, decimal=5)
assert_array_almost_equal(icov, icov_R, decimal=5)
def test_graph_lasso_cv(random_state=1):
# Sample data from a sparse multivariate normal
dim = 5
n_samples = 6
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.96,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
# Capture stdout, to smoke test the verbose mode
orig_stdout = sys.stdout
try:
sys.stdout = StringIO()
# We need verbose very high so that Parallel prints on stdout
GraphLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
finally:
sys.stdout = orig_stdout
# Smoke test with specified alphas
GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X)
def test_deprecated_grid_scores(random_state=1):
dim = 5
n_samples = 6
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.96,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
graph_lasso = GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1)
graph_lasso.fit(X)
depr_message = ("Attribute grid_scores was deprecated in version "
"0.19 and will be removed in 0.21. Use "
"'grid_scores_' instead")
assert_warns_message(DeprecationWarning, depr_message,
lambda: graph_lasso.grid_scores)
assert_equal(graph_lasso.grid_scores, graph_lasso.grid_scores_)
| bsd-3-clause |
ClimbsRocks/scikit-learn | sklearn/tests/test_multiclass.py | 7 | 24017 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.utils.multiclass import check_classification_targets, type_of_target
from sklearn.utils import shuffle
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.svm import LinearSVC, SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
Perceptron, LogisticRegression)
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn import svm
from sklearn import datasets
from sklearn.externals.six.moves import zip
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
def test_ovr_exceptions():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovr.predict, [])
# Fail on multioutput data
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1, 2], [3, 1]]))
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1.5, 2.4], [3.1, 0.8]]))
def test_check_classification_targets():
# Test that check_classification_target return correct type. #5782
y = np.array([0.0, 1.1, 2.0, 3.0])
msg = type_of_target(y)
assert_raise_message(ValueError, msg, check_classification_targets, y)
def test_ovr_fit_predict():
# A classifier which implements decision_function.
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
clf = LinearSVC(random_state=0)
pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2))
# A classifier which implements predict_proba.
ovr = OneVsRestClassifier(MultinomialNB())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_partial_fit():
# Test if partial_fit is working as intended
X, y = shuffle(iris.data, iris.target, random_state=0)
ovr = OneVsRestClassifier(MultinomialNB())
ovr.partial_fit(X[:100], y[:100], np.unique(y))
ovr.partial_fit(X[100:], y[100:])
pred = ovr.predict(X)
ovr2 = OneVsRestClassifier(MultinomialNB())
pred2 = ovr2.fit(X, y).predict(X)
assert_almost_equal(pred, pred2)
assert_equal(len(ovr.estimators_), len(np.unique(y)))
assert_greater(np.mean(y == pred), 0.65)
# Test when mini-batches don't have all classes
ovr = OneVsRestClassifier(MultinomialNB())
ovr.partial_fit(iris.data[:60], iris.target[:60], np.unique(iris.target))
ovr.partial_fit(iris.data[60:], iris.target[60:])
pred = ovr.predict(iris.data)
ovr2 = OneVsRestClassifier(MultinomialNB())
pred2 = ovr2.fit(iris.data, iris.target).predict(iris.data)
assert_almost_equal(pred, pred2)
assert_equal(len(ovr.estimators_), len(np.unique(iris.target)))
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_ovo_regressor():
# test that ovr and ovo work on regressors which don't have a decision_function
ovr = OneVsRestClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
ovr = OneVsOneClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes * (n_classes - 1) / 2)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
def test_ovr_fit_predict_sparse():
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
base_clf = MultinomialNB(alpha=1)
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train))
Y_pred_sprs = clf_sprs.predict(X_test)
assert_true(clf.multilabel_)
assert_true(sp.issparse(Y_pred_sprs))
assert_array_equal(Y_pred_sprs.toarray(), Y_pred)
# Test predict_proba
Y_proba = clf_sprs.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred_sprs.toarray())
# Test decision_function
clf_sprs = OneVsRestClassifier(svm.SVC()).fit(X_train, sparse(Y_train))
dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int)
assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray())
def test_ovr_always_present():
# Test that ovr works with classes that are always present or absent.
# Note: this tests the case where _ConstantPredictor is utilised
X = np.ones((10, 2))
X[:5, :] = 0
# Build an indicator matrix where two features are always on.
# As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)]
y = np.zeros((10, 3))
y[5:, 0] = 1
y[:, 1] = 1
y[:, 2] = 1
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict(X)
assert_array_equal(np.array(y_pred), np.array(y))
y_pred = ovr.decision_function(X)
assert_equal(np.unique(y_pred[:, -2:]), 1)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.ones(X.shape[0]))
# y has a constantly absent label
y = np.zeros((10, 2))
y[5:, 0] = 1 # variable label
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0]))
def test_ovr_multiclass():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "ham", "eggs", "ham"]
Y = np.array([[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet()):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 0, 4]])[0]
assert_array_equal(y_pred, [0, 0, 1])
def test_ovr_binary():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "spam", "eggs", "spam"]
Y = np.array([[0, 1, 1, 0, 1]]).T
classes = set("eggs spam".split())
def conduct_test(base_clf, test_predict_proba=False):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
if test_predict_proba:
X_test = np.array([[0, 0, 4]])
probabilities = clf.predict_proba(X_test)
assert_equal(2, len(probabilities[0]))
assert_equal(clf.classes_[np.argmax(probabilities, axis=1)],
clf.predict(X_test))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[3, 0, 0]])[0]
assert_equal(y_pred, 1)
for base_clf in (LinearSVC(random_state=0), LinearRegression(),
Ridge(), ElasticNet()):
conduct_test(base_clf)
for base_clf in (MultinomialNB(), SVC(probability=True),
LogisticRegression()):
conduct_test(base_clf, test_predict_proba=True)
def test_ovr_multilabel():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
y = np.array([[0, 1, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet(), Lasso(alpha=0.5)):
clf = OneVsRestClassifier(base_clf).fit(X, y)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_array_equal(y_pred, [0, 1, 1])
assert_true(clf.multilabel_)
def test_ovr_fit_predict_svc():
ovr = OneVsRestClassifier(svm.SVC())
ovr.fit(iris.data, iris.target)
assert_equal(len(ovr.estimators_), 3)
assert_greater(ovr.score(iris.data, iris.target), .9)
def test_ovr_multilabel_dataset():
base_clf = MultinomialNB(alpha=1)
for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
assert_true(clf.multilabel_)
assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"),
prec,
decimal=2)
assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"),
recall,
decimal=2)
def test_ovr_multilabel_predict_proba():
base_clf = MultinomialNB(alpha=1)
for au in (False, True):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision_function-only estimator: predict_proba fails in the current
# implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
# Estimator with predict_proba disabled, depending on parameters.
decision_only = OneVsRestClassifier(svm.SVC(probability=False))
decision_only.fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred)
def test_ovr_single_label_predict_proba():
base_clf = MultinomialNB(alpha=1)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision_function-only estimator: predict_proba fails in the current
# implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
assert_almost_equal(Y_proba.sum(axis=1), 1.0)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = np.array([l.argmax() for l in Y_proba])
assert_false((pred - Y_pred).any())
def test_ovr_multilabel_decision_function():
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal((clf.decision_function(X_test) > 0).astype(int),
clf.predict(X_test))
def test_ovr_single_label_decision_function():
X, Y = datasets.make_classification(n_samples=100,
n_features=20,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal(clf.decision_function(X_test).ravel() > 0,
clf.predict(X_test))
def test_ovr_gridsearch():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovr, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovr_pipeline():
# Test with pipeline of length one
# This test is needed because the multiclass estimators may fail to detect
# the presence of predict_proba or decision_function.
clf = Pipeline([("tree", DecisionTreeClassifier())])
ovr_pipe = OneVsRestClassifier(clf)
ovr_pipe.fit(iris.data, iris.target)
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))
def test_ovr_coef_():
for base_classifier in [SVC(kernel='linear', random_state=0), LinearSVC(random_state=0)]:
# SVC has sparse coef with sparse input data
ovr = OneVsRestClassifier(base_classifier)
for X in [iris.data, sp.csr_matrix(iris.data)]:
# test with dense and sparse coef
ovr.fit(X, iris.target)
shape = ovr.coef_.shape
assert_equal(shape[0], n_classes)
assert_equal(shape[1], iris.data.shape[1])
# don't densify sparse coefficients
assert_equal(sp.issparse(ovr.estimators_[0].coef_), sp.issparse(ovr.coef_))
def test_ovr_coef_exceptions():
# Not fitted exception!
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
# lambda is needed because we don't want coef_ to be evaluated right away
assert_raises(ValueError, lambda x: ovr.coef_, None)
# Doesn't have coef_ exception!
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_raises(AttributeError, lambda x: ovr.coef_, None)
def test_ovo_exceptions():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovo.predict, [])
def test_ovo_fit_on_list():
# Test that OneVsOne fitting works with a list of targets and yields the
# same output as predict from an array
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
prediction_from_array = ovo.fit(iris.data, iris.target).predict(iris.data)
iris_data_list = [list(a) for a in iris.data]
prediction_from_list = ovo.fit(iris_data_list,
list(iris.target)).predict(iris_data_list)
assert_array_equal(prediction_from_array, prediction_from_list)
def test_ovo_fit_predict():
# A classifier which implements decision_function.
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
# A classifier which implements predict_proba.
ovo = OneVsOneClassifier(MultinomialNB())
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
def test_ovo_partial_fit_predict():
X, y = shuffle(iris.data, iris.target)
ovo1 = OneVsOneClassifier(MultinomialNB())
ovo1.partial_fit(X[:100], y[:100], np.unique(y))
ovo1.partial_fit(X[100:], y[100:])
pred1 = ovo1.predict(X)
ovo2 = OneVsOneClassifier(MultinomialNB())
ovo2.fit(X, y)
pred2 = ovo2.predict(X)
assert_equal(len(ovo1.estimators_), n_classes * (n_classes - 1) / 2)
assert_greater(np.mean(y == pred1), 0.65)
assert_almost_equal(pred1, pred2)
# Test when mini-batches don't have all target classes
ovo1 = OneVsOneClassifier(MultinomialNB())
ovo1.partial_fit(iris.data[:60], iris.target[:60], np.unique(iris.target))
ovo1.partial_fit(iris.data[60:], iris.target[60:])
pred1 = ovo1.predict(iris.data)
ovo2 = OneVsOneClassifier(MultinomialNB())
pred2 = ovo2.fit(iris.data, iris.target).predict(iris.data)
assert_almost_equal(pred1, pred2)
assert_equal(len(ovo1.estimators_), len(np.unique(iris.target)))
assert_greater(np.mean(iris.target == pred1), 0.65)
def test_ovo_decision_function():
n_samples = iris.data.shape[0]
ovo_clf = OneVsOneClassifier(LinearSVC(random_state=0))
ovo_clf.fit(iris.data, iris.target)
decisions = ovo_clf.decision_function(iris.data)
assert_equal(decisions.shape, (n_samples, n_classes))
assert_array_equal(decisions.argmax(axis=1), ovo_clf.predict(iris.data))
# Compute the votes
votes = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
pred = ovo_clf.estimators_[k].predict(iris.data)
votes[pred == 0, i] += 1
votes[pred == 1, j] += 1
k += 1
# Extract votes and verify
assert_array_equal(votes, np.round(decisions))
for class_idx in range(n_classes):
# For each sample and each class, there are only 3 possible vote levels
# because there are only 3 distinct class pairs and thus 3 distinct
# binary classifiers.
# Therefore, sorting predictions based on votes would yield
# mostly tied predictions:
assert_true(set(votes[:, class_idx]).issubset(set([0., 1., 2.])))
# The OVO decision function on the other hand is able to resolve
# most of the ties on this data as it combines both the vote counts
# and the aggregated confidence levels of the binary classifiers
# to compute the aggregate decision function. The iris dataset
# has 150 samples with a couple of duplicates. The OvO decisions
# can resolve most of the ties:
assert_greater(len(np.unique(decisions[:, class_idx])), 146)
def test_ovo_gridsearch():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovo, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovo_ties():
# Test that ties are broken using the decision function,
# not defaulting to the smallest label
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y = np.array([2, 0, 1, 2])
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
ovo_decision = multi_clf.decision_function(X)
# Classifiers are in order 0-1, 0-2, 1-2
# Use decision_function to compute the votes and the normalized
# sum_of_confidences, which is used to disambiguate when there is a tie in
# votes.
votes = np.round(ovo_decision)
normalized_confidences = ovo_decision - votes
# For the first point, there is one vote per class
assert_array_equal(votes[0, :], 1)
# For the rest, there is no tie and the prediction is the argmax
assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
# For the tie, the prediction is the class with the highest score
assert_equal(ovo_prediction[0], normalized_confidences[0].argmax())
def test_ovo_ties2():
# test that ties can be won by labels other than the first two
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y_ref = np.array([2, 0, 1, 2])
# cycle through labels so that each label wins once
for i in range(3):
y = (y_ref + i) % 3
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
assert_equal(ovo_prediction[0], i % 3)
def test_ovo_string_y():
# Test that the OvO doesn't mess up the encoding of string labels
X = np.eye(4)
y = np.array(['a', 'b', 'c', 'd'])
ovo = OneVsOneClassifier(LinearSVC())
ovo.fit(X, y)
assert_array_equal(y, ovo.predict(X))
def test_ecoc_exceptions():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ecoc.predict, [])
def test_ecoc_fit_predict():
# A classifier which implements decision_function.
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
# A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
def test_ecoc_gridsearch():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
random_state=0)
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ecoc, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
| bsd-3-clause |
gdementen/PyTables | doc/sphinxext/inheritance_diagram.py | 98 | 13648 | """
Defines a docutils directive for inserting inheritance diagrams.
Provide the directive with one or more classes or modules (separated
by whitespace). For modules, all of the classes in that module will
be used.
Example::
Given the following classes:
class A: pass
class B(A): pass
class C(A): pass
class D(B, C): pass
class E(B): pass
.. inheritance-diagram:: D E
Produces a graph like the following:
A
/ \
B C
/ \ /
E D
The graph is inserted as a PNG+image map into HTML and a PDF in
LaTeX.
"""
import inspect
import os
import re
import subprocess
try:
from hashlib import md5
except ImportError:
from md5 import md5
from docutils.nodes import Body, Element
from docutils.parsers.rst import directives
from sphinx.roles import xfileref_role
def my_import(name):
"""Module importer - taken from the python documentation.
This function allows importing names with dots in them."""
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
class DotException(Exception):
pass
class InheritanceGraph(object):
"""
Given a list of classes, determines the set of classes that
they inherit from all the way to the root "object", and then
is able to generate a graphviz dot graph from them.
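A minimal usage sketch (the dotted name below is a placeholder for any
importable module or class; 'dot' must be on the PATH for run_dot):
graph = InheritanceGraph(['some_package.some_module'])
names = graph.get_all_class_names()
image_map = graph.run_dot(['-Tcmapx'], 'example_graph')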
"""
def __init__(self, class_names, show_builtins=False):
"""
*class_names* is a list of child classes to show bases from.
If *show_builtins* is True, then Python builtins will be shown
in the graph.
"""
self.class_names = class_names
self.classes = self._import_classes(class_names)
self.all_classes = self._all_classes(self.classes)
if len(self.all_classes) == 0:
raise ValueError("No classes found for inheritance diagram")
self.show_builtins = show_builtins
py_sig_re = re.compile(r'''^([\w.]*\.)? # optional module path
(\w+) \s* $ # class or module name
''', re.VERBOSE)
def _import_class_or_module(self, name):
"""
Import a class using its fully-qualified *name*.
"""
try:
path, base = self.py_sig_re.match(name).groups()
except:
raise ValueError(
"Invalid class or module '%s' specified for inheritance diagram" % name)
fullname = (path or '') + base
path = (path and path.rstrip('.'))
if not path:
path = base
try:
module = __import__(path, None, None, [])
# We must do an import of the fully qualified name. Otherwise if a
# subpackage 'a.b' is requested where 'import a' does NOT provide
# 'a.b' automatically, then 'a.b' will not be found below. This
# second call will force the equivalent of 'import a.b' to happen
# after the top-level import above.
my_import(fullname)
except ImportError:
raise ValueError(
"Could not import class or module '%s' specified for inheritance diagram" % name)
try:
todoc = module
for comp in fullname.split('.')[1:]:
todoc = getattr(todoc, comp)
except AttributeError:
raise ValueError(
"Could not find class or module '%s' specified for inheritance diagram" % name)
# If a class, just return it
if inspect.isclass(todoc):
return [todoc]
elif inspect.ismodule(todoc):
classes = []
for cls in todoc.__dict__.values():
if inspect.isclass(cls) and cls.__module__ == todoc.__name__:
classes.append(cls)
return classes
raise ValueError(
"'%s' does not resolve to a class or module" % name)
def _import_classes(self, class_names):
"""
Import a list of classes.
"""
classes = []
for name in class_names:
classes.extend(self._import_class_or_module(name))
return classes
def _all_classes(self, classes):
"""
Return a list of all classes that are ancestors of *classes*.
"""
all_classes = {}
def recurse(cls):
all_classes[cls] = None
for c in cls.__bases__:
if c not in all_classes:
recurse(c)
for cls in classes:
recurse(cls)
return all_classes.keys()
def class_name(self, cls, parts=0):
"""
Given a class object, return a fully-qualified name. This
works for things I've tested in matplotlib so far, but may not
be completely general.
"""
module = cls.__module__
if module == '__builtin__':
fullname = cls.__name__
else:
fullname = "%s.%s" % (module, cls.__name__)
if parts == 0:
return fullname
name_parts = fullname.split('.')
return '.'.join(name_parts[-parts:])
def get_all_class_names(self):
"""
Get all of the class names involved in the graph.
"""
return [self.class_name(x) for x in self.all_classes]
# These are the default options for graphviz
default_graph_options = {
"rankdir": "LR",
"size": '"8.0, 12.0"'
}
default_node_options = {
"shape": "box",
"fontsize": 10,
"height": 0.25,
"fontname": "Vera Sans, DejaVu Sans, Liberation Sans, Arial, Helvetica, sans",
"style": '"setlinewidth(0.5)"'
}
default_edge_options = {
"arrowsize": 0.5,
"style": '"setlinewidth(0.5)"'
}
def _format_node_options(self, options):
return ','.join(["%s=%s" % x for x in options.items()])
def _format_graph_options(self, options):
return ''.join(["%s=%s;\n" % x for x in options.items()])
def generate_dot(self, fd, name, parts=0, urls={},
graph_options={}, node_options={},
edge_options={}):
"""
Generate a graphviz dot graph from the classes that
were passed in to __init__.
*fd* is a Python file-like object to write to.
*name* is the name of the graph
*urls* is a dictionary mapping class names to http urls
*graph_options*, *node_options*, *edge_options* are
dictionaries containing key/value pairs to pass on as graphviz
properties.
"""
g_options = self.default_graph_options.copy()
g_options.update(graph_options)
n_options = self.default_node_options.copy()
n_options.update(node_options)
e_options = self.default_edge_options.copy()
e_options.update(edge_options)
fd.write('digraph %s {\n' % name)
fd.write(self._format_graph_options(g_options))
for cls in self.all_classes:
if not self.show_builtins and cls in __builtins__.values():
continue
name = self.class_name(cls, parts)
# Write the node
this_node_options = n_options.copy()
url = urls.get(self.class_name(cls))
if url is not None:
this_node_options['URL'] = '"%s"' % url
fd.write(' "%s" [%s];\n' %
(name, self._format_node_options(this_node_options)))
# Write the edges
for base in cls.__bases__:
if not self.show_builtins and base in __builtins__.values():
continue
base_name = self.class_name(base, parts)
fd.write(' "%s" -> "%s" [%s];\n' %
(base_name, name,
self._format_node_options(e_options)))
fd.write('}\n')
def run_dot(self, args, name, parts=0, urls={},
graph_options={}, node_options={}, edge_options={}):
"""
Run graphviz 'dot' over this graph, returning whatever 'dot'
writes to stdout.
*args* will be passed along as commandline arguments.
*name* is the name of the graph
*urls* is a dictionary mapping class names to http urls
Raises DotException for any of the many os and
installation-related errors that may occur.
"""
try:
dot = subprocess.Popen(['dot'] + list(args),
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
close_fds=True)
except OSError:
raise DotException("Could not execute 'dot'. Are you sure you have 'graphviz' installed?")
except ValueError:
raise DotException("'dot' called with invalid arguments")
except:
raise DotException("Unexpected error calling 'dot'")
self.generate_dot(dot.stdin, name, parts, urls, graph_options,
node_options, edge_options)
dot.stdin.close()
result = dot.stdout.read()
returncode = dot.wait()
if returncode != 0:
raise DotException("'dot' returned the errorcode %d" % returncode)
return result
class inheritance_diagram(Body, Element):
"""
A docutils node to use as a placeholder for the inheritance
diagram.
"""
pass
def inheritance_diagram_directive(name, arguments, options, content, lineno,
content_offset, block_text, state,
state_machine):
"""
Run when the inheritance_diagram directive is first encountered.
"""
node = inheritance_diagram()
class_names = arguments
# Create a graph starting with the list of classes
graph = InheritanceGraph(class_names)
# Create xref nodes for each target of the graph's image map and
# add them to the doc tree so that Sphinx can resolve the
# references to real URLs later. These nodes will eventually be
# removed from the doctree after we're done with them.
for name in graph.get_all_class_names():
refnodes, x = xfileref_role(
'class', ':class:`%s`' % name, name, 0, state)
node.extend(refnodes)
# Store the graph object so we can use it to generate the
# dot file later
node['graph'] = graph
# Store the original content for use as a hash
node['parts'] = options.get('parts', 0)
node['content'] = " ".join(class_names)
return [node]
def get_graph_hash(node):
return md5(node['content'] + str(node['parts'])).hexdigest()[-10:]
def html_output_graph(self, node):
"""
Output the graph for HTML. This will insert a PNG with clickable
image map.
"""
graph = node['graph']
parts = node['parts']
graph_hash = get_graph_hash(node)
name = "inheritance%s" % graph_hash
path = '_images'
dest_path = os.path.join(setup.app.builder.outdir, path)
if not os.path.exists(dest_path):
os.makedirs(dest_path)
png_path = os.path.join(dest_path, name + ".png")
path = setup.app.builder.imgpath
# Create a mapping from fully-qualified class names to URLs.
urls = {}
for child in node:
if child.get('refuri') is not None:
urls[child['reftitle']] = child.get('refuri')
elif child.get('refid') is not None:
urls[child['reftitle']] = '#' + child.get('refid')
# These arguments to dot will save a PNG file to disk and write
# an HTML image map to stdout.
image_map = graph.run_dot(['-Tpng', '-o%s' % png_path, '-Tcmapx'],
name, parts, urls)
return ('<img src="%s/%s.png" usemap="#%s" class="inheritance"/>%s' %
(path, name, name, image_map))
def latex_output_graph(self, node):
"""
Output the graph for LaTeX. This will insert a PDF.
"""
graph = node['graph']
parts = node['parts']
graph_hash = get_graph_hash(node)
name = "inheritance%s" % graph_hash
dest_path = os.path.abspath(os.path.join(setup.app.builder.outdir, '_images'))
if not os.path.exists(dest_path):
os.makedirs(dest_path)
pdf_path = os.path.abspath(os.path.join(dest_path, name + ".pdf"))
graph.run_dot(['-Tpdf', '-o%s' % pdf_path],
name, parts, graph_options={'size': '"6.0,6.0"'})
return '\n\\includegraphics{%s}\n\n' % pdf_path
def visit_inheritance_diagram(inner_func):
"""
This is just a wrapper around html/latex_output_graph to make it
easier to handle errors and insert warnings.
"""
def visitor(self, node):
try:
content = inner_func(self, node)
except DotException, e:
# Insert the exception as a warning in the document
warning = self.document.reporter.warning(str(e), line=node.line)
warning.parent = node
node.children = [warning]
else:
source = self.document.attributes['source']
self.body.append(content)
node.children = []
return visitor
def do_nothing(self, node):
pass
def setup(app):
setup.app = app
setup.confdir = app.confdir
app.add_node(
inheritance_diagram,
latex=(visit_inheritance_diagram(latex_output_graph), do_nothing),
html=(visit_inheritance_diagram(html_output_graph), do_nothing))
app.add_directive(
'inheritance-diagram', inheritance_diagram_directive,
False, (1, 100, 0), parts = directives.nonnegative_int)
| bsd-3-clause |
pierreg/tensorflow | tensorflow/contrib/learn/python/learn/estimators/linear_test.py | 3 | 58598 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for estimators.linear."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
def _prepare_iris_data_for_logistic_regression():
# Converts iris data to a logistic regression problem.
iris = tf.contrib.learn.datasets.load_iris()
ids = np.where((iris.target == 0) | (iris.target == 1))
iris = tf.contrib.learn.datasets.base.Dataset(data=iris.data[ids],
target=iris.target[ids])
return iris
def _iris_input_fn():
iris = tf.contrib.learn.datasets.load_iris()
return {
'feature': tf.constant(iris.data, dtype=tf.float32)
}, tf.constant(iris.target, shape=[150, 1], dtype=tf.int32)
class LinearClassifierTest(tf.test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, tf.contrib.learn.LinearClassifier)
def testTrain(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age': tf.constant([1]),
'language': tf.SparseTensor(values=['english'],
indices=[[0, 0]],
shape=[1, 1])
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
age = tf.contrib.layers.real_valued_column('age')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.01)
def testJointTrain(self):
"""Tests that loss goes down with training with joint weights."""
def input_fn():
return {
'age': tf.SparseTensor(values=['1'], indices=[[0, 0]], shape=[1, 1]),
'language': tf.SparseTensor(values=['english'],
indices=[[0, 0]],
shape=[1, 1])
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
age = tf.contrib.layers.sparse_column_with_hash_bucket('age', 2)
classifier = tf.contrib.learn.LinearClassifier(
_joint_weight=True,
feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.01)
def testMultiClass_MatrixData(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = tf.contrib.layers.real_valued_column('feature',
dimension=4)
classifier = tf.contrib.learn.LinearClassifier(
n_classes=3,
feature_columns=[feature_column])
classifier.fit(input_fn=_iris_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_iris_input_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClass_MatrixData_Target1D(self):
"""Same as the last test, but target shape is [150] instead of [150, 1]."""
def _input_fn():
iris = tf.contrib.learn.datasets.load_iris()
return {
'feature': tf.constant(iris.data, dtype=tf.float32)
}, tf.constant(iris.target, shape=[150], dtype=tf.int32)
feature_column = tf.contrib.layers.real_valued_column('feature',
dimension=4)
classifier = tf.contrib.learn.LinearClassifier(
n_classes=3,
feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClass_NpMatrixData(self):
"""Tests multi-class classification using numpy matrix data as input."""
iris = tf.contrib.learn.datasets.load_iris()
train_x = iris.data
train_y = iris.target
feature_column = tf.contrib.layers.real_valued_column('', dimension=4)
classifier = tf.contrib.learn.LinearClassifier(
n_classes=3,
feature_columns=[feature_column])
classifier.fit(x=train_x, y=train_y, steps=100)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
def _input_fn():
iris = _prepare_iris_data_for_logistic_regression()
return {
'feature': tf.constant(iris.data, dtype=tf.float32)
}, tf.constant(iris.target, shape=[100, 1], dtype=tf.int32)
feature_column = tf.contrib.layers.real_valued_column('feature',
dimension=4)
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_MatrixData_Target1D(self):
"""Same as the last test, but target shape is [100] instead of [100, 1]."""
def _input_fn():
iris = _prepare_iris_data_for_logistic_regression()
return {
'feature': tf.constant(iris.data, dtype=tf.float32)
}, tf.constant(iris.target, shape=[100], dtype=tf.int32)
feature_column = tf.contrib.layers.real_valued_column('feature',
dimension=4)
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = _prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [tf.contrib.layers.real_valued_column('', dimension=4)]
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=feature_columns)
classifier.fit(x=train_x, y=train_y, steps=100)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testWeightAndBiasNames(self):
"""Tests that weight and bias names haven't changed."""
feature_column = tf.contrib.layers.real_valued_column('feature',
dimension=4)
classifier = tf.contrib.learn.LinearClassifier(
n_classes=3,
feature_columns=[feature_column])
classifier.fit(input_fn=_iris_input_fn, steps=100)
self.assertEqual(4, len(classifier.weights_))
self.assertEqual(3, len(classifier.bias_))
def testCustomOptimizerByObject(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = tf.contrib.layers.real_valued_column('feature',
dimension=4)
classifier = tf.contrib.learn.LinearClassifier(
n_classes=3,
optimizer=tf.train.FtrlOptimizer(learning_rate=0.1),
feature_columns=[feature_column])
classifier.fit(input_fn=_iris_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_iris_input_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomOptimizerByString(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = tf.contrib.layers.real_valued_column('feature',
dimension=4)
def _optimizer():
return tf.train.FtrlOptimizer(learning_rate=0.1)
classifier = tf.contrib.learn.LinearClassifier(
n_classes=3,
optimizer=_optimizer,
feature_columns=[feature_column])
classifier.fit(input_fn=_iris_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_iris_input_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomOptimizerByFunction(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = tf.contrib.layers.real_valued_column('feature',
dimension=4)
classifier = tf.contrib.learn.LinearClassifier(
n_classes=3,
optimizer='Ftrl',
feature_columns=[feature_column])
classifier.fit(input_fn=_iris_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_iris_input_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
target = tf.constant([[1], [0], [0], [0]], dtype=tf.float32)
features = {'x': tf.train.limit_epochs(
tf.ones(shape=[4, 1], dtype=tf.float32), num_epochs=num_epochs)}
return features, target
def _my_metric_op(predictions, targets):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
predictions = tf.slice(predictions, [0, 1], [-1, 1])
return tf.reduce_sum(tf.mul(predictions, targets))
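    # Illustrative example (not part of the original test): for predictions
    # [[0.3, 0.7], [0.9, 0.1]], the tf.slice call above keeps only the second
    # column, [[0.7], [0.1]], i.e. the probability of the positive class,
    # which is then multiplied element-wise with the targets and summed.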
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[tf.contrib.layers.real_valued_column('x')])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'my_accuracy': MetricSpec(
metric_fn=tf.contrib.metrics.streaming_accuracy,
prediction_key='classes'),
'my_precision': MetricSpec(
metric_fn=tf.contrib.metrics.streaming_precision,
prediction_key='classes'),
'my_metric': MetricSpec(metric_fn=_my_metric_op,
prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric'
]).issubset(set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict(input_fn=predict_input_fn)))
self.assertEqual(_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Test the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaises(KeyError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={('bad_name', 'bad_type'): tf.contrib.metrics.streaming_auc})
# Test the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
('bad_length_name', 'classes', 'bad_length'):
tf.contrib.metrics.streaming_accuracy
})
def testLogisticFractionalLabels(self):
"""Tests logistic training with fractional labels."""
def input_fn(num_epochs=None):
return {
'age': tf.train.limit_epochs(
tf.constant([[1], [2]]), num_epochs=num_epochs),
}, tf.constant([[.7], [0]], dtype=tf.float32)
age = tf.contrib.layers.real_valued_column('age')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[age],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=input_fn, steps=500)
predict_input_fn = functools.partial(input_fn, num_epochs=1)
predictions_proba = list(
classifier.predict_proba(input_fn=predict_input_fn))
# Prediction probabilities mirror the target column, which proves that the
# classifier learns from float input.
self.assertAllClose(predictions_proba, [[.3, .7], [1., 0.]], atol=.1)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn():
features = {
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
target = tf.constant([[1], [0], [0]])
return features, target
sparse_features = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
tf.contrib.layers.sparse_column_with_hash_bucket('language',
hash_bucket_size=2e7)
]
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=sparse_features,
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config=tf.contrib.learn.RunConfig(
num_ps_replicas=2, cluster_spec=tf.train.ClusterSpec({})))
classifier.fit(input_fn=_input_fn, steps=200)
loss = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
self.assertLess(loss, 0.07)
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def input_fn(num_epochs=None):
return {
'age': tf.train.limit_epochs(tf.constant([1]), num_epochs=num_epochs),
'language': tf.SparseTensor(
values=['english'], indices=[[0, 0]], shape=[1, 1]),
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
age = tf.contrib.layers.real_valued_column('age')
model_dir = tempfile.mkdtemp()
classifier = tf.contrib.learn.LinearClassifier(
model_dir=model_dir,
feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=30)
predict_input_fn = functools.partial(input_fn, num_epochs=1)
out1_class = list(classifier.predict(input_fn=predict_input_fn,
as_iterable=True))
out1_proba = list(classifier.predict_proba(input_fn=predict_input_fn,
as_iterable=True))
del classifier
classifier2 = tf.contrib.learn.LinearClassifier(
model_dir=model_dir,
feature_columns=[age, language])
out2_class = list(classifier2.predict(input_fn=predict_input_fn,
as_iterable=True))
out2_proba = list(classifier2.predict_proba(input_fn=predict_input_fn,
as_iterable=True))
self.assertTrue(np.array_equal(out1_class, out2_class))
self.assertTrue(np.array_equal(out1_proba, out2_proba))
def testWeightColumn(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relatively higher weight of the first row.
target = tf.constant([[1], [0], [0], [0]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[100.], [3.], [2.], [2.]])
}
return features, target
def _input_fn_eval():
# Create 4 rows (y = x)
target = tf.constant([[1], [1], [1], [1]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[1.], [1.], [1.], [1.]])
}
return features, target
classifier = tf.contrib.learn.LinearClassifier(
weight_column_name='w',
feature_columns=[tf.contrib.layers.real_valued_column('x')],
config=tf.contrib.learn.RunConfig(tf_random_seed=3))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
# All examples in eval data set are y=x.
self.assertGreater(scores['labels/actual_target_mean'], 0.9)
# If there were no weight column, model would learn y=Not(x). Because of
# weights, it learns y=x.
self.assertGreater(scores['labels/prediction_mean'], 0.9)
# All examples in eval data set are y=x. So if weight column were ignored,
# then accuracy would be zero. Because of weights, accuracy should be close
# to 1.0.
self.assertGreater(scores['accuracy'], 0.9)
scores_train_set = classifier.evaluate(input_fn=_input_fn_train, steps=1)
# Considering weights, the mean target should be close to 1.0.
# If weights were ignored, it would be 0.25.
self.assertGreater(scores_train_set['labels/actual_target_mean'], 0.9)
# The classifier has learned y=x. If weight column were ignored in
# evaluation, then accuracy for the train set would be 0.25.
# Because weight is not ignored, accuracy is greater than 0.6.
self.assertGreater(scores_train_set['accuracy'], 0.6)
def testWeightColumnLoss(self):
"""Test ensures that you can specify per-example weights for loss."""
def _input_fn():
features = {
'age': tf.constant([[20], [20], [20]]),
'weights': tf.constant([[100], [1], [1]]),
}
target = tf.constant([[1], [0], [0]])
return features, target
age = tf.contrib.layers.real_valued_column('age')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[age])
classifier.fit(input_fn=_input_fn, steps=100)
loss_unweighted = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[age],
weight_column_name='weights')
classifier.fit(input_fn=_input_fn, steps=100)
loss_weighted = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
self.assertLess(loss_weighted, loss_unweighted)
def testExport(self):
"""Tests that export model for servo works."""
def input_fn():
return {
'age': tf.constant([1]),
'language': tf.SparseTensor(values=['english'],
indices=[[0, 0]],
shape=[1, 1])
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
age = tf.contrib.layers.real_valued_column('age')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
export_dir = tempfile.mkdtemp()
classifier.export(export_dir)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def input_fn():
return {
'age': tf.constant([1]),
'language': tf.SparseTensor(values=['english'],
indices=[[0, 0]],
shape=[1, 1])
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
age = tf.contrib.layers.real_valued_column('age')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[age, language], enable_centered_bias=False)
classifier.fit(input_fn=input_fn, steps=100)
self.assertFalse('centered_bias_weight' in classifier.get_variable_names())
def testEnableCenteredBias(self):
"""Tests that we can disable centered bias."""
def input_fn():
return {
'age': tf.constant([1]),
'language': tf.SparseTensor(values=['english'],
indices=[[0, 0]],
shape=[1, 1])
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
age = tf.contrib.layers.real_valued_column('age')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[age, language], enable_centered_bias=True)
classifier.fit(input_fn=input_fn, steps=100)
self.assertTrue('centered_bias_weight' in classifier.get_variable_names())
def testTrainOptimizerWithL1Reg(self):
"""Tests l1 regularized model has higher loss."""
def input_fn():
return {
'language': tf.SparseTensor(values=['hindi'],
indices=[[0, 0]],
shape=[1, 1])
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
classifier_no_reg = tf.contrib.learn.LinearClassifier(
feature_columns=[language])
classifier_with_reg = tf.contrib.learn.LinearClassifier(
feature_columns=[language],
optimizer=tf.train.FtrlOptimizer(learning_rate=1.0,
l1_regularization_strength=100.))
loss_no_reg = classifier_no_reg.fit(
input_fn=input_fn, steps=100).evaluate(
input_fn=input_fn, steps=1)['loss']
loss_with_reg = classifier_with_reg.fit(
input_fn=input_fn, steps=100).evaluate(
input_fn=input_fn, steps=1)['loss']
self.assertLess(loss_no_reg, loss_with_reg)
def testTrainWithMissingFeature(self):
"""Tests that training works with missing features."""
def input_fn():
return {
'language': tf.SparseTensor(values=['Swahili', 'turkish'],
indices=[[0, 0], [2, 0]],
shape=[3, 1])
}, tf.constant([[1], [1], [1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
classifier = tf.contrib.learn.LinearClassifier(feature_columns=[language])
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.07)
def testSdcaOptimizerRealValuedFeatures(self):
"""Tests LinearClasssifier with SDCAOptimizer and real valued features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2']),
'maintenance_cost': tf.constant([[500.0], [200.0]]),
'sq_footage': tf.constant([[800.0], [600.0]]),
'weights': tf.constant([[1.0], [1.0]])
}, tf.constant([[0], [1]])
maintenance_cost = tf.contrib.layers.real_valued_column('maintenance_cost')
sq_footage = tf.contrib.layers.real_valued_column('sq_footage')
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[maintenance_cost, sq_footage],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerRealValuedFeatureWithHigherDimension(self):
"""Tests SDCAOptimizer with real valued features of higher dimension."""
# input_fn is identical to the one in testSdcaOptimizerRealValuedFeatures
# where 2 1-dimensional dense features have been replaced by 1 2-dimensional
# feature.
def input_fn():
return {
'example_id': tf.constant(['1', '2']),
'dense_feature': tf.constant([[500.0, 800.0], [200.0, 600.0]])
}, tf.constant([[0], [1]])
dense_feature = tf.contrib.layers.real_valued_column(
'dense_feature', dimension=2)
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[dense_feature], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerBucketizedFeatures(self):
"""Tests LinearClasssifier with SDCAOptimizer and bucketized features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'price': tf.constant([[600.0], [1000.0], [400.0]]),
'sq_footage': tf.constant([[1000.0], [600.0], [700.0]]),
'weights': tf.constant([[1.0], [1.0], [1.0]])
}, tf.constant([[1], [0], [1]])
price_bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column('price'),
boundaries=[500.0, 700.0])
sq_footage_bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column('sq_footage'),
boundaries=[650.0])
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id',
symmetric_l2_regularization=1.0)
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[price_bucket, sq_footage_bucket],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerSparseFeatures(self):
"""Tests LinearClasssifier with SDCAOptimizer and sparse features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'price': tf.constant([[0.4], [0.6], [0.3]]),
'country': tf.SparseTensor(values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
shape=[3, 5]),
'weights': tf.constant([[1.0], [1.0], [1.0]])
}, tf.constant([[1], [0], [1]])
price = tf.contrib.layers.real_valued_column('price')
country = tf.contrib.layers.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerWeightedSparseFeatures(self):
"""LinearClasssifier with SDCAOptimizer and weighted sparse features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'price': tf.SparseTensor(values=[2., 3., 1.],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 5]),
'country': tf.SparseTensor(values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 5])
}, tf.constant([[1], [0], [1]])
country = tf.contrib.layers.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
country_weighted_by_price = tf.contrib.layers.weighted_sparse_column(
country, 'price')
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[country_weighted_by_price],
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerCrossedFeatures(self):
"""Tests LinearClasssifier with SDCAOptimizer and crossed features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'language': tf.SparseTensor(values=['english', 'italian', 'spanish'],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 1]),
'country': tf.SparseTensor(values=['US', 'IT', 'MX'],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 1])
}, tf.constant([[0], [0], [1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=5)
country = tf.contrib.layers.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
country_language = tf.contrib.layers.crossed_column(
[language, country], hash_bucket_size=10)
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[country_language],
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=10)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerMixedFeatures(self):
"""Tests LinearClasssifier with SDCAOptimizer and a mix of features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'price': tf.constant([[0.6], [0.8], [0.3]]),
'sq_footage': tf.constant([[900.0], [700.0], [600.0]]),
'country': tf.SparseTensor(values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
shape=[3, 5]),
'weights': tf.constant([[3.0], [1.0], [1.0]])
}, tf.constant([[1], [0], [1]])
price = tf.contrib.layers.real_valued_column('price')
sq_footage_bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = tf.contrib.layers.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = tf.contrib.layers.crossed_column(
[sq_footage_bucket, country],
hash_bucket_size=10)
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testEval(self):
"""Tests that eval produces correct metrics.
"""
def input_fn():
return {
'age': tf.constant([[1], [2]]),
'language': tf.SparseTensor(values=['greek', 'chinese'],
indices=[[0, 0], [1, 0]],
shape=[2, 1]),
}, tf.constant([[1], [0]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
age = tf.contrib.layers.real_valued_column('age')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[age, language])
# Evaluate on trained model
classifier.fit(input_fn=input_fn, steps=100)
classifier.evaluate(input_fn=input_fn, steps=1)
# TODO(ispir): Enable accuracy check after resolving the randomness issue.
# self.assertLess(evaluated_values['loss/mean'], 0.3)
# self.assertGreater(evaluated_values['accuracy/mean'], .95)
class LinearRegressorTest(tf.test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, tf.contrib.learn.LinearRegressor)
def testRegression(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age': tf.constant([1]),
'language': tf.SparseTensor(values=['english'],
indices=[[0, 0]],
shape=[1, 1])
}, tf.constant([[10.]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
age = tf.contrib.layers.real_valued_column('age')
classifier = tf.contrib.learn.LinearRegressor(
feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.5)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=cont_features,
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_iris_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_iris_input_fn, steps=1)
self.assertLess(scores['loss'], 0.2)
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(tf.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([1.0, 0., 0.2], dtype=tf.float32)
feature_columns = [
tf.contrib.layers.sparse_column_with_hash_bucket('language',
hash_bucket_size=20),
tf.contrib.layers.real_valued_column('age')
]
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=feature_columns,
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.2)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
target = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
}
return features, target
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=[tf.contrib.layers.real_valued_column('x')],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
# Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875
self.assertAlmostEqual(scores['loss'], 0.1875, delta=0.1)
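    # Worked check of the expected value (illustrative): the model converges to
    # a constant prediction of 0.25, so the squared errors are
    # (1 - 0.25)^2 = 0.5625 once and (0 - 0.25)^2 = 0.0625 three times, giving
    # (0.5625 + 3 * 0.0625) / 4 = 0.1875.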
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
target = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[1.], [1.], [1.], [1.]])
}
return features, target
def _input_fn_eval():
# 4 rows, with different weights.
target = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[7.], [1.], [1.], [1.]])
}
return features, target
regressor = tf.contrib.learn.LinearRegressor(
weight_column_name='w',
feature_columns=[tf.contrib.layers.real_valued_column('x')],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125
self.assertAlmostEqual(scores['loss'], 0.4125, delta=0.1)
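    # Worked check of the expected value (illustrative): with eval weights
    # [7, 1, 1, 1] the weighted squared errors are 7 * 0.75^2 = 3.9375 and
    # 3 * 0.25^2 = 0.1875, and dividing by the total weight of 10 gives
    # (3.9375 + 0.1875) / 10 = 0.4125.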
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relatively higher weight of the first row.
target = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[100.], [3.], [2.], [2.]])
}
return features, target
def _input_fn_eval():
# Create 4 rows (y = x)
target = tf.constant([[1.], [1.], [1.], [1.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[1.], [1.], [1.], [1.]])
}
return features, target
regressor = tf.contrib.learn.LinearRegressor(
weight_column_name='w',
feature_columns=[tf.contrib.layers.real_valued_column('x')],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# The model should learn (y = x) because of the weights, so the loss should
# be close to zero.
self.assertLess(scores['loss'], 0.1)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
target = [1.0, 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(tf.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant(target, dtype=tf.float32)
feature_columns = [
tf.contrib.layers.sparse_column_with_hash_bucket('language',
hash_bucket_size=20),
tf.contrib.layers.real_valued_column('age')
]
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=feature_columns,
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
predictions = regressor.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllClose(predictions, target, atol=0.1)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
target = [1.0, 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(tf.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant(target, dtype=tf.float32)
feature_columns = [
tf.contrib.layers.sparse_column_with_hash_bucket('language',
hash_bucket_size=20),
tf.contrib.layers.real_valued_column('age')
]
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=feature_columns,
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(
regressor.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(predictions, target, atol=0.1)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
target = tf.constant([[1.], [0.], [0.], [0.]])
features = {'x': tf.train.limit_epochs(
tf.ones(shape=[4, 1], dtype=tf.float32), num_epochs=num_epochs)}
return features, target
def _my_metric_op(predictions, targets):
return tf.reduce_sum(tf.mul(predictions, targets))
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=[tf.contrib.layers.real_valued_column('x')],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error': tf.contrib.metrics.streaming_mean_squared_error,
'my_metric': _my_metric_op
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict(input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests that when the key is a tuple, an error is raised.
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={('my_error', 'predictions'
): tf.contrib.metrics.streaming_mean_squared_error})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(tf.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([1.0, 0., 0.2], dtype=tf.float32)
feature_columns = [
tf.contrib.layers.sparse_column_with_hash_bucket('language',
hash_bucket_size=20),
tf.contrib.layers.real_valued_column('age')
]
model_dir = tempfile.mkdtemp()
regressor = tf.contrib.learn.LinearRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(regressor.predict(input_fn=predict_input_fn))
del regressor
regressor2 = tf.contrib.learn.LinearRegressor(
model_dir=model_dir,
feature_columns=feature_columns)
predictions2 = list(regressor2.predict(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(tf.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([1.0, 0., 0.2], dtype=tf.float32)
feature_columns = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
tf.contrib.layers.sparse_column_with_hash_bucket('language',
hash_bucket_size=2e7),
tf.contrib.layers.real_valued_column('age')
]
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=feature_columns,
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config=tf.contrib.learn.RunConfig(
num_ps_replicas=2, cluster_spec=tf.train.ClusterSpec({}),
tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(tf.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([1.0, 0., 0.2], dtype=tf.float32)
feature_columns = [
tf.contrib.layers.sparse_column_with_hash_bucket('language',
hash_bucket_size=20),
tf.contrib.layers.real_valued_column('age')
]
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=feature_columns,
enable_centered_bias=False,
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
def testRecoverWeights(self):
rng = np.random.RandomState(67)
n = 1000
n_weights = 10
bias = 2
x = rng.uniform(-1, 1, (n, n_weights))
weights = 10 * rng.randn(n_weights)
y = np.dot(x, weights)
y += rng.randn(len(x)) * 0.05 + rng.normal(bias, 0.01)
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(x)
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=feature_columns,
optimizer=tf.train.FtrlOptimizer(learning_rate=0.8))
regressor.fit(x, y, batch_size=64, steps=2000)
# Have to flatten weights since they come in (x, 1) shape.
self.assertAllClose(weights, regressor.weights_.flatten(), rtol=1)
# TODO(ispir): Disable centered_bias.
# assert abs(bias - regressor.bias_) < 0.1
def testSdcaOptimizerRealValuedLinearFeatures(self):
"""Tests LinearRegressor with SDCAOptimizer and real valued features."""
x = [[1.2, 2.0, -1.5], [-2.0, 3.0, -0.5], [1.0, -0.5, 4.0]]
weights = [[3.0], [-1.2], [0.5]]
y = np.dot(x, weights)
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'x': tf.constant(x),
'weights': tf.constant([[10.0], [10.0], [10.0]])
}, tf.constant(y)
x_column = tf.contrib.layers.real_valued_column('x', dimension=3)
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=[x_column],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.01)
self.assertAllClose([w[0] for w in weights],
regressor.weights_.flatten(), rtol=0.1)
def testSdcaOptimizerMixedFeaturesArbitraryWeights(self):
"""Tests LinearRegressor with SDCAOptimizer and a mix of features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'price': tf.constant([[0.6], [0.8], [0.3]]),
'sq_footage': tf.constant([[900.0], [700.0], [600.0]]),
'country': tf.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
shape=[3, 5]),
'weights': tf.constant([[3.0], [5.0], [7.0]])
}, tf.constant([[1.55], [-1.25], [-3.0]])
price = tf.contrib.layers.real_valued_column('price')
sq_footage_bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = tf.contrib.layers.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = tf.contrib.layers.crossed_column(
[sq_footage_bucket, country], hash_bucket_size=10)
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id', symmetric_l2_regularization=1.0)
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerSparseFeaturesWithL1Reg(self):
"""Tests LinearClasssifier with SDCAOptimizer and sparse features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'price': tf.constant([[0.4], [0.6], [0.3]]),
'country': tf.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
shape=[3, 5]),
'weights': tf.constant([[10.0], [10.0], [10.0]])
}, tf.constant([[1.4], [-0.8], [2.6]])
price = tf.contrib.layers.real_valued_column('price')
country = tf.contrib.layers.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
# Regressor with no L1 regularization.
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
no_l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
no_l1_reg_weights = regressor.weights_
# Regressor with L1 regularization.
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id', symmetric_l1_regularization=1.0)
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
l1_reg_weights = regressor.weights_
# Unregularized loss is lower when there is no L1 regularization.
self.assertLess(no_l1_reg_loss, l1_reg_loss)
self.assertLess(no_l1_reg_loss, 0.05)
# But weights returned by the regressor with L1 regularization have smaller
# L1 norm.
l1_reg_weights_norm, no_l1_reg_weights_norm = 0.0, 0.0
for var_name in sorted(l1_reg_weights):
l1_reg_weights_norm += sum(
np.absolute(l1_reg_weights[var_name].flatten()))
no_l1_reg_weights_norm += sum(
np.absolute(no_l1_reg_weights[var_name].flatten()))
print('Var name: %s, value: %s' %
(var_name, no_l1_reg_weights[var_name].flatten()))
self.assertLess(l1_reg_weights_norm, no_l1_reg_weights_norm)
def testSdcaOptimizerBiasOnly(self):
"""Tests LinearClasssifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when it's the only feature present.
      All of the instances in this input only have the bias feature, and
      1/4 of the labels are positive. This means that the expected weight for
      the bias should be close to the average prediction, i.e. 0.25.
Returns:
Training data for the test.
"""
num_examples = 40
return {
'example_id': tf.constant([str(x+1) for x in range(num_examples)]),
# place_holder is an empty column which is always 0 (absent), because
# LinearClassifier requires at least one column.
'place_holder': tf.constant([[0.0]]*num_examples),
      }, tf.constant([[1 if i % 4 == 0 else 0] for i in range(num_examples)])
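    # Worked check (illustrative): with num_examples = 40 the label is 1 for
    # i = 0, 4, 8, ..., 36, i.e. 10 of the 40 examples, so the mean label is
    # 10 / 40 = 0.25, which is the value the bias weight should approach.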
place_holder = tf.contrib.layers.real_valued_column('place_holder')
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=[place_holder],
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=100)
self.assertNear(regressor.get_variable_value('linear/bias_weight')[0],
0.25, err=0.1)
def testSdcaOptimizerBiasAndOtherColumns(self):
"""Tests LinearClasssifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when there are other features present.
1/2 of the instances in this input have feature 'a', the rest have
feature 'b', and we expect the bias to be added to each instance as well.
0.4 of all instances that have feature 'a' are positive, and 0.2 of all
instances that have feature 'b' are positive. The labels in the dataset
are ordered to appear shuffled since SDCA expects shuffled data, and
converges faster with this pseudo-random ordering.
If the bias was centered we would expect the weights to be:
bias: 0.3
a: 0.1
b: -0.1
Until b/29339026 is resolved, the bias gets regularized with the same
global value for the other columns, and so the expected weights get
shifted and are:
bias: 0.2
a: 0.2
b: 0.0
Returns:
The test dataset.
"""
num_examples = 200
half = int(num_examples/2)
return {
'example_id': tf.constant([str(x+1) for x in range(num_examples)]),
'a': tf.constant([[1]]*int(half) + [[0]]*int(half)),
'b': tf.constant([[0]]*int(half) + [[1]]*int(half)),
}, tf.constant([[x] for x in
[1, 0, 0, 1, 1, 0, 0, 0, 1, 0] * int(half/10) +
[0, 1, 0, 0, 0, 0, 0, 0, 1, 0] * int(half/10)])
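    # Worked check (illustrative): the first half (feature 'a') repeats the
    # pattern [1, 0, 0, 1, 1, 0, 0, 0, 1, 0], i.e. 4 positives per 10 examples
    # (0.4), while the second half (feature 'b') repeats
    # [0, 1, 0, 0, 0, 0, 0, 0, 1, 0], i.e. 2 per 10 (0.2), matching the rates
    # described in the docstring above.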
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=[tf.contrib.layers.real_valued_column('a'),
tf.contrib.layers.real_valued_column('b')],
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=200)
# TODO(b/29339026): Change the expected results to expect a centered bias.
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.2, err=0.05)
self.assertNear(regressor.weights_['linear/a/weight'][0], 0.2, err=0.05)
self.assertNear(regressor.weights_['linear/b/weight'][0], 0.0, err=0.05)
def testSdcaOptimizerBiasAndOtherColumnsFabricatedCentered(self):
"""Tests LinearClasssifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when there are other features present.
1/2 of the instances in this input have feature 'a', the rest have
feature 'b', and we expect the bias to be added to each instance as well.
0.1 of all instances that have feature 'a' have a label of 1, and 0.1 of
all instances that have feature 'b' have a label of -1.
We can expect the weights to be:
bias: 0.0
a: 0.1
b: -0.1
Returns:
The test dataset.
"""
num_examples = 200
half = int(num_examples/2)
return {
'example_id': tf.constant([str(x+1) for x in range(num_examples)]),
'a': tf.constant([[1]]*int(half) + [[0]]*int(half)),
'b': tf.constant([[0]]*int(half) + [[1]]*int(half)),
}, tf.constant([[1 if x%10 == 0 else 0] for x in range(half)] +
[[-1 if x%10 == 0 else 0] for x in range(half)])
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=[tf.contrib.layers.real_valued_column('a'),
tf.contrib.layers.real_valued_column('b')],
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=100)
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.0, err=0.05)
self.assertNear(regressor.weights_['linear/a/weight'][0], 0.1, err=0.05)
self.assertNear(regressor.weights_['linear/b/weight'][0], -0.1, err=0.05)
def boston_input_fn():
boston = tf.contrib.learn.datasets.load_boston()
features = tf.cast(tf.reshape(tf.constant(boston.data), [-1, 13]), tf.float32)
target = tf.cast(tf.reshape(tf.constant(boston.target), [-1, 1]), tf.float32)
return features, target
class FeatureColumnTest(tf.test.TestCase):
def testTrain(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input_fn(
boston_input_fn)
est = tf.contrib.learn.LinearRegressor(feature_columns=feature_columns)
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_input_fn, steps=1)
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
tinkoff-dwh/zeppelin | python/src/main/resources/python/zeppelin_python.py | 9 | 9896 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os, sys, getopt, traceback, json, re
from py4j.java_gateway import java_import, JavaGateway, GatewayClient
from py4j.protocol import Py4JJavaError, Py4JNetworkError
import warnings
import ast
import signal
import base64
from io import BytesIO
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
# for back compatibility
class Logger(object):
def __init__(self):
pass
def write(self, message):
intp.appendOutput(message)
def reset(self):
pass
def flush(self):
pass
class PyZeppelinContext(object):
""" A context impl that uses Py4j to communicate to JVM
"""
def __init__(self, z):
self.z = z
self.paramOption = gateway.jvm.org.apache.zeppelin.display.ui.OptionInput.ParamOption
self.javaList = gateway.jvm.java.util.ArrayList
self.max_result = 1000
self._displayhook = lambda *args: None
self._setup_matplotlib()
def getInterpreterContext(self):
return self.z.getCurrentInterpreterContext()
def input(self, name, defaultValue=""):
return self.z.input(name, defaultValue)
def textbox(self, name, defaultValue=""):
return self.z.textbox(name, defaultValue)
def noteTextbox(self, name, defaultValue=""):
return self.z.noteTextbox(name, defaultValue)
def select(self, name, options, defaultValue=""):
return self.z.select(name, defaultValue, self.getParamOptions(options))
def noteSelect(self, name, options, defaultValue=""):
return self.z.noteSelect(name, defaultValue, self.getParamOptions(options))
def checkbox(self, name, options, defaultChecked=[]):
return self.z.checkbox(name, self.getDefaultChecked(defaultChecked), self.getParamOptions(options))
def noteCheckbox(self, name, options, defaultChecked=[]):
return self.z.noteCheckbox(name, self.getDefaultChecked(defaultChecked), self.getParamOptions(options))
def getParamOptions(self, options):
javaOptions = gateway.new_array(self.paramOption, len(options))
i = 0
for tuple in options:
javaOptions[i] = self.paramOption(tuple[0], tuple[1])
i += 1
return javaOptions
def getDefaultChecked(self, defaultChecked):
javaDefaultChecked = self.javaList()
for check in defaultChecked:
javaDefaultChecked.append(check)
return javaDefaultChecked
def show(self, p, **kwargs):
if hasattr(p, '__name__') and p.__name__ == "matplotlib.pyplot":
self.show_matplotlib(p, **kwargs)
elif type(p).__name__ == "DataFrame": # does not play well with sub-classes
# `isinstance(p, DataFrame)` would req `import pandas.core.frame.DataFrame`
# and so a dependency on pandas
self.show_dataframe(p, **kwargs)
elif hasattr(p, '__call__'):
p() #error reporting
def show_dataframe(self, df, show_index=False, **kwargs):
"""Pretty prints DF using Table Display System
"""
limit = len(df) > self.max_result
header_buf = StringIO("")
if show_index:
idx_name = str(df.index.name) if df.index.name is not None else ""
header_buf.write(idx_name + "\t")
header_buf.write(str(df.columns[0]))
for col in df.columns[1:]:
header_buf.write("\t")
header_buf.write(str(col))
header_buf.write("\n")
body_buf = StringIO("")
rows = df.head(self.max_result).values if limit else df.values
index = df.index.values
for idx, row in zip(index, rows):
if show_index:
body_buf.write("%html <strong>{}</strong>".format(idx))
body_buf.write("\t")
body_buf.write(str(row[0]))
for cell in row[1:]:
body_buf.write("\t")
body_buf.write(str(cell))
body_buf.write("\n")
body_buf.seek(0); header_buf.seek(0)
#TODO(bzz): fix it, so it shows red notice, as in Spark
print("%table " + header_buf.read() + body_buf.read()) # +
# ("\n<font color=red>Results are limited by {}.</font>" \
# .format(self.max_result) if limit else "")
#)
body_buf.close(); header_buf.close()
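    # Illustrative sketch of how show_dataframe is reached and what it emits; the
    # DataFrame below is hypothetical and assumes pandas is installed:
    #
    #   import pandas as pd
    #   df = pd.DataFrame({"col1": [1, 2], "col2": ["a", "b"]})
    #   z.show(df)  # dispatches to show_dataframe
    #   # prints: "%table col1\tcol2\n1\ta\n2\tb\n" for the table display system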
def show_matplotlib(self, p, fmt="png", width="auto", height="auto",
**kwargs):
"""Matplotlib show function
"""
if fmt == "png":
img = BytesIO()
p.savefig(img, format=fmt)
img_str = b"data:image/png;base64,"
img_str += base64.b64encode(img.getvalue().strip())
            img_tag = "<img src={img} style='width:{width};height:{height}'>"
            # Decoding is necessary for Python 3 compatibility
img_str = img_str.decode("ascii")
img_str = img_tag.format(img=img_str, width=width, height=height)
elif fmt == "svg":
img = StringIO()
p.savefig(img, format=fmt)
img_str = img.getvalue()
else:
raise ValueError("fmt must be 'png' or 'svg'")
        html = "%html <div style='width:{width};height:{height}'>{img}</div>"
print(html.format(width=width, height=height, img=img_str))
img.close()
def configure_mpl(self, **kwargs):
import mpl_config
mpl_config.configure(**kwargs)
def _setup_matplotlib(self):
# If we don't have matplotlib installed don't bother continuing
try:
import matplotlib
except ImportError:
return
# Make sure custom backends are available in the PYTHONPATH
rootdir = os.environ.get('ZEPPELIN_HOME', os.getcwd())
mpl_path = os.path.join(rootdir, 'interpreter', 'lib', 'python')
if mpl_path not in sys.path:
sys.path.append(mpl_path)
# Finally check if backend exists, and if so configure as appropriate
try:
matplotlib.use('module://backend_zinline')
import backend_zinline
# Everything looks good so make config assuming that we are using
# an inline backend
self._displayhook = backend_zinline.displayhook
self.configure_mpl(width=600, height=400, dpi=72,
fontsize=10, interactive=True, format='png')
except ImportError:
# Fall back to Agg if no custom backend installed
matplotlib.use('Agg')
warnings.warn("Unable to load inline matplotlib backend, "
"falling back to Agg")
def handler_stop_signals(sig, frame):
sys.exit("Got signal : " + str(sig))
signal.signal(signal.SIGINT, handler_stop_signals)
host = "127.0.0.1"
if len(sys.argv) >= 3:
host = sys.argv[2]
_zcUserQueryNameSpace = {}
client = GatewayClient(address=host, port=int(sys.argv[1]))
#gateway = JavaGateway(client, auto_convert = True)
gateway = JavaGateway(client)
intp = gateway.entry_point
intp.onPythonScriptInitialized(os.getpid())
java_import(gateway.jvm, "org.apache.zeppelin.display.Input")
z = __zeppelin__ = PyZeppelinContext(intp.getZeppelinContext())
__zeppelin__._setup_matplotlib()
_zcUserQueryNameSpace["__zeppelin__"] = __zeppelin__
_zcUserQueryNameSpace["z"] = z
output = Logger()
sys.stdout = output
#sys.stderr = output
while True :
req = intp.getStatements()
    if req is None:
break
try:
stmts = req.statements().split("\n")
final_code = []
# Get post-execute hooks
try:
global_hook = intp.getHook('post_exec_dev')
except:
global_hook = None
try:
user_hook = __zeppelin__.getHook('post_exec')
except:
user_hook = None
nhooks = 0
for hook in (global_hook, user_hook):
if hook:
nhooks += 1
for s in stmts:
            if s is None:
continue
# skip comment
s_stripped = s.strip()
if len(s_stripped) == 0 or s_stripped.startswith("#"):
continue
final_code.append(s)
if final_code:
# use exec mode to compile the statements except the last statement,
# so that the last statement's evaluation will be printed to stdout
code = compile('\n'.join(final_code), '<stdin>', 'exec', ast.PyCF_ONLY_AST, 1)
to_run_hooks = []
if (nhooks > 0):
to_run_hooks = code.body[-nhooks:]
to_run_exec, to_run_single = (code.body[:-(nhooks + 1)],
[code.body[-(nhooks + 1)]])
try:
for node in to_run_exec:
mod = ast.Module([node])
code = compile(mod, '<stdin>', 'exec')
exec(code, _zcUserQueryNameSpace)
for node in to_run_single:
mod = ast.Interactive([node])
code = compile(mod, '<stdin>', 'single')
exec(code, _zcUserQueryNameSpace)
for node in to_run_hooks:
mod = ast.Module([node])
code = compile(mod, '<stdin>', 'exec')
exec(code, _zcUserQueryNameSpace)
except:
raise Exception(traceback.format_exc())
intp.setStatementsFinished("", False)
except Py4JJavaError:
excInnerError = traceback.format_exc() # format_tb() does not return the inner exception
innerErrorStart = excInnerError.find("Py4JJavaError:")
if innerErrorStart > -1:
excInnerError = excInnerError[innerErrorStart:]
intp.setStatementsFinished(excInnerError + str(sys.exc_info()), True)
except Py4JNetworkError:
# lost connection from gateway server. exit
sys.exit(1)
except:
intp.setStatementsFinished(traceback.format_exc(), True)
output.reset()
| apache-2.0 |
srndic/mimicus | reproduction/fig11.py | 1 | 5327 | #!/usr/bin/env python
'''
Copyright 2014 Nedim Srndic, University of Tuebingen
This file is part of Mimicus.
Mimicus is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Mimicus is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Mimicus. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
fig11.py
Reproduction of Figure 11 of the paper "Practical Evasion of a
Learning-Based Classifier: A Case Study" by Nedim Srndic and
Pavel Laskov.
Created on March 21, 2014.
'''
from argparse import ArgumentParser
import multiprocessing
import os
import random
import sys
from matplotlib import pyplot
from mimicus.tools.featureedit import FeatureEdit
from mimicus.tools.datasets import csv2numpy
import common
import config
def mimicry(wolf_fname, sheep_feats, m_id):
'''
Mimics file with the features sheep_feats using the attack file
with the name wolf_fname. Returns the resulting feature vector.
'''
mimic = FeatureEdit(wolf_fname).modify_file(sheep_feats, '/run/shm')
os.remove(mimic['path'])
return mimic['feats'], m_id
def mimicry_wrap(args):
'''
Helper function for calling the mimicry function in parallel.
'''
return mimicry(*args)
def fig11(tr_data, tr_labels, te_data, te_labels, tr_files):
'''
Tests the vaccination defense against the Benign Random Noise (BRN)
attack seeded by results of our mimicry attack against itself and
original, unmodified data. Performs 5 trials.
'''
mal_tr_ind = [i for i, l in enumerate(tr_labels) if l == 1]
ben_tr_ind = [i for i, l in enumerate(tr_labels) if l == 0]
mim_data, mim_labels = common.get_FTC_mimicry()
TRIALS = 5
print '\n{:>6}{:>15}{:>15}'.format('%', 'ORIGINAL', 'OUR MIMICRY')
pool = multiprocessing.Pool(processes=None)
scores = []
for subset in (0, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1):
acc = [0.0, 0.0]
sys.stdout.write('{:>6.2f}'.format(subset * 100))
for _ in range(TRIALS):
tr_mod = tr_data.copy()
# Subsample malicious training files for attack
wolf_ind = random.sample(mal_tr_ind,
int(round(subset * len(mal_tr_ind))))
# Mimic random benign files using the sampled files
            pargs = [(tr_files[w_id], tr_data[random.choice(ben_tr_ind)], w_id)
                     for w_id in wolf_ind]
for mimic, w_id in pool.imap(mimicry_wrap, pargs):
tr_mod[w_id] = mimic
# Evaluate the classifier on both clean test data and mimicry data
res = common.evaluate_classifier(tr_mod,
tr_labels,
[te_data, mim_data],
[te_labels, mim_labels])
acc = [old + new for old, new in zip(acc, res)]
acc = [acc[0] / TRIALS, acc[1] / TRIALS]
print '{:>15.3f}{:>15.3f}'.format(acc[0], acc[1])
scores.append(tuple(acc))
return scores
def main():
random.seed(0)
parser = ArgumentParser()
parser.add_argument('--plot', help='Where to save plot (file name)',
default=False)
parser.add_argument('--show', help='Show plot in a window', default=False)
args = parser.parse_args()
print 'Loading training data from CSV...'
tr_data, tr_labels, tr_fnames = csv2numpy(config.get('datasets',
'contagio'))
print 'Loading test data from CSV...'
te_data, te_labels, _ = csv2numpy(config.get('datasets', 'contagio_test'))
print 'Evaluating...'
scores = fig11(tr_data, tr_labels, te_data, te_labels, tr_fnames)
if not (args.plot or args.show):
return 0
# Plot
original, our_mimicry = zip(*scores)
fig = pyplot.figure()
pyplot.plot(original,
label='Clean data',
marker='o', color='k', linewidth=2)
pyplot.plot(our_mimicry,
label='Our mimicry',
marker='+', color='k', linewidth=2, linestyle=':')
axes = fig.gca()
# Set up axes and labels
axes.yaxis.set_ticks([r / 10.0 for r in range(11)])
axes.yaxis.grid()
axes.set_ylim(0, 1)
axes.set_ylabel('Accuracy')
xticklabels = ['0', '0.05', '0.1', '0.5', '1', '5', '10', '50', '100']
axes.set_xticklabels(xticklabels, rotation=0)
axes.set_xlabel('Training set perturbation (%)')
fig.subplots_adjust(bottom=0.13, top=0.95, left=0.11, right=0.96)
pyplot.legend(loc='lower right')
if args.show:
pyplot.show()
if args.plot:
pyplot.savefig(args.plot, dpi=300, bbox_inches='tight')
return 0
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 |
RachitKansal/scikit-learn | examples/neighbors/plot_nearest_centroid.py | 264 | 1804 | """
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for shrinkage in [None, 0.1]:
# we create an instance of Neighbours Classifier and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
clf.fit(X, y)
y_pred = clf.predict(X)
print(shrinkage, np.mean(y == y_pred))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
plt.axis('tight')
plt.show()
| bsd-3-clause |
biocore/qiita | qiita_pet/test/rest/test_study_preparation.py | 3 | 7792 | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from unittest import main
from io import StringIO
import os
import pandas as pd
from tornado.escape import json_decode
from qiita_db.metadata_template.util import load_template_to_dataframe
from qiita_db.metadata_template.prep_template import PrepTemplate
from qiita_pet.test.rest.test_base import RESTHandlerTestCase
from qiita_db.util import get_mountpoint
class StudyPrepCreatorTests(RESTHandlerTestCase):
def test_post_non_existant_study(self):
# study id that does not exist
prep = StringIO(EXP_PREP_TEMPLATE.format(0))
prep_table = load_template_to_dataframe(prep)
response = self.post('/api/v1/study/0/preparation?'
'&data_type=16S',
data=prep_table.T.to_dict(),
headers=self.headers, asjson=True)
self.assertEqual(response.code, 404)
def test_post_non_matching_identifiers(self):
prep = StringIO(EXP_PREP_TEMPLATE.format(100))
prep_table = load_template_to_dataframe(prep)
response = self.post('/api/v1/study/1/preparation?'
'data_type=16S',
data=prep_table.T.to_dict(),
headers=self.headers, asjson=True)
self.assertEqual(response.code, 406)
obs = json_decode(response.body)
self.assertCountEqual(obs.keys(), ['message'])
self.assertGreater(len(obs['message']), 0)
def test_post_valid_study(self):
prep = StringIO(EXP_PREP_TEMPLATE.format(1))
prep_table = load_template_to_dataframe(prep)
response = self.post('/api/v1/study/1/preparation?data_type=16S',
data=prep_table.T.to_dict(),
headers=self.headers, asjson=True)
self.assertEqual(response.code, 201)
exp = json_decode(response.body)
exp_prep = PrepTemplate(exp['id']).to_dataframe()
prep_table.index.name = 'sample_id'
# sort columns to be comparable
prep_table = prep_table[sorted(prep_table.columns.tolist())]
exp_prep = exp_prep[sorted(exp_prep.columns.tolist())]
exp_prep.drop('qiita_prep_id', axis=1, inplace=True)
pd.util.testing.assert_frame_equal(prep_table, exp_prep)
class StudyPrepArtifactCreatorTests(RESTHandlerTestCase):
def test_post_non_existant_study(self):
uri = '/api/v1/study/0/preparation/0/artifact'
body = {'artifact_type': 'foo', 'filepaths': [['foo.txt', 1],
['bar.txt', 1]],
'artifact_name': 'a name is a name'}
response = self.post(uri, data=body, headers=self.headers, asjson=True)
exp = {'message': 'Study not found'}
self.assertEqual(response.code, 404)
obs = json_decode(response.body)
self.assertEqual(obs, exp)
def test_post_non_existant_prep(self):
uri = '/api/v1/study/1/preparation/1337/artifact'
body = {'artifact_type': 'foo', 'filepaths': [['foo.txt', 1],
['bar.txt', 1]],
'artifact_name': 'a name is a name'}
response = self.post(uri, data=body, headers=self.headers, asjson=True)
exp = {'message': 'Preparation not found'}
self.assertEqual(response.code, 404)
obs = json_decode(response.body)
self.assertEqual(obs, exp)
def test_post_unknown_artifact_type(self):
uri = '/api/v1/study/1/preparation/1/artifact'
body = {'artifact_type': 'foo', 'filepaths': [['foo.txt', 1],
['bar.txt', 1]],
'artifact_name': 'a name is a name'}
response = self.post(uri, data=body, headers=self.headers, asjson=True)
self.assertEqual(response.code, 406)
obs = json_decode(response.body)
self.assertCountEqual(obs.keys(), ['message'])
self.assertGreater(len(obs['message']), 0)
def test_post_unknown_filepath_type_id(self):
uri = '/api/v1/study/1/preparation/1/artifact'
body = {'artifact_type': 'foo', 'filepaths': [['foo.txt', 123123],
['bar.txt', 1]],
'artifact_name': 'a name is a name'}
response = self.post(uri, data=body, headers=self.headers, asjson=True)
self.assertEqual(response.code, 406)
obs = json_decode(response.body)
self.assertCountEqual(obs.keys(), ['message'])
self.assertGreater(len(obs['message']), 0)
def test_post_files_notfound(self):
uri = '/api/v1/study/1/preparation/1/artifact'
body = {'artifact_type': 'foo', 'filepaths': [['foo.txt', 1],
['bar.txt', 1]],
'artifact_name': 'a name is a name'}
response = self.post(uri, data=body, headers=self.headers, asjson=True)
self.assertEqual(response.code, 406)
obs = json_decode(response.body)
self.assertCountEqual(obs.keys(), ['message'])
self.assertGreater(len(obs['message']), 0)
def test_post_valid(self):
dontcare, uploads_dir = get_mountpoint('uploads')[0]
foo_fp = os.path.join(uploads_dir, '1', 'foo.txt')
bar_fp = os.path.join(uploads_dir, '1', 'bar.txt')
with open(foo_fp, 'w') as fp:
fp.write("@x\nATGC\n+\nHHHH\n")
with open(bar_fp, 'w') as fp:
fp.write("@x\nATGC\n+\nHHHH\n")
prep = StringIO(EXP_PREP_TEMPLATE.format(1))
prep_table = load_template_to_dataframe(prep)
response = self.post('/api/v1/study/1/preparation?data_type=16S',
data=prep_table.T.to_dict(),
headers=self.headers, asjson=True)
prepid = json_decode(response.body)['id']
uri = '/api/v1/study/1/preparation/%d/artifact' % prepid
# 1 -> fwd or rev sequences in fastq
# 3 -> barcodes
body = {'artifact_type': 'FASTQ', 'filepaths': [['foo.txt', 1],
['bar.txt',
'raw_barcodes']],
'artifact_name': 'a name is a name'}
response = self.post(uri, data=body, headers=self.headers, asjson=True)
self.assertEqual(response.code, 201)
obs = json_decode(response.body)['id']
prep_instance = PrepTemplate(prepid)
exp = prep_instance.artifact.id
self.assertEqual(obs, exp)
EXP_PREP_TEMPLATE = (
'sample_name\tbarcode\tcenter_name\tcenter_project_name\t'
'ebi_submission_accession\temp_status\texperiment_design_description\t'
'instrument_model\tlibrary_construction_protocol\tplatform\tprimer\t'
'bar\trun_prefix\tstr_column\n'
'{0}.SKB7.640196\tCCTCTGAGAGCT\tANL\tTest Project\t\tEMP\tBBBB\t'
'Illumina MiSeq\tAAAA\tIllumina\tGTGCCAGCMGCCGCGGTAA\tfoo\t'
's_G1_L002_sequences\tValue for sample 3\n'
'{0}.SKB8.640193\tGTCCGCAAGTTA\tANL\tTest Project\t\tEMP\tBBBB\t'
'Illumina MiSeq\tAAAA\tIllumina\tGTGCCAGCMGCCGCGGTAA\tfoo\t'
's_G1_L001_sequences\tValue for sample 1\n'
'{0}.SKD8.640184\tCGTAGAGCTCTC\tANL\tTest Project\t\tEMP\tBBBB\t'
'Illumina MiSeq\tAAAA\tIllumina\tGTGCCAGCMGCCGCGGTAA\tfoo\t'
's_G1_L001_sequences\tValue for sample 2\n')
if __name__ == '__main__':
main()
| bsd-3-clause |
ragnarekker/Ice-modelling | utilities/getregobsdata.py | 1 | 50189 | # -*- coding: utf-8 -*-
import datetime as dt
import requests
import os as os
import copy as cp
from icemodelling import ice as ice, constants as const
from utilities import makepickle as mp, makelogs as ml, doconversions as dc
from utilities import getmisc as gm
import setenvironment as se
import pandas as pd
__author__ = 'ragnarekker'
def get_obs_location(LocationName):
"""Uses OData query to get the ObsLocation data for a given ObsLocation name.
:param LocationName:
:return:
"""
oDataQuery = "{0}".format(LocationName)
# get data for current view and dates
url = "http://api.nve.no/hydrology/regobs/{0}/Odata.svc/ObsLocation/?$filter=LocationName eq '{1}'&$format=json"\
.format(se.odata_version, oDataQuery)
data = requests.get(url).json()
data_dict = data['d']['results'][0]
return data_dict
def get_ice_cover(LocationName, fromDate, toDate):
"""Method returns a list of IceCover objects from regObs between fromDate to toDate.
:param LocationName: [string/list] name as given in regObs in ObsLocation table
:param fromDate: [string] The from date as 'YYYY-MM-DD'
:param toDate: [string] The to date as 'YYYY-MM-DD'
:return:
http://api.nve.no/hydrology/regobs/v0.9.4/Odata.svc/IceCoverObsV?$filter=
DtObsTime%20gt%20datetime%272013-11-01%27%20and%20
DtObsTime%20lt%20datetime%272014-06-01%27%20and%20
LocationName%20eq%20%27Hakkloa%20nord%20372%20moh%27%20and%20
LangKey%20eq%201
"""
iceCoverList = []
if isinstance(LocationName, list):
for l in LocationName:
iceCoverList = iceCoverList + get_ice_cover(l, fromDate, toDate)
else:
view = 'IceCoverObsV'
OdataLocationName = LocationName
oDataQuery = "DtObsTime gt datetime'{0}' and " \
"DtObsTime lt datetime'{1}' and " \
"LocationName eq '{2}' and " \
"LangKey eq 1".format(fromDate, toDate, OdataLocationName)
# get data for current view and dates
url = "http://api.nve.no/hydrology/regobs/{0}/Odata.svc/{2}?$filter={1}&$format=json".format(se.odata_version, oDataQuery, view)
data = requests.get(url).json()
datalist = data['d']['results']
for ic in datalist:
iceCoverDate = dc.unix_time_2_normal(ic['DtObsTime'])
iceCoverName = ic['IceCoverName']
iceCoverBefore = ic['IceCoverBeforeName']
cover = ice.IceCover(iceCoverDate, iceCoverName, iceCoverBefore, LocationName)
cover.set_regid(ic['RegID'])
cover.set_utm(ic['UTMNorth'], ic['UTMEast'], ic['UTMZone'])
iceCoverList.append(cover)
return iceCoverList
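# Illustrative sketch: the location name and dates below come from the example OData
# URL in the docstring above; nothing else is assumed.
#
#   covers = get_ice_cover('Hakkloa nord 372 moh', '2013-11-01', '2014-06-01')
#   for c in covers:
#       print(c.date, c.locationName)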
def get_first_ice_cover(LocationName, fromDate, toDate):
"""Returns the first observation where ice can form on a lake. That is if the ice cover is partly or fully
formed on observation location or the lake.
If no such observation is found an "empty" ice cover is returned at fromDate.
:param LocationName: [string/list] name as given in regObs in ObsLocation table
:param fromDate: [string] The from date as 'YYYY-MM-DD'
:param toDate: [string] The to date as 'YYYY-MM-DD'
:return:
"""
iceCoverSeason = get_ice_cover(LocationName, fromDate, toDate)
iceCoverSeason.sort(key=lambda IceCover: IceCover.date) # start looking at the oldest observations
for ic in iceCoverSeason:
# if the ice cover is partly or fully formed on observation location or the lake
# 2) delvis islagt på målestedet
# 3) helt islagt på målestedet
# 21) hele sjøen islagt
if (ic.iceCoverTID == 2) or (ic.iceCoverTID == 3) or (ic.iceCoverTID == 21):
# and if icecover before was
# 1) isfritt på målestedet
# 2) delvis islagt på målestedet,
# 11) islegging langs land
# 20) hele sjøen isfri, this is fist ice
if (ic.iceCoverBeforeTID == 1) or (ic.iceCoverBeforeTID == 2) or \
(ic.iceCoverBeforeTID == 11) or (ic.iceCoverBeforeTID == 20):
return ic
# datetime objects in IceCover datatype
from_date = dt.datetime.strptime(fromDate, "%Y-%m-%d")
return ice.IceCover(from_date, "Ikke gitt", 'Ikke gitt', LocationName)
def get_last_ice_cover(LocationName, fromDate, toDate):
"""Method gives the observation confirming ice is gone for the season from a lake.
It finds the first observation without ice after an observation(s) with ice.
If none is found, an "empty" icecover object is returned on the last date in the period.
Method works best when dates range over whole seasons.
:param LocationName: [string/list] name as given in regObs in ObsLocation table
:param fromDate: [string] The from date as 'YYYY-MM-DD'
:param toDate: [string] The to date as 'YYYY-MM-DD'
:return:
"""
iceCoverSeason = get_ice_cover(LocationName, fromDate, toDate)
iceCoverSeason.sort(key=lambda IceCover: IceCover.date, reverse=True) # start looking at newest observations
# datetime objects in ice cover data type
to_date = dt.datetime.strptime(toDate, "%Y-%m-%d")
# make "empty" ice cover object on last date. If there is no ice cover observation confirming that ice has gone,
    # this will be returned.
noIceCover = ice.IceCover(to_date, "Ikke gitt", 'Ikke gitt', LocationName)
for ic in iceCoverSeason:
# if "Isfritt på målestedet" (TID=1) or "Hele sjøen isfri" (TID=20). That is, if we have an older "no icecover" case
if (ic.iceCoverTID == 1) or (ic.iceCoverTID == 20):
noIceCover = ic
# if "Delvis islagt på målestedet" (TID=2) or "Helt islagt på målestedet" (TID=3) or "Hele sjøen islagt" (TID=21)
if (ic.iceCoverTID == 2) or (ic.iceCoverTID == 3) or (ic.iceCoverTID == 21):
return noIceCover # we have confirmed ice on the lake so we return the no ice cover observation
return noIceCover
def get_ice_thickness_on_regid(regid):
view = 'IceThicknessV'
oDataQuery = "RegID eq {0} and " \
"LangKey eq 1".format(regid)
# get data for current view and dates
url = "http://api.nve.no/hydrology/regobs/{0}/Odata.svc/{2}?$filter={1}&$format=json".format(se.odata_version,
oDataQuery, view)
data = requests.get(url).json()
datalist = data['d']['results']
#Only one ice column pr regid
ice_column = _parse_ice_column(datalist[0])
return ice_column
def get_ice_thickness_on_location(LocationName, fromDate, toDate):
"""Method returns a list of ice thickness between two dates for a given location in regObs.
:param LocationName: [string/list] name as given in regObs in ObsLocation table. Multiploe locations posible
:param fromDate: [string] The from date as 'YYYY-MM-DD'
:param toDate: [string] The to date as 'YYYY-MM-DD'
:return:
"""
ice_columns = []
if isinstance(LocationName, list):
for l in LocationName:
ice_columns = ice_columns + get_ice_thickness_on_location(l, fromDate, toDate)
else:
view = 'IceThicknessV'
OdataLocationName = LocationName
oDataQuery = "DtObsTime gt datetime'{0}' and " \
"DtObsTime lt datetime'{1}' and " \
"LocationName eq '{2}' and " \
"LangKey eq 1".format(fromDate, toDate, OdataLocationName)
# get data for current view and dates
url = "http://api.nve.no/hydrology/regobs/{0}/Odata.svc/{2}?$filter={1}&$format=json".format(se.odata_version, oDataQuery, view)
data = requests.get(url).json()
datalist = data['d']['results']
for ic in datalist:
ice_column = _parse_ice_column(ic)
if ice_column:
ice_columns.append(ice_column)
return ice_columns
def _parse_ice_column(ic):
RegID = ic['RegID']
layers = get_ice_thickness_layers(RegID)
ice_column = None
if layers is not None:
date = dc.unix_time_2_normal(ic['DtObsTime'])
if len(layers) == 0:
layers = [ice.IceLayer(float(ic['IceThicknessSum']), 'unknown')]
ice_column = ice.IceColumn(date, layers)
ice_column.add_metadata('RegID', RegID)
ice_column.add_metadata('LocationName', ic['LocationName'])
ice_column.add_metadata('UTMNorth', ic['UTMNorth'])
ice_column.add_metadata('UTMEast', ic['UTMEast'])
ice_column.add_metadata('UTMZone', ic['UTMZone'])
ice_column.add_layer_at_index(0, ice.IceLayer(ic['SlushSnow'], 'slush'))
ice_column.add_layer_at_index(0, ice.IceLayer(ic['SnowDepth'], 'snow'))
ice_column.merge_and_remove_excess_layers()
ice_column.update_draft_thickness()
ice_column.update_top_layer_is_slush()
iha = ic['IceHeightAfter']
# if ice height after is not given I make an estimate so that I know where to put it in the plot
if iha is None:
ice_column.update_water_line()
ice_column.add_metadata('IceHeightAfter', 'Modeled')
iha = ice_column.draft_thickness - ice_column.water_line
if ice_column.top_layer_is_slush:
iha = iha + const.snow_pull_on_water
ice_column.water_line = ice_column.draft_thickness - float(iha)
if ice_column.top_layer_is_slush is True:
ice_column.water_line -= ice_column.column[0].height
return ice_column
def get_all_season_ice_on_location(LocationNames, fromDate, toDate):
"""Uses odata-api. This returns a list of all ice columns in a period from fromDate to toDate.
At index 0 is first ice (date with no ice layers) and on last index (-1)
is last ice which is the date where there is no more ice on the lake.
    If no first or last ice is found in regObs, the first and/or last dates in the request are used for the start and
    end of the ice cover season.
:param LocationNames: [string/list] name as given in regObs in ObsLocation table
:param fromDate: [string] The from date as 'YYYY-MM-DD'
:param toDate: [string] The to date as 'YYYY-MM-DD'
:return:
"""
if not isinstance(LocationNames, list):
LocationNames = [LocationNames]
all_columns = []
for LocationName in LocationNames:
first = get_first_ice_cover(LocationName, fromDate, toDate)
last = get_last_ice_cover(LocationName, fromDate, toDate)
start_column = []
end_column = []
fc = ice.IceColumn(first.date, 0)
fc.add_metadata('LocationName', first.locationName)
fc.add_metadata('RegID', first.RegID)
fc.add_metadata('UTMNorth', first.UTMNorth)
fc.add_metadata('UTMEast', first.UTMEast)
fc.add_metadata('UTMZone', first.UTMZone)
start_column.append(fc)
lc = ice.IceColumn(last.date, 0)
lc.add_metadata('LocationName', last.locationName)
lc.add_metadata('RegID', last.RegID)
lc.add_metadata('UTMNorth', last.UTMNorth)
lc.add_metadata('UTMEast', last.UTMEast)
lc.add_metadata('UTMZone', last.UTMZone)
end_column.append(lc)
columns = get_ice_thickness_on_location(LocationName, fromDate, toDate)
all_columns += (start_column + columns + end_column)
return all_columns
def get_ice_thickness_layers(RegID):
"""
    This method returns the ice layers of a given registration (RegID) in regObs. It reads only what is below the first
    solid ice layer. Thus snow and slush on top of the ice are not covered here; they are added separately in the public
    method for retrieving the full ice column.
    This method is an internal method for getregobsdata.py
:param RegID:
:return:
    Example of an ice layer object in regObs:
http://api.nve.no/hydrology/regobs/v0.9.5/Odata.svc/IceThicknessLayerV?$filter=RegID%20eq%2034801%20and%20LangKey%20eq%201&$format=json
"""
view = 'IceThicknessLayerV'
url = "http://api.nve.no/hydrology/regobs/{0}/Odata.svc/{1}?" \
"$filter=RegID eq {2} and LangKey eq 1&$format=json"\
.format(se.odata_version, view, RegID)
data = requests.get(url).json()
datalist = data['d']['results']
layers = []
for l in datalist:
thickness = l['IceLayerThickness']
        if thickness is None or float(thickness) == 0:
            ml.log_and_print('getregobsdata.py -> get_ice_thickness_layers: RegID {0} has ice layers of None thickness.'.format(RegID))
            # return an empty list if some layers are zero or None.
reversed_layers = []
return reversed_layers
else:
regobs_layer_name = l['IceLayerName']
layer_type = get_tid_from_name('IceLayerKDV', regobs_layer_name)
layer_name = get_ice_type_from_tid(layer_type)
layer = ice.IceLayer(float(thickness), layer_name)
layers.append(layer)
return layers
def get_ice_type_from_tid(IceLayerTID):
"""Method returns a ice type available in the IceLayer class given the regObs type IceLayerTID.
:param IceLayerTID:
:return Ice type as string:
    List of layer types available in regObs:
http://api.nve.no/hydrology/regobs/v0.9.4/OData.svc/IceLayerKDV?$filter=Langkey%20eq%201%20&$format=json
"""
#
if IceLayerTID == 1:
return 'black_ice'
elif IceLayerTID == 3:
return 'slush_ice'
elif IceLayerTID == 5:
return 'slush'
elif IceLayerTID == 11: # 'Stålis i nedbrytning' in regObs
return 'black_ice'
elif IceLayerTID == 13: # 'Sørpeis i nedbrytning' in regObs
return 'slush_ice'
elif IceLayerTID == 14: # 'Stavis (våris)' in regObs
return 'slush_ice'
else:
        return 'unknown'
def get_tid_from_name(x_kdv, name):
"""
    Gets an xTID for a given xName from an xKDV element in regObs. In other words, it gets the ID for a given name.
:param x_kdv:
:param name:
:return tid:
"""
x_kdv = get_kdv(x_kdv)
tid = -1
for xTID, xName in x_kdv.items():
if xName == name:
tid = xTID
return tid
def get_kdv(x_kdv, get_new=False):
"""Imports a x_kdv view from regObs and returns a dictionary with <key, value> = <ID, Name>
An x_kdv is requested from the regObs api if a pickle file newer than a week exists.
:param x_kdv: [string] x_kdv view
:return dict: {} x_kdv as a dictionary
Ex of use: aval_cause_kdv = get_kdv('AvalCauseKDV')
Ex of url for returning values for IceCoverKDV in norwegian:
http://api.nve.no/hydrology/regobs/v0.9.4/OData.svc/ForecastRegionKDV?$filter=Langkey%20eq%201%20&$format=json
"""
kdv_file = '{0}{1}.pickle'.format(se.kdv_elements_folder, x_kdv)
dict = {}
if get_new:
url = 'http://api.nve.no/hydrology/regobs/{0}/OData.svc/{1}?$filter=Langkey%20eq%201%20&$format=json'\
.format(se.odata_version, x_kdv)
ml.log_and_print("getregobsdata -> get_kdv: Getting KDV from URL:{0}".format(url))
kdv = requests.get(url).json()
for a in kdv['d']['results']:
try:
if 'AvalCauseKDV' in url and a['ID'] > 9 and a['ID'] < 26: # this table gets special treatment
dict[a["ID"]] = a["Description"]
else:
dict[a["ID"]] = a["Name"]
except (RuntimeError, TypeError, NameError):
pass
mp.pickle_anything(dict, kdv_file)
else:
if os.path.exists(kdv_file):
# Useful to test if the file is old and if so make a new one
max_file_age = 7
mtime = os.path.getmtime(kdv_file)
last_modified_date = dt.datetime.fromtimestamp(mtime).date()
date_limit = dt.datetime.now() - dt.timedelta(days=max_file_age)
# If file older than date limit, request a new.
if last_modified_date < date_limit.date():
dict = get_kdv(x_kdv, get_new=True)
else:
# ml.log_and_print("getregobsdata -> get_kdv: Getting KDV from pickle:{0}".format(kdv_file))
dict = mp.unpickle_anything(kdv_file, print_message=False)
else:
dict = get_kdv(x_kdv, get_new=True)
return dict
# webapi
# START VARSOMDATA
def _stringtime_2_datetime(stringtime):
"""Takes in a date as string, both given as unix datetime or normal local time, as string.
Method returns a normal datetime object.
:param stringtime:
:return: The date as datetime object
"""
if '/Date(' in stringtime: # oData gives unix time. Unix date time in milliseconds from 1.1.1970
unix_date_time = int(stringtime[6:-2])
unix_datetime_in_seconds = unix_date_time/1000 # For some reason they are given in miliseconds
date = dt.datetime.fromtimestamp(int(unix_datetime_in_seconds))
else: # regobs api gives local time
if '.' in stringtime: # though sometimes with seconds given with decimal places
non_decimal_stringtime = stringtime[0:stringtime.index('.')]
stringtime = non_decimal_stringtime
date = dt.datetime.strptime(stringtime, '%Y-%m-%dT%H:%M:%S')
### DOES REGOBS API RETURN UT TIME??? ###
return date
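# Illustrative sketch of the two accepted input formats; the timestamps themselves
# are hypothetical examples:
#
#   _stringtime_2_datetime('/Date(1417734000000)/')    # oData unix time in milliseconds
#   _stringtime_2_datetime('2014-12-05T12:00:00.123')  # local time from the regObs api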
def _make_data_frame(list):
"""Takes a list of objects and makes a Pandas data frame.
:param list: [list of objects]
:return: [data frame]
"""
if len(list) == 0:
data_frame = pd.DataFrame()
else:
observation_fields = list[0].__dict__.keys()
data_frame = pd.DataFrame(columns=observation_fields)
i = 0
for l in list:
observation_values = l.__dict__.values()
data_frame.loc[i] = observation_values
i += 1
return data_frame
def _reg_types_dict(registration_tids=None):
"""Method maps single RegistrationTID values to the query dictionary used in regObs webapi
:param registration_tids: [int or list of int] Definition given below
:return:
Registration IDs and names
10 Fritekst
11 Ulykke/hendelse
12 Bilde
13 Faretegn
-14 Skader
21 Vær
22 Snødekke
23 Snøprofil
-24 Skredfaretegn
25 Stabilitetstest
26 Skredhendelse
27 Observert skredaktivitet(2011)
28 Skredfarevurdering (2012)
-29 Svakt lag
30 Skredfarevurdering (2013)
31 Skredfarevurdering
32 Skredproblem
33 Skredaktivitet
40 Snøskredvarsel
50 Istykkelse
51 Isdekningsgrad
61 Vannstand (2017)
62 Vannstand
71 Skredhendelse
80 Hendelser Grupperings type - Hendelser
81 Skred og faretegn Grupperings type - Skred og faretegn
82 Snødekke og vær Grupperings type - Snødekke og vær
83 Vurderinger og problemer Grupperings type - Vurderinger og problemer
"""
# If resources isn't a list, make it so
if not isinstance(registration_tids, list):
registration_tids = [registration_tids]
registration_dicts = []
for registration_tid in registration_tids:
if registration_tid is None:
return None
elif registration_tid == 10: # Fritekst
registration_dicts.append({'Id': 10, 'SubTypes': []})
elif registration_tid == 11: # Ulykke/hendelse
registration_dicts.append({'Id': 80, 'SubTypes': [11]})
elif registration_tid == 13: # Faretegn
registration_dicts.append({'Id': 81, 'SubTypes': [13]})
elif registration_tid == 21: # Vær
registration_dicts.append({'Id': 82, 'SubTypes': [21]})
elif registration_tid == 22: # Snødekke
registration_dicts.append({'Id': 82, 'SubTypes': [22]})
elif registration_tid == 23: # Snøprofil
registration_dicts.append({'Id': 82, 'SubTypes': [23]})
elif registration_tid == 25: # Stabilitetstest
registration_dicts.append({'Id': 82, 'SubTypes': [25]})
elif registration_tid == 26: # Skredhendelse
registration_dicts.append({'Id': 81, 'SubTypes': [26]})
elif registration_tid == 27: # Skredaktivitet(2011)
registration_dicts.append({'Id': 81, 'SubTypes': [27]})
elif registration_tid == 28: # Skredfarevurdering (2012)
registration_dicts.append({'Id': 83, 'SubTypes': [28]})
elif registration_tid == 30: # Skredfarevurdering (2013)
registration_dicts.append({'Id': 83, 'SubTypes': [30]})
elif registration_tid == 31: # Skredfarevurdering
registration_dicts.append({'Id': 83, 'SubTypes': [31]})
elif registration_tid == 32: # Skredproblem
registration_dicts.append({'Id': 83, 'SubTypes': [32]})
elif registration_tid == 33: # Skredaktivitet
registration_dicts.append({'Id': 81, 'SubTypes': [33]})
elif registration_tid == 50: # Istykkelse
registration_dicts.append({'Id': 50, 'SubTypes': []})
elif registration_tid == 51: # Isdekningsgrad
registration_dicts.append({'Id': 51, 'SubTypes': []})
else:
ml.log_and_print('getobservations.py -> _reg_types_dict: RegistrationTID {0} not supported (yet).'.format(registration_tid))
return registration_dicts
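# Illustrative sketch: for the ice observation types used in this module the mapping
# is flat, i.e. ice thickness (50) and ice cover (51) have no grouping type:
#
#   _reg_types_dict([50, 51])
#   # -> [{'Id': 50, 'SubTypes': []}, {'Id': 51, 'SubTypes': []}]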
def _make_one_request(from_date=None, to_date=None, reg_id=None, registration_types=None,
region_ids=None, location_id=None, observer_id=None, observer_nick=None, observer_competence=None,
group_id=None, output='List', geohazard_tids=None, lang_key=1, recursive_count=5):
"""Part of get_data method. Parameters the same except observer_id and reg_id can not be lists.
"""
# Dates in the web-api request are strings
if isinstance(from_date, dt.date):
from_date = dt.date.strftime(from_date, '%Y-%m-%d')
elif isinstance(from_date, dt.datetime):
from_date = dt.datetime.strftime(from_date, '%Y-%m-%d')
if isinstance(to_date, dt.date):
to_date = dt.date.strftime(to_date, '%Y-%m-%d')
elif isinstance(to_date, dt.datetime):
to_date = dt.datetime.strftime(to_date, '%Y-%m-%d')
data = [] # data from one query
# query object posted in the request
rssquery = {'LangKey': lang_key,
'RegId': reg_id,
'ObserverGuid': None, # eg. '4d11f3cc-07c5-4f43-837a-6597d318143c',
'SelectedRegistrationTypes': _reg_types_dict(registration_types),
'SelectedRegions': region_ids,
'SelectedGeoHazards': geohazard_tids,
'ObserverId': observer_id,
'ObserverNickName': observer_nick,
'ObserverCompetence': observer_competence,
'GroupId': group_id,
'LocationId': location_id,
'FromDate': from_date,
'ToDate': to_date,
'NumberOfRecords': None, # int
'Offset': 0}
url = 'https://api.nve.no/hydrology/regobs/webapi_{0}/Search/Rss?geoHazard=0'.format(se.web_api_version)
more_available = True
# get data from regObs api. It returns 100 items at a time. If more, continue requesting with an offset. Paging.
while more_available:
# try or if there is an exception, try again.
try:
r = requests.post(url, json=rssquery)
responds = r.json()
data += responds['Results']
if output == 'Count nest':
ml.log_and_print('getobservations.py -> _make_one_request: total matches {0}'.format(responds['TotalMatches']))
return [responds['TotalMatches']]
except:
ml.log_and_print("getobservations.py -> _make_one_request: EXCEPTION. RECURSIVE COUNT {0}".format(recursive_count))
if recursive_count > 1:
recursive_count -= 1 # count down
data += _make_one_request(from_date=from_date,
to_date=to_date,
reg_id=reg_id,
registration_types=registration_types,
region_ids=region_ids,
location_id=location_id,
observer_id=observer_id,
observer_nick=observer_nick,
observer_competence=observer_competence,
group_id=group_id,
output=output,
geohazard_tids=geohazard_tids,
lang_key=lang_key,
recursive_count=recursive_count)
# log request status
if responds['TotalMatches'] == 0:
ml.log_and_print("getobservations.py -> _make_one_request: no data")
else:
ml.log_and_print('getobservations.py -> _make_one_request: {0:.2f}%'.format(len(data) / responds['TotalMatches'] * 100))
# if more get more by adding to the offset
if len(data) < responds['TotalMatches']:
rssquery["Offset"] += 100
else:
more_available = False
return data
def _get_general(registration_class_type, registration_types, from_date, to_date, region_ids=None, location_id=None,
observer_ids=None, observer_nick=None, observer_competence=None, group_id=None,
output='List', geohazard_tids=None, lang_key=1):
"""Gets observations of a requested type and mapps them to a class.
:param registration_class_type: [class for the requested observations]
:param registration_types: [int] RegistrationTID for the requested observation type
:param from_date: [date] A query returns [from_date, to_date]
:param to_date: [date] A query returns [from_date, to_date]
:param region_ids: [int or list of ints] If region_ids = None, all regions are selected
:param observer_ids: [int or list of ints] If observer_ids = None, all observers are selected
    :param observer_nick: [string] Part of an observer nick name
    :param observer_competence: [int or list of ints] as given in CompetenceLevelKDV
:param group_id: [int]
:param output: [string] Options: 'List', 'DataFrame' and 'Count'. Default 'List'.
:param geohazard_tids [int or list of ints] 10 is snow, 20,30,40 are dirt, 60 is water and 70 is ice
:param lang_key [int] 1 is norwegian, 2 is english
:return:
"""
list = None
if output not in ['List', 'DataFrame', 'Count']:
ml.log_and_print('getobservations.py -> _get_general: Illegal output option.')
return list
    # In these methods "Count" simply counts the list of observations, whereas in the more general get_data
    # counting a list and counting a nested list of full registrations are two different things.
output_for_get_data = output
if output == 'Count':
output_for_get_data = 'Count list'
# Dataframes are based on the lists
if output == 'DataFrame':
output_for_get_data = 'List'
# AvalancheEvaluation3 = 31 and is the table for observed avalanche evaluations.
data_with_more = get_data(from_date=from_date, to_date=to_date, region_ids=region_ids, observer_ids=observer_ids,
observer_nick=observer_nick, observer_competence=observer_competence,
group_id=group_id, location_id=location_id, lang_key=lang_key,
output=output_for_get_data, registration_types=registration_types, geohazard_tids=geohazard_tids)
# wash out all other observation types
data = []
if registration_types:
for d in data_with_more:
if d['RegistrationTid'] == registration_types:
data.append(d)
    else:  # registration_types is None means all registrations, and no single type is picked out.
data = data_with_more
if output == 'List' or output == 'DataFrame':
list = [registration_class_type(d) for d in data]
list = sorted(list, key=lambda registration_class_type: registration_class_type.DtObsTime)
if output == 'List':
return list
if output == 'DataFrame':
return _make_data_frame(list)
if output == 'Count':
return data
def get_data(from_date=None, to_date=None, registration_types=None, reg_ids=None, region_ids=None, location_id=None,
observer_ids=None, observer_nick=None, observer_competence=None, group_id=None,
output='List', geohazard_tids=None, lang_key=1):
"""Gets data from regObs webapi. Each observation returned as a dictionary in a list.
:param from_date: [string] 'yyyy-mm-dd'. Result includes from date.
:param to_date: [string] 'yyyy-mm-dd'. Result includes to date.
:param lang_key: [int] Default 1 gives Norwegian.
    :param reg_ids: [int or list of ints] Default None gives all.
    :param registration_types: [string or list of strings] Default None gives all.
    :param region_ids: [int or list of ints]
    :param geohazard_tids: [int or list of ints] Default None gives all.
    :param observer_ids: [int or list of ints] Default None gives all.
    :param observer_nick: [string] Part of an observer nick name
    :param observer_competence: [int or list of ints] as given in CompetenceLevelKDV
    :param group_id: [int]
    :param location_id: [int]
    :param output: [string] 'Nested' collects all observations on one regid in one entry (default for webapi).
                   'List' is a flat structure with one entry pr observation type.
                   'Count nest' makes one request and picks out info on total matches.
                   'Count list' counts every form in every observation.
:return: [list or int] Depending on output requested.
"""
# If resources isn't a list, make it so
if not isinstance(registration_types, list):
registration_types = [registration_types]
if not isinstance(region_ids, list):
region_ids = [region_ids]
if not isinstance(geohazard_tids, list):
geohazard_tids = [geohazard_tids]
    # The regObs webapi does not support multiple ObserverIDs and RegIDs, so they are looped over one by one.
if not isinstance(observer_ids, list):
observer_ids = [observer_ids]
if not isinstance(reg_ids, list):
reg_ids = [reg_ids]
    # if the output requested is 'Count' a number is expected, else a list of observations
all_data = []
for reg_id in reg_ids:
for observer_id in observer_ids:
data = _make_one_request(
from_date=from_date, to_date=to_date, lang_key=lang_key, reg_id=reg_id,
registration_types=registration_types, region_ids=region_ids, geohazard_tids=geohazard_tids,
observer_id=observer_id, observer_nick=observer_nick, observer_competence=observer_competence, group_id=group_id, location_id=location_id, output=output)
all_data += data
# Output 'Nested' is the structure returned from webapi. All observations on the same reg_id are grouped to one list item.
    # Output 'List': every observation element is made a separate item in the list.
    # Sums of each are available as 'Count list' and 'Count nest'.
if output == 'Count nest':
return sum(all_data)
# data sorted with ascending observation time
all_data = sorted(all_data, key=lambda d: d['DtObsTime'])
if output == 'Nested':
return all_data
elif output == 'List' or output == 'Count list':
listed_data = []
for d in all_data:
for o in d['Registrations']:
listed_data.append({**d, **o})
for p in d['Pictures']:
p['RegistrationName'] = 'Bilde'
listed_data.append({**d, **p})
if output == 'List':
return listed_data
if output == 'Count list':
return len(listed_data)
else:
ml.log_and_print('getobservations.py -> get_data: Unsupported output type.')
return None
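# Illustrative sketch of a typical call in this module; the date range is an assumption:
#
#   obs = get_data(from_date='2017-12-01', to_date='2017-12-31',
#                  registration_types=50, geohazard_tids=70)
#   # default output='List': one dict pr observation element, sorted on DtObsTime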
# END VARSOMDATA
def _webapi_ice_col_to_ice_class(o):
"""This internal method maps an ice column object as given on webapi to the Ice.IceColumn class
:param o:
:return:
"""
reg_id = o['RegId']
layers = []
ordered_layers = sorted(o['FullObject']['IceThicknessLayers'], key=lambda l: l['IceLayerID'])
for layer in ordered_layers:
ice_type = get_ice_type_from_tid(layer['IceLayerTID'])
ice_layer_height = layer['IceLayerThickness']
if ice_layer_height is not None:
ice_layer = ice.IceLayer(ice_layer_height, ice_type)
layers.append(ice_layer)
date = dt.datetime.strptime(o['DtObsTime'][0:16], "%Y-%m-%dT%H:%M")
if o['FullObject']['IceThicknessSum'] is not None:
if len(layers) == 0:
layers = [ice.IceLayer(float(o['FullObject']['IceThicknessSum']), 'unknown')]
ice_column = ice.IceColumn(date, layers)
ice_column.add_metadata('OriginalObject', o)
ice_column.add_metadata('RegID', reg_id)
ice_column.add_metadata('LocationName', o['LocationName'])
ice_column.add_metadata('LocationID', o['LocationId'])
ice_column.add_metadata('UTMNorth', o['UtmNorth'])
ice_column.add_metadata('UTMEast', o['UtmEast'])
ice_column.add_metadata('UTMZone', o['UtmZone'])
ice_column.add_layer_at_index(0, ice.IceLayer(o['FullObject']['SlushSnow'], 'slush'))
ice_column.add_layer_at_index(0, ice.IceLayer(o['FullObject']['SnowDepth'], 'snow'))
ice_column.merge_and_remove_excess_layers()
ice_column.update_draft_thickness()
ice_column.update_top_layer_is_slush()
        # I tried to reference the ice column to the water surface given ice height after or slush snow, but then what if
        # ice height before is given? And what if there are combinations? Too many possibilities in regObs.
        # Instead I calculate a theoretical ice height and use that.
# ice_column.update_water_line()
iha = o['FullObject']['IceHeightAfter']
# ihb = o['FullObject']['IceHeightBefore']
# if ice height after is not given I make an estimate so that I know where to put it in the plot
if iha is None:
ice_column.update_water_line()
ice_column.add_metadata('IceHeightAfter', 'Modelled')
iha = ice_column.draft_thickness - ice_column.water_line
            # Probably don't need the test of whether the top layer is slush because it is included in draft thickness
# if ice_column.top_layer_is_slush:
# iha = iha + const.snow_pull_on_water
ice_column.water_line = ice_column.draft_thickness - float(iha)
# correct level if top layer was slush
if ice_column.top_layer_is_slush is True:
for layer in ice_column.column:
if layer.get_enum() > 20: # material types >= 20 are snow
continue
elif layer.get_enum() == 2: # slush
ice_column.water_line -= layer.height
break # only the top most slush layer counts
return ice_column
else:
return None
def get_ice_thickness_today():
"""Gets all the observed ice thickness from regObs for today (and 2 days back)
:return: ice_thickeness_obs_dict
"""
to_date = dt.date.today()
from_date = to_date - dt.timedelta(days=2)
ice_thickeness_obs = get_data(from_date=from_date, to_date=to_date, registration_types=50, geohazard_tids=70)
ice_thickeness_obs_dict = {}
for o in ice_thickeness_obs:
if o['RegistrationTid'] == 50:
ice_column = _webapi_ice_col_to_ice_class(o)
if ice_column is not None:
ice_thickeness_obs_dict[o['RegId']] = ice_column
return ice_thickeness_obs_dict
def get_ice_thickness_observations(year, reset_and_get_new=False):
"""Gets all the observed ice thickness (RegistrationTID = 50) from regObs for one year.
The inner workings of the method:
1. We have an option of resetting local storage (delete pickle) and thus forcing the get_new.
    2.1 Try opening a pickle; if it doesn't exist, an exception is thrown and we get new data.
2.2 If the requested data is from a previous season, no changes are expected, so load the pickle
without adding the last observations registered in regObs. Anyway, don't get new data.
2.3 If the requested data is from this season, set request from_date to the last modified
date of the pickle and 7 days past that. Add these last obs to the pickle data, and thus it is not
necessary to get new.
3. If get new, it gets all new data for the season.
4. Else, load pickle and if some last obs are to be added, do so.
:param year: [string] Eg '2017-18'
:param reset_and_get_new: [bool]
:return: ice_thickeness_obs_dict
"""
log_referance = 'getregobsdata.py -> get_ice_thickness_observations'
pickle_file_name = '{0}get_ice_thickness_observations_{1}.pickle'.format(se.local_storage, year)
# 1. Remove pickle if it exists, forcing the get_new
if reset_and_get_new:
try:
os.remove(pickle_file_name)
except OSError:
pass
from_date, to_date = gm.get_dates_from_year(year)
add_last_obs = None
get_new = None
try:
mtime = os.path.getmtime(pickle_file_name)
last_modified_date = dt.datetime.fromtimestamp(mtime).date()
# if file newer than the season (that is, if this is historical data), load it without requesting new.
dt_to_date = dt.datetime.strptime(to_date, '%Y-%m-%d').date()
if last_modified_date > dt_to_date:
add_last_obs = False
else:
add_last_obs = True
to_date = dt.date.today()
from_date = last_modified_date - dt.timedelta(days=7)
get_new = False
except OSError:
# file does not exists, so get_new.
ml.log_and_print("{0}: No matching pickle found, getting new data.".format(log_referance))
get_new = True
if get_new:
ml.log_and_print('{0}: Getting new for year {1}.'.format(log_referance, year))
ice_thickeness_obs = get_data(from_date=from_date, to_date=to_date, registration_types=50, geohazard_tids=70)
ice_thickeness_obs_dict = {}
for o in ice_thickeness_obs:
if o['RegistrationTid'] == 50:
ice_column = _webapi_ice_col_to_ice_class(o)
if ice_column is not None:
ice_thickeness_obs_dict[o['RegId']] = ice_column
mp.pickle_anything(ice_thickeness_obs_dict, pickle_file_name)
else:
ice_thickeness_obs_dict = mp.unpickle_anything(pickle_file_name)
if add_last_obs:
ml.log_and_print("{0}: Adding observations from {1} to {2}".format(log_referance, from_date, to_date))
new_ice_thickeness_obs = get_data(from_date=from_date, to_date=to_date, registration_types=50, geohazard_tids=70)
new_ice_thickeness_obs_dict = {}
for o in new_ice_thickeness_obs:
if o['RegistrationTid'] == 50:
ice_column = _webapi_ice_col_to_ice_class(o)
if ice_column is not None:
new_ice_thickeness_obs_dict[o['RegId']] = ice_column
for k,v in new_ice_thickeness_obs_dict.items():
ice_thickeness_obs_dict[k] = v
mp.pickle_anything(ice_thickeness_obs_dict, pickle_file_name)
return ice_thickeness_obs_dict
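# Illustrative sketch; the season string follows the format used elsewhere in this module:
#
#   ice_thicks = get_ice_thickness_observations('2017-18')
#   ice_thicks = get_ice_thickness_observations('2017-18', reset_and_get_new=True)  # force re-request
#   # -> {RegID: ice.IceColumn, ...}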
def get_all_season_ice(year, get_new=True):
"""Returns observed ice columns from regObs-webapi over a requested season. Ice covers representing
first ice or ice cover lost are represented by an ice column of zero height.
The workings of this routine:
    1. Get one season of data from the regobs-api and spread it out to a long list.
    2. Pick out only cover and column and group them on location_ids. We keep only locations with a
    date for first ice that season. All observations are mapped to the cover and column classes in ice.py.
3. Map all covers where first_ice or ice_cover_lost is True to zero-height columns. Remove the rest.
    If get_new=True new data is retrieved. If get_new=False data is picked from pickle.
:param year:
:param get_new:
:return:
"""
file_name_and_path = '{0}get_all_season_ice_{1}.pickle'.format(se.local_storage, year)
from_date, to_date = gm.get_dates_from_year(year)
if get_new:
all_observations = get_data(from_date=from_date, to_date=to_date, geohazard_tids=70)
all_locations = {}
for o in all_observations:
if o['RegistrationTid'] == 51 or o['RegistrationTid'] == 50:
if o['LocationId'] in all_locations.keys():
all_locations[o['LocationId']].append(o)
else:
all_locations[o['LocationId']] = [o]
# sort oldest first on each location
for l, obs in all_locations.items():
sorted_list = sorted(obs, key=lambda d: d['DtObsTime'])
all_locations[l] = sorted_list
# Use only locations with verified "first ice cover" date.
all_locations_with_first_ice = {}
for l, obs in all_locations.items():
for o in obs:
if o['RegistrationTid'] == 51:
# if the ice cover is partly or fully formed on observation location or the lake
# 2) delvis islagt på målestedet
# 3) helt islagt på målestedet
# 21) hele sjøen islagt
if (o['FullObject']['IceCoverTID'] == 2) or (o['FullObject']['IceCoverTID'] == 3) or \
(o['FullObject']['IceCoverTID'] == 21):
# and if ice cover before was
# 1) isfritt på målestedet
# 2) delvis islagt på målestedet,
# 11) islegging langs land
# 20) hele sjøen isfri, this is fist ice
if (o['FullObject']['IceCoverBeforeTID'] == 1) or (o['FullObject']['IceCoverBeforeTID'] == 2) or \
(o['FullObject']['IceCoverBeforeTID'] == 11) or (o['FullObject']['IceCoverBeforeTID'] == 20):
all_locations_with_first_ice[l] = obs
# Map all observations from regObs-webapi result structure to the classes in ice.py
all_locations_with_classes = {}
for l, obs in all_locations_with_first_ice.items():
all_locations_with_classes[l] = []
location_name = obs[0]['LocationName']
previous_cover = ice.IceCover(dt.datetime.strptime(from_date, "%Y-%m-%d").date(), "Ikke gitt", 'Ikke gitt', location_name)
for o in obs:
if o['RegistrationTid'] == 51:
cover_date = dt.datetime.strptime(o['DtObsTime'][0:16], "%Y-%m-%dT%H:%M")
cover = o['FullObject']['IceCoverTName']
cover_before = o['FullObject']['IceCoverBeforeTName']
cover_after = o['FullObject']['IceCoverAfterTName']
cover_tid = o['FullObject']['IceCoverTID']
cover_before_tid = o['FullObject']['IceCoverBeforeTID']
cover_after_tid = o['FullObject']['IceCoverAfterTID']
this_cover = ice.IceCover(cover_date, cover, cover_before, location_name)
this_cover.set_regid(o['RegId'])
this_cover.set_locationid(o['LocationId'])
this_cover.set_utm(o['UtmNorth'], o['UtmEast'], o['UtmZone'])
this_cover.set_cover_after(cover_after, cover_after_tid)
this_cover.add_original_object(o)
# if the ice cover is partly or fully formed on observation location or the lake
# 2) delvis islagt på målestedet
# 3) helt islagt på målestedet
# 21) hele sjøen islagt
if cover_tid == 2 or cover_tid == 3 or cover_tid == 21:
# and if ice cover before was
# 1) isfritt, nå første is på målestedet på målestedet
# 2) isfritt, nå første is ved land
# 4) Gradvis islegging
if cover_before_tid == 1 or cover_before_tid == 2 or cover_before_tid == 4:
this_cover.mark_as_first_ice()
# if the ice cover is partly or fully gone on location and there was ice yesterday
# 1) Isfritt på målestedet
# 2) delvis islagt på målestedet
# 20) Hele sjøen isfri
if cover_tid == 1 or cover_tid == 2 or cover_tid == 20:
# 10) isfritt resten av vinteren
# Accepts also ice free observation after 15. March
to_year = this_cover.date.year
first_accepted_date = dt.datetime(to_year, 3, 15)
last_accepted_date = dt.datetime(to_year, 9, 1)
if cover_after_tid == 10 or (cover_date > first_accepted_date and cover_date < last_accepted_date):
this_cover.mark_as_ice_cover_lost()
# copy of this cover so that in next iteration I may look up previous cover.
previous_cover = cp.deepcopy(this_cover)
all_locations_with_classes[l].append(this_cover)
if o['RegistrationTid'] == 50:
ice_column = _webapi_ice_col_to_ice_class(o)
if ice_column is not None:
all_locations_with_classes[l].append(ice_column)
# Map all covers where first_ice or ice_cover_lost is True to zero-height columns. Remove all the rest.
all_locations_with_columns = {}
for k, v in all_locations_with_classes.items():
new_v = []
for o in v:
if isinstance(o, ice.IceCover):
if o.first_ice or o.ice_cover_lost:
new_o = ice.IceColumn(o.date, [])
new_o.add_metadata('OriginalObject', o.metadata['OriginalObject'])
new_o.add_metadata('UTMEast', o.metadata['UTMEast'])
new_o.add_metadata('UTMNorth', o.metadata['UTMNorth'])
new_o.add_metadata('UTMZone', o.metadata['UTMZone'])
new_o.add_metadata('LocationName', o.locationName)
new_o.add_metadata('LocationID', o.LocationID)
new_v.append(new_o)
else:
new_v.append(o)
all_locations_with_columns[k] = new_v
mp.pickle_anything(all_locations_with_columns, file_name_and_path)
else:
# if pickle file with all data for the season does not exist, get data anyway
if not os.path.exists(file_name_and_path):
all_locations_with_columns = get_all_season_ice(year, get_new=True)
else:
all_locations_with_columns = mp.unpickle_anything(file_name_and_path, print_message=False)
return all_locations_with_columns
def get_observations_on_location_id(location_id, year, get_new=False):
"""Uses new or stored data from get_all_season_ice and picks out one requested location.
First ice cover is mapped to Ice.IceColumn of zero height. Ice cover lost (mid season or last) the same.
:param location_id: [int] location id as used in regObs
:param year: [string] Eg '2018-19'
:param get_new: [bool] if get_new, new data is requested from regObs
:return: [list of IceThickness]
"""
all_locations = get_all_season_ice(year, get_new=get_new)
# get_all_season_ice returns a dictionary with observations grouped by location_id.
observations_on_location_for_modeling = []
try:
observations_on_location_for_modeling = all_locations[location_id]
except Exception as e:
ml.log_and_print("getregobsdata.py -> get_observations_on_location_id: {0} not found probably..".format(location_id), print_it=True)
return observations_on_location_for_modeling
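# A minimal usage sketch (not part of the original module; the location id
# below is hypothetical and must be replaced with a real regObs location id):
#
#   obs = get_observations_on_location_id(17080, '2018-19')
#   print('{0} observations found'.format(len(obs)))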
def get_new_regobs_data():
get_all_season_ice('2019-20')
get_all_season_ice('2018-19')
get_all_season_ice('2017-18')
get_all_season_ice('2016-17')
get_all_season_ice('2015-16')
get_all_season_ice('2014-15')
get_all_season_ice('2013-14')
get_all_season_ice('2012-13')
get_all_season_ice('2011-12')
if __name__ == "__main__":
get_new_regobs_data()
# ice_column = get_ice_thickness_on_regid(130979)
# ice_thicks = get_ice_thickness_observations('2017-18')
# ic = get_ice_cover(LocationNames, from_date, to_date)
# first = get_first_ice_cover(LocationNames, from_date, to_date)
# last = get_last_ice_cover(LocationNames, from_date, to_date)
# ith = get_ice_thickness(LocationNames, from_date, to_date)
# all_on_locations = get_all_season_ice_on_location(LocationNames, from_date, to_date)
# all_in_all = get_all_season_ice('2016-17', get_new=True)
pass
| mit |
mdrumond/tensorflow | tensorflow/examples/tutorials/input_fn/boston.py | 76 | 2920 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DNNRegressor with custom input_fn for Housing dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import pandas as pd
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
COLUMNS = ["crim", "zn", "indus", "nox", "rm", "age",
"dis", "tax", "ptratio", "medv"]
FEATURES = ["crim", "zn", "indus", "nox", "rm",
"age", "dis", "tax", "ptratio"]
LABEL = "medv"
def get_input_fn(data_set, num_epochs=None, shuffle=True):
return tf.estimator.inputs.pandas_input_fn(
x=pd.DataFrame({k: data_set[k].values for k in FEATURES}),
y=pd.Series(data_set[LABEL].values),
num_epochs=num_epochs,
shuffle=shuffle)
def main(unused_argv):
# Load datasets
training_set = pd.read_csv("boston_train.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
test_set = pd.read_csv("boston_test.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
# Set of 6 examples for which to predict median house values
prediction_set = pd.read_csv("boston_predict.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
# Feature cols
feature_cols = [tf.feature_column.numeric_column(k) for k in FEATURES]
# Build 2 layer fully connected DNN with 10, 10 units respectively.
regressor = tf.estimator.DNNRegressor(feature_columns=feature_cols,
hidden_units=[10, 10],
model_dir="/tmp/boston_model")
# Train
regressor.train(input_fn=get_input_fn(training_set), steps=5000)
# Evaluate loss over one epoch of test_set.
ev = regressor.evaluate(
input_fn=get_input_fn(test_set, num_epochs=1, shuffle=False))
loss_score = ev["loss"]
print("Loss: {0:f}".format(loss_score))
# Print out predictions over a slice of prediction_set.
y = regressor.predict(
input_fn=get_input_fn(prediction_set, num_epochs=1, shuffle=False))
# .predict() returns an iterator of dicts; convert to a list and print
# predictions
predictions = list(p["predictions"] for p in itertools.islice(y, 6))
print("Predictions: {}".format(str(predictions)))
if __name__ == "__main__":
tf.app.run()
| apache-2.0 |
joshbohde/scikit-learn | examples/plot_neighbors.py | 1 | 1236 | """
=================
Nearest Neighbors
=================
Sample usage of Nearest Neighbors classification.
It will plot the decision boundaries for each class.
"""
print __doc__
import numpy as np
import pylab as pl
from sklearn import neighbors, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
h = .02 # step size in the mesh
# we create an instance of Neighbours Classifier and fit the data.
clf = neighbors.NeighborsClassifier()
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:,0].min()-1, X[:,0].max() + 1
y_min, y_max = X[:,1].min()-1, X[:,1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
pl.set_cmap(pl.cm.Paired)
pl.pcolormesh(xx, yy, Z)
# Plot also the training points
pl.scatter(X[:,0], X[:,1], c=Y)
pl.title('3-Class classification using Nearest Neighbors')
pl.axis('tight')
pl.show()
| bsd-3-clause |
clairetang6/bokeh | bokeh/core/compat/mplexporter/exporter.py | 8 | 12406 | """
Matplotlib Exporter
===================
This submodule contains tools for crawling a matplotlib figure and exporting
relevant pieces to a renderer.
"""
import warnings
import io
from . import utils
import matplotlib
from matplotlib import transforms
from matplotlib.backends.backend_agg import FigureCanvasAgg
class Exporter(object):
"""Matplotlib Exporter
Parameters
----------
renderer : Renderer object
The renderer object called by the exporter to create a figure
visualization. See mplexporter.Renderer for information on the
methods which should be defined within the renderer.
close_mpl : bool
If True (default), close the matplotlib figure as it is rendered. This
is useful for when the exporter is used within the notebook, or with
an interactive matplotlib backend.
"""
def __init__(self, renderer, close_mpl=True):
self.close_mpl = close_mpl
self.renderer = renderer
def run(self, fig):
"""
Run the exporter on the given figure
Parameters
----------
fig : matplotlib.Figure instance
The figure to export
"""
# Calling savefig executes the draw() command, putting elements
# in the correct place.
if fig.canvas is None:
fig.canvas = FigureCanvasAgg(fig)
fig.savefig(io.BytesIO(), format='png', dpi=fig.dpi)
if self.close_mpl:
import matplotlib.pyplot as plt
plt.close(fig)
self.crawl_fig(fig)
@staticmethod
def process_transform(transform, ax=None, data=None, return_trans=False,
force_trans=None):
"""Process the transform and convert data to figure or data coordinates
Parameters
----------
transform : matplotlib Transform object
The transform applied to the data
ax : matplotlib Axes object (optional)
The axes the data is associated with
data : ndarray (optional)
The array of data to be transformed.
return_trans : bool (optional)
If true, return the final transform of the data
force_trans : matplotlib.transform instance (optional)
If supplied, first force the data to this transform
Returns
-------
code : string
Code is either "data", "axes", "figure", or "display", indicating
the type of coordinates output.
transform : matplotlib transform
the transform used to map input data to output data.
Returned only if return_trans is True
new_data : ndarray
Data transformed to match the given coordinate code.
Returned only if data is specified
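Examples
--------
A minimal sketch added for illustration (not part of the original
docstring). It assumes numpy and matplotlib are available and that
``Exporter`` has been imported from this module::
    import numpy as np
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    code, xy = Exporter.process_transform(ax.transData, ax=ax,
                                          data=np.array([[0.0, 0.0]]))
    # code == "data"; xy is the input point in data coordinates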
"""
if isinstance(transform, transforms.BlendedGenericTransform):
warnings.warn("Blended transforms not yet supported. "
"Zoom behavior may not work as expected.")
if force_trans is not None:
if data is not None:
data = (transform - force_trans).transform(data)
transform = force_trans
code = "display"
if ax is not None:
for (c, trans) in [("data", ax.transData),
("axes", ax.transAxes),
("figure", ax.figure.transFigure),
("display", transforms.IdentityTransform())]:
if transform.contains_branch(trans):
code, transform = (c, transform - trans)
break
if data is not None:
if return_trans:
return code, transform.transform(data), transform
else:
return code, transform.transform(data)
else:
if return_trans:
return code, transform
else:
return code
def crawl_fig(self, fig):
"""Crawl the figure and process all axes"""
with self.renderer.draw_figure(fig=fig,
props=utils.get_figure_properties(fig)):
for ax in fig.axes:
self.crawl_ax(ax)
def crawl_ax(self, ax):
"""Crawl the axes and process all elements within"""
with self.renderer.draw_axes(ax=ax,
props=utils.get_axes_properties(ax)):
for line in ax.lines:
self.draw_line(ax, line)
for text in ax.texts:
self.draw_text(ax, text)
for (text, ttp) in zip([ax.xaxis.label, ax.yaxis.label, ax.title],
["xlabel", "ylabel", "title"]):
if(hasattr(text, 'get_text') and text.get_text()):
self.draw_text(ax, text, force_trans=ax.transAxes,
text_type=ttp)
for artist in ax.artists:
# TODO: process other artists
if isinstance(artist, matplotlib.text.Text):
self.draw_text(ax, artist)
for patch in ax.patches:
self.draw_patch(ax, patch)
for collection in ax.collections:
self.draw_collection(ax, collection)
for image in ax.images:
self.draw_image(ax, image)
legend = ax.get_legend()
if legend is not None:
props = utils.get_legend_properties(ax, legend)
with self.renderer.draw_legend(legend=legend, props=props):
if props['visible']:
self.crawl_legend(ax, legend)
def crawl_legend(self, ax, legend):
"""
Recursively look through objects in legend children
"""
legendElements = list(utils.iter_all_children(legend._legend_box,
skipContainers=True))
legendElements.append(legend.legendPatch)
for child in legendElements:
# force a large zorder so it appears on top
child.set_zorder(1E6 + child.get_zorder())
try:
# What kind of object...
if isinstance(child, matplotlib.patches.Patch):
self.draw_patch(ax, child, force_trans=ax.transAxes)
elif isinstance(child, matplotlib.text.Text):
if not (child is legend.get_children()[-1]
and child.get_text() == 'None'):
self.draw_text(ax, child, force_trans=ax.transAxes)
elif isinstance(child, matplotlib.lines.Line2D):
self.draw_line(ax, child, force_trans=ax.transAxes)
elif isinstance(child, matplotlib.collections.Collection):
self.draw_collection(ax, child,
force_pathtrans=ax.transAxes)
else:
warnings.warn("Legend element %s not implemented" % child)
except NotImplementedError:
warnings.warn("Legend element %s not implemented" % child)
def draw_line(self, ax, line, force_trans=None):
"""Process a matplotlib line and call renderer.draw_line"""
coordinates, data = self.process_transform(line.get_transform(),
ax, line.get_xydata(),
force_trans=force_trans)
linestyle = utils.get_line_style(line)
if linestyle['dasharray'] is None:
linestyle = None
markerstyle = utils.get_marker_style(line)
if (markerstyle['marker'] in ['None', 'none', None]
or markerstyle['markerpath'][0].size == 0):
markerstyle = None
label = line.get_label()
if markerstyle or linestyle:
self.renderer.draw_marked_line(data=data, coordinates=coordinates,
linestyle=linestyle,
markerstyle=markerstyle,
label=label,
mplobj=line)
def draw_text(self, ax, text, force_trans=None, text_type=None):
"""Process a matplotlib text object and call renderer.draw_text"""
content = text.get_text()
if content:
transform = text.get_transform()
position = text.get_position()
coords, position = self.process_transform(transform, ax,
position,
force_trans=force_trans)
style = utils.get_text_style(text)
self.renderer.draw_text(text=content, position=position,
coordinates=coords,
text_type=text_type,
style=style, mplobj=text)
def draw_patch(self, ax, patch, force_trans=None):
"""Process a matplotlib patch object and call renderer.draw_path"""
vertices, pathcodes = utils.SVG_path(patch.get_path())
transform = patch.get_transform()
coordinates, vertices = self.process_transform(transform,
ax, vertices,
force_trans=force_trans)
linestyle = utils.get_path_style(patch, fill=patch.get_fill())
self.renderer.draw_path(data=vertices,
coordinates=coordinates,
pathcodes=pathcodes,
style=linestyle,
mplobj=patch)
def draw_collection(self, ax, collection,
force_pathtrans=None,
force_offsettrans=None):
"""Process a matplotlib collection and call renderer.draw_collection"""
(transform, transOffset,
offsets, paths) = collection._prepare_points()
offset_coords, offsets = self.process_transform(
transOffset, ax, offsets, force_trans=force_offsettrans)
path_coords = self.process_transform(
transform, ax, force_trans=force_pathtrans)
processed_paths = [utils.SVG_path(path) for path in paths]
processed_paths = [(self.process_transform(
transform, ax, path[0], force_trans=force_pathtrans)[1], path[1])
for path in processed_paths]
path_transforms = collection.get_transforms()
try:
# matplotlib 1.3: path_transforms are transform objects.
# Convert them to numpy arrays.
path_transforms = [t.get_matrix() for t in path_transforms]
except AttributeError:
# matplotlib 1.4: path transforms are already numpy arrays.
pass
styles = {'linewidth': collection.get_linewidths(),
'facecolor': collection.get_facecolors(),
'edgecolor': collection.get_edgecolors(),
'alpha': collection._alpha,
'zorder': collection.get_zorder()}
offset_dict = {"data": "before",
"screen": "after"}
offset_order = offset_dict[collection.get_offset_position()]
self.renderer.draw_path_collection(paths=processed_paths,
path_coordinates=path_coords,
path_transforms=path_transforms,
offsets=offsets,
offset_coordinates=offset_coords,
offset_order=offset_order,
styles=styles,
mplobj=collection)
def draw_image(self, ax, image):
"""Process a matplotlib image object and call renderer.draw_image"""
self.renderer.draw_image(imdata=utils.image_to_base64(image),
extent=image.get_extent(),
coordinates="data",
style={"alpha": image.get_alpha(),
"zorder": image.get_zorder()},
mplobj=image)
| bsd-3-clause |
IntelLabs/hpat | examples/basic_workflow_parallel.py | 1 | 2075 | # *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import pandas as pd
from numba import njit
# Dataset for analysis
FNAME = "employees.csv"
# This function gets compiled by Numba* and runs multi-threaded
@njit(parallel=True)
def get_analyzed_data():
df = pd.read_csv(FNAME)
s_bonus = pd.Series(df['Bonus %'])
s_first_name = pd.Series(df['First Name'])
m = s_bonus.mean()
names = s_first_name.sort_values()
return m, names
# Printing names and their average bonus percent
mean_bonus, sorted_first_names = get_analyzed_data()
print(sorted_first_names)
print('Average Bonus %:', mean_bonus)
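# For illustration only (not part of the original example): the same
# @njit(parallel=True) decoration can also parallelize explicit NumPy loops
# via numba.prange. A self-contained sketch:
#
#   import numpy as np
#   from numba import njit, prange
#
#   @njit(parallel=True)
#   def parallel_sum(a):
#       total = 0.0
#       for i in prange(a.shape[0]):
#           total += a[i]
#       return total
#
#   print(parallel_sum(np.arange(1_000_000, dtype=np.float64)))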
| bsd-2-clause |
VitensTC/epynet | tests/test_network.py | 1 | 6413 | from epynet import Network
from nose.tools import assert_equal, assert_almost_equal
import pandas as pd
class TestNetwork(object):
@classmethod
def setup_class(self):
self.network = Network(inputfile="tests/testnetwork.inp")
self.network.solve()
@classmethod
def teardown(self):
self.network.ep.ENclose()
def test01_network(self):
# test node count
assert_equal(len(self.network.nodes),11)
# test link count
assert_equal(len(self.network.links),12)
# test reservoir count
assert_equal(len(self.network.reservoirs),1)
# test valve count
assert_equal(len(self.network.valves),1)
# test pump count
assert_equal(len(self.network.pumps),1)
# test tank count
assert_equal(len(self.network.tanks),1)
def test02_link(self):
# test the properties of a single link
link = self.network.links["11"]
# pipe index and uid
assert_equal(link.index,9)
assert_equal(link.uid,"11")
# from/to node
assert_equal(link.from_node.uid,"4")
assert_equal(link.to_node.uid,"9")
def test03_pipe(self):
# test the properties of a single pipe
pipe = self.network.links["11"]
# check type
assert_equal(pipe.link_type,"pipe")
assert_almost_equal(pipe.length,100,2)
assert_almost_equal(pipe.diameter,150,2)
assert_almost_equal(pipe.roughness,0.1,2)
assert_almost_equal(pipe.minorloss,0.1,2)
# flow
assert_almost_equal(pipe.flow,87.92,2)
# velocity
assert_almost_equal(pipe.velocity,1.38,2)
# status
assert_equal(pipe.status,1)
# headloss
assert_almost_equal(pipe.headloss,1.29,2)
# upstream/downstream node
assert_equal(pipe.upstream_node.uid,"4")
assert_equal(pipe.downstream_node.uid,"9")
def test04_pump(self):
pump = self.network.pumps["2"]
# check type
assert_equal(pump.link_type,"pump")
assert_equal(pump.speed,1.0)
assert_almost_equal(pump.flow,109.67,2)
# change speed
pump.speed = 1.5
assert_equal(pump.speed,1.5)
# resolve network
self.network.solve()
assert_almost_equal(pump.flow,164.5,2)
# revert speed
pump.speed = 1.0
self.network.solve()
def test05_valve(self):
valve = self.network.valves["9"]
# check type
assert_equal(valve.link_type,"valve")
# check valve type
assert_equal(valve.valve_type,"PRV")
# valve settings
assert_equal(valve.setting,5)
assert_almost_equal(valve.downstream_node.pressure,5,2)
# change setting
valve.setting = 10
assert_equal(valve.setting,10)
self.network.solve()
assert_almost_equal(valve.downstream_node.pressure,10,2)
def test06_node(self):
node = self.network.nodes["4"]
# uid
assert_equal(node.uid,"4")
# coordinates
coordinates = node.coordinates
assert_almost_equal(coordinates[0],2103.02,2)
assert_almost_equal(coordinates[1],5747.69,2)
# links
assert_equal(len(node.links),3)
# up and downstream links
assert_equal(len(node.downstream_links),2)
assert_equal(len(node.upstream_links),1)
# inflow
assert_equal(round(node.inflow,2),109.67)
# outflow
assert_equal(round(node.outflow,2),round(node.inflow,2)-node.demand)
# elevation
assert_equal(node.elevation,5)
# head
assert_equal(round(node.head,2),25.13)
def test07_junction(self):
junction = self.network.junctions["4"]
assert_equal(round(junction.basedemand,2),1)
assert_equal(round(junction.demand,2),1)
def test08_tank(self):
tank = self.network.tanks["11"]
assert_equal(round(tank.diameter,2),50)
assert_equal(round(tank.initvolume,2),19634.95)
assert_equal(tank.minvolume,0)
assert_equal(tank.minlevel,0)
assert_equal(tank.maxlevel,20)
assert_equal(round(tank.volume,2),19634.95)
assert_equal(round(tank.maxvolume),2*round(tank.volume))
def test09_time(self):
junction = self.network.junctions["4"]
self.network.solve(3600)
assert_equal(round(junction.demand,2),2)
self.network.solve(7200)
assert_equal(round(junction.demand,2),3)
def test10_collections(self):
# collection attributes as pandas Series
assert_almost_equal(self.network.pipes.flow.mean(),46.78,2)
assert_almost_equal(self.network.pipes.diameter.max(),150,2)
assert_almost_equal(self.network.pipes.velocity.min(),0.105,2)
assert_equal(self.network.valves.setting.mean(),10)
assert_almost_equal(self.network.junctions.demand.mean(),2.33,2)
# filtering and slicing collections
assert_equal(len(self.network.pipes[self.network.pipes.velocity > 3]),3)
assert_equal(len(self.network.nodes[self.network.nodes.pressure < 20]),5)
#increase the size of all pipes
self.network.pipes.diameter += 500
assert_almost_equal(self.network.pipes.diameter.mean(),605,2)
self.network.pipes.diameter -= 500
self.network.solve()
# resize pipes, and recalculate velocity
self.network.pipes[self.network.pipes.velocity > 3].diameter += 100
self.network.solve()
assert_equal(len(self.network.pipes[self.network.pipes.velocity > 3]),0)
def test11_timeseries(self):
# run network
self.network.run()
# check return types
# should return Series
assert(isinstance(self.network.pipes["1"].velocity, pd.Series))
# should return Dataframe
assert(isinstance(self.network.pipes.velocity, pd.DataFrame))
# timeseries operations
# pipe 1 max velocity
assert_almost_equal(self.network.pipes["1"].velocity.mean(),1.66,2)
# all day mean velocity
assert_almost_equal(self.network.pipes.velocity.mean().mean(),1.14,2)
# test revert to steady state calculation
self.network.solve()
assert(isinstance(self.network.pipes["1"].velocity, float))
assert(isinstance(self.network.pipes.velocity, pd.Series))
| apache-2.0 |
droundy/deft | papers/polyhedra/figs/plot-order-parameter.py | 1 | 1877 | #!/usr/bin/python2
from __future__ import division
import matplotlib, sys, os, argparse
import read
parser = argparse.ArgumentParser(description='Plot order parameter of polyhedra.')
parser.add_argument('ff', metavar='ff', type=float, help='filling fraction')
parser.add_argument('-N', metavar='N', type=int, default=0,
help="""number of polyhedra, if not supplied then the first
file with the proper filling fraction will be used""")
parser.add_argument('-s', '--shape', metavar='S', default='truncated_tetrahedron',
choices=['cube', 'tetrahedron', 'truncated_tetrahedron'],
help='type of polyhedron, defaults to truncated_tetrahedron')
parser.add_argument('-p', '--periodic', action='store_true',
help='will use periodic cell - defaults to walls otherwise')
parser.add_argument('--hide', action='store_true',
help='will just save the plot and won\'t display it')
args = parser.parse_args()
ff = args.ff
polyhedron = args.shape
if args.periodic:
celltype = 'periodic'
else:
celltype = 'walls'
if args.N == 0:
N = read.get_N("figs/mc/%s-%4.2f-order-%s" %(celltype, ff, polyhedron))
if N == 0:
exit(1)
else: N = args.N
if args.hide:
matplotlib.use('Agg')
from pylab import *
order_parameters = read.read_mc_order(ff, polyhedron, N, celltype)
dim = read.read_mc_dimensions(ff, polyhedron, N, celltype)
dz = dim[2]/len(order_parameters[0,:])
dcostheta = 1/len(order_parameters[:, 0])
print(dz)
print(dcostheta)
z = arange(0, dim[2], dz)
costheta = arange(0, 1, dcostheta)
z, costheta = meshgrid(z, costheta)
ylim(cos(arctan(sqrt(2))), 1)
print(order_parameters.shape)
print(z.shape)
highest = nanmax(order_parameters.flat)
print(highest)
pcolormesh(z, costheta, order_parameters, vmax=highest/10, vmin=0)
print(len(z))
show()
| gpl-2.0 |
sarahgrogan/scikit-learn | sklearn/svm/tests/test_svm.py | 70 | 31674 | """
Testing for Support Vector Machine module (sklearn.svm)
TODO: remove hard coded numerical results when possible
"""
import numpy as np
import itertools
from numpy.testing import assert_array_equal, assert_array_almost_equal
from numpy.testing import assert_almost_equal
from scipy import sparse
from nose.tools import assert_raises, assert_true, assert_equal, assert_false
from sklearn.base import ChangedBehaviorWarning
from sklearn import svm, linear_model, datasets, metrics, base
from sklearn.cross_validation import train_test_split
from sklearn.datasets import make_classification, make_blobs
from sklearn.metrics import f1_score
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils import check_random_state
from sklearn.utils import ConvergenceWarning
from sklearn.utils.validation import NotFittedError
from sklearn.utils.testing import assert_greater, assert_in, assert_less
from sklearn.utils.testing import assert_raises_regexp, assert_warns
from sklearn.utils.testing import assert_warns_message, assert_raise_message
from sklearn.utils.testing import ignore_warnings
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
Y = [1, 1, 1, 2, 2, 2]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [1, 2, 2]
# also load the iris dataset
iris = datasets.load_iris()
rng = check_random_state(42)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_libsvm_parameters():
# Test parameters on classes that make use of libsvm.
clf = svm.SVC(kernel='linear').fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.support_vectors_, (X[1], X[3]))
assert_array_equal(clf.intercept_, [0.])
assert_array_equal(clf.predict(X), Y)
def test_libsvm_iris():
# Check consistency on dataset iris.
# shuffle the dataset so that labels are not ordered
for k in ('linear', 'rbf'):
clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9)
assert_array_equal(clf.classes_, np.sort(clf.classes_))
# check also the low-level API
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64))
pred = svm.libsvm.predict(iris.data, *model)
assert_greater(np.mean(pred == iris.target), .95)
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64),
kernel='linear')
pred = svm.libsvm.predict(iris.data, *model, kernel='linear')
assert_greater(np.mean(pred == iris.target), .95)
pred = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_greater(np.mean(pred == iris.target), .95)
# If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence
# we should get deterministic results (assuming that there is no other
# thread calling this wrapper, and hence `srand`, concurrently).
pred2 = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_array_equal(pred, pred2)
@ignore_warnings
def test_single_sample_1d():
# Test whether SVCs work on a single sample given as a 1-d array
clf = svm.SVC().fit(X, Y)
clf.predict(X[0])
clf = svm.LinearSVC(random_state=0).fit(X, Y)
clf.predict(X[0])
def test_precomputed():
# SVC with a precomputed kernel.
# We test it with a toy dataset and with iris.
clf = svm.SVC(kernel='precomputed')
# Gram matrix for train data (square matrix)
# (we use just a linear kernel)
K = np.dot(X, np.array(X).T)
clf.fit(K, Y)
# Gram matrix for test data (rectangular matrix)
KT = np.dot(T, np.array(X).T)
pred = clf.predict(KT)
assert_raises(ValueError, clf.predict, KT.T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
KT = np.zeros_like(KT)
for i in range(len(T)):
for j in clf.support_:
KT[i, j] = np.dot(T[i], X[j])
pred = clf.predict(KT)
assert_array_equal(pred, true_result)
# same as before, but using a callable function instead of the kernel
# matrix. kernel is just a linear kernel
kfunc = lambda x, y: np.dot(x, y.T)
clf = svm.SVC(kernel=kfunc)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# test a precomputed kernel with the iris dataset
# and check parameters against a linear SVC
clf = svm.SVC(kernel='precomputed')
clf2 = svm.SVC(kernel='linear')
K = np.dot(iris.data, iris.data.T)
clf.fit(K, iris.target)
clf2.fit(iris.data, iris.target)
pred = clf.predict(K)
assert_array_almost_equal(clf.support_, clf2.support_)
assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
assert_array_almost_equal(clf.intercept_, clf2.intercept_)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
K = np.zeros_like(K)
for i in range(len(iris.data)):
for j in clf.support_:
K[i, j] = np.dot(iris.data[i], iris.data[j])
pred = clf.predict(K)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
clf = svm.SVC(kernel=kfunc)
clf.fit(iris.data, iris.target)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
def test_svr():
# Test Support Vector Regression
diabetes = datasets.load_diabetes()
for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
svm.NuSVR(kernel='linear', nu=.4, C=10.),
svm.SVR(kernel='linear', C=10.),
svm.LinearSVR(C=10.),
svm.LinearSVR(C=10.),
):
clf.fit(diabetes.data, diabetes.target)
assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)
# non-regression test; previously, BaseLibSVM would check that
# len(np.unique(y)) < 2, which must only be done for SVC
svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))
def test_linearsvr():
# check that SVR(kernel='linear') and LinearSVC() give
# comparable results
diabetes = datasets.load_diabetes()
lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
score1 = lsvr.score(diabetes.data, diabetes.target)
svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target)
score2 = svr.score(diabetes.data, diabetes.target)
assert np.linalg.norm(lsvr.coef_ - svr.coef_) / np.linalg.norm(svr.coef_) < .1
assert np.abs(score1 - score2) < 0.1
def test_svr_errors():
X = [[0.0], [1.0]]
y = [0.0, 0.5]
# Bad kernel
clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]]))
clf.fit(X, y)
assert_raises(ValueError, clf.predict, X)
def test_oneclass():
# Test OneClassSVM
clf = svm.OneClassSVM()
clf.fit(X)
pred = clf.predict(T)
assert_array_almost_equal(pred, [-1, -1, -1])
assert_array_almost_equal(clf.intercept_, [-1.008], decimal=3)
assert_array_almost_equal(clf.dual_coef_,
[[0.632, 0.233, 0.633, 0.234, 0.632, 0.633]],
decimal=3)
assert_raises(ValueError, lambda: clf.coef_)
def test_oneclass_decision_function():
# Test OneClassSVM decision function
clf = svm.OneClassSVM()
rnd = check_random_state(2)
# Generate train data
X = 0.3 * rnd.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rnd.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
# predict things
y_pred_test = clf.predict(X_test)
assert_greater(np.mean(y_pred_test == 1), .9)
y_pred_outliers = clf.predict(X_outliers)
assert_greater(np.mean(y_pred_outliers == -1), .9)
dec_func_test = clf.decision_function(X_test)
assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1)
dec_func_outliers = clf.decision_function(X_outliers)
assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1)
def test_tweak_params():
# Make sure some tweaking of parameters works.
# We change clf.dual_coef_ at run time and expect .predict() to change
# accordingly. Notice that this is not trivial since it involves a lot
# of C/Python copying in the libsvm bindings.
# The success of this test ensures that the mapping between libsvm and
# the python classifier is complete.
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-.25, .25]])
assert_array_equal(clf.predict([[-.1, -.1]]), [1])
clf._dual_coef_ = np.array([[.0, 1.]])
assert_array_equal(clf.predict([[-.1, -.1]]), [2])
def test_probability():
# Predict probabilities using SVC
# This uses cross validation, so we use a slightly bigger testing set.
for clf in (svm.SVC(probability=True, random_state=0, C=1.0),
svm.NuSVC(probability=True, random_state=0)):
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(
np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
assert_true(np.mean(np.argmax(prob_predict, 1)
== clf.predict(iris.data)) > 0.9)
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8)
def test_decision_function():
# Test decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(iris.data, iris.target)
dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int)])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
# kernel binary:
clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo')
clf.fit(X, Y)
rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
def test_decision_function_shape():
# check that decision_function_shape='ovr' gives
# correct shape and is consistent with predict
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(iris.data, iris.target)
dec = clf.decision_function(iris.data)
assert_equal(dec.shape, (len(iris.data), 3))
assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1))
# with five classes:
X, y = make_blobs(n_samples=80, centers=5, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(X_train, y_train)
dec = clf.decision_function(X_test)
assert_equal(dec.shape, (len(X_test), 5))
assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1))
# check the shape of the decision function with decision_function_shape='ovo'
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(X_train, y_train)
dec = clf.decision_function(X_train)
assert_equal(dec.shape, (len(X_train), 10))
# check deprecation warning
clf.decision_function_shape = None
msg = "change the shape of the decision function"
dec = assert_warns_message(ChangedBehaviorWarning, msg,
clf.decision_function, X_train)
assert_equal(dec.shape, (len(X_train), 10))
def test_svr_decision_function():
# Test SVR's decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
X = iris.data
y = iris.target
# linear kernel
reg = svm.SVR(kernel='linear', C=0.1).fit(X, y)
dec = np.dot(X, reg.coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
# rbf kernel
reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y)
rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
def test_weight():
# Test class weights
clf = svm.SVC(class_weight={1: 0.1})
# we give a small weights to class 1
clf.fit(X, Y)
# so all predicted values belong to class 2
assert_array_almost_equal(clf.predict(X), [2] * 6)
X_, y_ = make_classification(n_samples=200, n_features=10,
weights=[0.833, 0.167], random_state=2)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0), svm.SVC()):
clf.set_params(class_weight={0: .1, 1: 10})
clf.fit(X_[:100], y_[:100])
y_pred = clf.predict(X_[100:])
assert_true(f1_score(y_[100:], y_pred) > .3)
def test_sample_weights():
# Test weights on individual samples
# TODO: check on NuSVR, OneClass, etc.
clf = svm.SVC()
clf.fit(X, Y)
assert_array_equal(clf.predict([X[2]]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict([X[2]]), [2.])
# test that rescaling all samples is the same as changing C
clf = svm.SVC()
clf.fit(X, Y)
dual_coef_no_weight = clf.dual_coef_
clf.set_params(C=100)
clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_)
def test_auto_weight():
# Test class weights for imbalanced data
from sklearn.linear_model import LogisticRegression
# We take as dataset the two-dimensional projection of iris so
# that it is not separable and remove half of predictors from
# class 1.
# We add one to the targets as a non-regression test: class_weight="balanced"
# used to work only when the labels were a range [0..K).
from sklearn.utils import compute_class_weight
X, y = iris.data[:, :2], iris.target + 1
unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])
classes = np.unique(y[unbalanced])
class_weights = compute_class_weight('balanced', classes, y[unbalanced])
assert_true(np.argmax(class_weights) == 2)
for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0),
LogisticRegression()):
# check that score is better when class='balanced' is set.
y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
clf.set_params(class_weight='balanced')
y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X)
assert_true(metrics.f1_score(y, y_pred, average='weighted')
<= metrics.f1_score(y, y_pred_balanced,
average='weighted'))
def test_bad_input():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X, Y2)
# Test with arrays that are non-contiguous.
for clf in (svm.SVC(), svm.LinearSVC(random_state=0)):
Xf = np.asfortranarray(X)
assert_false(Xf.flags['C_CONTIGUOUS'])
yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
yf = yf[:, -1]
assert_false(yf.flags['F_CONTIGUOUS'])
assert_false(yf.flags['C_CONTIGUOUS'])
clf.fit(Xf, yf)
assert_array_equal(clf.predict(T), true_result)
# error for precomputed kernels
clf = svm.SVC(kernel='precomputed')
assert_raises(ValueError, clf.fit, X, Y)
# sample_weight bad dimensions
clf = svm.SVC()
assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1))
# predict with sparse input when trained with dense
clf = svm.SVC().fit(X, Y)
assert_raises(ValueError, clf.predict, sparse.lil_matrix(X))
Xt = np.array(X).T
clf.fit(np.dot(X, Xt), Y)
assert_raises(ValueError, clf.predict, X)
clf = svm.SVC()
clf.fit(X, Y)
assert_raises(ValueError, clf.predict, Xt)
def test_sparse_precomputed():
clf = svm.SVC(kernel='precomputed')
sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]])
try:
clf.fit(sparse_gram, [0, 1])
assert not "reached"
except TypeError as e:
assert_in("Sparse precomputed", str(e))
def test_linearsvc_parameters():
# Test possible parameter combinations in LinearSVC
# Generate list of possible parameter combinations
losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo']
penalties, duals = ['l1', 'l2', 'bar'], [True, False]
X, y = make_classification(n_samples=5, n_features=5)
for loss, penalty, dual in itertools.product(losses, penalties, duals):
clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual)
if ((loss, penalty) == ('hinge', 'l1') or
(loss, penalty, dual) == ('hinge', 'l2', False) or
(penalty, dual) == ('l1', True) or
loss == 'foo' or penalty == 'bar'):
assert_raises_regexp(ValueError,
"Unsupported set of arguments.*penalty='%s.*"
"loss='%s.*dual=%s"
% (penalty, loss, dual),
clf.fit, X, y)
else:
clf.fit(X, y)
# Incorrect loss value - test if explicit error message is raised
assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*",
svm.LinearSVC(loss="l3").fit, X, y)
# FIXME remove in 1.0
def test_linearsvx_loss_penalty_deprecations():
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the %s will be removed in %s")
# LinearSVC
# loss l1/L1 --> hinge
assert_warns_message(DeprecationWarning,
msg % ("l1", "hinge", "loss='l1'", "1.0"),
svm.LinearSVC(loss="l1").fit, X, y)
# loss l2/L2 --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("L2", "squared_hinge", "loss='L2'", "1.0"),
svm.LinearSVC(loss="L2").fit, X, y)
# LinearSVR
# loss l1/L1 --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("L1", "epsilon_insensitive", "loss='L1'",
"1.0"),
svm.LinearSVR(loss="L1").fit, X, y)
# loss l2/L2 --> squared_epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("l2", "squared_epsilon_insensitive",
"loss='l2'", "1.0"),
svm.LinearSVR(loss="l2").fit, X, y)
# FIXME remove in 0.18
def test_linear_svx_uppercase_loss_penalty():
# Check if Upper case notation is supported by _fit_liblinear
# which is called by fit
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the uppercase notation will be removed in %s")
# loss SQUARED_hinge --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("SQUARED_hinge", "squared_hinge", "0.18"),
svm.LinearSVC(loss="SQUARED_hinge").fit, X, y)
# penalty L2 --> l2
assert_warns_message(DeprecationWarning,
msg.replace("loss", "penalty")
% ("L2", "l2", "0.18"),
svm.LinearSVC(penalty="L2").fit, X, y)
# loss EPSILON_INSENSITIVE --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("EPSILON_INSENSITIVE", "epsilon_insensitive",
"0.18"),
svm.LinearSVR(loss="EPSILON_INSENSITIVE").fit, X, y)
def test_linearsvc():
# Test basic routines using LinearSVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
# by default should have intercept
assert_true(clf.fit_intercept)
assert_array_equal(clf.predict(T), true_result)
assert_array_almost_equal(clf.intercept_, [0], decimal=3)
# the same with l1 penalty
clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty with dual formulation
clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty, l1 loss
clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0)
clf.fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# test also decision function
dec = clf.decision_function(T)
res = (dec > 0).astype(np.int) + 1
assert_array_equal(res, true_result)
def test_linearsvc_crammer_singer():
# Test LinearSVC with crammer_singer multi-class svm
ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0)
cs_clf.fit(iris.data, iris.target)
# similar prediction for ovr and crammer-singer:
assert_true((ovr_clf.predict(iris.data) ==
cs_clf.predict(iris.data)).mean() > .9)
# classifiers shouldn't be the same
assert_true((ovr_clf.coef_ != cs_clf.coef_).all())
# test decision function
assert_array_equal(cs_clf.predict(iris.data),
np.argmax(cs_clf.decision_function(iris.data), axis=1))
dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))
def test_crammer_singer_binary():
# Test Crammer-Singer formulation in the binary case
X, y = make_classification(n_classes=2, random_state=0)
for fit_intercept in (True, False):
acc = svm.LinearSVC(fit_intercept=fit_intercept,
multi_class="crammer_singer",
random_state=0).fit(X, y).score(X, y)
assert_greater(acc, 0.9)
def test_linearsvc_iris():
# Test that LinearSVC gives plausible predictions on the iris dataset
# Also, test symbolic class names (classes_).
target = iris.target_names[iris.target]
clf = svm.LinearSVC(random_state=0).fit(iris.data, target)
assert_equal(set(clf.classes_), set(iris.target_names))
assert_greater(np.mean(clf.predict(iris.data) == target), 0.8)
dec = clf.decision_function(iris.data)
pred = iris.target_names[np.argmax(dec, 1)]
assert_array_equal(pred, clf.predict(iris.data))
def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
# Test that dense liblinear honours intercept_scaling param
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge',
dual=False, C=4, tol=1e-7, random_state=0)
assert_true(clf.intercept_scaling == 1, clf.intercept_scaling)
assert_true(clf.fit_intercept)
# when intercept_scaling is low the intercept value is highly "penalized"
# by regularization
clf.intercept_scaling = 1
clf.fit(X, y)
assert_almost_equal(clf.intercept_, 0, decimal=5)
# when intercept_scaling is sufficiently high, the intercept value
# is not affected by regularization
clf.intercept_scaling = 100
clf.fit(X, y)
intercept1 = clf.intercept_
assert_less(intercept1, -1)
# when intercept_scaling is sufficiently high, the intercept value
# doesn't depend on intercept_scaling value
clf.intercept_scaling = 1000
clf.fit(X, y)
intercept2 = clf.intercept_
assert_array_almost_equal(intercept1, intercept2, decimal=2)
def test_liblinear_set_coef():
# multi-class case
clf = svm.LinearSVC().fit(iris.data, iris.target)
values = clf.decision_function(iris.data)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(iris.data)
assert_array_almost_equal(values, values2)
# binary-class case
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = svm.LinearSVC().fit(X, y)
values = clf.decision_function(X)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(X)
assert_array_equal(values, values2)
def test_immutable_coef_property():
# Check that primal coef modification are not silently ignored
svms = [
svm.SVC(kernel='linear').fit(iris.data, iris.target),
svm.NuSVC(kernel='linear').fit(iris.data, iris.target),
svm.SVR(kernel='linear').fit(iris.data, iris.target),
svm.NuSVR(kernel='linear').fit(iris.data, iris.target),
svm.OneClassSVM(kernel='linear').fit(iris.data),
]
for clf in svms:
assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3))
assert_raises((RuntimeError, ValueError),
clf.coef_.__setitem__, (0, 0), 0)
def test_linearsvc_verbose():
# stdout: redirect
import os
stdout = os.dup(1) # save original stdout
os.dup2(os.pipe()[1], 1) # replace it
# actual call
clf = svm.LinearSVC(verbose=1)
clf.fit(X, Y)
# stdout: restore
os.dup2(stdout, 1) # restore original stdout
def test_svc_clone_with_callable_kernel():
# create SVM with callable linear kernel, check that results are the same
# as with built-in linear kernel
svm_callable = svm.SVC(kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0,
decision_function_shape='ovr')
# clone for checking clonability with lambda functions..
svm_cloned = base.clone(svm_callable)
svm_cloned.fit(iris.data, iris.target)
svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0,
decision_function_shape='ovr')
svm_builtin.fit(iris.data, iris.target)
assert_array_almost_equal(svm_cloned.dual_coef_,
svm_builtin.dual_coef_)
assert_array_almost_equal(svm_cloned.intercept_,
svm_builtin.intercept_)
assert_array_equal(svm_cloned.predict(iris.data),
svm_builtin.predict(iris.data))
assert_array_almost_equal(svm_cloned.predict_proba(iris.data),
svm_builtin.predict_proba(iris.data),
decimal=4)
assert_array_almost_equal(svm_cloned.decision_function(iris.data),
svm_builtin.decision_function(iris.data))
def test_svc_bad_kernel():
svc = svm.SVC(kernel=lambda x, y: x)
assert_raises(ValueError, svc.fit, X, Y)
def test_timeout():
a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, a.fit, X, Y)
def test_unfitted():
X = "foo!" # input validation not required when SVM not fitted
clf = svm.SVC()
assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
clf = svm.NuSVR()
assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
# ignore convergence warnings from max_iter=1
@ignore_warnings
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
def test_linear_svc_convergence_warnings():
# Test that warnings are raised if model does not converge
lsvc = svm.LinearSVC(max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, lsvc.fit, X, Y)
assert_equal(lsvc.n_iter_, 2)
def test_svr_coef_sign():
# Test that SVR(kernel="linear") has coef_ with the right sign.
# Non-regression test for #2933.
X = np.random.RandomState(21).randn(10, 3)
y = np.random.RandomState(12).randn(10)
for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'),
svm.LinearSVR()]:
svr.fit(X, y)
assert_array_almost_equal(svr.predict(X),
np.dot(X, svr.coef_.ravel()) + svr.intercept_)
def test_linear_svc_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
lsvc = svm.LinearSVC(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % lsvc.intercept_scaling)
assert_raise_message(ValueError, msg, lsvc.fit, X, Y)
def test_lsvc_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
lsvc = svm.LinearSVC(fit_intercept=False)
lsvc.fit(X, Y)
assert_equal(lsvc.intercept_, 0.)
def test_hasattr_predict_proba():
# Method must be (un)available before or after fit, switched by
# `probability` param
G = svm.SVC(probability=True)
assert_true(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_true(hasattr(G, 'predict_proba'))
G = svm.SVC(probability=False)
assert_false(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_false(hasattr(G, 'predict_proba'))
# Switching to `probability=True` after fitting should make
# predict_proba available, but calling it must not work:
G.probability = True
assert_true(hasattr(G, 'predict_proba'))
msg = "predict_proba is not available when fitted with probability=False"
assert_raise_message(NotFittedError, msg, G.predict_proba, iris.data)
| bsd-3-clause |
victorbergelin/scikit-learn | examples/hetero_feature_union.py | 288 | 6236 | """
=============================================
Feature Union with Heterogeneous Data Sources
=============================================
Datasets can often contain components that require different feature
extraction and processing pipelines. This scenario might occur when:
1. Your dataset consists of heterogeneous data types (e.g. raster images and
text captions)
2. Your dataset is stored in a Pandas DataFrame and different columns
require different processing pipelines.
This example demonstrates how to use
:class:`sklearn.feature_extraction.FeatureUnion` on a dataset containing
different types of features. We use the 20-newsgroups dataset and compute
standard bag-of-words features for the subject line and body in separate
pipelines as well as ad hoc features on the body. We combine them (with
weights) using a FeatureUnion and finally train a classifier on the combined
set of features.
The choice of features is not particularly helpful, but serves to illustrate
the technique.
"""
# Author: Matt Terry <[email protected]>
#
# License: BSD 3 clause
from __future__ import print_function
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.datasets import fetch_20newsgroups
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_footer
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_quoting
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import classification_report
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
class ItemSelector(BaseEstimator, TransformerMixin):
"""For data grouped by feature, select subset of data at a provided key.
The data is expected to be stored in a 2D data structure, where the first
index is over features and the second is over samples. i.e.
>> len(data[key]) == n_samples
Please note that this is the opposite convention to sklearn feature
matrices (where the first index corresponds to the sample).
ItemSelector only requires that the collection implement getitem
(data[key]). Examples include: a dict of lists, 2D numpy array, Pandas
DataFrame, numpy record array, etc.
>> data = {'a': [1, 5, 2, 5, 2, 8],
'b': [9, 4, 1, 4, 1, 3]}
>> ds = ItemSelector(key='a')
>> data['a'] == ds.transform(data)
ItemSelector is not designed to handle data grouped by sample. (e.g. a
list of dicts). If your data is structured this way, consider a
transformer along the lines of `sklearn.feature_extraction.DictVectorizer`.
Parameters
----------
key : hashable, required
The key corresponding to the desired value in a mappable.
"""
def __init__(self, key):
self.key = key
def fit(self, x, y=None):
return self
def transform(self, data_dict):
return data_dict[self.key]
class TextStats(BaseEstimator, TransformerMixin):
"""Extract features from each document for DictVectorizer"""
def fit(self, x, y=None):
return self
def transform(self, posts):
return [{'length': len(text),
'num_sentences': text.count('.')}
for text in posts]
class SubjectBodyExtractor(BaseEstimator, TransformerMixin):
"""Extract the subject & body from a usenet post in a single pass.
Takes a sequence of strings and produces a dict of sequences. Keys are
`subject` and `body`.
"""
def fit(self, x, y=None):
return self
def transform(self, posts):
features = np.recarray(shape=(len(posts),),
dtype=[('subject', object), ('body', object)])
for i, text in enumerate(posts):
headers, _, bod = text.partition('\n\n')
bod = strip_newsgroup_footer(bod)
bod = strip_newsgroup_quoting(bod)
features['body'][i] = bod
prefix = 'Subject:'
sub = ''
for line in headers.split('\n'):
if line.startswith(prefix):
sub = line[len(prefix):]
break
features['subject'][i] = sub
return features
pipeline = Pipeline([
# Extract the subject & body
('subjectbody', SubjectBodyExtractor()),
# Use FeatureUnion to combine the features from subject and body
('union', FeatureUnion(
transformer_list=[
# Pipeline for pulling features from the post's subject line
('subject', Pipeline([
('selector', ItemSelector(key='subject')),
('tfidf', TfidfVectorizer(min_df=50)),
])),
# Pipeline for standard bag-of-words model for body
('body_bow', Pipeline([
('selector', ItemSelector(key='body')),
('tfidf', TfidfVectorizer()),
('best', TruncatedSVD(n_components=50)),
])),
# Pipeline for pulling ad hoc features from post's body
('body_stats', Pipeline([
('selector', ItemSelector(key='body')),
('stats', TextStats()), # returns a list of dicts
('vect', DictVectorizer()), # list of dicts -> feature matrix
])),
],
# weight components in FeatureUnion
transformer_weights={
'subject': 0.8,
'body_bow': 0.5,
'body_stats': 1.0,
},
)),
# Use a SVC classifier on the combined features
('svc', SVC(kernel='linear')),
])
# limit the list of categories to make running this example faster.
categories = ['alt.atheism', 'talk.religion.misc']
train = fetch_20newsgroups(random_state=1,
subset='train',
categories=categories,
)
test = fetch_20newsgroups(random_state=1,
subset='test',
categories=categories,
)
pipeline.fit(train.data, train.target)
y = pipeline.predict(test.data)
print(classification_report(y, test.target))
| bsd-3-clause |
chenyyx/scikit-learn-doc-zh | examples/zh/svm/plot_iris.py | 1 | 4011 | # -*- coding:UTF-8 -*-
"""
===========================================================
在鸢尾花卉数据集上绘制不同的 SVM 分类器
===========================================================
在鸢尾花卉数据集的 2D 投影上的不同线性 SVM 分类器的比较。我们只考虑这个数据集的前 2 个特征:
- 萼片长度
- 萼片宽度
此示例显示如何绘制具有不同 kernel 的四个 SVM 分类器的决策表面。
线性模型 ``LinearSVC()`` 和 ``SVC(kernel='linear')`` 产生稍微不同的决策边界。这可能是以下差异的结果:
- ``LinearSVC`` 可以最大限度地减少 squared hinge loss 而 ``SVC`` 最大限度地减少 regular hinge loss.
- ``LinearSVC`` 使用 One-vs-All (也被称作 One-vs-Rest) multiclass reduction ,而 ``SVC`` 则使用 One-vs-One multiclass reduction 。
两个线性模型具有线性决策边界(相交超平面),而非线性内核模型(多项式或 高斯 RBF)具有更灵活的非线性决策边界,其形状取决于内核的种类及其参数。
.. NOTE:: 在绘制玩具 2D 数据集分类器的决策函数的时候可以帮助您直观了解其各自的表现力,请注意,这些直觉并不总是推广到更加接近于现实的高维度的问题。
"""
print(__doc__)
# Load the numpy, matplotlib and sklearn modules
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
def make_meshgrid(x, y, h=.02):
"""创建一个要绘制的点的网格
参数
----------
x: 基于 x 轴的网格数据
y: 基于 y 轴的网格数据
h: meshgrid 的步长参数, 是可选的
返回
-------
xx, yy : ndarray
"""
x_min, x_max = x.min() - 1, x.max() + 1
y_min, y_max = y.min() - 1, y.max() + 1
    # meshgrid() builds a grid in the plane from points on the two coordinate axes. See: https://docs.scipy.org/doc/numpy/reference/generated/numpy.meshgrid.html
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
return xx, yy
def plot_contours(ax, clf, xx, yy, **params):
"""绘制分类器的决策边界.
参数
----------
ax: matplotlib 轴对象
clf: 一个分类器
xx: meshgrid ndarray
yy: meshgrid ndarray
params: params 的字典传递给 contourf, 可选
"""
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
    # For the usage of contourf(), see: http://www.labri.fr/perso/nrougier/teaching/matplotlib/ and http://matplotlib.org/examples/pylab_examples/contourf_demo.html
out = ax.contourf(xx, yy, Z, **params)
return out
# Import some data to play with
iris = datasets.load_iris()
# Take only the first two features. We could avoid this by using a two-dim dataset.
X = iris.data[:, :2]
y = iris.target
# We create an instance of SVM and fit the data. We do not scale our data
# since we want to plot the support vectors.
C = 1.0  # SVM regularization parameter
models = (svm.SVC(kernel='linear', C=C),
svm.LinearSVC(C=C),
svm.SVC(kernel='rbf', gamma=0.7, C=C),
svm.SVC(kernel='poly', degree=3, C=C))
models = (clf.fit(X, y) for clf in models)
# Titles for the plots
titles = ('SVC with linear kernel',
'LinearSVC (linear kernel)',
'SVC with RBF kernel',
'SVC with polynomial (degree 3) kernel')
# Set up a 2x2 grid for plotting.
fig, sub = plt.subplots(2, 2)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
X0, X1 = X[:, 0], X[:, 1]
xx, yy = make_meshgrid(X0, X1)
for clf, title, ax in zip(models, titles, sub.flatten()):
plot_contours(ax, clf, xx, yy,
cmap=plt.cm.coolwarm, alpha=0.8)
ax.scatter(X0, X1, c=y, cmap=plt.cm.coolwarm, s=20, edgecolors='k')
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xlabel('Sepal length')
ax.set_ylabel('Sepal width')
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(title)
plt.show()
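# Hedged addition (not in the original example): report the training accuracy
# of each classifier. The `models` generator above is consumed by the plotting
# loop, so the four classifiers are rebuilt here with the same settings before
# being fitted and scored on the full (2-feature) training data.
refit_models = (svm.SVC(kernel='linear', C=C),
                svm.LinearSVC(C=C),
                svm.SVC(kernel='rbf', gamma=0.7, C=C),
                svm.SVC(kernel='poly', degree=3, C=C))
for title, clf in zip(titles, refit_models):
    print(title, clf.fit(X, y).score(X, y))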
| gpl-3.0 |
gnieboer/tensorflow | tensorflow/examples/learn/multiple_gpu.py | 49 | 3078 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of using Estimator with multiple GPUs to distribute one model.
This example only runs if you have multiple GPUs to assign to.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import datasets
from sklearn import metrics
import tensorflow as tf
layers = tf.contrib.layers
learn = tf.contrib.learn
def my_model(features, target):
"""DNN with three hidden layers, and dropout of 0.1 probability.
Note: If you want to run this example with multiple GPUs, Cuda Toolkit 7.0 and
CUDNN 6.5 V2 from NVIDIA need to be installed beforehand.
Args:
features: `Tensor` of input features.
target: `Tensor` of targets.
Returns:
Tuple of predictions, loss and training op.
"""
# Convert the target to a one-hot tensor of shape (length of features, 3) and
  # with an on-value of 1 for each one-hot vector of length 3.
target = tf.one_hot(target, 3, 1, 0)
# Create three fully connected layers respectively of size 10, 20, and 10 with
# each layer having a dropout probability of 0.1.
normalizer_fn = layers.dropout
normalizer_params = {'keep_prob': 0.5}
with tf.device('/gpu:1'):
features = layers.stack(
features,
layers.fully_connected, [10, 20, 10],
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params)
with tf.device('/gpu:2'):
# Compute logits (1 per class) and compute loss.
logits = layers.fully_connected(features, 3, activation_fn=None)
loss = tf.losses.softmax_cross_entropy(target, logits)
# Create a tensor for training op.
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adagrad',
learning_rate=0.1)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
classifier = learn.Estimator(model_fn=my_model)
classifier.fit(x_train, y_train, steps=1000)
y_predicted = [
p['class'] for p in classifier.predict(
x_test, as_iterable=True)
]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
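# Hedged addition (not in the original example): a small helper to check that
# enough GPUs are visible before training, since `my_model` pins layers to
# '/gpu:1' and '/gpu:2' (i.e. it assumes at least three visible GPUs).
# `device_lib.list_local_devices()` is a TensorFlow 1.x utility; this check is
# illustrative only and is not wired into `main`.
def check_visible_gpus(min_gpus=3):
  from tensorflow.python.client import device_lib
  gpus = [d.name for d in device_lib.list_local_devices()
          if d.device_type == 'GPU']
  if len(gpus) < min_gpus:
    print('Warning: only %d GPU(s) visible: %s' % (len(gpus), gpus))
  return gpus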
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/core/reshape.py | 7 | 40736 | # pylint: disable=E1101,E1103
# pylint: disable=W0703,W0622,W0613,W0201
from pandas.compat import range, zip
from pandas import compat
import itertools
import numpy as np
from pandas.types.common import _ensure_platform_int, is_list_like
from pandas.types.cast import _maybe_promote
from pandas.types.missing import notnull
import pandas.types.concat as _concat
from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.sparse import SparseDataFrame, SparseSeries
from pandas.sparse.array import SparseArray
from pandas._sparse import IntIndex
from pandas.core.categorical import Categorical, _factorize_from_iterable
from pandas.core.groupby import get_group_index, _compress_group_index
import pandas.core.algorithms as algos
import pandas.algos as _algos
from pandas.core.index import MultiIndex, _get_na_value
class _Unstacker(object):
"""
Helper class to unstack data / pivot with multi-level index
Parameters
----------
level : int or str, default last level
Level to "unstack". Accepts a name for the level.
Examples
--------
>>> import pandas as pd
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1.0, 5.0), index=index)
>>> s
one a 1
b 2
two a 3
b 4
dtype: float64
>>> s.unstack(level=-1)
a b
one 1 2
two 3 4
>>> s.unstack(level=0)
one two
a 1 2
b 3 4
Returns
-------
unstacked : DataFrame
"""
def __init__(self, values, index, level=-1, value_columns=None,
fill_value=None):
self.is_categorical = None
if values.ndim == 1:
if isinstance(values, Categorical):
self.is_categorical = values
values = np.array(values)
values = values[:, np.newaxis]
self.values = values
self.value_columns = value_columns
self.fill_value = fill_value
if value_columns is None and values.shape[1] != 1: # pragma: no cover
raise ValueError('must pass column labels for multi-column data')
self.index = index
if isinstance(self.index, MultiIndex):
if index._reference_duplicate_name(level):
msg = ("Ambiguous reference to {0}. The index "
"names are not unique.".format(level))
raise ValueError(msg)
self.level = self.index._get_level_number(level)
# when index includes `nan`, need to lift levels/strides by 1
self.lift = 1 if -1 in self.index.labels[self.level] else 0
self.new_index_levels = list(index.levels)
self.new_index_names = list(index.names)
self.removed_name = self.new_index_names.pop(self.level)
self.removed_level = self.new_index_levels.pop(self.level)
self._make_sorted_values_labels()
self._make_selectors()
def _make_sorted_values_labels(self):
v = self.level
labs = list(self.index.labels)
levs = list(self.index.levels)
to_sort = labs[:v] + labs[v + 1:] + [labs[v]]
sizes = [len(x) for x in levs[:v] + levs[v + 1:] + [levs[v]]]
comp_index, obs_ids = get_compressed_ids(to_sort, sizes)
ngroups = len(obs_ids)
indexer = _algos.groupsort_indexer(comp_index, ngroups)[0]
indexer = _ensure_platform_int(indexer)
self.sorted_values = algos.take_nd(self.values, indexer, axis=0)
self.sorted_labels = [l.take(indexer) for l in to_sort]
def _make_selectors(self):
new_levels = self.new_index_levels
# make the mask
remaining_labels = self.sorted_labels[:-1]
level_sizes = [len(x) for x in new_levels]
comp_index, obs_ids = get_compressed_ids(remaining_labels, level_sizes)
ngroups = len(obs_ids)
comp_index = _ensure_platform_int(comp_index)
stride = self.index.levshape[self.level] + self.lift
self.full_shape = ngroups, stride
selector = self.sorted_labels[-1] + stride * comp_index + self.lift
mask = np.zeros(np.prod(self.full_shape), dtype=bool)
mask.put(selector, True)
if mask.sum() < len(self.index):
raise ValueError('Index contains duplicate entries, '
'cannot reshape')
self.group_index = comp_index
self.mask = mask
self.unique_groups = obs_ids
self.compressor = comp_index.searchsorted(np.arange(ngroups))
def get_result(self):
# TODO: find a better way than this masking business
values, value_mask = self.get_new_values()
columns = self.get_new_columns()
index = self.get_new_index()
# filter out missing levels
if values.shape[1] > 0:
col_inds, obs_ids = _compress_group_index(self.sorted_labels[-1])
# rare case, level values not observed
if len(obs_ids) < self.full_shape[1]:
inds = (value_mask.sum(0) > 0).nonzero()[0]
values = algos.take_nd(values, inds, axis=1)
columns = columns[inds]
# may need to coerce categoricals here
if self.is_categorical is not None:
categories = self.is_categorical.categories
ordered = self.is_categorical.ordered
values = [Categorical(values[:, i], categories=categories,
ordered=ordered)
for i in range(values.shape[-1])]
return DataFrame(values, index=index, columns=columns)
def get_new_values(self):
values = self.values
# place the values
length, width = self.full_shape
stride = values.shape[1]
result_width = width * stride
result_shape = (length, result_width)
# if our mask is all True, then we can use our existing dtype
if self.mask.all():
dtype = values.dtype
new_values = np.empty(result_shape, dtype=dtype)
else:
dtype, fill_value = _maybe_promote(values.dtype, self.fill_value)
new_values = np.empty(result_shape, dtype=dtype)
new_values.fill(fill_value)
new_mask = np.zeros(result_shape, dtype=bool)
# is there a simpler / faster way of doing this?
for i in range(values.shape[1]):
chunk = new_values[:, i * width:(i + 1) * width]
mask_chunk = new_mask[:, i * width:(i + 1) * width]
chunk.flat[self.mask] = self.sorted_values[:, i]
mask_chunk.flat[self.mask] = True
return new_values, new_mask
def get_new_columns(self):
if self.value_columns is None:
if self.lift == 0:
return self.removed_level
lev = self.removed_level
return lev.insert(0, _get_na_value(lev.dtype.type))
stride = len(self.removed_level) + self.lift
width = len(self.value_columns)
propagator = np.repeat(np.arange(width), stride)
if isinstance(self.value_columns, MultiIndex):
new_levels = self.value_columns.levels + (self.removed_level,)
new_names = self.value_columns.names + (self.removed_name,)
new_labels = [lab.take(propagator)
for lab in self.value_columns.labels]
else:
new_levels = [self.value_columns, self.removed_level]
new_names = [self.value_columns.name, self.removed_name]
new_labels = [propagator]
new_labels.append(np.tile(np.arange(stride) - self.lift, width))
return MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
def get_new_index(self):
result_labels = [lab.take(self.compressor)
for lab in self.sorted_labels[:-1]]
# construct the new index
if len(self.new_index_levels) == 1:
lev, lab = self.new_index_levels[0], result_labels[0]
if (lab == -1).any():
lev = lev.insert(len(lev), _get_na_value(lev.dtype.type))
return lev.take(lab)
return MultiIndex(levels=self.new_index_levels, labels=result_labels,
names=self.new_index_names, verify_integrity=False)
def _unstack_multiple(data, clocs):
from pandas.core.groupby import decons_obs_group_ids
if len(clocs) == 0:
return data
# NOTE: This doesn't deal with hierarchical columns yet
index = data.index
clocs = [index._get_level_number(i) for i in clocs]
rlocs = [i for i in range(index.nlevels) if i not in clocs]
clevels = [index.levels[i] for i in clocs]
clabels = [index.labels[i] for i in clocs]
cnames = [index.names[i] for i in clocs]
rlevels = [index.levels[i] for i in rlocs]
rlabels = [index.labels[i] for i in rlocs]
rnames = [index.names[i] for i in rlocs]
shape = [len(x) for x in clevels]
group_index = get_group_index(clabels, shape, sort=False, xnull=False)
comp_ids, obs_ids = _compress_group_index(group_index, sort=False)
recons_labels = decons_obs_group_ids(comp_ids, obs_ids, shape, clabels,
xnull=False)
dummy_index = MultiIndex(levels=rlevels + [obs_ids],
labels=rlabels + [comp_ids],
names=rnames + ['__placeholder__'],
verify_integrity=False)
if isinstance(data, Series):
dummy = data.copy()
dummy.index = dummy_index
unstacked = dummy.unstack('__placeholder__')
new_levels = clevels
new_names = cnames
new_labels = recons_labels
else:
if isinstance(data.columns, MultiIndex):
result = data
for i in range(len(clocs)):
val = clocs[i]
result = result.unstack(val)
clocs = [v if i > v else v - 1 for v in clocs]
return result
dummy = data.copy()
dummy.index = dummy_index
unstacked = dummy.unstack('__placeholder__')
if isinstance(unstacked, Series):
unstcols = unstacked.index
else:
unstcols = unstacked.columns
new_levels = [unstcols.levels[0]] + clevels
new_names = [data.columns.name] + cnames
new_labels = [unstcols.labels[0]]
for rec in recons_labels:
new_labels.append(rec.take(unstcols.labels[-1]))
new_columns = MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
if isinstance(unstacked, Series):
unstacked.index = new_columns
else:
unstacked.columns = new_columns
return unstacked
def pivot(self, index=None, columns=None, values=None):
"""
See DataFrame.pivot
"""
if values is None:
cols = [columns] if index is None else [index, columns]
append = index is None
indexed = self.set_index(cols, append=append)
return indexed.unstack(columns)
else:
if index is None:
index = self.index
else:
index = self[index]
indexed = Series(self[values].values,
index=MultiIndex.from_arrays([index, self[columns]]))
return indexed.unstack(columns)
def pivot_simple(index, columns, values):
"""
Produce 'pivot' table based on 3 columns of this DataFrame.
Uses unique values from index / columns and fills with values.
Parameters
----------
index : ndarray
Labels to use to make new frame's index
columns : ndarray
Labels to use to make new frame's columns
values : ndarray
Values to use for populating new frame's values
Notes
-----
Obviously, all 3 of the input arguments must have the same length
Returns
-------
DataFrame
"""
if (len(index) != len(columns)) or (len(columns) != len(values)):
raise AssertionError('Length of index, columns, and values must be the'
' same')
if len(index) == 0:
return DataFrame(index=[])
hindex = MultiIndex.from_arrays([index, columns])
series = Series(values.ravel(), index=hindex)
series = series.sortlevel(0)
return series.unstack()
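# Hedged illustration (not part of the original module): `pivot_simple` takes
# three aligned ndarrays rather than column names. The arrays below are
# arbitrary sample data.
def _demo_pivot_simple():
    index = np.array(['one', 'one', 'two', 'two'])
    columns = np.array(['a', 'b', 'a', 'b'])
    values = np.array([1.0, 2.0, 3.0, 4.0])
    # Yields a 2x2 frame with 'one'/'two' as the index and 'a'/'b' as columns.
    return pivot_simple(index, columns, values)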
def _slow_pivot(index, columns, values):
"""
Produce 'pivot' table based on 3 columns of this DataFrame.
Uses unique values from index / columns and fills with values.
Parameters
----------
index : string or object
Column name to use to make new frame's index
columns : string or object
Column name to use to make new frame's columns
values : string or object
Column name to use for populating new frame's values
Could benefit from some Cython here.
"""
tree = {}
for i, (idx, col) in enumerate(zip(index, columns)):
if col not in tree:
tree[col] = {}
branch = tree[col]
branch[idx] = values[i]
return DataFrame(tree)
def unstack(obj, level, fill_value=None):
if isinstance(level, (tuple, list)):
return _unstack_multiple(obj, level)
if isinstance(obj, DataFrame):
if isinstance(obj.index, MultiIndex):
return _unstack_frame(obj, level, fill_value=fill_value)
else:
return obj.T.stack(dropna=False)
else:
unstacker = _Unstacker(obj.values, obj.index, level=level,
fill_value=fill_value)
return unstacker.get_result()
def _unstack_frame(obj, level, fill_value=None):
from pandas.core.internals import BlockManager, make_block
if obj._is_mixed_type:
unstacker = _Unstacker(np.empty(obj.shape, dtype=bool), # dummy
obj.index, level=level,
value_columns=obj.columns)
new_columns = unstacker.get_new_columns()
new_index = unstacker.get_new_index()
new_axes = [new_columns, new_index]
new_blocks = []
mask_blocks = []
for blk in obj._data.blocks:
blk_items = obj._data.items[blk.mgr_locs.indexer]
bunstacker = _Unstacker(blk.values.T, obj.index, level=level,
value_columns=blk_items,
fill_value=fill_value)
new_items = bunstacker.get_new_columns()
new_placement = new_columns.get_indexer(new_items)
new_values, mask = bunstacker.get_new_values()
mblk = make_block(mask.T, placement=new_placement)
mask_blocks.append(mblk)
newb = make_block(new_values.T, placement=new_placement)
new_blocks.append(newb)
result = DataFrame(BlockManager(new_blocks, new_axes))
mask_frame = DataFrame(BlockManager(mask_blocks, new_axes))
return result.ix[:, mask_frame.sum(0) > 0]
else:
unstacker = _Unstacker(obj.values, obj.index, level=level,
value_columns=obj.columns,
fill_value=fill_value)
return unstacker.get_result()
def get_compressed_ids(labels, sizes):
from pandas.core.groupby import get_group_index
ids = get_group_index(labels, sizes, sort=True, xnull=False)
return _compress_group_index(ids, sort=True)
def stack(frame, level=-1, dropna=True):
"""
Convert DataFrame to Series with multi-level Index. Columns become the
second level of the resulting hierarchical index
Returns
-------
stacked : Series
"""
def factorize(index):
if index.is_unique:
return index, np.arange(len(index))
codes, categories = _factorize_from_iterable(index)
return categories, codes
N, K = frame.shape
if isinstance(frame.columns, MultiIndex):
if frame.columns._reference_duplicate_name(level):
msg = ("Ambiguous reference to {0}. The column "
"names are not unique.".format(level))
raise ValueError(msg)
# Will also convert negative level numbers and check if out of bounds.
level_num = frame.columns._get_level_number(level)
if isinstance(frame.columns, MultiIndex):
return _stack_multi_columns(frame, level_num=level_num, dropna=dropna)
elif isinstance(frame.index, MultiIndex):
new_levels = list(frame.index.levels)
new_labels = [lab.repeat(K) for lab in frame.index.labels]
clev, clab = factorize(frame.columns)
new_levels.append(clev)
new_labels.append(np.tile(clab, N).ravel())
new_names = list(frame.index.names)
new_names.append(frame.columns.name)
new_index = MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
else:
levels, (ilab, clab) = zip(*map(factorize, (frame.index,
frame.columns)))
labels = ilab.repeat(K), np.tile(clab, N).ravel()
new_index = MultiIndex(levels=levels, labels=labels,
names=[frame.index.name, frame.columns.name],
verify_integrity=False)
new_values = frame.values.ravel()
if dropna:
mask = notnull(new_values)
new_values = new_values[mask]
new_index = new_index[mask]
return Series(new_values, index=new_index)
def stack_multiple(frame, level, dropna=True):
# If all passed levels match up to column names, no
# ambiguity about what to do
if all(lev in frame.columns.names for lev in level):
result = frame
for lev in level:
result = stack(result, lev, dropna=dropna)
# Otherwise, level numbers may change as each successive level is stacked
elif all(isinstance(lev, int) for lev in level):
# As each stack is done, the level numbers decrease, so we need
# to account for that when level is a sequence of ints
result = frame
# _get_level_number() checks level numbers are in range and converts
# negative numbers to positive
level = [frame.columns._get_level_number(lev) for lev in level]
# Can't iterate directly through level as we might need to change
# values as we go
for index in range(len(level)):
lev = level[index]
result = stack(result, lev, dropna=dropna)
# Decrement all level numbers greater than current, as these
# have now shifted down by one
updated_level = []
for other in level:
if other > lev:
updated_level.append(other - 1)
else:
updated_level.append(other)
level = updated_level
else:
raise ValueError("level should contain all level names or all level "
"numbers, not a mixture of the two.")
return result
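# Hedged illustration (not part of the original module): a tiny demo of the
# level-renumbering behaviour handled above. Stacking integer levels one at a
# time shifts the remaining column level numbers down by one, which is what the
# loop in `stack_multiple` compensates for; stacking by name or by number gives
# the same result. The frame below is arbitrary sample data.
def _demo_stack_multiple():
    columns = MultiIndex.from_product([['x', 'y'], ['a', 'b']],
                                      names=['letter', 'case'])
    frame = DataFrame(np.arange(8).reshape(2, 4), columns=columns)
    by_name = frame.stack(level=['letter', 'case'])
    by_number = frame.stack(level=[0, 1])
    return by_name, by_number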
def _stack_multi_columns(frame, level_num=-1, dropna=True):
def _convert_level_number(level_num, columns):
"""
Logic for converting the level number to something we can safely pass
to swaplevel:
We generally want to convert the level number into a level name, except
when columns do not have names, in which case we must leave as a level
number
"""
if level_num in columns.names:
return columns.names[level_num]
else:
if columns.names[level_num] is None:
return level_num
else:
return columns.names[level_num]
this = frame.copy()
# this makes life much simpler
if level_num != frame.columns.nlevels - 1:
# roll levels to put selected level at end
roll_columns = this.columns
for i in range(level_num, frame.columns.nlevels - 1):
# Need to check if the ints conflict with level names
lev1 = _convert_level_number(i, roll_columns)
lev2 = _convert_level_number(i + 1, roll_columns)
roll_columns = roll_columns.swaplevel(lev1, lev2)
this.columns = roll_columns
if not this.columns.is_lexsorted():
# Workaround the edge case where 0 is one of the column names,
# which interferes with trying to sort based on the first
# level
level_to_sort = _convert_level_number(0, this.columns)
this = this.sortlevel(level_to_sort, axis=1)
# tuple list excluding level for grouping columns
if len(frame.columns.levels) > 2:
tuples = list(zip(*[lev.take(lab)
for lev, lab in zip(this.columns.levels[:-1],
this.columns.labels[:-1])]))
unique_groups = [key for key, _ in itertools.groupby(tuples)]
new_names = this.columns.names[:-1]
new_columns = MultiIndex.from_tuples(unique_groups, names=new_names)
else:
new_columns = unique_groups = this.columns.levels[0]
# time to ravel the values
new_data = {}
level_vals = this.columns.levels[-1]
level_labels = sorted(set(this.columns.labels[-1]))
level_vals_used = level_vals[level_labels]
levsize = len(level_labels)
drop_cols = []
for key in unique_groups:
loc = this.columns.get_loc(key)
slice_len = loc.stop - loc.start
# can make more efficient?
if slice_len == 0:
drop_cols.append(key)
continue
elif slice_len != levsize:
chunk = this.ix[:, this.columns[loc]]
chunk.columns = level_vals.take(chunk.columns.labels[-1])
value_slice = chunk.reindex(columns=level_vals_used).values
else:
if frame._is_mixed_type:
value_slice = this.ix[:, this.columns[loc]].values
else:
value_slice = this.values[:, loc]
new_data[key] = value_slice.ravel()
if len(drop_cols) > 0:
new_columns = new_columns.difference(drop_cols)
N = len(this)
if isinstance(this.index, MultiIndex):
new_levels = list(this.index.levels)
new_names = list(this.index.names)
new_labels = [lab.repeat(levsize) for lab in this.index.labels]
else:
new_levels = [this.index]
new_labels = [np.arange(N).repeat(levsize)]
new_names = [this.index.name] # something better?
new_levels.append(frame.columns.levels[level_num])
new_labels.append(np.tile(level_labels, N))
new_names.append(frame.columns.names[level_num])
new_index = MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
result = DataFrame(new_data, index=new_index, columns=new_columns)
# more efficient way to go about this? can do the whole masking biz but
# will only save a small amount of time...
if dropna:
result = result.dropna(axis=0, how='all')
return result
def melt(frame, id_vars=None, value_vars=None, var_name=None,
value_name='value', col_level=None):
"""
"Unpivots" a DataFrame from wide format to long format, optionally leaving
identifier variables set.
This function is useful to massage a DataFrame into a format where one
or more columns are identifier variables (`id_vars`), while all other
columns, considered measured variables (`value_vars`), are "unpivoted" to
the row axis, leaving just two non-identifier columns, 'variable' and
'value'.
Parameters
----------
frame : DataFrame
id_vars : tuple, list, or ndarray, optional
Column(s) to use as identifier variables.
value_vars : tuple, list, or ndarray, optional
Column(s) to unpivot. If not specified, uses all columns that
are not set as `id_vars`.
var_name : scalar
Name to use for the 'variable' column. If None it uses
``frame.columns.name`` or 'variable'.
value_name : scalar, default 'value'
Name to use for the 'value' column.
col_level : int or string, optional
If columns are a MultiIndex then use this level to melt.
See also
--------
pivot_table
DataFrame.pivot
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'},
... 'B': {0: 1, 1: 3, 2: 5},
... 'C': {0: 2, 1: 4, 2: 6}})
>>> df
A B C
0 a 1 2
1 b 3 4
2 c 5 6
>>> pd.melt(df, id_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> pd.melt(df, id_vars=['A'], value_vars=['B', 'C'])
A variable value
0 a B 1
1 b B 3
2 c B 5
3 a C 2
4 b C 4
5 c C 6
The names of 'variable' and 'value' columns can be customized:
>>> pd.melt(df, id_vars=['A'], value_vars=['B'],
... var_name='myVarname', value_name='myValname')
A myVarname myValname
0 a B 1
1 b B 3
2 c B 5
If you have multi-index columns:
>>> df.columns = [list('ABC'), list('DEF')]
>>> df
A B C
D E F
0 a 1 2
1 b 3 4
2 c 5 6
>>> pd.melt(df, col_level=0, id_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> pd.melt(df, id_vars=[('A', 'D')], value_vars=[('B', 'E')])
(A, D) variable_0 variable_1 value
0 a B E 1
1 b B E 3
2 c B E 5
"""
# TODO: what about the existing index?
if id_vars is not None:
if not isinstance(id_vars, (tuple, list, np.ndarray)):
id_vars = [id_vars]
else:
id_vars = list(id_vars)
else:
id_vars = []
if value_vars is not None:
if not isinstance(value_vars, (tuple, list, np.ndarray)):
value_vars = [value_vars]
frame = frame.ix[:, id_vars + value_vars]
else:
frame = frame.copy()
if col_level is not None: # allow list or other?
# frame is a copy
frame.columns = frame.columns.get_level_values(col_level)
if var_name is None:
if isinstance(frame.columns, MultiIndex):
if len(frame.columns.names) == len(set(frame.columns.names)):
var_name = frame.columns.names
else:
var_name = ['variable_%s' % i
for i in range(len(frame.columns.names))]
else:
var_name = [frame.columns.name if frame.columns.name is not None
else 'variable']
if isinstance(var_name, compat.string_types):
var_name = [var_name]
N, K = frame.shape
K -= len(id_vars)
mdata = {}
for col in id_vars:
mdata[col] = np.tile(frame.pop(col).values, K)
mcolumns = id_vars + var_name + [value_name]
mdata[value_name] = frame.values.ravel('F')
for i, col in enumerate(var_name):
# asanyarray will keep the columns as an Index
mdata[col] = np.asanyarray(frame.columns.get_level_values(i)).repeat(N)
return DataFrame(mdata, columns=mcolumns)
def lreshape(data, groups, dropna=True, label=None):
"""
Reshape long-format data to wide. Generalized inverse of DataFrame.pivot
Parameters
----------
data : DataFrame
groups : dict
{new_name : list_of_columns}
dropna : boolean, default True
Examples
--------
>>> import pandas as pd
>>> data = pd.DataFrame({'hr1': [514, 573], 'hr2': [545, 526],
... 'team': ['Red Sox', 'Yankees'],
... 'year1': [2007, 2008], 'year2': [2008, 2008]})
>>> data
hr1 hr2 team year1 year2
0 514 545 Red Sox 2007 2008
1 573 526 Yankees 2007 2008
>>> pd.lreshape(data, {'year': ['year1', 'year2'], 'hr': ['hr1', 'hr2']})
team hr year
0 Red Sox 514 2007
1 Yankees 573 2007
2 Red Sox 545 2008
3 Yankees 526 2008
Returns
-------
reshaped : DataFrame
"""
if isinstance(groups, dict):
keys = list(groups.keys())
values = list(groups.values())
else:
keys, values = zip(*groups)
all_cols = list(set.union(*[set(x) for x in values]))
id_cols = list(data.columns.difference(all_cols))
K = len(values[0])
for seq in values:
if len(seq) != K:
raise ValueError('All column lists must be same length')
mdata = {}
pivot_cols = []
for target, names in zip(keys, values):
to_concat = [data[col].values for col in names]
mdata[target] = _concat._concat_compat(to_concat)
pivot_cols.append(target)
for col in id_cols:
mdata[col] = np.tile(data[col].values, K)
if dropna:
mask = np.ones(len(mdata[pivot_cols[0]]), dtype=bool)
for c in pivot_cols:
mask &= notnull(mdata[c])
if not mask.all():
mdata = dict((k, v[mask]) for k, v in compat.iteritems(mdata))
return DataFrame(mdata, columns=id_cols + pivot_cols)
def wide_to_long(df, stubnames, i, j):
"""
Wide panel to long format. Less flexible but more user-friendly than melt.
Parameters
----------
df : DataFrame
The wide-format DataFrame
stubnames : list
A list of stub names. The wide format variables are assumed to
start with the stub names.
i : str
The name of the id variable.
j : str
The name of the subobservation variable.
stubend : str
Regex to match for the end of the stubs.
Returns
-------
DataFrame
A DataFrame that contains each stub name as a variable as well as
variables for i and j.
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> np.random.seed(123)
>>> df = pd.DataFrame({"A1970" : {0 : "a", 1 : "b", 2 : "c"},
... "A1980" : {0 : "d", 1 : "e", 2 : "f"},
... "B1970" : {0 : 2.5, 1 : 1.2, 2 : .7},
... "B1980" : {0 : 3.2, 1 : 1.3, 2 : .1},
... "X" : dict(zip(range(3), np.random.randn(3)))
... })
>>> df["id"] = df.index
>>> df
A1970 A1980 B1970 B1980 X id
0 a d 2.5 3.2 -1.085631 0
1 b e 1.2 1.3 0.997345 1
2 c f 0.7 0.1 0.282978 2
>>> wide_to_long(df, ["A", "B"], i="id", j="year")
X A B
id year
0 1970 -1.085631 a 2.5
1 1970 0.997345 b 1.2
2 1970 0.282978 c 0.7
0 1980 -1.085631 d 3.2
1 1980 0.997345 e 1.3
2 1980 0.282978 f 0.1
Notes
-----
All extra variables are treated as extra id variables. This simply uses
`pandas.melt` under the hood, but is hard-coded to "do the right thing"
    in a typical case.
"""
def get_var_names(df, regex):
return df.filter(regex=regex).columns.tolist()
def melt_stub(df, stub, i, j):
varnames = get_var_names(df, "^" + stub)
newdf = melt(df, id_vars=i, value_vars=varnames, value_name=stub,
var_name=j)
newdf_j = newdf[j].str.replace(stub, "")
try:
newdf_j = newdf_j.astype(int)
except ValueError:
pass
newdf[j] = newdf_j
return newdf
id_vars = get_var_names(df, "^(?!%s)" % "|".join(stubnames))
if i not in id_vars:
id_vars += [i]
newdf = melt_stub(df, stubnames[0], id_vars, j)
for stub in stubnames[1:]:
new = melt_stub(df, stub, id_vars, j)
newdf = newdf.merge(new, how="outer", on=id_vars + [j], copy=False)
return newdf.set_index([i, j])
def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False,
columns=None, sparse=False, drop_first=False):
"""
Convert categorical variable into dummy/indicator variables
Parameters
----------
data : array-like, Series, or DataFrame
prefix : string, list of strings, or dict of strings, default None
String to append DataFrame column names
Pass a list with length equal to the number of columns
        when calling get_dummies on a DataFrame. Alternatively, `prefix`
can be a dictionary mapping column names to prefixes.
prefix_sep : string, default '_'
If appending prefix, separator/delimiter to use. Or pass a
        list or dictionary as with `prefix`.
dummy_na : bool, default False
Add a column to indicate NaNs, if False NaNs are ignored.
columns : list-like, default None
Column names in the DataFrame to be encoded.
If `columns` is None then all the columns with
`object` or `category` dtype will be converted.
sparse : bool, default False
Whether the dummy columns should be sparse or not. Returns
SparseDataFrame if `data` is a Series or if all columns are included.
Otherwise returns a DataFrame with some SparseBlocks.
.. versionadded:: 0.16.1
drop_first : bool, default False
Whether to get k-1 dummies out of k categorical levels by removing the
first level.
.. versionadded:: 0.18.0
Returns
-------
dummies : DataFrame or SparseDataFrame
Examples
--------
>>> import pandas as pd
>>> s = pd.Series(list('abca'))
>>> pd.get_dummies(s)
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
>>> s1 = ['a', 'b', np.nan]
>>> pd.get_dummies(s1)
a b
0 1 0
1 0 1
2 0 0
>>> pd.get_dummies(s1, dummy_na=True)
a b NaN
0 1 0 0
1 0 1 0
2 0 0 1
>>> df = pd.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'],
'C': [1, 2, 3]})
>>> pd.get_dummies(df, prefix=['col1', 'col2'])
C col1_a col1_b col2_a col2_b col2_c
0 1 1 0 0 1 0
1 2 0 1 1 0 0
2 3 1 0 0 0 1
>>> pd.get_dummies(pd.Series(list('abcaa')))
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
4 1 0 0
    >>> pd.get_dummies(pd.Series(list('abcaa')), drop_first=True)
b c
0 0 0
1 1 0
2 0 1
3 0 0
4 0 0
See Also
--------
Series.str.get_dummies
"""
from pandas.tools.merge import concat
from itertools import cycle
if isinstance(data, DataFrame):
# determine columns being encoded
if columns is None:
columns_to_encode = data.select_dtypes(
include=['object', 'category']).columns
else:
columns_to_encode = columns
# validate prefixes and separator to avoid silently dropping cols
def check_len(item, name):
length_msg = ("Length of '{0}' ({1}) did not match the length of "
"the columns being encoded ({2}).")
if is_list_like(item):
if not len(item) == len(columns_to_encode):
raise ValueError(length_msg.format(name, len(item),
len(columns_to_encode)))
check_len(prefix, 'prefix')
check_len(prefix_sep, 'prefix_sep')
if isinstance(prefix, compat.string_types):
prefix = cycle([prefix])
if isinstance(prefix, dict):
prefix = [prefix[col] for col in columns_to_encode]
if prefix is None:
prefix = columns_to_encode
# validate separators
if isinstance(prefix_sep, compat.string_types):
prefix_sep = cycle([prefix_sep])
elif isinstance(prefix_sep, dict):
prefix_sep = [prefix_sep[col] for col in columns_to_encode]
if set(columns_to_encode) == set(data.columns):
with_dummies = []
else:
with_dummies = [data.drop(columns_to_encode, axis=1)]
for (col, pre, sep) in zip(columns_to_encode, prefix, prefix_sep):
dummy = _get_dummies_1d(data[col], prefix=pre, prefix_sep=sep,
dummy_na=dummy_na, sparse=sparse,
drop_first=drop_first)
with_dummies.append(dummy)
result = concat(with_dummies, axis=1)
else:
result = _get_dummies_1d(data, prefix, prefix_sep, dummy_na,
sparse=sparse, drop_first=drop_first)
return result
def _get_dummies_1d(data, prefix, prefix_sep='_', dummy_na=False,
sparse=False, drop_first=False):
# Series avoids inconsistent NaN handling
codes, levels = _factorize_from_iterable(Series(data))
def get_empty_Frame(data, sparse):
if isinstance(data, Series):
index = data.index
else:
index = np.arange(len(data))
if not sparse:
return DataFrame(index=index)
else:
return SparseDataFrame(index=index)
# if all NaN
if not dummy_na and len(levels) == 0:
return get_empty_Frame(data, sparse)
codes = codes.copy()
if dummy_na:
codes[codes == -1] = len(levels)
levels = np.append(levels, np.nan)
# if dummy_na, we just fake a nan level. drop_first will drop it again
if drop_first and len(levels) == 1:
return get_empty_Frame(data, sparse)
number_of_cols = len(levels)
if prefix is not None:
dummy_cols = ['%s%s%s' % (prefix, prefix_sep, v) for v in levels]
else:
dummy_cols = levels
if isinstance(data, Series):
index = data.index
else:
index = None
if sparse:
sparse_series = {}
N = len(data)
sp_indices = [[] for _ in range(len(dummy_cols))]
for ndx, code in enumerate(codes):
if code == -1:
# Blank entries if not dummy_na and code == -1, #GH4446
continue
sp_indices[code].append(ndx)
if drop_first:
# remove first categorical level to avoid perfect collinearity
# GH12042
sp_indices = sp_indices[1:]
dummy_cols = dummy_cols[1:]
for col, ixs in zip(dummy_cols, sp_indices):
sarr = SparseArray(np.ones(len(ixs), dtype=np.uint8),
sparse_index=IntIndex(N, ixs), fill_value=0,
dtype=np.uint8)
sparse_series[col] = SparseSeries(data=sarr, index=index)
out = SparseDataFrame(sparse_series, index=index, columns=dummy_cols,
dtype=np.uint8)
return out
else:
dummy_mat = np.eye(number_of_cols, dtype=np.uint8).take(codes, axis=0)
if not dummy_na:
# reset NaN GH4446
dummy_mat[codes == -1] = 0
if drop_first:
# remove first GH12042
dummy_mat = dummy_mat[:, 1:]
dummy_cols = dummy_cols[1:]
return DataFrame(dummy_mat, index=index, columns=dummy_cols)
def make_axis_dummies(frame, axis='minor', transform=None):
"""
Construct 1-0 dummy variables corresponding to designated axis
labels
Parameters
----------
frame : DataFrame
axis : {'major', 'minor'}, default 'minor'
transform : function, default None
Function to apply to axis labels first. For example, to
get "day of week" dummies in a time series regression
you might call::
make_axis_dummies(panel, axis='major',
transform=lambda d: d.weekday())
Returns
-------
dummies : DataFrame
Column names taken from chosen axis
"""
numbers = {'major': 0, 'minor': 1}
num = numbers.get(axis, axis)
items = frame.index.levels[num]
labels = frame.index.labels[num]
if transform is not None:
mapped_items = items.map(transform)
labels, items = _factorize_from_iterable(mapped_items.take(labels))
values = np.eye(len(items), dtype=float)
values = values.take(labels, axis=0)
return DataFrame(values, columns=items, index=frame.index)
| gpl-3.0 |
dhutchis/systemml | projects/breast_cancer/breastcancer/preprocessing.py | 15 | 26035 | #-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
"""
Preprocessing -- Predicting Breast Cancer Proliferation Scores with
Apache SystemML
This module contains functions for the preprocessing phase of the
breast cancer project.
"""
import math
import os
import numpy as np
import openslide
from openslide import OpenSlideError
from openslide.deepzoom import DeepZoomGenerator
import pandas as pd
from pyspark.ml.linalg import Vectors
import pyspark.sql.functions as F
from scipy.ndimage.morphology import binary_fill_holes
from skimage.color import rgb2gray
from skimage.feature import canny
from skimage.morphology import binary_closing, binary_dilation, disk
# Open Whole-Slide Image
def open_slide(slide_num, folder, training):
"""
Open a whole-slide image, given an image number.
Args:
slide_num: Slide image number as an integer.
folder: Directory in which the slides folder is stored, as a string.
This should contain either a `training_image_data` folder with
images in the format `TUPAC-TR-###.svs`, or a `testing_image_data`
folder with images in the format `TUPAC-TE-###.svs`.
training: Boolean for training or testing datasets.
Returns:
An OpenSlide object representing a whole-slide image.
"""
if training:
filename = os.path.join(folder, "training_image_data",
"TUPAC-TR-{}.svs".format(str(slide_num).zfill(3)))
else:
# Testing images
filename = os.path.join(folder, "testing_image_data",
"TUPAC-TE-{}.svs".format(str(slide_num).zfill(3)))
try:
slide = openslide.open_slide(filename)
except OpenSlideError:
slide = None
except FileNotFoundError:
slide = None
return slide
# Create Tile Generator
def create_tile_generator(slide, tile_size, overlap):
"""
Create a tile generator for the given slide.
This generator is able to extract tiles from the overall
whole-slide image.
Args:
slide: An OpenSlide object representing a whole-slide image.
tile_size: The width and height of a square tile to be generated.
overlap: Number of pixels by which to overlap the tiles.
Returns:
A DeepZoomGenerator object representing the tile generator. Each
extracted tile is a PIL Image with shape
(tile_size, tile_size, channels).
Note: This generator is not a true "Python generator function", but
rather is an object that is capable of extracting individual tiles.
"""
generator = DeepZoomGenerator(slide, tile_size=tile_size, overlap=overlap, limit_bounds=True)
return generator
# Determine 20x Magnification Zoom Level
def get_20x_zoom_level(slide, generator):
"""
Return the zoom level that corresponds to a 20x magnification.
The generator can extract tiles from multiple zoom levels,
downsampling by a factor of 2 per level from highest to lowest
resolution.
Args:
slide: An OpenSlide object representing a whole-slide image.
generator: A DeepZoomGenerator object representing a tile generator.
Note: This generator is not a true "Python generator function",
but rather is an object that is capable of extracting individual
tiles.
Returns:
Zoom level corresponding to a 20x magnification, or as close as
possible.
"""
highest_zoom_level = generator.level_count - 1 # 0-based indexing
try:
mag = int(slide.properties[openslide.PROPERTY_NAME_OBJECTIVE_POWER])
# `mag / 20` gives the downsampling factor between the slide's
# magnification and the desired 20x magnification.
# `(mag / 20) / 2` gives the zoom level offset from the highest
# resolution level, based on a 2x downsampling factor in the
# generator.
offset = math.floor((mag / 20) / 2)
level = highest_zoom_level - offset
except ValueError:
# In case the slide magnification level is unknown, just
# use the highest resolution.
level = highest_zoom_level
return level
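# Worked example (added for clarity, not in the original code): for a slide
# scanned at 40x magnification, offset = floor((40 / 20) / 2) = 1, so the 20x
# level is one zoom level below the highest resolution; for a 20x slide the
# offset is 0 and the highest-resolution level is used directly.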
# Generate Tile Indices For Whole-Slide Image.
def process_slide(slide_num, folder, training, tile_size, overlap):
"""
Generate all possible tile indices for a whole-slide image.
Given a slide number, tile size, and overlap, generate
all possible (slide_num, tile_size, overlap, zoom_level, col, row)
indices.
Args:
slide_num: Slide image number as an integer.
folder: Directory in which the slides folder is stored, as a string.
This should contain either a `training_image_data` folder with
images in the format `TUPAC-TR-###.svs`, or a `testing_image_data`
folder with images in the format `TUPAC-TE-###.svs`.
training: Boolean for training or testing datasets.
tile_size: The width and height of a square tile to be generated.
overlap: Number of pixels by which to overlap the tiles.
Returns:
A list of (slide_num, tile_size, overlap, zoom_level, col, row)
integer index tuples representing possible tiles to extract.
"""
# Open slide.
slide = open_slide(slide_num, folder, training)
# Create tile generator.
generator = create_tile_generator(slide, tile_size, overlap)
# Get 20x zoom level.
zoom_level = get_20x_zoom_level(slide, generator)
# Generate all possible (zoom_level, col, row) tile index tuples.
cols, rows = generator.level_tiles[zoom_level]
tile_indices = [(slide_num, tile_size, overlap, zoom_level, col, row)
for col in range(cols) for row in range(rows)]
return tile_indices
# Generate Tile From Tile Index
def process_tile_index(tile_index, folder, training):
"""
Generate a tile from a tile index.
Given a (slide_num, tile_size, overlap, zoom_level, col, row) tile
index, generate a (slide_num, tile) tuple.
Args:
tile_index: A (slide_num, tile_size, overlap, zoom_level, col, row)
integer index tuple representing a tile to extract.
folder: Directory in which the slides folder is stored, as a string.
This should contain either a `training_image_data` folder with
images in the format `TUPAC-TR-###.svs`, or a `testing_image_data`
folder with images in the format `TUPAC-TE-###.svs`.
training: Boolean for training or testing datasets.
Returns:
A (slide_num, tile) tuple, where slide_num is an integer, and tile
is a 3D NumPy array of shape (tile_size, tile_size, channels) in
RGB format.
"""
slide_num, tile_size, overlap, zoom_level, col, row = tile_index
# Open slide.
slide = open_slide(slide_num, folder, training)
# Create tile generator.
generator = create_tile_generator(slide, tile_size, overlap)
# Generate tile.
tile = np.asarray(generator.get_tile(zoom_level, (col, row)))
return (slide_num, tile)
# Filter Tile For Dimensions & Tissue Threshold
def optical_density(tile):
"""
Convert a tile to optical density values.
Args:
tile: A 3D NumPy array of shape (tile_size, tile_size, channels).
Returns:
A 3D NumPy array of shape (tile_size, tile_size, channels)
representing optical density values.
"""
tile = tile.astype(np.float64)
#od = -np.log10(tile/255 + 1e-8)
od = -np.log((tile+1)/240)
return od
def keep_tile(tile_tuple, tile_size, tissue_threshold):
"""
Determine if a tile should be kept.
This filters out tiles based on size and a tissue percentage
threshold, using a custom algorithm. If a tile has height &
width equal to (tile_size, tile_size), and contains greater
than or equal to the given percentage, then it will be kept;
otherwise it will be filtered out.
Args:
tile_tuple: A (slide_num, tile) tuple, where slide_num is an
integer, and tile is a 3D NumPy array of shape
(tile_size, tile_size, channels).
tile_size: The width and height of a square tile to be generated.
tissue_threshold: Tissue percentage threshold.
Returns:
A Boolean indicating whether or not a tile should be kept for
future usage.
"""
slide_num, tile = tile_tuple
if tile.shape[0:2] == (tile_size, tile_size):
tile_orig = tile
# Check 1
# Convert 3D RGB image to 2D grayscale image, from
# 0 (dense tissue) to 1 (plain background).
tile = rgb2gray(tile)
# 8-bit depth complement, from 1 (dense tissue)
# to 0 (plain background).
tile = 1 - tile
# Canny edge detection with hysteresis thresholding.
# This returns a binary map of edges, with 1 equal to
# an edge. The idea is that tissue would be full of
# edges, while background would not.
tile = canny(tile)
# Binary closing, which is a dilation followed by
# an erosion. This removes small dark spots, which
# helps remove noise in the background.
tile = binary_closing(tile, disk(10))
# Binary dilation, which enlarges bright areas,
# and shrinks dark areas. This helps fill in holes
# within regions of tissue.
tile = binary_dilation(tile, disk(10))
# Fill remaining holes within regions of tissue.
tile = binary_fill_holes(tile)
# Calculate percentage of tissue coverage.
percentage = tile.mean()
check1 = percentage >= tissue_threshold
# Check 2
# Convert to optical density values
tile = optical_density(tile_orig)
# Threshold at beta
beta = 0.15
tile = np.min(tile, axis=2) >= beta
# Apply morphology for same reasons as above.
tile = binary_closing(tile, disk(2))
tile = binary_dilation(tile, disk(2))
tile = binary_fill_holes(tile)
percentage = tile.mean()
check2 = percentage >= tissue_threshold
return check1 and check2
else:
return False
# Generate Samples From Tile
def process_tile(tile_tuple, sample_size, grayscale):
"""
Process a tile into a group of smaller samples.
Cut up a tile into smaller blocks of sample_size x sample_size pixels,
change the shape of each sample from (H, W, channels) to
(channels, H, W), then flatten each into a vector of length
channels*H*W.
Args:
tile_tuple: A (slide_num, tile) tuple, where slide_num is an
integer, and tile is a 3D NumPy array of shape
(tile_size, tile_size, channels).
sample_size: The new width and height of the square samples to be
generated.
grayscale: Whether or not to generate grayscale samples, rather
than RGB.
Returns:
A list of (slide_num, sample) tuples representing cut up tiles,
where each sample is a 3D NumPy array of shape
(sample_size_x, sample_size_y, channels).
"""
slide_num, tile = tile_tuple
if grayscale:
tile = rgb2gray(tile)[:, :, np.newaxis] # Grayscale
# Save disk space and future IO time by converting from [0,1] to [0,255],
# at the expense of some minor loss of information.
tile = np.round(tile * 255).astype("uint8")
x, y, ch = tile.shape
# 1. Reshape into a 5D array of (num_x, sample_size_x, num_y, sample_size_y, ch), where
# num_x and num_y are the number of chopped tiles on the x and y axes, respectively.
# 2. Swap sample_size_x and num_y axes to create
# (num_x, num_y, sample_size_x, sample_size_y, ch).
# 3. Combine num_x and num_y into single axis, returning
# (num_samples, sample_size_x, sample_size_y, ch).
samples = (tile.reshape((x // sample_size, sample_size, y // sample_size, sample_size, ch))
.swapaxes(1,2)
.reshape((-1, sample_size, sample_size, ch)))
samples = [(slide_num, sample) for sample in list(samples)]
return samples
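# Worked example (added for clarity, not in the original code): with
# tile_size=1024, sample_size=256, and RGB input, a (1024, 1024, 3) tile is
# reshaped to (4, 256, 4, 256, 3), axis-swapped to (4, 4, 256, 256, 3), and
# flattened to 16 samples of shape (256, 256, 3).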
# Normalize staining
def normalize_staining(sample_tuple, beta=0.15, alpha=1, light_intensity=255):
"""
Normalize the staining of H&E histology slides.
This function normalizes the staining of H&E histology slides.
References:
- Macenko, Marc, et al. "A method for normalizing histology slides
for quantitative analysis." Biomedical Imaging: From Nano to Macro,
2009. ISBI'09. IEEE International Symposium on. IEEE, 2009.
- http://wwwx.cs.unc.edu/~mn/sites/default/files/macenko2009.pdf
- https://github.com/mitkovetta/staining-normalization
Args:
sample_tuple: A (slide_num, sample) tuple, where slide_num is an
integer, and sample is a 3D NumPy array of shape (H,W,C).
Returns:
A (slide_num, sample) tuple, where the sample is a 3D NumPy array
of shape (H,W,C) that has been stain normalized.
"""
# Setup.
slide_num, sample = sample_tuple
x = np.asarray(sample)
h, w, c = x.shape
x = x.reshape(-1, c).astype(np.float64) # shape (H*W, C)
# Reference stain vectors and stain saturations. We will normalize all slides
# to these references. To create these, grab the stain vectors and stain
# saturations from a desirable slide.
# Values in reference implementation for use with eigendecomposition approach, natural log,
# and `light_intensity=240`.
#stain_ref = np.array([0.5626, 0.2159, 0.7201, 0.8012, 0.4062, 0.5581]).reshape(3,2)
#max_sat_ref = np.array([1.9705, 1.0308]).reshape(2,1)
# SVD w/ log10, and `light_intensity=255`.
stain_ref = (np.array([0.54598845, 0.322116, 0.72385198, 0.76419107, 0.42182333, 0.55879629])
.reshape(3,2))
max_sat_ref = np.array([0.82791151, 0.61137274]).reshape(2,1)
# Convert RGB to OD.
# Note: The original paper used log10, and the reference implementation used the natural log.
#OD = -np.log((x+1)/light_intensity) # shape (H*W, C)
OD = -np.log10(x/light_intensity + 1e-8)
# Remove data with OD intensity less than beta.
# I.e. remove transparent pixels.
# Note: This needs to be checked per channel, rather than
# taking an average over all channels for a given pixel.
OD_thresh = OD[np.all(OD >= beta, 1), :] # shape (K, C)
# Calculate eigenvectors.
# Note: We can either use eigenvector decomposition, or SVD.
#eigvals, eigvecs = np.linalg.eig(np.cov(OD_thresh.T)) # np.cov results in inf/nans
U, s, V = np.linalg.svd(OD_thresh, full_matrices=False)
# Extract two largest eigenvectors.
# Note: We swap the sign of the eigvecs here to be consistent
# with other implementations. Both +/- eigvecs are valid, with
# the same eigenvalue, so this is okay.
#top_eigvecs = eigvecs[:, np.argsort(eigvals)[-2:]] * -1
top_eigvecs = V[0:2, :].T * -1 # shape (C, 2)
# Project thresholded optical density values onto plane spanned by
# 2 largest eigenvectors.
proj = np.dot(OD_thresh, top_eigvecs) # shape (K, 2)
# Calculate angle of each point wrt the first plane direction.
# Note: the parameters are `np.arctan2(y, x)`
angles = np.arctan2(proj[:, 1], proj[:, 0]) # shape (K,)
# Find robust extremes (a and 100-a percentiles) of the angle.
min_angle = np.percentile(angles, alpha)
max_angle = np.percentile(angles, 100-alpha)
# Convert min/max vectors (extremes) back to optimal stains in OD space.
# This computes a set of axes for each angle onto which we can project
# the top eigenvectors. This assumes that the projected values have
# been normalized to unit length.
extreme_angles = np.array(
[[np.cos(min_angle), np.cos(max_angle)],
[np.sin(min_angle), np.sin(max_angle)]]
) # shape (2,2)
stains = np.dot(top_eigvecs, extreme_angles) # shape (C, 2)
# Merge vectors with hematoxylin first, and eosin second, as a heuristic.
if stains[0, 0] < stains[0, 1]:
stains[:, [0, 1]] = stains[:, [1, 0]] # swap columns
# Calculate saturations of each stain.
# Note: Here, we solve
# OD = VS
# S = V^{-1}OD
# where `OD` is the matrix of optical density values of our image,
# `V` is the matrix of stain vectors, and `S` is the matrix of stain
# saturations. Since this is an overdetermined system, we use the
# least squares solver, rather than a direct solve.
sats, _, _, _ = np.linalg.lstsq(stains, OD.T)
# Normalize stain saturations to have same pseudo-maximum based on
# a reference max saturation.
max_sat = np.percentile(sats, 99, axis=1, keepdims=True)
sats = sats / max_sat * max_sat_ref
# Compute optimal OD values.
OD_norm = np.dot(stain_ref, sats)
# Recreate image.
# Note: If the image is immediately converted to uint8 with `.astype(np.uint8)`, it will
  # not return the correct values due to the initial values being outside of [0,255].
# To fix this, we round to the nearest integer, and then clip to [0,255], which is the
# same behavior as Matlab.
#x_norm = np.exp(OD_norm) * light_intensity # natural log approach
x_norm = 10**(-OD_norm) * light_intensity - 1e-8 # log10 approach
x_norm = np.clip(np.round(x_norm), 0, 255).astype(np.uint8)
x_norm = x_norm.astype(np.uint8)
x_norm = x_norm.T.reshape(h,w,c)
return (slide_num, x_norm)
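# Hedged usage sketch (not part of the original module): stain-normalize a
# single (slide_num, sample) tuple. The random array below is only a stand-in
# for a real (sample_size, sample_size, 3) H&E RGB sample.
def _demo_normalize_staining():
  rng = np.random.RandomState(0)
  sample = rng.randint(0, 256, size=(64, 64, 3)).astype(np.uint8)
  slide_num, normalized = normalize_staining((1, sample))
  return normalized  # uint8 array with the same (64, 64, 3) shape as the input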
def flatten_sample(sample_tuple):
"""
Flatten a (H,W,C) sample into a (C*H*W) row vector.
Transpose each sample from (H, W, channels) to (channels, H, W), then
flatten each into a vector of length channels*H*W.
Args:
sample_tuple: A (slide_num, sample) tuple, where slide_num is an
integer, and sample is a 3D NumPy array of shape (H,W,C).
Returns:
A (slide_num, sample) tuple, where the sample has been transposed
from (H,W,C) to (C,H,W), and flattened to a vector of length
(C*H*W).
"""
slide_num, sample = sample_tuple
# 1. Swap axes from (sample_size_x, sample_size_y, ch) to
# (ch, sample_size_x, sample_size_y).
# 2. Flatten sample into (ch*sample_size_x*sample_size_y).
flattened_sample = sample.transpose(2,0,1).reshape(-1)
return (slide_num, flattened_sample)
# Get Ground Truth Labels
def get_labels_df(folder, filename="training_ground_truth.csv"):
"""
Create a DataFrame with the ground truth labels for each slide.
Args:
folder: Directory containing a `training_ground_truth.csv` file
containing the ground truth "tumor_score" and "molecular_score"
labels for each slide.
Returns:
A Pandas DataFrame containing the ground truth labels for each
slide.
"""
filepath = os.path.join(folder, filename)
labels_df = pd.read_csv(filepath, names=["tumor_score", "molecular_score"], header=None)
labels_df["slide_num"] = labels_df.index + 1 # slide numbering starts at 1
labels_df.set_index("slide_num", drop=False, inplace=True) # use the slide num as index
return labels_df
# Process All Slides Into A Spark DataFrame
def preprocess(spark, slide_nums, folder="data", training=True, tile_size=1024, overlap=0,
tissue_threshold=0.9, sample_size=256, grayscale=False, normalize_stains=True,
num_partitions=20000):
"""
Preprocess a set of whole-slide images.
Preprocess a set of whole-slide images as follows:
1. Tile the slides into tiles of size (tile_size, tile_size, 3).
2. Filter the tiles to remove unnecessary tissue.
3. Cut the remaining tiles into samples of size
(sample_size, sample_size, ch), where `ch` is 1 if `grayscale`
is true, or 3 otherwise.
Args:
spark: SparkSession.
slide_nums: List of whole-slide numbers to process.
folder: Local directory in which the slides folder and ground truth
file is stored, as a string. This should contain a
`training_image_data` folder with images in the format
`TUPAC-TR-###.svs`, as well as a `training_ground_truth.csv` file
containing the ground truth "tumor_score" and "molecular_score"
labels for each slide. Alternatively, the folder should contain a
`testing_image_data` folder with images in the format
`TUPAC-TE-###.svs`.
training: Boolean for training or testing datasets.
tile_size: The width and height of a square tile to be generated.
overlap: Number of pixels by which to overlap the tiles.
tissue_threshold: Tissue percentage threshold for filtering.
sample_size: The new width and height of the square samples to be
generated.
grayscale: Whether or not to generate grayscale samples, rather
than RGB.
normalize_stains: Whether or not to apply stain normalization.
num_partitions: Number of partitions to use during processing.
Returns:
A Spark DataFrame in which each row contains the slide number, tumor
score, molecular score, and the sample stretched out into a Vector.
"""
# Filter out broken slides
# Note: "Broken" here is due to a "version of OpenJPEG with broken support for chroma-subsampled
# images".
slides = (spark.sparkContext
.parallelize(slide_nums)
.filter(lambda slide: open_slide(slide, folder, training) is not None))
# Create DataFrame of all tile locations and increase number of partitions
# to avoid OOM during subsequent processing.
tile_indices = (slides.flatMap(
lambda slide: process_slide(slide, folder, training, tile_size, overlap)))
  # TODO: Explore computing the ideal partition sizes based on projected number
# of tiles after filtering. I.e. something like the following:
#rows = tile_indices.count()
#part_size = 128
#channels = 1 if grayscale else 3
#row_mb = tile_size * tile_size * channels * 8 / 1024 / 1024 # size of one row in MB
#rows_per_part = round(part_size / row_mb)
#num_parts = rows / rows_per_part
tile_indices = tile_indices.repartition(num_partitions)
tile_indices.cache()
# Extract all tiles into an RDD, filter, cut into smaller samples, apply stain
# normalization, and flatten.
tiles = tile_indices.map(lambda tile_index: process_tile_index(tile_index, folder, training))
filtered_tiles = tiles.filter(lambda tile: keep_tile(tile, tile_size, tissue_threshold))
samples = filtered_tiles.flatMap(lambda tile: process_tile(tile, sample_size, grayscale))
if normalize_stains:
samples = samples.map(lambda sample: normalize_staining(sample))
samples = samples.map(lambda sample: flatten_sample(sample))
# Convert to a DataFrame
if training:
# Append labels
labels_df = get_labels_df(folder)
samples_with_labels = (samples.map(
lambda tup: (int(tup[0]), int(labels_df.at[tup[0],"tumor_score"]),
float(labels_df.at[tup[0],"molecular_score"]), Vectors.dense(tup[1]))))
df = samples_with_labels.toDF(["slide_num", "tumor_score", "molecular_score", "sample"])
df = df.select(df.slide_num.astype("int"), df.tumor_score.astype("int"),
df.molecular_score, df["sample"])
else: # testing data -- no labels
df = samples.toDF(["slide_num", "sample"])
df = df.select(df.slide_num.astype("int"), df["sample"])
return df
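# Illustrative usage sketch (assumptions: a local Spark session is acceptable,
# pyspark and OpenSlide are installed, and the `data` folder follows the layout
# described in the docstring above). This is not part of the original script.
def _preprocess_demo(slide_nums, folder="data"):
    from pyspark.sql import SparkSession
    spark = SparkSession.builder.appName("preprocess-demo").getOrCreate()
    # Smaller num_partitions keeps the demo light; real runs use the defaults above.
    return preprocess(spark, slide_nums, folder=folder, tile_size=1024,
                      sample_size=256, grayscale=False, num_partitions=200)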
# Save DataFrame
def save(df, filepath, sample_size, grayscale, mode="error", format="parquet", file_size=128):
"""
Save a preprocessed DataFrame with a constraint on the file sizes.
Args:
df: A Spark DataFrame.
filepath: Hadoop-supported path at which to save `df`.
sample_size: The width and height of the square samples.
grayscale: Whether or not the samples are in grayscale format,
rather than RGB.
mode: Specifies the behavior of `df.write.mode` when the data
already exists. Options include:
* `append`: Append contents of this DataFrame to
existing data.
* `overwrite`: Overwrite existing data.
* `error`: Throw an exception if data already exists.
* `ignore`: Silently ignore this operation if data already
exists.
format: The format in which to save the DataFrame.
file_size: Size in MB of each saved file. 128 MB is an
empirically ideal size.
"""
channels = 1 if grayscale else 3
row_mb = sample_size * sample_size * channels * 8 / 1024 / 1024 # size of one row in MB
rows_per_file = round(file_size / row_mb)
df.write.option("maxRecordsPerFile", rows_per_file).mode(mode).save(filepath, format=format)
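# Worked example of the sizing arithmetic above (illustrative, using the
# default sample_size=256, RGB, file_size=128 MB):
#   row_mb        = 256 * 256 * 3 * 8 / 1024 / 1024 = 1.5 MB per row
#   rows_per_file = round(128 / 1.5) = 85 rows per saved file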
# Utilities
def add_row_indices(df, training=True):
"""
Add a row index column for faster data ingestion times with SystemML.
Args:
df: A Spark DataFrame in which each row contains the slide number,
tumor score, molecular score, and the sample stretched out into a
Vector.
training: Boolean for training or testing datasets.
Returns:
The Spark DataFrame with a row index column called "__INDEX".
"""
rdd = (df.rdd
.zipWithIndex()
.map(lambda r: (r[1] + 1, *r[0]))) # flatten & convert index to 1-based indexing
if training:
df = rdd.toDF(['__INDEX', 'slide_num', 'tumor_score', 'molecular_score', 'sample'])
df = df.select(df["__INDEX"].astype("int"), df.slide_num.astype("int"),
df.tumor_score.astype("int"), df.molecular_score, df["sample"])
else: # testing data -- no labels
df = rdd.toDF(["__INDEX", "slide_num", "sample"])
df = df.select(df["__INDEX"].astype("int"), df.slide_num.astype("int"), df["sample"])
return df
def sample(df, frac, training=True, seed=None):
"""
Sample the DataFrame, stratified on the class.
Args:
df: A Spark DataFrame in which each row contains the slide number,
tumor score, molecular score, and the sample stretched out into a
Vector.
frac: Fraction of rows to keep.
training: Boolean for training or testing datasets.
seed: Random seed used for the sampling.
Returns:
A stratified sample of the original Spark DataFrame.
"""
df_sample = df.sampleBy("tumor_score", fractions={1: frac, 2: frac, 3: frac}, seed=seed)
return df_sample
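# Illustrative usage sketch (hypothetical fraction and seed; assumes the
# DataFrame produced by preprocess() above, with tumor_score classes 1-3):
#   train_sample = sample(df, 0.01, training=True, seed=42)
# keeps roughly 1% of the rows from each tumor_score class.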
| apache-2.0 |
gustfrontar/LETKF_WRF | wrf/verification/python/plot_histogram.py | 1 | 6461 | # -*- coding: utf-8 -*-
#Plots the histogram of each variable, within a given domain, for the
#point with the maximum KLD, and indicates the position of that point.
#This is useful to get a visual idea of what a large KLD means for
#different variables and cases.
"""
Created on Tue Nov 1 18:45:15 2016
@author:
"""
import numpy as np
import matplotlib.pyplot as plt
import datetime as dt
import binary_io as bio
import histogram_tools as ht
import bred_vector_functions as bvf
import os
basedir='/home/jruiz/share/EXPERIMENTS/experiments_large_ensemble/data/'
expname = '/OsakaPAR_1km_control1000m_smallrandompert_new/'
plotbasedir=basedir + expname + '/plots/'
undef_in=1.0e20
nbins=30
thresholdmin=0.005
undef_out=np.nan
filetype='analgp'
buf_zone=20
buf_zone_z=4
#The following will be used to extract a particular variable from the original data.
#These variables should be specified according to the data that we have in the binary files.
ctl_vars='U','V','W','T','QV','QHYD' #Complete list of variables in ctl file.
ctl_inirecord=[0,12,24,36,48,60] #Starting record for each variable. From 0 to N
ctl_endrecord=[11,23,35,47,59,71] #End record for each variable. From 0 to N.
#Which variables and levels are we going to plot?
plotlevels=np.array([3,7,9]) #Which levels will be plotted (this levels are equivalent to the BV plots)
plotvars='U','V','W','T','QV','QHYD' #Which variables will be plotted.
#Create the plotbasedir
if not os.path.exists(plotbasedir):
os.mkdir(plotbasedir)
#Define initial and end times using the datetime module.
itime = dt.datetime(2013,7,13,5,25,00) #Initial time.
etime = dt.datetime(2013,7,13,5,39,00) #End time.
#Define the delta.
delta=dt.timedelta(seconds=60)
nx=180
ny=180
nz=np.max(ctl_endrecord) + 1 #Total number of records in binary file.
nlev=12 #Number of vertical levels for 3D variables.
ntimes=1 + np.around((etime-itime).seconds / delta.seconds) #Total number of times.
#Define regions
my_hist=dict()
ens_mean=dict()
ens_std=dict()
ens_skew=dict()
ens_kurt=dict()
ctime=itime
#Get lat lon.
lat=bio.read_data_direct(basedir + expname + '/latlon/lat.grd',nx,ny,1,'>f4')[:,:,0]
lon=bio.read_data_direct(basedir + expname + '/latlon/lon.grd',nx,ny,1,'>f4')[:,:,0]
int_liquid=np.zeros([nx,ny,nlev])
it=0
while ( ctime <= etime ):
print( ctime )
print ( 'Reading the histogram ')
hist_file=basedir + expname + ctime.strftime("%Y%m%d%H%M%S") + '/'+ filetype + '/' + '/histogram.grd'
max_file =basedir + expname + ctime.strftime("%Y%m%d%H%M%S") + '/'+ filetype + '/' + '/maxvar.grd'
min_file =basedir + expname + ctime.strftime("%Y%m%d%H%M%S") + '/'+ filetype + '/' + '/minvar.grd'
hist=ht.read_histogram(hist_file,max_file,min_file,nx,ny,nbins,ctl_vars,ctl_inirecord,ctl_endrecord,dtypeinfloat='>f4',dtypeinint='>i2',undef_in=undef_in,undef_out=undef_out)
kld_file =basedir + expname + ctime.strftime("%Y%m%d%H%M%S") + '/'+ filetype + '/' + '/kldistance.grd'
kld=bio.read_data_scale_2(kld_file,nx,ny,nz,ctl_vars,ctl_inirecord,ctl_endrecord,dtypein='f4',undef_in=undef_in,undef_out=undef_out)
# hist_properties=analyze_histogram_fun( my_hist , thresholdmin )
mean_file =basedir + expname + ctime.strftime("%Y%m%d%H%M%S") + '/'+ filetype + '/' + '/moment0001.grd'
ens_mean=bio.read_data_scale_2(mean_file,nx,ny,nz,ctl_vars,ctl_inirecord,ctl_endrecord,dtypein='f4',undef_in=undef_in,undef_out=undef_out)
std_file =basedir + expname + ctime.strftime("%Y%m%d%H%M%S") + '/'+ filetype + '/' + '/moment0002.grd'
ens_std=bio.read_data_scale_2(std_file,nx,ny,nz,ctl_vars,ctl_inirecord,ctl_endrecord,dtypein='f4',undef_in=undef_in,undef_out=undef_out)
skew_file =basedir + expname + ctime.strftime("%Y%m%d%H%M%S") + '/'+ filetype + '/' + '/moment0003.grd'
ens_skew=bio.read_data_scale_2(skew_file,nx,ny,nz,ctl_vars,ctl_inirecord,ctl_endrecord,dtypein='f4',undef_in=undef_in,undef_out=undef_out)
kurt_file =basedir + expname + ctime.strftime("%Y%m%d%H%M%S") + '/'+ filetype + '/' + '/moment0004.grd'
ens_kurt=bio.read_data_scale_2(kurt_file,nx,ny,nz,ctl_vars,ctl_inirecord,ctl_endrecord,dtypein='f4',undef_in=undef_in,undef_out=undef_out)
#Compute total integrated liquid (we will use this to identify areas associated with clouds and convection)
tmp_int_liquid = np.nansum(ens_mean['QHYD'],2)
for ilev in range(0,nlev) : #Create a fake 3D array for the vertically integrated liquid
#This is because the plotting function expects a 3D array as input.
int_liquid[:,:,ilev]=tmp_int_liquid
for key in hist :
my_kld=kld[key][buf_zone:-buf_zone,buf_zone:-buf_zone,:-buf_zone_z]
my_std=ens_std[key]
my_skew=ens_skew[key]/np.power(my_std,3/2)
my_kurt=ens_kurt[key]/np.power(my_std,2) -3
my_hist=hist[key]
#Find the location of the maximum KLD
max_loc=np.nanargmax( my_kld )
[tmpnx , tmpny , tmpnz ]=np.shape( my_kld )
[xmax,ymax,zmax]=np.unravel_index( max_loc,(tmpnx,tmpny,tmpnz) )
max_range=my_hist['maxval'][xmax,ymax,zmax]
min_range=my_hist['minval'][xmax,ymax,zmax]
my_delta=(max_range-min_range)/nbins
my_range=min_range + my_delta / 2 + my_delta * np.arange(0,nbins,1)
my_bars=my_hist['hist'][xmax,ymax,zmax,:] / np.sum( my_hist['hist'][xmax,ymax,zmax,:] )
smooth_range=2
my_bars_s=np.zeros(np.shape(my_bars))
for ii in range(0,my_bars.size) :
mini=ii-smooth_range
maxi=ii+smooth_range
if( mini < 0 ) :
mini=0
if( maxi > np.size(my_bars)-1 ) :
maxi=np.size(my_bars)-1
my_bars_s[ii]=np.mean( my_bars[mini:maxi])
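# Note on the smoothing above (descriptive comment, behavior unchanged): the
# slice my_bars[mini:maxi] excludes index maxi, so with smooth_range=2 each
# smoothed bin averages the window [ii-2, ii+1] rather than a symmetric
# [ii-2, ii+2]; a symmetric window would use my_bars[mini:maxi+1].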
my_mean=ens_mean[key][xmax,ymax,zmax]
my_sigma=ens_std[key][xmax,ymax,zmax]
my_gauss_fit=my_delta*(1/(np.sqrt(2*np.pi*my_sigma)))*np.exp( -np.power( my_range-my_mean,2)/(2*my_sigma))
my_label= key + ' KLD=' + my_kld[xmax,ymax,zmax].astype('U4')
the_bars=plt.bar( my_range , my_bars_s , width = my_delta ,color='r',label=my_label )
the_lines=plt.plot( my_range , my_gauss_fit , 'k--',linewidth=4)
plt.legend(handles=[the_bars])
#plt.show()
#Plot the increments.
print( 'Generating the following figure : ' + 'Figure_histogram_maxkld_' + key + '_' + ctime.strftime("%Y%m%d%H%M%S") + '.png' )
plt.savefig( basedir + expname + '/plots/Figure_histogram_maxkld_' + key + '_' + ctime.strftime("%Y%m%d%H%M%S") + '.png' )
plt.close()
ctime = ctime + delta
it = it + 1
print ( "Finish time loop" )
| gpl-3.0 |
fabioticconi/scikit-learn | examples/linear_model/plot_iris_logistic.py | 119 | 1679 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier on the
`iris <https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
datapoints are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# we create an instance of the logistic regression classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
yanlend/scikit-learn | sklearn/ensemble/forest.py | 2 | 62656 | """Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Brian Holt <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from warnings import warn
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array, compute_sample_weight
from ..utils.validation import DataConversionWarning, NotFittedError
from .base import BaseEnsemble, _partition_estimators
from ..utils.fixes import bincount
from ..utils.multiclass import check_classification_targets
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"RandomTreesEmbedding"]
MAX_INT = np.iinfo(np.int32).max
def _generate_sample_indices(random_state, n_samples):
"""Private function used to _parallel_build_trees function."""
random_instance = check_random_state(random_state)
sample_indices = random_instance.randint(0, n_samples, n_samples)
return sample_indices
def _generate_unsampled_indices(random_state, n_samples):
"""Private function used to forest._set_oob_score fuction."""
sample_indices = _generate_sample_indices(random_state, n_samples)
sample_counts = bincount(sample_indices, minlength=n_samples)
unsampled_mask = sample_counts == 0
indices_range = np.arange(n_samples)
unsampled_indices = indices_range[unsampled_mask]
return unsampled_indices
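# Minimal sketch (not part of scikit-learn): how the two helpers above relate.
# For a bootstrap sample of size n drawn with replacement, roughly a fraction
# 1 - (1 - 1/n)**n ~= 1 - 1/e ~= 37% of the indices are never drawn and end up
# as the out-of-bag set returned by _generate_unsampled_indices.
def _bootstrap_demo(n_samples=1000, seed=0):
    sampled = _generate_sample_indices(seed, n_samples)
    unsampled = _generate_unsampled_indices(seed, n_samples)
    # Every index is either drawn at least once or reported as unsampled.
    assert len(set(sampled)) + len(unsampled) == n_samples
    return len(unsampled) / float(n_samples)  # typically close to 0.37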
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0, class_weight=None):
"""Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(tree.random_state, n_samples)
sample_counts = bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
if class_weight == 'subsample':
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y, indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y, indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree
def _parallel_helper(obj, methodname, *args, **kwargs):
"""Private helper to workaround Python 2 pickle limitations"""
return getattr(obj, methodname)(*args, **kwargs)
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Base class for forests of trees.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(BaseForest, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.bootstrap = bootstrap
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.warm_start = warm_start
self.class_weight = class_weight
def apply(self, X):
"""Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
X = self._validate_X_predict(X)
results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(tree, 'apply', X, check_input=False)
for tree in self.estimators_)
return np.array(results).T
def fit(self, X, y, sample_weight=None):
"""Build a forest of trees from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Returns self.
"""
# Validate or convert input data
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
# Remap output
n_samples, self.n_features_ = X.shape
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples,), for example using ravel().",
DataConversionWarning, stacklevel=2)
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity, in contrast to
# [:, np.newaxis], which does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
y, expanded_class_weight = self._validate_y_class_weight(y)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Check parameters
self._validate_estimator()
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
random_state = check_random_state(self.random_state)
if not self.warm_start:
# Free allocated memory, if any
self.estimators_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
raise ValueError('n_estimators=%d must be larger or equal to '
'len(estimators_)=%d when warm_start==True'
% (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn("Warm-start fitting without increasing n_estimators does not "
"fit new trees.")
else:
if self.warm_start and len(self.estimators_) > 0:
# We draw from the random state to get the random state we
# would have got if we hadn't used a warm_start.
random_state.randint(MAX_INT, size=len(self.estimators_))
trees = []
for i in range(n_more_estimators):
tree = self._make_estimator(append=False)
tree.set_params(random_state=random_state.randint(MAX_INT))
trees.append(tree)
# Parallel loop: we use the threading backend as the Cython code
# for fitting the trees is internally releasing the Python GIL
# making threading always more efficient than multiprocessing in
# that case.
trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_build_trees)(
t, self, X, y, sample_weight, i, len(trees),
verbose=self.verbose, class_weight=self.class_weight)
for i, t in enumerate(trees))
# Collect newly grown trees
self.estimators_.extend(trees)
if self.oob_score:
self._set_oob_score(X, y)
# Decapsulate classes_ attributes
if hasattr(self, "classes_") and self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
@abstractmethod
def _set_oob_score(self, X, y):
"""Calculate out of bag predictions and score."""
def _validate_y_class_weight(self, y):
# Default implementation
return y, None
def _validate_X_predict(self, X):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
return self.estimators_[0]._validate_X_predict(X, check_input=True)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
all_importances = Parallel(n_jobs=self.n_jobs,
backend="threading")(
delayed(getattr)(tree, 'feature_importances_')
for tree in self.estimators_)
return sum(all_importances) / len(self.estimators_)
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
ClassifierMixin)):
"""Base class for forest of trees-based classifiers.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ForestClassifier, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
def _set_oob_score(self, X, y):
"""Compute out-of-bag score"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_classes_ = self.n_classes_
n_samples = y.shape[0]
oob_decision_function = []
oob_score = 0.0
predictions = []
for k in range(self.n_outputs_):
predictions.append(np.zeros((n_samples, n_classes_[k])))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict_proba(X[unsampled_indices, :],
check_input=False)
if self.n_outputs_ == 1:
p_estimator = [p_estimator]
for k in range(self.n_outputs_):
predictions[k][unsampled_indices, :] += p_estimator[k]
for k in range(self.n_outputs_):
if (predictions[k].sum(axis=1) == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
decision = (predictions[k] /
predictions[k].sum(axis=1)[:, np.newaxis])
oob_decision_function.append(decision)
oob_score += np.mean(y[:, k] ==
np.argmax(predictions[k], axis=1), axis=0)
if self.n_outputs_ == 1:
self.oob_decision_function_ = oob_decision_function[0]
else:
self.oob_decision_function_ = oob_decision_function
self.oob_score_ = oob_score / self.n_outputs_
def _validate_y_class_weight(self, y):
check_classification_targets(y)
y = np.copy(y)
expanded_class_weight = None
if self.class_weight is not None:
y_original = np.copy(y)
self.classes_ = []
self.n_classes_ = []
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
valid_presets = ('auto', 'balanced', 'balanced_subsample', 'subsample')
if isinstance(self.class_weight, six.string_types):
if self.class_weight not in valid_presets:
raise ValueError('Valid presets for class_weight include '
'"balanced" and "balanced_subsample". Given "%s".'
% self.class_weight)
if self.class_weight == "subsample":
warn("class_weight='subsample' is deprecated and will be removed in 0.18."
" It was replaced by class_weight='balanced_subsample' "
"using the balanced strategy.", DeprecationWarning)
if self.warm_start:
warn('class_weight presets "balanced" or "balanced_subsample" are '
'not recommended for warm_start if the fitted data '
'differs from the full dataset. In order to use '
'"balanced" weights, use compute_class_weight("balanced", '
'classes, y). In place of y you can use a large '
'enough sample of the full training set target to '
'properly estimate the class frequency '
'distributions. Pass the resulting weights as the '
'class_weight parameter.')
if (self.class_weight not in ['subsample', 'balanced_subsample'] or
not self.bootstrap):
if self.class_weight == 'subsample':
class_weight = 'auto'
elif self.class_weight == "balanced_subsample":
class_weight = "balanced"
else:
class_weight = self.class_weight
with warnings.catch_warnings():
if class_weight == "auto":
warnings.simplefilter('ignore', DeprecationWarning)
expanded_class_weight = compute_sample_weight(class_weight,
y_original)
return y, expanded_class_weight
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is a vote by the trees in
the forest, weighted by their probability estimates. That is,
the predicted class is the one with highest mean probability
estimate across the trees.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
n_samples = proba[0].shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
axis=1),
axis=0)
return predictions
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample is computed as
the mean predicted class probabilities of the trees in the forest. The
class probability of a single tree is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(e, 'predict_proba', X,
check_input=False)
for e in self.estimators_)
# Reduce
proba = all_proba[0]
if self.n_outputs_ == 1:
for j in range(1, len(all_proba)):
proba += all_proba[j]
proba /= len(self.estimators_)
else:
for j in range(1, len(all_proba)):
for k in range(self.n_outputs_):
proba[k] += all_proba[j][k]
for k in range(self.n_outputs_):
proba[k] /= self.n_estimators
return proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample is computed as
the log of the mean predicted class probabilities of the trees in the
forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
"""Base class for forest of trees-based regressors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ForestRegressor, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted values.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(e, 'predict', X, check_input=False)
for e in self.estimators_)
# Reduce
y_hat = sum(all_y_hat) / len(self.estimators_)
return y_hat
def _set_oob_score(self, X, y):
"""Compute out-of-bag scores"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_samples = y.shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
n_predictions = np.zeros((n_samples, self.n_outputs_))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict(
X[unsampled_indices, :], check_input=False)
if self.n_outputs_ == 1:
p_estimator = p_estimator[:, np.newaxis]
predictions[unsampled_indices, :] += p_estimator
n_predictions[unsampled_indices, :] += 1
if (n_predictions == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
self.oob_prediction_ = predictions
if self.n_outputs_ == 1:
self.oob_prediction_ = \
self.oob_prediction_.reshape((n_samples, ))
self.oob_score_ = 0.0
for k in range(self.n_outputs_):
self.oob_score_ += r2_score(y[:, k],
predictions[:, k])
self.oob_score_ /= self.n_outputs_
class RandomForestClassifier(ForestClassifier):
"""A random forest classifier.
A random forest is a meta estimator that fits a number of decision tree
classifiers on various sub-samples of the dataset and uses averaging to
improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeClassifier, ExtraTreesClassifier
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(RandomForestClassifier, self).__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
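# Illustrative usage sketch (not part of this module): fit the classifier
# defined above on the iris toy dataset. Hyperparameter values are arbitrary
# example choices.
def _random_forest_classifier_example():
    from sklearn.datasets import load_iris
    iris = load_iris()
    clf = RandomForestClassifier(n_estimators=10, max_depth=3, random_state=0)
    clf.fit(iris.data, iris.target)
    # Per-class probabilities for the first two samples, averaged over trees.
    return clf.predict_proba(iris.data[:2])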
class RandomForestRegressor(ForestRegressor):
"""A random forest regressor.
A random forest is a meta estimator that fits a number of classifying
decision trees on various sub-samples of the dataset and uses averaging
to improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeRegressor, ExtraTreesRegressor
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomForestRegressor, self).__init__(
base_estimator=DecisionTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
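# Illustrative usage sketch (not part of this module): fit the regressor
# defined above on synthetic data and read the out-of-bag R^2 estimate.
# make_regression parameters are arbitrary example values.
def _random_forest_regressor_example():
    from sklearn.datasets import make_regression
    X, y = make_regression(n_samples=200, n_features=10, noise=1.0,
                           random_state=0)
    reg = RandomForestRegressor(n_estimators=25, oob_score=True,
                                random_state=0)
    reg.fit(X, y)
    return reg.oob_score_  # out-of-bag R^2 on the training data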
class ExtraTreesClassifier(ForestClassifier):
"""An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
RandomForestClassifier : Ensemble Classifier based on trees with optimal
splits.
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ExtraTreesClassifier, self).__init__(
base_estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
Note: this parameter is tree-specific.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features.
n_outputs_ : int
The number of outputs.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
RandomForestRegressor: Ensemble regressor using trees with optimal splits.
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ExtraTreesRegressor, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
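# Illustrative usage sketch (not part of this module): the extra-trees variant
# exposes the same feature_importances_ attribute as the random forest above.
# make_friedman1 parameters are arbitrary example values.
def _extra_trees_regressor_example():
    from sklearn.datasets import make_friedman1
    X, y = make_friedman1(n_samples=300, n_features=10, random_state=0)
    reg = ExtraTreesRegressor(n_estimators=50, random_state=0)
    reg.fit(X, y)
    return reg.feature_importances_  # impurity-based importances, sum to 1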
class RandomTreesEmbedding(BaseForest):
"""An ensemble of totally random trees.
An unsupervised transformation of a dataset to a high-dimensional
sparse representation. A datapoint is coded according to which leaf of
each tree it is sorted into. Using a one-hot encoding of the leaves,
this leads to a binary coding with as many ones as there are trees in
the forest.
The dimensionality of the resulting representation is
``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.
Read more in the :ref:`User Guide <random_trees_embedding>`.
Parameters
----------
n_estimators : int
Number of trees in the forest.
max_depth : int
The maximum depth of each tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
sparse_output : bool, optional (default=True)
Whether or not to return a sparse CSR matrix, as default behavior,
or to return a dense array compatible with dense pipeline operators.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
.. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
visual codebooks using randomized clustering forests"
NIPS 2007
"""
def __init__(self,
n_estimators=10,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_leaf_nodes=None,
sparse_output=True,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomTreesEmbedding, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=False,
oob_score=False,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = 'mse'
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = 1
self.max_leaf_nodes = max_leaf_nodes
self.sparse_output = sparse_output
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by tree embedding")
def fit(self, X, y=None, sample_weight=None):
"""Fit estimator.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
Returns
-------
self : object
Returns self.
"""
self.fit_transform(X, y, sample_weight=sample_weight)
return self
def fit_transform(self, X, y=None, sample_weight=None):
"""Fit estimator and transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data used to build forests. Use ``dtype=np.float32`` for
maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
        # ensure_2d=False because there are actually unit tests checking that
        # we fail for 1d.
X = check_array(X, accept_sparse=['csc'], ensure_2d=False)
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
super(RandomTreesEmbedding, self).fit(X, y,
sample_weight=sample_weight)
self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
return self.one_hot_encoder_.fit_transform(self.apply(X))
def transform(self, X):
"""Transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data to be transformed. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csr_matrix`` for maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
return self.one_hot_encoder_.transform(self.apply(X))
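# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original scikit-learn source): a small
# guarded demo of the totally random trees transform described in the
# RandomTreesEmbedding docstring above.  The names below (X_demo, hasher) are
# illustrative only.
if __name__ == "__main__":
    import numpy as np

    rng_demo = np.random.RandomState(0)
    X_demo = rng_demo.uniform(size=(20, 4))       # 20 samples, 4 features

    # Fit the transformer defined above and inspect the one-hot leaf coding.
    hasher = RandomTreesEmbedding(n_estimators=5, max_depth=3, random_state=0)
    X_sparse = hasher.fit_transform(X_demo)       # sparse CSR by default

    # n_out is at most n_estimators * 2 ** max_depth, as stated in the
    # docstring above.
    print(X_sparse.shape)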
| bsd-3-clause |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/matplotlib/blocking_input.py | 10 | 11766 | """
This provides several classes used for blocking interaction with figure
windows:
:class:`BlockingInput`
creates a callable object to retrieve events in a blocking way for
interactive sessions
:class:`BlockingKeyMouseInput`
creates a callable object to retrieve key or mouse clicks in a blocking
way for interactive sessions.
Note: Subclass of BlockingInput. Used by waitforbuttonpress
:class:`BlockingMouseInput`
creates a callable object to retrieve mouse clicks in a blocking way for
interactive sessions.
Note: Subclass of BlockingInput. Used by ginput
:class:`BlockingContourLabeler`
creates a callable object to retrieve mouse clicks in a blocking way that
will then be used to place labels on a ContourSet
Note: Subclass of BlockingMouseInput. Used by clabel
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from matplotlib import verbose
from matplotlib.cbook import is_sequence_of_strings
import matplotlib.lines as mlines
import warnings
class BlockingInput(object):
"""
Class that creates a callable object to retrieve events in a
blocking way.
"""
def __init__(self, fig, eventslist=()):
self.fig = fig
if not is_sequence_of_strings(eventslist):
raise ValueError("Requires a sequence of event name strings")
self.eventslist = eventslist
def on_event(self, event):
"""
Event handler that will be passed to the current figure to
retrieve events.
"""
# Add a new event to list - using a separate function is
# overkill for the base class, but this is consistent with
# subclasses
self.add_event(event)
verbose.report("Event %i" % len(self.events))
# This will extract info from events
self.post_event()
# Check if we have enough events already
if len(self.events) >= self.n and self.n > 0:
self.fig.canvas.stop_event_loop()
def post_event(self):
"""For baseclass, do nothing but collect events"""
pass
def cleanup(self):
"""Disconnect all callbacks"""
for cb in self.callbacks:
self.fig.canvas.mpl_disconnect(cb)
self.callbacks = []
def add_event(self, event):
"""For base class, this just appends an event to events."""
self.events.append(event)
def pop_event(self, index=-1):
"""
This removes an event from the event list. Defaults to
removing last event, but an index can be supplied. Note that
this does not check that there are events, much like the
        normal pop method. If no events exist, this will throw an
exception.
"""
self.events.pop(index)
def pop(self, index=-1):
self.pop_event(index)
pop.__doc__ = pop_event.__doc__
def __call__(self, n=1, timeout=30):
"""
Blocking call to retrieve n events
"""
if not isinstance(n, int):
raise ValueError("Requires an integer argument")
self.n = n
self.events = []
self.callbacks = []
# Ensure that the figure is shown
self.fig.show()
# connect the events to the on_event function call
for n in self.eventslist:
self.callbacks.append(
self.fig.canvas.mpl_connect(n, self.on_event))
try:
# Start event loop
self.fig.canvas.start_event_loop(timeout=timeout)
finally: # Run even on exception like ctrl-c
# Disconnect the callbacks
self.cleanup()
# Return the events in this case
return self.events
class BlockingMouseInput(BlockingInput):
"""
Class that creates a callable object to retrieve mouse clicks in a
blocking way.
This class will also retrieve keyboard clicks and treat them like
appropriate mouse clicks (delete and backspace are like mouse button 3,
enter is like mouse button 2 and all others are like mouse button 1).
"""
button_add = 1
button_pop = 3
button_stop = 2
def __init__(self, fig, mouse_add=1, mouse_pop=3, mouse_stop=2):
BlockingInput.__init__(self, fig=fig,
eventslist=('button_press_event',
'key_press_event'))
self.button_add = mouse_add
self.button_pop = mouse_pop
self.button_stop = mouse_stop
def post_event(self):
"""
This will be called to process events
"""
if len(self.events) == 0:
warnings.warn("No events yet")
elif self.events[-1].name == 'key_press_event':
self.key_event()
else:
self.mouse_event()
def mouse_event(self):
'''Process a mouse click event'''
event = self.events[-1]
button = event.button
if button == self.button_pop:
self.mouse_event_pop(event)
elif button == self.button_stop:
self.mouse_event_stop(event)
else:
self.mouse_event_add(event)
def key_event(self):
'''
Process a key click event. This maps certain keys to appropriate
mouse click events.
'''
event = self.events[-1]
if event.key is None:
            # at least on Mac OS X with the GTK backend, some keys return None.
return
key = event.key.lower()
if key in ['backspace', 'delete']:
self.mouse_event_pop(event)
elif key in ['escape', 'enter']:
# on windows XP and wxAgg, the enter key doesn't seem to register
self.mouse_event_stop(event)
else:
self.mouse_event_add(event)
def mouse_event_add(self, event):
"""
Will be called for any event involving a button other than
button 2 or 3. This will add a click if it is inside axes.
"""
if event.inaxes:
self.add_click(event)
else: # If not a valid click, remove from event list
BlockingInput.pop(self, -1)
def mouse_event_stop(self, event):
"""
Will be called for any event involving button 2.
Button 2 ends blocking input.
"""
# Remove last event just for cleanliness
BlockingInput.pop(self, -1)
# This will exit even if not in infinite mode. This is
# consistent with MATLAB and sometimes quite useful, but will
# require the user to test how many points were actually
# returned before using data.
self.fig.canvas.stop_event_loop()
def mouse_event_pop(self, event):
"""
Will be called for any event involving button 3.
Button 3 removes the last click.
"""
# Remove this last event
BlockingInput.pop(self, -1)
# Now remove any existing clicks if possible
if len(self.events) > 0:
self.pop(event, -1)
def add_click(self, event):
"""
        This adds the coordinates of an event to the list of clicks
"""
self.clicks.append((event.xdata, event.ydata))
verbose.report("input %i: %f,%f" %
(len(self.clicks), event.xdata, event.ydata))
# If desired plot up click
if self.show_clicks:
line = mlines.Line2D([event.xdata], [event.ydata],
marker='+', color='r')
event.inaxes.add_line(line)
self.marks.append(line)
self.fig.canvas.draw()
def pop_click(self, event, index=-1):
"""
This removes a click from the list of clicks. Defaults to
removing the last click.
"""
self.clicks.pop(index)
if self.show_clicks:
mark = self.marks.pop(index)
mark.remove()
self.fig.canvas.draw()
            # NOTE: I do NOT understand why the above 3 lines do not work
# for the keyboard backspace event on windows XP wxAgg.
# maybe event.inaxes here is a COPY of the actual axes?
def pop(self, event, index=-1):
"""
This removes a click and the associated event from the object.
Defaults to removing the last click, but any index can be
supplied.
"""
self.pop_click(event, index)
BlockingInput.pop(self, index)
def cleanup(self, event=None):
# clean the figure
if self.show_clicks:
for mark in self.marks:
mark.remove()
self.marks = []
self.fig.canvas.draw()
# Call base class to remove callbacks
BlockingInput.cleanup(self)
def __call__(self, n=1, timeout=30, show_clicks=True):
"""
Blocking call to retrieve n coordinate pairs through mouse
clicks.
"""
self.show_clicks = show_clicks
self.clicks = []
self.marks = []
BlockingInput.__call__(self, n=n, timeout=timeout)
return self.clicks
class BlockingContourLabeler(BlockingMouseInput):
"""
Class that creates a callable object that uses mouse clicks or key
clicks on a figure window to place contour labels.
"""
def __init__(self, cs):
self.cs = cs
BlockingMouseInput.__init__(self, fig=cs.ax.figure)
def add_click(self, event):
self.button1(event)
def pop_click(self, event, index=-1):
self.button3(event)
def button1(self, event):
"""
This will be called if an event involving a button other than
        2 or 3 occurs. This will add a label to a contour.
"""
# Shorthand
if event.inaxes == self.cs.ax:
self.cs.add_label_near(event.x, event.y, self.inline,
inline_spacing=self.inline_spacing,
transform=False)
self.fig.canvas.draw()
else: # Remove event if not valid
BlockingInput.pop(self)
def button3(self, event):
"""
This will be called if button 3 is clicked. This will remove
a label if not in inline mode. Unfortunately, if one is doing
inline labels, then there is currently no way to fix the
broken contour - once humpty-dumpty is broken, he can't be put
back together. In inline mode, this does nothing.
"""
if self.inline:
pass
else:
self.cs.pop_label()
self.cs.ax.figure.canvas.draw()
def __call__(self, inline, inline_spacing=5, n=-1, timeout=-1):
self.inline = inline
self.inline_spacing = inline_spacing
BlockingMouseInput.__call__(self, n=n, timeout=timeout,
show_clicks=False)
class BlockingKeyMouseInput(BlockingInput):
"""
Class that creates a callable object to retrieve a single mouse or
keyboard click
"""
def __init__(self, fig):
BlockingInput.__init__(self, fig=fig, eventslist=(
'button_press_event', 'key_press_event'))
def post_event(self):
"""
Determines if it is a key event
"""
if len(self.events) == 0:
warnings.warn("No events yet")
else:
self.keyormouse = self.events[-1].name == 'key_press_event'
def __call__(self, timeout=30):
"""
Blocking call to retrieve a single mouse or key click
Returns True if key click, False if mouse, or None if timeout
"""
self.keyormouse = None
BlockingInput.__call__(self, n=1, timeout=timeout)
return self.keyormouse
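# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original matplotlib source): the
# blocking classes above are normally reached through Figure helpers such as
# ginput() and waitforbuttonpress(); the guarded demo below assumes an
# interactive backend is available.
if __name__ == "__main__":
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    ax.plot([0, 1, 2], [0, 1, 0])
    ax.set_title("Click 3 points (button 3 removes, button 2 stops)")

    # Figure.ginput() drives BlockingMouseInput.__call__ under the hood.
    clicks = fig.ginput(n=3, timeout=30, show_clicks=True)
    print("collected clicks:", clicks)

    # Figure.waitforbuttonpress() uses BlockingKeyMouseInput: it returns True
    # for a key press, False for a mouse click, and None on timeout.
    print("key pressed?", fig.waitforbuttonpress(timeout=10))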
| mit |
benhamner/GEFlightQuest | PythonModule/geflight/benchmark/estimated_arrival_benchmark.py | 3 | 1935 | from dateutil.parser import parse
from geflight.transform import flighthistoryevents, utilities as tu
from geflight.benchmark import utilities as bu
from geflight.benchmark import process_test_set_scaffold
import os
import pandas as pd
def process_day(day):
day.df_test_flight_history["estimated_runway_arrival"] = "MISSING"
day.df_test_flight_history["estimated_gate_arrival"] = "MISSING"
df_fhe = pd.read_csv(os.path.join(day.test_day_path, "FlightHistory",
"flighthistoryevents.csv"),
converters={"date_time_recorded": tu.parse_datetime_format6})
df_fhe = df_fhe.sort("date_time_recorded")
for i, row in df_fhe.iterrows():
f_id = row["flight_history_id"]
if f_id not in day.df_test_flight_history.index:
continue
if type(row["data_updated"]) != str:
continue
offset = day.df_test_flight_history["arrival_airport_timezone_offset"][f_id]
if offset>0:
offset_str = "+" + str(offset)
else:
offset_str = str(offset)
gate_str = flighthistoryevents.get_estimated_gate_arrival_string(row["data_updated"])
if gate_str:
day.df_test_flight_history["estimated_gate_arrival"][f_id] = parse(gate_str+offset_str)
runway_str = flighthistoryevents.get_estimated_runway_arrival_string(row["data_updated"])
if runway_str:
day.df_test_flight_history["estimated_runway_arrival"][f_id] = parse(runway_str+offset_str)
for i, row in day.df_test_flight_history.iterrows():
day.df_predictions["actual_runway_arrival"][i] = bu.get_estimated_arrival(row, "runway", day.cutoff_time)
day.df_predictions["actual_gate_arrival"][i] = bu.get_estimated_arrival(row, "gate", day.cutoff_time)
return day.df_predictions
if __name__=="__main__":
process_test_set_scaffold.process_test_set(process_day,
"estimated_arrival_benchmark.csv")
| bsd-2-clause |
AtsushiSakai/jsk_visualization_packages | jsk_rqt_plugins/src/jsk_rqt_plugins/hist.py | 1 | 7882 | #!/usr/bin/env python
from rqt_gui_py.plugin import Plugin
from python_qt_binding import loadUi
from python_qt_binding.QtCore import Qt, QTimer, qWarning, Slot
from python_qt_binding.QtGui import QAction, QIcon, QMenu
from python_qt_binding.QtGui import QWidget, QVBoxLayout, QSizePolicy, QColor
from rqt_py_common.topic_completer import TopicCompleter
from matplotlib.colors import colorConverter
from rqt_py_common.topic_helpers import is_slot_numeric
from rqt_plot.rosplot import ROSData as _ROSData
from rqt_plot.rosplot import RosPlotException
from matplotlib.collections import (PolyCollection,
PathCollection, LineCollection)
import matplotlib
import matplotlib.patches as mpatches
import rospkg
import rospy
from cStringIO import StringIO
import cv2
from cv_bridge import CvBridge
from sensor_msgs.msg import Image
from jsk_recognition_msgs.msg import HistogramWithRange, HistogramWithRangeBin
import os, sys
import argparse
try:
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
except ImportError:
# work around bug in dateutil
import sys
import thread
sys.modules['_thread'] = thread
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
import numpy as np
import matplotlib.pyplot as plt
class ROSData(_ROSData):
def _get_data(self, msg):
val = msg
try:
if not self.field_evals:
return val
for f in self.field_evals:
val = f(val)
return val
except IndexError:
self.error = RosPlotException("[%s] index error for: %s" % (self.name, str(val).replace('\n', ', ')))
except TypeError:
self.error = RosPlotException("[%s] value was not numeric: %s" % (self.name, val))
class HistogramPlot(Plugin):
def __init__(self, context):
super(HistogramPlot, self).__init__(context)
self.setObjectName('HistogramPlot')
self._args = self._parse_args(context.argv())
self._widget = HistogramPlotWidget(self._args.topics)
context.add_widget(self._widget)
def _parse_args(self, argv):
parser = argparse.ArgumentParser(prog='rqt_histogram_plot', add_help=False)
HistogramPlot.add_arguments(parser)
args = parser.parse_args(argv)
return args
@staticmethod
def add_arguments(parser):
group = parser.add_argument_group('Options for rqt_histogram plugin')
group.add_argument('topics', nargs='?', default=[], help='Topics to plot')
class HistogramPlotWidget(QWidget):
_redraw_interval = 40
def __init__(self, topics):
super(HistogramPlotWidget, self).__init__()
self.setObjectName('HistogramPlotWidget')
rp = rospkg.RosPack()
ui_file = os.path.join(rp.get_path('jsk_rqt_plugins'),
'resource', 'plot_histogram.ui')
loadUi(ui_file, self)
self.cv_bridge = CvBridge()
self.subscribe_topic_button.setIcon(QIcon.fromTheme('add'))
self.pause_button.setIcon(QIcon.fromTheme('media-playback-pause'))
self.clear_button.setIcon(QIcon.fromTheme('edit-clear'))
self.data_plot = MatHistogramPlot(self)
self.data_plot_layout.addWidget(self.data_plot)
self._topic_completer = TopicCompleter(self.topic_edit)
self._topic_completer.update_topics()
self.topic_edit.setCompleter(self._topic_completer)
self.data_plot.dropEvent = self.dropEvent
self.data_plot.dragEnterEvent = self.dragEnterEvent
self._start_time = rospy.get_time()
self._rosdata = None
if len(topics) != 0:
self.subscribe_topic(topics)
self._update_plot_timer = QTimer(self)
self._update_plot_timer.timeout.connect(self.update_plot)
self._update_plot_timer.start(self._redraw_interval)
@Slot('QDropEvent*')
def dropEvent(self, event):
if event.mimeData().hasText():
topic_name = str(event.mimeData().text())
else:
            dropped_item = event.source().selectedItems()[0]
            topic_name = str(dropped_item.data(0, Qt.UserRole))
self.subscribe_topic(topic_name)
@Slot()
def on_topic_edit_returnPressed(self):
if self.subscribe_topic_button.isEnabled():
self.subscribe_topic(str(self.topic_edit.text()))
@Slot()
def on_subscribe_topic_button_clicked(self):
self.subscribe_topic(str(self.topic_edit.text()))
def subscribe_topic(self, topic_name):
self.topic_with_field_name = topic_name
self.pub_image = rospy.Publisher(topic_name + "/histogram_image", Image)
if not self._rosdata:
self._rosdata = ROSData(topic_name, self._start_time)
else:
            if self._rosdata.name != topic_name:
self._rosdata.close()
self.data_plot.clear()
self._rosdata = ROSData(topic_name, self._start_time)
else:
rospy.logwarn("%s is already subscribed", topic_name)
def enable_timer(self, enabled=True):
if enabled:
self._update_plot_timer.start(self._redraw_interval)
else:
self._update_plot_timer.stop()
@Slot()
def on_clear_button_clicked(self):
self.data_plot.clear()
@Slot(bool)
def on_pause_button_clicked(self, checked):
self.enable_timer(not checked)
def update_plot(self):
if not self._rosdata:
return
data_x, data_y = self._rosdata.next()
if len(data_y) == 0:
return
axes = self.data_plot._canvas.axes
axes.cla()
if self._rosdata.sub.data_class is HistogramWithRange:
xs = [y.count for y in data_y[-1].bins]
pos = [y.min_value for y in data_y[-1].bins]
widths = [y.max_value - y.min_value for y in data_y[-1].bins]
axes.set_xlim(xmin=pos[0], xmax=pos[-1] + widths[-1])
else:
xs = data_y[-1]
pos = np.arange(len(xs))
widths = [1] * len(xs)
axes.set_xlim(xmin=0, xmax=len(xs))
#axes.xticks(range(5))
for p, x, w in zip(pos, xs, widths):
axes.bar(p, x, color='r', align='center', width=w)
axes.legend([self.topic_with_field_name], prop={'size': '8'})
self.data_plot._canvas.draw()
buffer = StringIO()
self.data_plot._canvas.figure.savefig(buffer, format="png")
buffer.seek(0)
img_array = np.asarray(bytearray(buffer.read()), dtype=np.uint8)
img = cv2.imdecode(img_array, cv2.CV_LOAD_IMAGE_COLOR)
self.pub_image.publish(self.cv_bridge.cv2_to_imgmsg(img, "bgr8"))
class MatHistogramPlot(QWidget):
class Canvas(FigureCanvas):
def __init__(self, parent=None):
super(MatHistogramPlot.Canvas, self).__init__(Figure())
self.axes = self.figure.add_subplot(111)
self.figure.tight_layout()
self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.updateGeometry()
def resizeEvent(self, event):
super(MatHistogramPlot.Canvas, self).resizeEvent(event)
self.figure.tight_layout()
def __init__(self, parent=None):
super(MatHistogramPlot, self).__init__(parent)
self._canvas = MatHistogramPlot.Canvas()
self._toolbar = NavigationToolbar(self._canvas, self._canvas)
vbox = QVBoxLayout()
vbox.addWidget(self._toolbar)
vbox.addWidget(self._canvas)
self.setLayout(vbox)
def redraw(self):
pass
def clear(self):
self._canvas.axes.cla()
self._canvas.draw()
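# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original jsk_rqt_plugins source): a
# minimal publisher that feeds this plugin a HistogramWithRange message built
# from the bin fields the plotting code above reads (min_value, max_value,
# count).  The topic name "/demo_histogram" and the 1 Hz rate are arbitrary
# illustration values.
if __name__ == "__main__":
    rospy.init_node("histogram_plot_demo")
    pub = rospy.Publisher("/demo_histogram", HistogramWithRange)
    rate = rospy.Rate(1)
    while not rospy.is_shutdown():
        msg = HistogramWithRange()
        counts, edges = np.histogram(np.random.randn(1000), bins=20)
        for count, lo, hi in zip(counts, edges[:-1], edges[1:]):
            msg.bins.append(HistogramWithRangeBin(min_value=float(lo),
                                                  max_value=float(hi),
                                                  count=int(count)))
        pub.publish(msg)
        rate.sleep()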
| mit |
nelson-liu/scikit-learn | examples/classification/plot_lda_qda.py | 32 | 5381 | """
====================================================================
Linear and Quadratic Discriminant Analysis with covariance ellipsoid
====================================================================
This example plots the covariance ellipsoids of each class and
decision boundary learned by LDA and QDA. The ellipsoids display
the double standard deviation for each class. With LDA, the
standard deviation is the same for all the classes, while each
class has its own standard deviation with QDA.
"""
print(__doc__)
from scipy import linalg
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import colors
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
###############################################################################
# colormap
cmap = colors.LinearSegmentedColormap(
'red_blue_classes',
{'red': [(0, 1, 1), (1, 0.7, 0.7)],
'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)],
'blue': [(0, 0.7, 0.7), (1, 1, 1)]})
plt.cm.register_cmap(cmap=cmap)
###############################################################################
# generate datasets
def dataset_fixed_cov():
'''Generate 2 Gaussians samples with the same covariance matrix'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -0.23], [0.83, .23]])
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C) + np.array([1, 1])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
def dataset_cov():
'''Generate 2 Gaussians samples with different covariance matrices'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -1.], [2.5, .7]]) * 2.
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C.T) + np.array([1, 4])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
###############################################################################
# plot functions
def plot_data(lda, X, y, y_pred, fig_index):
splot = plt.subplot(2, 2, fig_index)
if fig_index == 1:
plt.title('Linear Discriminant Analysis')
plt.ylabel('Data with fixed covariance')
elif fig_index == 2:
plt.title('Quadratic Discriminant Analysis')
elif fig_index == 3:
plt.ylabel('Data with varying covariances')
tp = (y == y_pred) # True Positive
tp0, tp1 = tp[y == 0], tp[y == 1]
X0, X1 = X[y == 0], X[y == 1]
X0_tp, X0_fp = X0[tp0], X0[~tp0]
X1_tp, X1_fp = X1[tp1], X1[~tp1]
alpha = 0.5
# class 0: dots
plt.plot(X0_tp[:, 0], X0_tp[:, 1], 'o', alpha=alpha,
color='red')
plt.plot(X0_fp[:, 0], X0_fp[:, 1], '*', alpha=alpha,
color='#990000') # dark red
# class 1: dots
plt.plot(X1_tp[:, 0], X1_tp[:, 1], 'o', alpha=alpha,
color='blue')
plt.plot(X1_fp[:, 0], X1_fp[:, 1], '*', alpha=alpha,
color='#000099') # dark blue
# class 0 and 1 : areas
nx, ny = 200, 100
x_min, x_max = plt.xlim()
y_min, y_max = plt.ylim()
xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
np.linspace(y_min, y_max, ny))
Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])
Z = Z[:, 1].reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap='red_blue_classes',
norm=colors.Normalize(0., 1.))
plt.contour(xx, yy, Z, [0.5], linewidths=2., colors='k')
# means
plt.plot(lda.means_[0][0], lda.means_[0][1],
'o', color='black', markersize=10)
plt.plot(lda.means_[1][0], lda.means_[1][1],
'o', color='black', markersize=10)
return splot
def plot_ellipse(splot, mean, cov, color):
v, w = linalg.eigh(cov)
u = w[0] / linalg.norm(w[0])
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
    # filled Gaussian at 2 standard deviations
ell = mpl.patches.Ellipse(mean, 2 * v[0] ** 0.5, 2 * v[1] ** 0.5,
180 + angle, facecolor=color, edgecolor='yellow',
linewidth=2, zorder=2)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
splot.set_xticks(())
splot.set_yticks(())
def plot_lda_cov(lda, splot):
plot_ellipse(splot, lda.means_[0], lda.covariance_, 'red')
plot_ellipse(splot, lda.means_[1], lda.covariance_, 'blue')
def plot_qda_cov(qda, splot):
plot_ellipse(splot, qda.means_[0], qda.covariances_[0], 'red')
plot_ellipse(splot, qda.means_[1], qda.covariances_[1], 'blue')
###############################################################################
for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]):
# Linear Discriminant Analysis
lda = LinearDiscriminantAnalysis(solver="svd", store_covariance=True)
y_pred = lda.fit(X, y).predict(X)
splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1)
plot_lda_cov(lda, splot)
plt.axis('tight')
# Quadratic Discriminant Analysis
qda = QuadraticDiscriminantAnalysis(store_covariances=True)
y_pred = qda.fit(X, y).predict(X)
splot = plot_data(qda, X, y, y_pred, fig_index=2 * i + 2)
plot_qda_cov(qda, splot)
plt.axis('tight')
plt.suptitle('Linear Discriminant Analysis vs Quadratic Discriminant Analysis')
plt.show()
| bsd-3-clause |
glennq/scikit-learn | examples/model_selection/randomized_search.py | 35 | 3287 | """
=========================================================================
Comparing randomized search and grid search for hyperparameter estimation
=========================================================================
Compare randomized search and grid search for optimizing hyperparameters of a
random forest.
All parameters that influence the learning are searched simultaneously
(except for the number of estimators, which poses a time / quality tradeoff).
The randomized search and the grid search explore exactly the same space of
parameters. The result in parameter settings is quite similar, while the run
time for randomized search is drastically lower.
The performance is slightly worse for the randomized search, though this
is most likely a noise effect and would not carry over to a held-out test set.
Note that in practice, one would not search over this many different parameters
simultaneously using grid search, but pick only the ones deemed most important.
"""
print(__doc__)
import numpy as np
from time import time
from scipy.stats import randint as sp_randint
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
# get some data
digits = load_digits()
X, y = digits.data, digits.target
# build a classifier
clf = RandomForestClassifier(n_estimators=20)
# Utility function to report best scores
def report(results, n_top=3):
for i in range(1, n_top + 1):
candidates = np.flatnonzero(results['rank_test_score'] == i)
for candidate in candidates:
print("Model with rank: {0}".format(i))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
results['mean_test_score'][candidate],
results['std_test_score'][candidate]))
print("Parameters: {0}".format(results['params'][candidate]))
print("")
# specify parameters and distributions to sample from
param_dist = {"max_depth": [3, None],
"max_features": sp_randint(1, 11),
"min_samples_split": sp_randint(1, 11),
"min_samples_leaf": sp_randint(1, 11),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run randomized search
n_iter_search = 20
random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
n_iter=n_iter_search)
start = time()
random_search.fit(X, y)
print("RandomizedSearchCV took %.2f seconds for %d candidates"
" parameter settings." % ((time() - start), n_iter_search))
report(random_search.cv_results_)
# use a full grid over all parameters
param_grid = {"max_depth": [3, None],
"max_features": [1, 3, 10],
"min_samples_split": [1, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run grid search
grid_search = GridSearchCV(clf, param_grid=param_grid)
start = time()
grid_search.fit(X, y)
print("GridSearchCV took %.2f seconds for %d candidate parameter settings."
% (time() - start, len(grid_search.cv_results_['params'])))
report(grid_search.cv_results_)
| bsd-3-clause |
trankmichael/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting.py | 127 | 37672 | """
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils.validation import NotFittedError
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset.
for loss in ('deviance', 'exponential'):
clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
random_state=1)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf.estimators_))
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert np.any(deviance_decrease >= 0.0), \
"Train deviance does not monotonically decrease."
def test_parameter_checks():
# Check input parameter validation.
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_loss_function():
assert_raises(ValueError,
GradientBoostingClassifier(loss='ls').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='lad').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='quantile').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='huber').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='deviance').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='exponential').fit, X, y)
def test_classification_synthetic():
# Test GradientBoostingClassifier on synthetic dataset used by
# Hastie et al. in ESLII Example 12.7.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
for loss in ('deviance', 'exponential'):
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=1,
max_depth=1, loss=loss,
learning_rate=1.0, random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.09, \
"GB(loss={}) failed with error {}".format(loss, error_rate)
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=1,
max_depth=1,
learning_rate=1.0, subsample=0.5,
random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.08, ("Stochastic GradientBoostingClassifier(loss={}) "
"failed with error {}".format(loss, error_rate))
def test_boston():
# Check consistency on dataset boston house prices with least squares
# and least absolute deviation.
for loss in ("ls", "lad", "huber"):
for subsample in (1.0, 0.5):
last_y_pred = None
for i, sample_weight in enumerate(
(None, np.ones(len(boston.target)),
2 * np.ones(len(boston.target)))):
clf = GradientBoostingRegressor(n_estimators=100, loss=loss,
max_depth=4, subsample=subsample,
min_samples_split=1,
random_state=1)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target,
sample_weight=sample_weight)
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert mse < 6.0, "Failed with loss %s and " \
"mse = %.4f" % (loss, mse)
if last_y_pred is not None:
np.testing.assert_array_almost_equal(
last_y_pred, y_pred,
                        err_msg="pred_%d doesn't match last pred_%d for loss %r and subsample %r. "
% (i, i - 1, loss, subsample))
last_y_pred = y_pred
def test_iris():
# Check consistency on dataset iris.
for subsample in (1.0, 0.5):
for sample_weight in (None, np.ones(len(iris.target))):
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=subsample)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (subsample, score)
def test_regression_synthetic():
# Test on synthetic regression datasets used in Leo Breiman,
    # "Bagging Predictors". Machine Learning 24(2): 123-140 (1996).
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 1, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor()
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 5.0, "Failed on Friedman1 with mse = %.4f" % mse
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 1700.0, "Failed on Friedman2 with mse = %.4f" % mse
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 0.015, "Failed on Friedman3 with mse = %.4f" % mse
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=1, random_state=1)
clf.fit(X, y)
#feature_importances = clf.feature_importances_
assert_true(hasattr(clf, 'feature_importances_'))
X_new = clf.transform(X, threshold="mean")
assert_less(X_new.shape[1], X.shape[1])
feature_mask = clf.feature_importances_ > clf.feature_importances_.mean()
assert_array_almost_equal(X_new, X[:, feature_mask])
# true feature importance ranking
# true_ranking = np.array([3, 1, 8, 2, 10, 9, 4, 11, 0, 6, 7, 5, 12])
# assert_array_equal(true_ranking, feature_importances.argsort())
def test_probability_log():
# Predict probabilities.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_check_inputs():
# Test input checks (shape and type of X and y).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
from scipy import sparse
X_sparse = sparse.csr_matrix(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(TypeError, clf.fit, X_sparse, y)
clf = GradientBoostingClassifier().fit(X, y)
assert_raises(TypeError, clf.predict, X_sparse)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y,
sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
# X has wrong shape
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
# test if max_features is valid.
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=-0.1)
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
# Test to make sure random state is set properly.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
# Test if max features is set properly for floats and str.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
_, n_features = X.shape
X_train = X[:2000]
y_train = y[:2000]
gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, n_features)
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(n_features * 0.3))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.log2(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1,
max_features=0.01 / X.shape[1])
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, 1)
def test_staged_predict():
# Test whether staged decision function eventually gives
# the same prediction.
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test = X[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
assert_array_equal(y_pred, y)
def test_staged_predict_proba():
# Test whether staged predict proba eventually gives
# the same prediction.
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise NotFittedError if not fitted
assert_raises(NotFittedError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert_equal(y_test.shape, y_pred.shape)
assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert_equal(y_test.shape[0], staged_proba.shape[0])
assert_equal(2, staged_proba.shape[1])
assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_staged_functions_defensive():
# test that staged_functions make defensive copies
rng = np.random.RandomState(0)
X = rng.uniform(size=(10, 3))
y = (4 * X[:, 0]).astype(np.int) + 1 # don't predict zeros
for estimator in [GradientBoostingRegressor(),
GradientBoostingClassifier()]:
estimator.fit(X, y)
for func in ['predict', 'decision_function', 'predict_proba']:
staged_func = getattr(estimator, "staged_" + func, None)
if staged_func is None:
# regressor has no staged_predict_proba
continue
with warnings.catch_warnings(record=True):
staged_result = list(staged_func(X))
staged_result[1][:] = 0
assert_true(np.all(staged_result[0] != 0))
def test_serialization():
# Check model serialization.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
# Check if we can fit even though all targets are equal.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict(rng.rand(2))
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict(rng.rand(2)))
def test_quantile_loss():
# Check if quantile loss with alpha=0.5 equals lad.
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
# Test with non-integer class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
# This will raise a DataConversionWarning that we want to
# "always" raise, elsewhere the warnings gets ignored in the
# later tests, and the tests that check for this warning fail
assert_warns(DataConversionWarning, clf.fit, X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
# Test with different memory layouts of X and y
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
# Test if oob improvement has correct shape and regression test.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
assert clf.oob_improvement_.shape[0] == 100
# hard-coded regression test - change if modification in OOB computation
assert_array_almost_equal(clf.oob_improvement_[:5],
np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
decimal=2)
def test_oob_improvement_raise():
# Test if oob improvement has correct shape.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=1.0)
clf.fit(X, y)
assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multiclass_iris():
# Check OOB improvement on multi-class dataset.
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=0.5)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (0.5, score)
assert clf.oob_improvement_.shape[0] == clf.n_estimators
# hard-coded regression test - change if modification in OOB computation
# FIXME: the following snippet does not yield the same results on 32 bits
# assert_array_almost_equal(clf.oob_improvement_[:5],
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
# decimal=2)
def test_verbose_output():
# Check verbose=1 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=1, subsample=0.8)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# with OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# one for 1-10 and then 9 for 20-100
assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
# Check verbose=2 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=2)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# no OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
'Iter', 'Train Loss', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# 100 lines for n_estimators==100
assert_equal(100, n_lines)
def test_warm_start():
# Test if warm start equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_n_estimators():
# Test if warm start equals fit - set n_estimators.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=300, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=300)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
# Test if possible to fit trees of different depth in ensemble.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, max_depth=2)
est.fit(X, y)
# last 10 trees have different depth
assert est.estimators_[0, 0].max_depth == 1
for i in range(1, 11):
assert est.estimators_[-i, 0].max_depth == 2
def test_warm_start_clear():
# Test if fit clears state.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_2.fit(X, y) # inits state
est_2.set_params(warm_start=False)
est_2.fit(X, y) # clears old state and equals est
assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
# Test if warm start with zero n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=0)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
# Test if warm start with smaller n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=99)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test if warm start with equal n_estimators does nothing
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est2 = clone(est)
est2.set_params(n_estimators=est.n_estimators, warm_start=True)
est2.fit(X, y)
assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
# Test if oob can be turned on during warm start.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, subsample=0.5)
est.fit(X, y)
assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
# the last 10 are not zeros
assert_array_equal(est.oob_improvement_[-10:] == 0.0,
np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
# Test if warm start OOB equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
random_state=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.oob_improvement_[:100],
est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
"""Returns True on the 10th iteration. """
if i == 9:
return True
else:
return False
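# Hedged illustration (not part of the original test suite): any callable with
# the same (iteration, estimator, locals) signature can serve as a monitor;
# this sketch stops training once the three most recent OOB improvements are
# all negative (only meaningful when subsample < 1.0, so OOB is tracked).
def negative_oob_monitor(i, est, locals):
    """Example monitor: stop when the last three OOB improvements are negative."""
    if i < 3 or est.subsample >= 1.0:
        return False
    return bool(np.all(est.oob_improvement_[i - 2:i + 1] < 0))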
def test_monitor_early_stopping():
# Test if monitor return value works.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20) # this is not altered
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.train_score_.shape[0], 30)
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
warm_start=True)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20)
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30, warm_start=False)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.train_score_.shape[0], 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
    # Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, k)
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_complete_regression():
    # Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
k = 4
est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(boston.data, boston.target)
tree = est.estimators_[-1, 0].tree_
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_zero_estimator_reg():
# Test if ZeroEstimator works for regression.
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
# Test if ZeroEstimator works for classification.
X = iris.data
y = np.array(iris.target)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(X, y)
assert est.score(X, y) > 0.96
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert est.score(X, y) > 0.96
# binary clf
mask = y != 0
y[mask] = 1
y[~mask] = 0
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert est.score(X, y) > 0.96
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
    # Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor,
GradientBoostingClassifier]
k = 4
for GBEstimator in all_estimators:
est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_greater(tree.max_depth, 1)
est = GBEstimator(max_depth=1).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, 1)
def test_warm_start_wo_nestimators_change():
# Test if warm_start does nothing if n_estimators is not changed.
# Regression test for #3513.
clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert clf.estimators_.shape[0] == 10
clf.fit([[0, 1], [2, 3]], [0, 1])
assert clf.estimators_.shape[0] == 10
def test_probability_exponential():
# Predict probabilities.
clf = GradientBoostingClassifier(loss='exponential',
n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
score = clf.decision_function(T).ravel()
assert_array_almost_equal(y_proba[:, 1],
1.0 / (1.0 + np.exp(-2 * score)))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('huber', 'ls', 'lad', 'quantile'):
gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_greater(gb.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_min_weight_leaf():
# Regression test for issue #4447
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1],
]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
gb = GradientBoostingRegressor(n_estimators=5, min_weight_fraction_leaf=0.1)
gb.fit(X, y, sample_weight=sample_weight)
assert_true(gb.predict([[1, 0]])[0] > 0.5)
assert_almost_equal(gb.estimators_[0, 0].splitter.min_weight_leaf, 0.2)
def test_non_uniform_weights_toy_edge_case_clf():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('deviance', 'exponential'):
        gb = GradientBoostingClassifier(n_estimators=5, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_array_equal(gb.predict([[1, 0]]), [1])
| bsd-3-clause |
nmayorov/scikit-learn | examples/manifold/plot_lle_digits.py | 138 | 8594 | """
=============================================================================
Manifold learning on handwritten digits: Locally Linear Embedding, Isomap...
=============================================================================
An illustration of various embeddings on the digits dataset.
The RandomTreesEmbedding, from the :mod:`sklearn.ensemble` module, is not
technically a manifold embedding method, as it learns a high-dimensional
representation on which we apply a dimensionality reduction method.
However, it is often useful to cast a dataset into a representation in
which the classes are linearly-separable.
t-SNE will be initialized with the embedding that is generated by PCA in
this example, which is not the default setting. It ensures global stability
of the embedding, i.e., the embedding does not depend on random
initialization.
"""
# Authors: Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import (manifold, datasets, decomposition, ensemble,
discriminant_analysis, random_projection)
digits = datasets.load_digits(n_class=6)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
n_neighbors = 30
#----------------------------------------------------------------------
# Scale and visualize the embedding vectors
def plot_embedding(X, title=None):
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
plt.figure()
ax = plt.subplot(111)
for i in range(X.shape[0]):
plt.text(X[i, 0], X[i, 1], str(digits.target[i]),
color=plt.cm.Set1(y[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
if hasattr(offsetbox, 'AnnotationBbox'):
# only print thumbnails with matplotlib > 1.0
shown_images = np.array([[1., 1.]]) # just something big
for i in range(digits.data.shape[0]):
dist = np.sum((X[i] - shown_images) ** 2, 1)
if np.min(dist) < 4e-3:
# don't show points that are too close
continue
shown_images = np.r_[shown_images, [X[i]]]
imagebox = offsetbox.AnnotationBbox(
offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
X[i])
ax.add_artist(imagebox)
plt.xticks([]), plt.yticks([])
if title is not None:
plt.title(title)
#----------------------------------------------------------------------
# Plot images of the digits
n_img_per_row = 20
img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
for i in range(n_img_per_row):
ix = 10 * i + 1
for j in range(n_img_per_row):
iy = 10 * j + 1
img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))
plt.imshow(img, cmap=plt.cm.binary)
plt.xticks([])
plt.yticks([])
plt.title('A selection from the 64-dimensional digits dataset')
#----------------------------------------------------------------------
# Random 2D projection using a sparse random matrix
print("Computing random projection")
rp = random_projection.SparseRandomProjection(n_components=2, random_state=42)
X_projected = rp.fit_transform(X)
plot_embedding(X_projected, "Random Projection of the digits")
#----------------------------------------------------------------------
# Projection on to the first 2 principal components
print("Computing PCA projection")
t0 = time()
X_pca = decomposition.TruncatedSVD(n_components=2).fit_transform(X)
plot_embedding(X_pca,
"Principal Components projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Projection on to the first 2 linear discriminant components
print("Computing Linear Discriminant Analysis projection")
X2 = X.copy()
X2.flat[::X.shape[1] + 1] += 0.01 # Make X invertible
t0 = time()
X_lda = discriminant_analysis.LinearDiscriminantAnalysis(n_components=2).fit_transform(X2, y)
plot_embedding(X_lda,
"Linear Discriminant projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Isomap projection of the digits dataset
print("Computing Isomap embedding")
t0 = time()
X_iso = manifold.Isomap(n_neighbors, n_components=2).fit_transform(X)
print("Done.")
plot_embedding(X_iso,
"Isomap projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Locally linear embedding of the digits dataset
print("Computing LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='standard')
t0 = time()
X_lle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_lle,
"Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Modified Locally linear embedding of the digits dataset
print("Computing modified LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='modified')
t0 = time()
X_mlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_mlle,
"Modified Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# HLLE embedding of the digits dataset
print("Computing Hessian LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='hessian')
t0 = time()
X_hlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_hlle,
"Hessian Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# LTSA embedding of the digits dataset
print("Computing LTSA embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='ltsa')
t0 = time()
X_ltsa = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_ltsa,
"Local Tangent Space Alignment of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# MDS embedding of the digits dataset
print("Computing MDS embedding")
clf = manifold.MDS(n_components=2, n_init=1, max_iter=100)
t0 = time()
X_mds = clf.fit_transform(X)
print("Done. Stress: %f" % clf.stress_)
plot_embedding(X_mds,
"MDS embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Random Trees embedding of the digits dataset
print("Computing Totally Random Trees embedding")
hasher = ensemble.RandomTreesEmbedding(n_estimators=200, random_state=0,
max_depth=5)
t0 = time()
X_transformed = hasher.fit_transform(X)
pca = decomposition.TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
plot_embedding(X_reduced,
"Random forest embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Spectral embedding of the digits dataset
print("Computing Spectral embedding")
embedder = manifold.SpectralEmbedding(n_components=2, random_state=0,
eigen_solver="arpack")
t0 = time()
X_se = embedder.fit_transform(X)
plot_embedding(X_se,
"Spectral embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# t-SNE embedding of the digits dataset
print("Computing t-SNE embedding")
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
t0 = time()
X_tsne = tsne.fit_transform(X)
plot_embedding(X_tsne,
"t-SNE embedding of the digits (time %.2fs)" %
(time() - t0))
plt.show()
| bsd-3-clause |
dacoex/pvlib-python | pvlib/spa.py | 3 | 42425 | """
Calculate the solar position using the NREL SPA algorithm either using
numpy arrays or compiling the code to machine language with numba.
"""
# Contributors:
# Created by Tony Lorenzo (@alorenzo175), Univ. of Arizona, 2015
from __future__ import division
import os
import threading
import warnings
import logging
pvl_logger = logging.getLogger('pvlib')
import numpy as np
# this block is a way to use an environment variable to switch between
# compiling the functions with numba or just using numpy
def nocompile(*args, **kwargs):
return lambda func: func
if os.getenv('PVLIB_USE_NUMBA', '0') != '0':
try:
from numba import jit, __version__
except ImportError:
warnings.warn('Could not import numba, falling back to numpy ' +
'calculation')
jcompile = nocompile
USE_NUMBA = False
else:
major, minor = __version__.split('.')[:2]
        if (int(major), int(minor)) >= (0, 17):
# need at least numba >= 0.17.0
jcompile = jit
USE_NUMBA = True
else:
warnings.warn('Numba version must be >= 0.17.0, falling back to ' +
'numpy')
jcompile = nocompile
USE_NUMBA = False
else:
jcompile = nocompile
USE_NUMBA = False
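# Illustrative usage note (an assumption about a typical workflow, not part of
# the original module): to opt in to the numba path, set the environment
# variable before this module is first imported, e.g.
#
#     import os
#     os.environ['PVLIB_USE_NUMBA'] = '1'   # any value other than '0'
#     from pvlib import spa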
TABLE_1_DICT = {
'L0': np.array(
[[175347046.0, 0.0, 0.0],
[3341656.0, 4.6692568, 6283.07585],
[34894.0, 4.6261, 12566.1517],
[3497.0, 2.7441, 5753.3849],
[3418.0, 2.8289, 3.5231],
[3136.0, 3.6277, 77713.7715],
[2676.0, 4.4181, 7860.4194],
[2343.0, 6.1352, 3930.2097],
[1324.0, 0.7425, 11506.7698],
[1273.0, 2.0371, 529.691],
[1199.0, 1.1096, 1577.3435],
[990.0, 5.233, 5884.927],
[902.0, 2.045, 26.298],
[857.0, 3.508, 398.149],
[780.0, 1.179, 5223.694],
[753.0, 2.533, 5507.553],
[505.0, 4.583, 18849.228],
[492.0, 4.205, 775.523],
[357.0, 2.92, 0.067],
[317.0, 5.849, 11790.629],
[284.0, 1.899, 796.298],
[271.0, 0.315, 10977.079],
[243.0, 0.345, 5486.778],
[206.0, 4.806, 2544.314],
[205.0, 1.869, 5573.143],
[202.0, 2.458, 6069.777],
[156.0, 0.833, 213.299],
[132.0, 3.411, 2942.463],
[126.0, 1.083, 20.775],
[115.0, 0.645, 0.98],
[103.0, 0.636, 4694.003],
[102.0, 0.976, 15720.839],
[102.0, 4.267, 7.114],
[99.0, 6.21, 2146.17],
[98.0, 0.68, 155.42],
[86.0, 5.98, 161000.69],
[85.0, 1.3, 6275.96],
[85.0, 3.67, 71430.7],
[80.0, 1.81, 17260.15],
[79.0, 3.04, 12036.46],
[75.0, 1.76, 5088.63],
[74.0, 3.5, 3154.69],
[74.0, 4.68, 801.82],
[70.0, 0.83, 9437.76],
[62.0, 3.98, 8827.39],
[61.0, 1.82, 7084.9],
[57.0, 2.78, 6286.6],
[56.0, 4.39, 14143.5],
[56.0, 3.47, 6279.55],
[52.0, 0.19, 12139.55],
[52.0, 1.33, 1748.02],
[51.0, 0.28, 5856.48],
[49.0, 0.49, 1194.45],
[41.0, 5.37, 8429.24],
[41.0, 2.4, 19651.05],
[39.0, 6.17, 10447.39],
[37.0, 6.04, 10213.29],
[37.0, 2.57, 1059.38],
[36.0, 1.71, 2352.87],
[36.0, 1.78, 6812.77],
[33.0, 0.59, 17789.85],
[30.0, 0.44, 83996.85],
[30.0, 2.74, 1349.87],
[25.0, 3.16, 4690.48]]),
'L1': np.array(
[[628331966747.0, 0.0, 0.0],
[206059.0, 2.678235, 6283.07585],
[4303.0, 2.6351, 12566.1517],
[425.0, 1.59, 3.523],
[119.0, 5.796, 26.298],
[109.0, 2.966, 1577.344],
[93.0, 2.59, 18849.23],
[72.0, 1.14, 529.69],
[68.0, 1.87, 398.15],
[67.0, 4.41, 5507.55],
[59.0, 2.89, 5223.69],
[56.0, 2.17, 155.42],
[45.0, 0.4, 796.3],
[36.0, 0.47, 775.52],
[29.0, 2.65, 7.11],
[21.0, 5.34, 0.98],
[19.0, 1.85, 5486.78],
[19.0, 4.97, 213.3],
[17.0, 2.99, 6275.96],
[16.0, 0.03, 2544.31],
[16.0, 1.43, 2146.17],
[15.0, 1.21, 10977.08],
[12.0, 2.83, 1748.02],
[12.0, 3.26, 5088.63],
[12.0, 5.27, 1194.45],
[12.0, 2.08, 4694.0],
[11.0, 0.77, 553.57],
[10.0, 1.3, 6286.6],
[10.0, 4.24, 1349.87],
[9.0, 2.7, 242.73],
[9.0, 5.64, 951.72],
[8.0, 5.3, 2352.87],
[6.0, 2.65, 9437.76],
[6.0, 4.67, 4690.48]]),
'L2': np.array(
[[52919.0, 0.0, 0.0],
[8720.0, 1.0721, 6283.0758],
[309.0, 0.867, 12566.152],
[27.0, 0.05, 3.52],
[16.0, 5.19, 26.3],
[16.0, 3.68, 155.42],
[10.0, 0.76, 18849.23],
[9.0, 2.06, 77713.77],
[7.0, 0.83, 775.52],
[5.0, 4.66, 1577.34],
[4.0, 1.03, 7.11],
[4.0, 3.44, 5573.14],
[3.0, 5.14, 796.3],
[3.0, 6.05, 5507.55],
[3.0, 1.19, 242.73],
[3.0, 6.12, 529.69],
[3.0, 0.31, 398.15],
[3.0, 2.28, 553.57],
[2.0, 4.38, 5223.69],
[2.0, 3.75, 0.98]]),
'L3': np.array(
[[289.0, 5.844, 6283.076],
[35.0, 0.0, 0.0],
[17.0, 5.49, 12566.15],
[3.0, 5.2, 155.42],
[1.0, 4.72, 3.52],
[1.0, 5.3, 18849.23],
[1.0, 5.97, 242.73]]),
'L4': np.array(
[[114.0, 3.142, 0.0],
[8.0, 4.13, 6283.08],
[1.0, 3.84, 12566.15]]),
'L5': np.array(
[[1.0, 3.14, 0.0]]),
'B0': np.array(
[[280.0, 3.199, 84334.662],
[102.0, 5.422, 5507.553],
[80.0, 3.88, 5223.69],
[44.0, 3.7, 2352.87],
[32.0, 4.0, 1577.34]]),
'B1': np.array(
[[9.0, 3.9, 5507.55],
[6.0, 1.73, 5223.69]]),
'R0': np.array(
[[100013989.0, 0.0, 0.0],
[1670700.0, 3.0984635, 6283.07585],
[13956.0, 3.05525, 12566.1517],
[3084.0, 5.1985, 77713.7715],
[1628.0, 1.1739, 5753.3849],
[1576.0, 2.8469, 7860.4194],
[925.0, 5.453, 11506.77],
[542.0, 4.564, 3930.21],
[472.0, 3.661, 5884.927],
[346.0, 0.964, 5507.553],
[329.0, 5.9, 5223.694],
[307.0, 0.299, 5573.143],
[243.0, 4.273, 11790.629],
[212.0, 5.847, 1577.344],
[186.0, 5.022, 10977.079],
[175.0, 3.012, 18849.228],
[110.0, 5.055, 5486.778],
[98.0, 0.89, 6069.78],
[86.0, 5.69, 15720.84],
[86.0, 1.27, 161000.69],
[65.0, 0.27, 17260.15],
[63.0, 0.92, 529.69],
[57.0, 2.01, 83996.85],
[56.0, 5.24, 71430.7],
[49.0, 3.25, 2544.31],
[47.0, 2.58, 775.52],
[45.0, 5.54, 9437.76],
[43.0, 6.01, 6275.96],
[39.0, 5.36, 4694.0],
[38.0, 2.39, 8827.39],
[37.0, 0.83, 19651.05],
[37.0, 4.9, 12139.55],
[36.0, 1.67, 12036.46],
[35.0, 1.84, 2942.46],
[33.0, 0.24, 7084.9],
[32.0, 0.18, 5088.63],
[32.0, 1.78, 398.15],
[28.0, 1.21, 6286.6],
[28.0, 1.9, 6279.55],
[26.0, 4.59, 10447.39]]),
'R1': np.array(
[[103019.0, 1.10749, 6283.07585],
[1721.0, 1.0644, 12566.1517],
[702.0, 3.142, 0.0],
[32.0, 1.02, 18849.23],
[31.0, 2.84, 5507.55],
[25.0, 1.32, 5223.69],
[18.0, 1.42, 1577.34],
[10.0, 5.91, 10977.08],
[9.0, 1.42, 6275.96],
[9.0, 0.27, 5486.78]]),
'R2': np.array(
[[4359.0, 5.7846, 6283.0758],
[124.0, 5.579, 12566.152],
[12.0, 3.14, 0.0],
[9.0, 3.63, 77713.77],
[6.0, 1.87, 5573.14],
[3.0, 5.47, 18849.23]]),
'R3': np.array(
[[145.0, 4.273, 6283.076],
[7.0, 3.92, 12566.15]]),
'R4': np.array(
[[4.0, 2.56, 6283.08]])
}
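# The resize calls below pad the shorter coefficient tables with rows of zeros
# (ndarray.resize fills new entries with 0) so each group can be stacked into a
# single rectangular 3-D array; the zero rows add nothing to the cosine sums.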
TABLE_1_DICT['L1'].resize((64, 3))
TABLE_1_DICT['L2'].resize((64, 3))
TABLE_1_DICT['L3'].resize((64, 3))
TABLE_1_DICT['L4'].resize((64, 3))
TABLE_1_DICT['L5'].resize((64, 3))
TABLE_1_DICT['B1'].resize((5, 3))
TABLE_1_DICT['R1'].resize((40, 3))
TABLE_1_DICT['R2'].resize((40, 3))
TABLE_1_DICT['R3'].resize((40, 3))
TABLE_1_DICT['R4'].resize((40, 3))
HELIO_LONG_TABLE = np.array([TABLE_1_DICT['L0'],
TABLE_1_DICT['L1'],
TABLE_1_DICT['L2'],
TABLE_1_DICT['L3'],
TABLE_1_DICT['L4'],
TABLE_1_DICT['L5']])
HELIO_LAT_TABLE = np.array([TABLE_1_DICT['B0'],
TABLE_1_DICT['B1']])
HELIO_RADIUS_TABLE = np.array([TABLE_1_DICT['R0'],
TABLE_1_DICT['R1'],
TABLE_1_DICT['R2'],
TABLE_1_DICT['R3'],
TABLE_1_DICT['R4']])
NUTATION_ABCD_ARRAY = np.array([
[-171996, -174.2, 92025, 8.9],
[-13187, -1.6, 5736, -3.1],
[-2274, -0.2, 977, -0.5],
[2062, 0.2, -895, 0.5],
[1426, -3.4, 54, -0.1],
[712, 0.1, -7, 0],
[-517, 1.2, 224, -0.6],
[-386, -0.4, 200, 0],
[-301, 0, 129, -0.1],
[217, -0.5, -95, 0.3],
[-158, 0, 0, 0],
[129, 0.1, -70, 0],
[123, 0, -53, 0],
[63, 0, 0, 0],
[63, 0.1, -33, 0],
[-59, 0, 26, 0],
[-58, -0.1, 32, 0],
[-51, 0, 27, 0],
[48, 0, 0, 0],
[46, 0, -24, 0],
[-38, 0, 16, 0],
[-31, 0, 13, 0],
[29, 0, 0, 0],
[29, 0, -12, 0],
[26, 0, 0, 0],
[-22, 0, 0, 0],
[21, 0, -10, 0],
[17, -0.1, 0, 0],
[16, 0, -8, 0],
[-16, 0.1, 7, 0],
[-15, 0, 9, 0],
[-13, 0, 7, 0],
[-12, 0, 6, 0],
[11, 0, 0, 0],
[-10, 0, 5, 0],
[-8, 0, 3, 0],
[7, 0, -3, 0],
[-7, 0, 0, 0],
[-7, 0, 3, 0],
[-7, 0, 3, 0],
[6, 0, 0, 0],
[6, 0, -3, 0],
[6, 0, -3, 0],
[-6, 0, 3, 0],
[-6, 0, 3, 0],
[5, 0, 0, 0],
[-5, 0, 3, 0],
[-5, 0, 3, 0],
[-5, 0, 3, 0],
[4, 0, 0, 0],
[4, 0, 0, 0],
[4, 0, 0, 0],
[-4, 0, 0, 0],
[-4, 0, 0, 0],
[-4, 0, 0, 0],
[3, 0, 0, 0],
[-3, 0, 0, 0],
[-3, 0, 0, 0],
[-3, 0, 0, 0],
[-3, 0, 0, 0],
[-3, 0, 0, 0],
[-3, 0, 0, 0],
[-3, 0, 0, 0],
])
NUTATION_YTERM_ARRAY = np.array([
[0, 0, 0, 0, 1],
[-2, 0, 0, 2, 2],
[0, 0, 0, 2, 2],
[0, 0, 0, 0, 2],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[-2, 1, 0, 2, 2],
[0, 0, 0, 2, 1],
[0, 0, 1, 2, 2],
[-2, -1, 0, 2, 2],
[-2, 0, 1, 0, 0],
[-2, 0, 0, 2, 1],
[0, 0, -1, 2, 2],
[2, 0, 0, 0, 0],
[0, 0, 1, 0, 1],
[2, 0, -1, 2, 2],
[0, 0, -1, 0, 1],
[0, 0, 1, 2, 1],
[-2, 0, 2, 0, 0],
[0, 0, -2, 2, 1],
[2, 0, 0, 2, 2],
[0, 0, 2, 2, 2],
[0, 0, 2, 0, 0],
[-2, 0, 1, 2, 2],
[0, 0, 0, 2, 0],
[-2, 0, 0, 2, 0],
[0, 0, -1, 2, 1],
[0, 2, 0, 0, 0],
[2, 0, -1, 0, 1],
[-2, 2, 0, 2, 2],
[0, 1, 0, 0, 1],
[-2, 0, 1, 0, 1],
[0, -1, 0, 0, 1],
[0, 0, 2, -2, 0],
[2, 0, -1, 2, 1],
[2, 0, 1, 2, 2],
[0, 1, 0, 2, 2],
[-2, 1, 1, 0, 0],
[0, -1, 0, 2, 2],
[2, 0, 0, 2, 1],
[2, 0, 1, 0, 0],
[-2, 0, 2, 2, 2],
[-2, 0, 1, 2, 1],
[2, 0, -2, 0, 1],
[2, 0, 0, 0, 1],
[0, -1, 1, 0, 0],
[-2, -1, 0, 2, 1],
[-2, 0, 0, 0, 1],
[0, 0, 2, 2, 1],
[-2, 0, 2, 0, 1],
[-2, 1, 0, 2, 1],
[0, 0, 1, -2, 0],
[-1, 0, 1, 0, 0],
[-2, 1, 0, 0, 0],
[1, 0, 0, 0, 0],
[0, 0, 1, 2, 0],
[0, 0, -2, 2, 2],
[-1, -1, 1, 0, 0],
[0, 1, 1, 0, 0],
[0, -1, 1, 2, 2],
[2, -1, -1, 2, 2],
[0, 0, 3, 2, 2],
[2, -1, 0, 2, 2],
])
@jcompile('float64(int64, int64, int64, int64, int64, int64, int64)',
nopython=True)
def julian_day_dt(year, month, day, hour, minute, second, microsecond):
"""This is the original way to calculate the julian day from the NREL paper.
However, it is much faster to convert to unix/epoch time and then convert
to julian day. Note that the date must be UTC."""
if month <= 2:
year = year-1
month = month+12
a = int(year/100)
b = 2 - a + int(a * 0.25)
frac_of_day = (microsecond + (second + minute * 60 + hour * 3600)
) * 1.0 / (3600*24)
d = day + frac_of_day
jd = (int(365.25 * (year + 4716)) + int(30.6001 * (month + 1)) + d +
b - 1524.5)
return jd
@jcompile('float64(float64)', nopython=True)
def julian_day(unixtime):
jd = unixtime * 1.0 / 86400 + 2440587.5
return jd
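# Quick sanity check (illustrative): unixtime 0.0, i.e. 1970-01-01 00:00 UTC,
# gives julian_day(0.0) == 2440587.5, and one day later (86400.0) gives
# 2440588.5.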
@jcompile('float64(float64, float64)', nopython=True)
def julian_ephemeris_day(julian_day, delta_t):
jde = julian_day + delta_t * 1.0 / 86400
return jde
@jcompile('float64(float64)', nopython=True)
def julian_century(julian_day):
jc = (julian_day - 2451545) * 1.0 / 36525
return jc
@jcompile('float64(float64)', nopython=True)
def julian_ephemeris_century(julian_ephemeris_day):
jce = (julian_ephemeris_day - 2451545) * 1.0 / 36525
return jce
@jcompile('float64(float64)', nopython=True)
def julian_ephemeris_millennium(julian_ephemeris_century):
jme = julian_ephemeris_century * 1.0 / 10
return jme
@jcompile('float64(float64)', nopython=True)
def heliocentric_longitude(jme):
l0 = 0.0
l1 = 0.0
l2 = 0.0
l3 = 0.0
l4 = 0.0
l5 = 0.0
for row in range(HELIO_LONG_TABLE.shape[1]):
l0 += (HELIO_LONG_TABLE[0, row, 0]
* np.cos(HELIO_LONG_TABLE[0, row, 1]
+ HELIO_LONG_TABLE[0, row, 2] * jme)
)
l1 += (HELIO_LONG_TABLE[1, row, 0]
* np.cos(HELIO_LONG_TABLE[1, row, 1]
+ HELIO_LONG_TABLE[1, row, 2] * jme)
)
l2 += (HELIO_LONG_TABLE[2, row, 0]
* np.cos(HELIO_LONG_TABLE[2, row, 1]
+ HELIO_LONG_TABLE[2, row, 2] * jme)
)
l3 += (HELIO_LONG_TABLE[3, row, 0]
* np.cos(HELIO_LONG_TABLE[3, row, 1]
+ HELIO_LONG_TABLE[3, row, 2] * jme)
)
l4 += (HELIO_LONG_TABLE[4, row, 0]
* np.cos(HELIO_LONG_TABLE[4, row, 1]
+ HELIO_LONG_TABLE[4, row, 2] * jme)
)
l5 += (HELIO_LONG_TABLE[5, row, 0]
* np.cos(HELIO_LONG_TABLE[5, row, 1]
+ HELIO_LONG_TABLE[5, row, 2] * jme)
)
l_rad = (l0 + l1 * jme + l2 * jme**2 + l3 * jme**3 + l4 * jme**4 +
l5 * jme**5)/10**8
l = np.rad2deg(l_rad)
return l % 360
@jcompile('float64(float64)', nopython=True)
def heliocentric_latitude(jme):
b0 = 0.0
b1 = 0.0
for row in range(HELIO_LAT_TABLE.shape[1]):
b0 += (HELIO_LAT_TABLE[0, row, 0]
* np.cos(HELIO_LAT_TABLE[0, row, 1]
+ HELIO_LAT_TABLE[0, row, 2] * jme)
)
b1 += (HELIO_LAT_TABLE[1, row, 0]
* np.cos(HELIO_LAT_TABLE[1, row, 1]
+ HELIO_LAT_TABLE[1, row, 2] * jme)
)
b_rad = (b0 + b1 * jme)/10**8
b = np.rad2deg(b_rad)
return b
@jcompile('float64(float64)', nopython=True)
def heliocentric_radius_vector(jme):
r0 = 0.0
r1 = 0.0
r2 = 0.0
r3 = 0.0
r4 = 0.0
for row in range(HELIO_RADIUS_TABLE.shape[1]):
r0 += (HELIO_RADIUS_TABLE[0, row, 0]
* np.cos(HELIO_RADIUS_TABLE[0, row, 1]
+ HELIO_RADIUS_TABLE[0, row, 2] * jme)
)
r1 += (HELIO_RADIUS_TABLE[1, row, 0]
* np.cos(HELIO_RADIUS_TABLE[1, row, 1]
+ HELIO_RADIUS_TABLE[1, row, 2] * jme)
)
r2 += (HELIO_RADIUS_TABLE[2, row, 0]
* np.cos(HELIO_RADIUS_TABLE[2, row, 1]
+ HELIO_RADIUS_TABLE[2, row, 2] * jme)
)
r3 += (HELIO_RADIUS_TABLE[3, row, 0]
* np.cos(HELIO_RADIUS_TABLE[3, row, 1]
+ HELIO_RADIUS_TABLE[3, row, 2] * jme)
)
r4 += (HELIO_RADIUS_TABLE[4, row, 0]
* np.cos(HELIO_RADIUS_TABLE[4, row, 1]
+ HELIO_RADIUS_TABLE[4, row, 2] * jme)
)
r = (r0 + r1 * jme + r2 * jme**2 + r3 * jme**3 + r4 * jme**4)/10**8
return r
@jcompile('float64(float64)', nopython=True)
def geocentric_longitude(heliocentric_longitude):
theta = heliocentric_longitude + 180.0
return theta % 360
@jcompile('float64(float64)', nopython=True)
def geocentric_latitude(heliocentric_latitude):
beta = -1.0*heliocentric_latitude
return beta
@jcompile('float64(float64)', nopython=True)
def mean_elongation(julian_ephemeris_century):
x0 = (297.85036
+ 445267.111480 * julian_ephemeris_century
- 0.0019142 * julian_ephemeris_century**2
+ julian_ephemeris_century**3 / 189474)
return x0
@jcompile('float64(float64)', nopython=True)
def mean_anomaly_sun(julian_ephemeris_century):
x1 = (357.52772
+ 35999.050340 * julian_ephemeris_century
- 0.0001603 * julian_ephemeris_century**2
- julian_ephemeris_century**3 / 300000)
return x1
@jcompile('float64(float64)', nopython=True)
def mean_anomaly_moon(julian_ephemeris_century):
x2 = (134.96298
+ 477198.867398 * julian_ephemeris_century
+ 0.0086972 * julian_ephemeris_century**2
+ julian_ephemeris_century**3 / 56250)
return x2
@jcompile('float64(float64)', nopython=True)
def moon_argument_latitude(julian_ephemeris_century):
x3 = (93.27191
+ 483202.017538 * julian_ephemeris_century
- 0.0036825 * julian_ephemeris_century**2
+ julian_ephemeris_century**3 / 327270)
return x3
@jcompile('float64(float64)', nopython=True)
def moon_ascending_longitude(julian_ephemeris_century):
x4 = (125.04452
- 1934.136261 * julian_ephemeris_century
+ 0.0020708 * julian_ephemeris_century**2
+ julian_ephemeris_century**3 / 450000)
return x4
@jcompile('float64(float64, float64, float64, float64, float64, float64)',
nopython=True)
def longitude_nutation(julian_ephemeris_century, x0, x1, x2, x3, x4):
delta_psi_sum = 0
for row in range(NUTATION_YTERM_ARRAY.shape[0]):
a = NUTATION_ABCD_ARRAY[row, 0]
b = NUTATION_ABCD_ARRAY[row, 1]
argsin = (NUTATION_YTERM_ARRAY[row, 0]*x0 +
NUTATION_YTERM_ARRAY[row, 1]*x1 +
NUTATION_YTERM_ARRAY[row, 2]*x2 +
NUTATION_YTERM_ARRAY[row, 3]*x3 +
NUTATION_YTERM_ARRAY[row, 4]*x4)
term = (a + b * julian_ephemeris_century) * np.sin(np.radians(argsin))
delta_psi_sum += term
delta_psi = delta_psi_sum*1.0/36000000
return delta_psi
@jcompile('float64(float64, float64, float64, float64, float64, float64)',
nopython=True)
def obliquity_nutation(julian_ephemeris_century, x0, x1, x2, x3, x4):
delta_eps_sum = 0.0
for row in range(NUTATION_YTERM_ARRAY.shape[0]):
c = NUTATION_ABCD_ARRAY[row, 2]
d = NUTATION_ABCD_ARRAY[row, 3]
argcos = (NUTATION_YTERM_ARRAY[row, 0]*x0 +
NUTATION_YTERM_ARRAY[row, 1]*x1 +
NUTATION_YTERM_ARRAY[row, 2]*x2 +
NUTATION_YTERM_ARRAY[row, 3]*x3 +
NUTATION_YTERM_ARRAY[row, 4]*x4)
term = (c + d * julian_ephemeris_century) * np.cos(np.radians(argcos))
delta_eps_sum += term
delta_eps = delta_eps_sum*1.0/36000000
return delta_eps
@jcompile('float64(float64)', nopython=True)
def mean_ecliptic_obliquity(julian_ephemeris_millennium):
U = 1.0*julian_ephemeris_millennium/10
e0 = (84381.448 - 4680.93 * U - 1.55 * U**2
+ 1999.25 * U**3 - 51.38 * U**4 - 249.67 * U**5
- 39.05 * U**6 + 7.12 * U**7 + 27.87 * U**8
+ 5.79 * U**9 + 2.45 * U**10)
return e0
@jcompile('float64(float64, float64)', nopython=True)
def true_ecliptic_obliquity(mean_ecliptic_obliquity, obliquity_nutation):
e0 = mean_ecliptic_obliquity
deleps = obliquity_nutation
e = e0*1.0/3600 + deleps
return e
@jcompile('float64(float64)', nopython=True)
def aberration_correction(earth_radius_vector):
deltau = -20.4898 / (3600 * earth_radius_vector)
return deltau
@jcompile('float64(float64, float64, float64)', nopython=True)
def apparent_sun_longitude(geocentric_longitude, longitude_nutation,
aberration_correction):
lamd = geocentric_longitude + longitude_nutation + aberration_correction
return lamd
@jcompile('float64(float64, float64)', nopython=True)
def mean_sidereal_time(julian_day, julian_century):
v0 = (280.46061837 + 360.98564736629 * (julian_day - 2451545)
+ 0.000387933 * julian_century**2 - julian_century**3 / 38710000)
return v0 % 360.0
@jcompile('float64(float64, float64, float64)', nopython=True)
def apparent_sidereal_time(mean_sidereal_time, longitude_nutation,
true_ecliptic_obliquity):
v = mean_sidereal_time + longitude_nutation * np.cos(
np.radians(true_ecliptic_obliquity))
return v
@jcompile('float64(float64, float64, float64)', nopython=True)
def geocentric_sun_right_ascension(apparent_sun_longitude,
true_ecliptic_obliquity,
geocentric_latitude):
num = (np.sin(np.radians(apparent_sun_longitude))
* np.cos(np.radians(true_ecliptic_obliquity))
- np.tan(np.radians(geocentric_latitude))
* np.sin(np.radians(true_ecliptic_obliquity)))
alpha = np.degrees(np.arctan2(num, np.cos(
np.radians(apparent_sun_longitude))))
return alpha % 360
@jcompile('float64(float64, float64, float64)', nopython=True)
def geocentric_sun_declination(apparent_sun_longitude, true_ecliptic_obliquity,
geocentric_latitude):
delta = np.degrees(np.arcsin(np.sin(np.radians(geocentric_latitude)) *
np.cos(np.radians(true_ecliptic_obliquity)) +
np.cos(np.radians(geocentric_latitude)) *
np.sin(np.radians(true_ecliptic_obliquity)) *
np.sin(np.radians(apparent_sun_longitude))))
return delta
@jcompile('float64(float64, float64, float64)', nopython=True)
def local_hour_angle(apparent_sidereal_time, observer_longitude,
sun_right_ascension):
"""Measured westward from south"""
H = apparent_sidereal_time + observer_longitude - sun_right_ascension
return H % 360
@jcompile('float64(float64)', nopython=True)
def equatorial_horizontal_parallax(earth_radius_vector):
xi = 8.794 / (3600 * earth_radius_vector)
return xi
@jcompile('float64(float64)', nopython=True)
def uterm(observer_latitude):
u = np.arctan(0.99664719 * np.tan(np.radians(observer_latitude)))
return u
@jcompile('float64(float64, float64, float64)', nopython=True)
def xterm(u, observer_latitude, observer_elevation):
x = (np.cos(u) + observer_elevation / 6378140
* np.cos(np.radians(observer_latitude)))
return x
@jcompile('float64(float64, float64, float64)', nopython=True)
def yterm(u, observer_latitude, observer_elevation):
y = (0.99664719 * np.sin(u) + observer_elevation / 6378140
* np.sin(np.radians(observer_latitude)))
return y
@jcompile('float64(float64, float64,float64, float64)', nopython=True)
def parallax_sun_right_ascension(xterm, equatorial_horizontal_parallax,
local_hour_angle, geocentric_sun_declination):
num = (-xterm * np.sin(np.radians(equatorial_horizontal_parallax))
* np.sin(np.radians(local_hour_angle)))
denom = (np.cos(np.radians(geocentric_sun_declination))
- xterm * np.sin(np.radians(equatorial_horizontal_parallax))
* np.cos(np.radians(local_hour_angle)))
delta_alpha = np.degrees(np.arctan2(num, denom))
return delta_alpha
@jcompile('float64(float64, float64)', nopython=True)
def topocentric_sun_right_ascension(geocentric_sun_right_ascension,
parallax_sun_right_ascension):
alpha_prime = geocentric_sun_right_ascension + parallax_sun_right_ascension
return alpha_prime
@jcompile('float64(float64, float64, float64, float64, float64, float64)',
nopython=True)
def topocentric_sun_declination(geocentric_sun_declination, xterm, yterm,
equatorial_horizontal_parallax,
parallax_sun_right_ascension,
local_hour_angle):
num = ((np.sin(np.radians(geocentric_sun_declination)) - yterm
* np.sin(np.radians(equatorial_horizontal_parallax)))
* np.cos(np.radians(parallax_sun_right_ascension)))
denom = (np.cos(np.radians(geocentric_sun_declination)) - xterm
* np.sin(np.radians(equatorial_horizontal_parallax))
* np.cos(np.radians(local_hour_angle)))
delta = np.degrees(np.arctan2(num, denom))
return delta
@jcompile('float64(float64, float64)', nopython=True)
def topocentric_local_hour_angle(local_hour_angle,
parallax_sun_right_ascension):
H_prime = local_hour_angle - parallax_sun_right_ascension
return H_prime
@jcompile('float64(float64, float64, float64)', nopython=True)
def topocentric_elevation_angle_without_atmosphere(observer_latitude,
topocentric_sun_declination,
topocentric_local_hour_angle
):
e0 = np.degrees(np.arcsin(
np.sin(np.radians(observer_latitude))
* np.sin(np.radians(topocentric_sun_declination))
+ np.cos(np.radians(observer_latitude))
* np.cos(np.radians(topocentric_sun_declination))
* np.cos(np.radians(topocentric_local_hour_angle))))
return e0
@jcompile('float64(float64, float64, float64, float64)', nopython=True)
def atmospheric_refraction_correction(local_pressure, local_temp,
topocentric_elevation_angle_wo_atmosphere,
atmos_refract):
    # switch zeroes out delta_e when the sun is below the horizon
switch = topocentric_elevation_angle_wo_atmosphere >= -1.0 * (
0.26667 + atmos_refract)
delta_e = ((local_pressure / 1010.0) * (283.0 / (273 + local_temp))
* 1.02 / (60 * np.tan(np.radians(
topocentric_elevation_angle_wo_atmosphere
+ 10.3 / (topocentric_elevation_angle_wo_atmosphere
+ 5.11))))) * switch
return delta_e
@jcompile('float64(float64, float64)', nopython=True)
def topocentric_elevation_angle(topocentric_elevation_angle_without_atmosphere,
atmospheric_refraction_correction):
e = (topocentric_elevation_angle_without_atmosphere
+ atmospheric_refraction_correction)
return e
@jcompile('float64(float64)', nopython=True)
def topocentric_zenith_angle(topocentric_elevation_angle):
theta = 90 - topocentric_elevation_angle
return theta
@jcompile('float64(float64, float64, float64)', nopython=True)
def topocentric_astronomers_azimuth(topocentric_local_hour_angle,
topocentric_sun_declination,
observer_latitude):
num = np.sin(np.radians(topocentric_local_hour_angle))
denom = (np.cos(np.radians(topocentric_local_hour_angle))
* np.sin(np.radians(observer_latitude))
- np.tan(np.radians(topocentric_sun_declination))
* np.cos(np.radians(observer_latitude)))
gamma = np.degrees(np.arctan2(num, denom))
return gamma % 360
@jcompile('float64(float64)', nopython=True)
def topocentric_azimuth_angle(topocentric_astronomers_azimuth):
phi = topocentric_astronomers_azimuth + 180
return phi % 360
@jcompile('float64(float64)', nopython=True)
def sun_mean_longitude(julian_ephemeris_millennium):
M = (280.4664567 + 360007.6982779 * julian_ephemeris_millennium
+ 0.03032028 * julian_ephemeris_millennium**2
+ julian_ephemeris_millennium**3 / 49931
- julian_ephemeris_millennium**4 / 15300
- julian_ephemeris_millennium**5 / 2000000)
return M
@jcompile('float64(float64, float64, float64, float64)', nopython=True)
def equation_of_time(sun_mean_longitude, geocentric_sun_right_ascension,
longitude_nutation, true_ecliptic_obliquity):
E = (sun_mean_longitude - 0.0057183 - geocentric_sun_right_ascension +
longitude_nutation * np.cos(np.radians(true_ecliptic_obliquity)))
# limit between 0 and 360
E = E % 360
# convert to minutes
E *= 4
greater = E > 20
less = E < -20
other = (E <= 20) & (E >= -20)
E = greater * (E - 1440) + less * (E + 1440) + other * E
return E
@jcompile('void(float64[:], float64[:], float64[:,:])', nopython=True,
nogil=True)
def solar_position_loop(unixtime, loc_args, out):
"""Loop through the time array and calculate the solar position"""
lat = loc_args[0]
lon = loc_args[1]
elev = loc_args[2]
pressure = loc_args[3]
temp = loc_args[4]
delta_t = loc_args[5]
atmos_refract = loc_args[6]
sst = loc_args[7]
for i in range(unixtime.shape[0]):
utime = unixtime[i]
jd = julian_day(utime)
jde = julian_ephemeris_day(jd, delta_t)
jc = julian_century(jd)
jce = julian_ephemeris_century(jde)
jme = julian_ephemeris_millennium(jce)
L = heliocentric_longitude(jme)
B = heliocentric_latitude(jme)
R = heliocentric_radius_vector(jme)
Theta = geocentric_longitude(L)
beta = geocentric_latitude(B)
x0 = mean_elongation(jce)
x1 = mean_anomaly_sun(jce)
x2 = mean_anomaly_moon(jce)
x3 = moon_argument_latitude(jce)
x4 = moon_ascending_longitude(jce)
delta_psi = longitude_nutation(jce, x0, x1, x2, x3, x4)
delta_epsilon = obliquity_nutation(jce, x0, x1, x2, x3, x4)
epsilon0 = mean_ecliptic_obliquity(jme)
epsilon = true_ecliptic_obliquity(epsilon0, delta_epsilon)
delta_tau = aberration_correction(R)
lamd = apparent_sun_longitude(Theta, delta_psi, delta_tau)
v0 = mean_sidereal_time(jd, jc)
v = apparent_sidereal_time(v0, delta_psi, epsilon)
alpha = geocentric_sun_right_ascension(lamd, epsilon, beta)
delta = geocentric_sun_declination(lamd, epsilon, beta)
if sst:
out[0, i] = v
out[1, i] = alpha
out[2, i] = delta
continue
m = sun_mean_longitude(jme)
eot = equation_of_time(m, alpha, delta_psi, epsilon)
H = local_hour_angle(v, lon, alpha)
xi = equatorial_horizontal_parallax(R)
u = uterm(lat)
x = xterm(u, lat, elev)
y = yterm(u, lat, elev)
delta_alpha = parallax_sun_right_ascension(x, xi, H, delta)
alpha_prime = topocentric_sun_right_ascension(alpha, delta_alpha)
delta_prime = topocentric_sun_declination(delta, x, y, xi, delta_alpha,
H)
H_prime = topocentric_local_hour_angle(H, delta_alpha)
e0 = topocentric_elevation_angle_without_atmosphere(lat, delta_prime,
H_prime)
delta_e = atmospheric_refraction_correction(pressure, temp, e0,
atmos_refract)
e = topocentric_elevation_angle(e0, delta_e)
theta = topocentric_zenith_angle(e)
theta0 = topocentric_zenith_angle(e0)
gamma = topocentric_astronomers_azimuth(H_prime, delta_prime, lat)
phi = topocentric_azimuth_angle(gamma)
out[0, i] = theta
out[1, i] = theta0
out[2, i] = e
out[3, i] = e0
out[4, i] = phi
out[5, i] = eot
def solar_position_numba(unixtime, lat, lon, elev, pressure, temp, delta_t,
atmos_refract, numthreads, sst=False):
"""Calculate the solar position using the numba compiled functions
and multiple threads. Very slow if functions are not numba compiled.
"""
loc_args = np.array([lat, lon, elev, pressure, temp, delta_t,
atmos_refract, sst])
ulength = unixtime.shape[0]
result = np.empty((6, ulength), dtype=np.float64)
if unixtime.dtype != np.float64:
unixtime = unixtime.astype(np.float64)
if ulength < numthreads:
pvl_logger.warning('The number of threads is more than the length of' +
' the time array. Only using %s threads.',
ulength)
numthreads = ulength
if numthreads <= 1:
pvl_logger.debug('Only using one thread for calculation')
solar_position_loop(unixtime, loc_args, result)
return result
split0 = np.array_split(unixtime, numthreads)
split2 = np.array_split(result, numthreads, axis=1)
chunks = [[a0, loc_args, split2[i]] for i, a0 in enumerate(split0)]
# Spawn one thread per chunk
threads = [threading.Thread(target=solar_position_loop, args=chunk)
for chunk in chunks]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return result
def solar_position_numpy(unixtime, lat, lon, elev, pressure, temp, delta_t,
atmos_refract, numthreads, sst=False):
"""Calculate the solar position assuming unixtime is a numpy array. Note
this function will not work if the solar position functions were
compiled with numba.
"""
jd = julian_day(unixtime)
jde = julian_ephemeris_day(jd, delta_t)
jc = julian_century(jd)
jce = julian_ephemeris_century(jde)
jme = julian_ephemeris_millennium(jce)
L = heliocentric_longitude(jme)
B = heliocentric_latitude(jme)
R = heliocentric_radius_vector(jme)
Theta = geocentric_longitude(L)
beta = geocentric_latitude(B)
x0 = mean_elongation(jce)
x1 = mean_anomaly_sun(jce)
x2 = mean_anomaly_moon(jce)
x3 = moon_argument_latitude(jce)
x4 = moon_ascending_longitude(jce)
delta_psi = longitude_nutation(jce, x0, x1, x2, x3, x4)
delta_epsilon = obliquity_nutation(jce, x0, x1, x2, x3, x4)
epsilon0 = mean_ecliptic_obliquity(jme)
epsilon = true_ecliptic_obliquity(epsilon0, delta_epsilon)
delta_tau = aberration_correction(R)
lamd = apparent_sun_longitude(Theta, delta_psi, delta_tau)
v0 = mean_sidereal_time(jd, jc)
v = apparent_sidereal_time(v0, delta_psi, epsilon)
alpha = geocentric_sun_right_ascension(lamd, epsilon, beta)
delta = geocentric_sun_declination(lamd, epsilon, beta)
if sst:
return v, alpha, delta
m = sun_mean_longitude(jme)
eot = equation_of_time(m, alpha, delta_psi, epsilon)
H = local_hour_angle(v, lon, alpha)
xi = equatorial_horizontal_parallax(R)
u = uterm(lat)
x = xterm(u, lat, elev)
y = yterm(u, lat, elev)
delta_alpha = parallax_sun_right_ascension(x, xi, H, delta)
alpha_prime = topocentric_sun_right_ascension(alpha, delta_alpha)
delta_prime = topocentric_sun_declination(delta, x, y, xi, delta_alpha, H)
H_prime = topocentric_local_hour_angle(H, delta_alpha)
e0 = topocentric_elevation_angle_without_atmosphere(lat, delta_prime,
H_prime)
delta_e = atmospheric_refraction_correction(pressure, temp, e0,
atmos_refract)
e = topocentric_elevation_angle(e0, delta_e)
theta = topocentric_zenith_angle(e)
theta0 = topocentric_zenith_angle(e0)
gamma = topocentric_astronomers_azimuth(H_prime, delta_prime, lat)
phi = topocentric_azimuth_angle(gamma)
return theta, theta0, e, e0, phi, eot
def solar_position(unixtime, lat, lon, elev, pressure, temp, delta_t,
atmos_refract, numthreads=8, sst=False):
"""
Calculate the solar position using the
NREL SPA algorithm described in [1].
If numba is installed, the functions can be compiled
and the code runs quickly. If not, the functions
still evaluate but use numpy instead.
Parameters
----------
unixtime : numpy array
Array of unix/epoch timestamps to calculate solar position for.
Unixtime is the number of seconds since Jan. 1, 1970 00:00:00 UTC.
A pandas.DatetimeIndex is easily converted using .astype(np.int64)/10**9
lat : float
Latitude to calculate solar position for
lon : float
Longitude to calculate solar position for
elev : float
Elevation of location in meters
pressure : int or float
avg. yearly pressure at location in Pascals;
used for atmospheric correction
temp : int or float
avg. yearly temperature at location in
degrees C; used for atmospheric correction
delta_t : float, optional
Difference between terrestrial time and UT1.
By default, use USNO historical data and predictions
    atmos_refract : float, optional
The approximate atmospheric refraction (in degrees)
at sunrise and sunset.
numthreads: int, optional
Number of threads to use for computation if numba>=0.17
is installed.
Returns
-------
Numpy Array with elements:
apparent zenith,
zenith,
elevation,
apparent_elevation,
azimuth,
equation_of_time
References
----------
[1] I. Reda and A. Andreas, Solar position algorithm for solar radiation
applications. Solar Energy, vol. 76, no. 5, pp. 577-589, 2004.
[2] I. Reda and A. Andreas, Corrigendum to Solar position algorithm for
solar radiation applications. Solar Energy, vol. 81, no. 6, p. 838, 2007.
"""
if USE_NUMBA:
do_calc = solar_position_numba
else:
do_calc = solar_position_numpy
result = do_calc(unixtime, lat, lon, elev, pressure,
temp, delta_t, atmos_refract, numthreads,
sst)
if not isinstance(result, np.ndarray):
try:
result = np.array(result)
except Exception:
pass
return result
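# Minimal usage sketch (illustrative only; the location, weather and delta_t
# values are arbitrary assumptions, not recommendations):
#
#     import pandas as pd
#     times = pd.date_range('2015-06-01', periods=24, freq='H', tz='UTC')
#     unixtime = np.array(times.astype(np.int64)) / 10**9
#     result = solar_position(unixtime, lat=40.0, lon=-105.0, elev=1650.,
#                             pressure=101325., temp=12., delta_t=67.0,
#                             atmos_refract=0.5667)
#     apparent_zenith = result[0]  # rows follow the order documented above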
def transit_sunrise_sunset(dates, lat, lon, delta_t, numthreads):
"""
Calculate the sun transit, sunrise, and sunset
for a set of dates at a given location.
Parameters
----------
dates : array
Numpy array of ints/floats corresponding to the Unix time
for the dates of interest, must be midnight UTC (00:00+00:00)
on the day of interest.
lat : float
Latitude of location to perform calculation for
lon : float
Longitude of location
delta_t : float
Difference between terrestrial time and UT. USNO has tables.
numthreads : int
        Number of threads to use for calculation (if using numba)
Returns
-------
tuple : (transit, sunrise, sunset) localized to UTC
"""
if ((dates % 86400) != 0.0).any():
raise ValueError('Input dates must be at 00:00 UTC')
utday = (dates // 86400) * 86400
ttday0 = utday - delta_t
ttdayn1 = ttday0 - 86400
ttdayp1 = ttday0 + 86400
# index 0 is v, 1 is alpha, 2 is delta
utday_res = solar_position(utday, 0, 0, 0, 0, 0, delta_t,
0, numthreads, sst=True)
v = utday_res[0]
ttday0_res = solar_position(ttday0, 0, 0, 0, 0, 0, delta_t,
0, numthreads, sst=True)
ttdayn1_res = solar_position(ttdayn1, 0, 0, 0, 0, 0, delta_t,
0, numthreads, sst=True)
ttdayp1_res = solar_position(ttdayp1, 0, 0, 0, 0, 0, delta_t,
0, numthreads, sst=True)
m0 = (ttday0_res[1] - lon - v) / 360
cos_arg = ((np.sin(np.radians(-0.8333)) - np.sin(np.radians(lat))
* np.sin(np.radians(ttday0_res[2]))) /
(np.cos(np.radians(lat)) * np.cos(np.radians(ttday0_res[2]))))
cos_arg[abs(cos_arg) > 1] = np.nan
H0 = np.degrees(np.arccos(cos_arg)) % 180
m = np.empty((3, len(utday)))
m[0] = m0 % 1
m[1] = (m[0] - H0 / 360)
m[2] = (m[0] + H0 / 360)
# need to account for fractions of day that may be the next or previous
# day in UTC
add_a_day = m[2] >= 1
sub_a_day = m[1] < 0
m[1] = m[1] % 1
m[2] = m[2] % 1
vs = v + 360.985647 * m
n = m + delta_t / 86400
a = ttday0_res[1] - ttdayn1_res[1]
a[abs(a) > 2] = a[abs(a) > 2] % 1
ap = ttday0_res[2] - ttdayn1_res[2]
ap[abs(ap) > 2] = ap[abs(ap) > 2] % 1
b = ttdayp1_res[1] - ttday0_res[1]
b[abs(b) > 2] = b[abs(b) > 2] % 1
bp = ttdayp1_res[2] - ttday0_res[2]
bp[abs(bp) > 2] = bp[abs(bp) > 2] % 1
c = b - a
cp = bp - ap
alpha_prime = ttday0_res[1] + (n * (a + b + c * n)) / 2
delta_prime = ttday0_res[2] + (n * (ap + bp + cp * n)) / 2
Hp = (vs + lon - alpha_prime) % 360
Hp[Hp >= 180] = Hp[Hp >= 180] - 360
h = np.degrees(np.arcsin(np.sin(np.radians(lat)) *
np.sin(np.radians(delta_prime)) +
np.cos(np.radians(lat)) *
np.cos(np.radians(delta_prime))
* np.cos(np.radians(Hp))))
T = (m[0] - Hp[0] / 360) * 86400
R = (m[1] + (h[1] + 0.8333) / (360 * np.cos(np.radians(delta_prime[1])) *
np.cos(np.radians(lat)) *
np.sin(np.radians(Hp[1])))) * 86400
S = (m[2] + (h[2] + 0.8333) / (360 * np.cos(np.radians(delta_prime[2])) *
np.cos(np.radians(lat)) *
np.sin(np.radians(Hp[2])))) * 86400
S[add_a_day] += 86400
R[sub_a_day] -= 86400
transit = T + utday
sunrise = R + utday
sunset = S + utday
return transit, sunrise, sunset
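# Illustrative call (coordinates and delta_t are assumptions): `dates` must be
# whole UTC days in unix time, e.g. 1420070400.0 is 2015-01-01 00:00 UTC.
#
#     transit, sunrise, sunset = transit_sunrise_sunset(
#         np.array([1420070400.0]), lat=40.0, lon=-105.0, delta_t=67.0,
#         numthreads=1)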
| bsd-3-clause |
jwlawson/tensorflow | tensorflow/contrib/factorization/python/ops/kmeans_test.py | 12 | 20083 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for KMeans."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import time
import numpy as np
from sklearn.cluster import KMeans as SklearnKMeans
# pylint: disable=g-import-not-at-top
from tensorflow.contrib.factorization.python.ops import kmeans as kmeans_lib
from tensorflow.python.estimator import run_config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner
FLAGS = flags.FLAGS
def normalize(x):
return x / np.sqrt(np.sum(x * x, axis=-1, keepdims=True))
def cosine_similarity(x, y):
return np.dot(normalize(x), np.transpose(normalize(y)))
def make_random_centers(num_centers, num_dims, center_norm=500):
return np.round(
np.random.rand(num_centers, num_dims).astype(np.float32) * center_norm)
def make_random_points(centers, num_points, max_offset=20):
num_centers, num_dims = centers.shape
assignments = np.random.choice(num_centers, num_points)
offsets = np.round(
np.random.randn(num_points, num_dims).astype(np.float32) * max_offset)
return (centers[assignments] + offsets, assignments, np.add.reduce(
offsets * offsets, 1))
class KMeansTestBase(test.TestCase):
def input_fn(self,
batch_size=None,
points=None,
randomize=None,
num_epochs=None):
"""Returns an input_fn that randomly selects batches from given points."""
batch_size = batch_size or self.batch_size
points = points if points is not None else self.points
num_points = points.shape[0]
if randomize is None:
randomize = (self.use_mini_batch and
self.mini_batch_steps_per_iteration <= 1)
def _fn():
x = constant_op.constant(points)
if batch_size == num_points:
return input_lib.limit_epochs(x, num_epochs=num_epochs), None
if randomize:
indices = random_ops.random_uniform(
constant_op.constant([batch_size]),
minval=0,
maxval=num_points - 1,
dtype=dtypes.int32,
seed=10)
else:
# We need to cycle through the indices sequentially. We create a queue
# to maintain the list of indices.
q = data_flow_ops.FIFOQueue(num_points, dtypes.int32, ())
# Conditionally initialize the Queue.
def _init_q():
with ops.control_dependencies(
[q.enqueue_many(math_ops.range(num_points))]):
return control_flow_ops.no_op()
init_q = control_flow_ops.cond(q.size() <= 0, _init_q,
control_flow_ops.no_op)
with ops.control_dependencies([init_q]):
offsets = q.dequeue_many(batch_size)
with ops.control_dependencies([q.enqueue_many(offsets)]):
indices = array_ops.identity(offsets)
batch = array_ops.gather(x, indices)
return (input_lib.limit_epochs(batch, num_epochs=num_epochs), None)
return _fn
@staticmethod
def config(tf_random_seed):
return run_config.RunConfig().replace(tf_random_seed=tf_random_seed)
@property
def initial_clusters(self):
return kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT
@property
def batch_size(self):
return self.num_points
@property
def use_mini_batch(self):
return False
@property
def mini_batch_steps_per_iteration(self):
return 1
class KMeansTest(KMeansTestBase):
def setUp(self):
np.random.seed(3)
self.num_centers = 5
self.num_dims = 2
self.num_points = 1000
self.true_centers = make_random_centers(self.num_centers, self.num_dims)
self.points, _, self.scores = make_random_points(self.true_centers,
self.num_points)
self.true_score = np.add.reduce(self.scores)
def _kmeans(self, relative_tolerance=None):
return kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=self.initial_clusters,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
random_seed=24,
relative_tolerance=relative_tolerance)
def test_clusters(self):
kmeans = self._kmeans()
kmeans.train(input_fn=self.input_fn(), steps=1)
clusters = kmeans.cluster_centers()
self.assertAllEqual(list(clusters.shape), [self.num_centers, self.num_dims])
def test_fit(self):
kmeans = self._kmeans()
kmeans.train(input_fn=self.input_fn(), steps=1)
score1 = kmeans.score(input_fn=self.input_fn(batch_size=self.num_points))
steps = 10 * self.num_points // self.batch_size
kmeans.train(input_fn=self.input_fn(), steps=steps)
score2 = kmeans.score(input_fn=self.input_fn(batch_size=self.num_points))
self.assertTrue(score1 > score2)
self.assertNear(self.true_score, score2, self.true_score * 0.05)
def test_monitor(self):
if self.use_mini_batch:
      # We don't test the use_mini_batch case since the loss value can be noisy.
return
kmeans = kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=self.initial_clusters,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=self.config(14),
random_seed=12,
relative_tolerance=1e-4)
kmeans.train(
input_fn=self.input_fn(),
# Force it to train until the relative tolerance monitor stops it.
steps=None)
score = kmeans.score(input_fn=self.input_fn(batch_size=self.num_points))
self.assertNear(self.true_score, score, self.true_score * 0.01)
def _infer_helper(self, kmeans, clusters, num_points):
points, true_assignments, true_offsets = make_random_points(
clusters, num_points)
input_fn = self.input_fn(batch_size=num_points, points=points, num_epochs=1)
# Test predict
assignments = list(kmeans.predict_cluster_index(input_fn))
self.assertAllEqual(assignments, true_assignments)
# Test score
score = kmeans.score(input_fn=lambda: (constant_op.constant(points), None))
self.assertNear(score, np.sum(true_offsets), 0.01 * score)
# Test transform
transform = list(kmeans.transform(input_fn))
true_transform = np.maximum(
0,
np.sum(np.square(points), axis=1, keepdims=True) -
2 * np.dot(points, np.transpose(clusters)) + np.transpose(
np.sum(np.square(clusters), axis=1, keepdims=True)))
self.assertAllClose(transform, true_transform, rtol=0.05, atol=10)
def test_infer(self):
kmeans = self._kmeans()
# Make a call to fit to initialize the cluster centers.
max_steps = 1
kmeans.train(input_fn=self.input_fn(), max_steps=max_steps)
clusters = kmeans.cluster_centers()
# Run inference on small datasets.
self._infer_helper(kmeans, clusters, 10)
self._infer_helper(kmeans, clusters, 1)
class KMeansTestMultiStageInit(KMeansTestBase):
def test_random(self):
points = np.array(
[[1, 2], [3, 4], [5, 6], [7, 8], [9, 0]], dtype=np.float32)
kmeans = kmeans_lib.KMeansClustering(
num_clusters=points.shape[0],
initial_clusters=kmeans_lib.KMeansClustering.RANDOM_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=True,
mini_batch_steps_per_iteration=100,
random_seed=24,
relative_tolerance=None)
kmeans.train(
input_fn=self.input_fn(batch_size=1, points=points, randomize=False),
steps=1)
clusters = kmeans.cluster_centers()
self.assertAllEqual(points, clusters)
def test_kmeans_plus_plus_batch_just_right(self):
points = np.array([[1, 2]], dtype=np.float32)
kmeans = kmeans_lib.KMeansClustering(
num_clusters=points.shape[0],
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=True,
mini_batch_steps_per_iteration=100,
random_seed=24,
relative_tolerance=None)
kmeans.train(
input_fn=self.input_fn(batch_size=1, points=points, randomize=False),
steps=1)
clusters = kmeans.cluster_centers()
self.assertAllEqual(points, clusters)
def test_kmeans_plus_plus_batch_too_small(self):
points = np.array(
[[1, 2], [3, 4], [5, 6], [7, 8], [9, 0]], dtype=np.float32)
kmeans = kmeans_lib.KMeansClustering(
num_clusters=points.shape[0],
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=True,
mini_batch_steps_per_iteration=100,
random_seed=24,
relative_tolerance=None)
with self.assertRaisesOpError(AssertionError):
kmeans.train(
input_fn=self.input_fn(batch_size=4, points=points, randomize=False),
steps=1)
class MiniBatchKMeansTest(KMeansTest):
@property
def batch_size(self):
return 50
@property
def use_mini_batch(self):
return True
class FullBatchAsyncKMeansTest(KMeansTest):
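  # With batch_size * mini_batch_steps_per_iteration == num_points, each
  # cluster-center update sees roughly one full pass over the data, which is
  # how these tests exercise the full-batch asynchronous update path.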
@property
def batch_size(self):
return 50
@property
def use_mini_batch(self):
return True
@property
def mini_batch_steps_per_iteration(self):
return self.num_points // self.batch_size
class KMeansCosineDistanceTest(KMeansTestBase):
def setUp(self):
self.points = np.array(
[[2.5, 0.1], [2, 0.2], [3, 0.1], [4, 0.2], [0.1, 2.5], [0.2, 2],
[0.1, 3], [0.2, 4]],
dtype=np.float32)
self.num_points = self.points.shape[0]
self.true_centers = np.array(
[
normalize(
np.mean(normalize(self.points)[0:4, :], axis=0,
keepdims=True))[0],
normalize(
np.mean(normalize(self.points)[4:, :], axis=0,
keepdims=True))[0]
],
dtype=np.float32)
self.true_assignments = np.array([0] * 4 + [1] * 4)
self.true_score = len(self.points) - np.tensordot(
normalize(self.points), self.true_centers[self.true_assignments])
self.num_centers = 2
self.kmeans = kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=kmeans_lib.KMeansClustering.RANDOM_INIT,
distance_metric=kmeans_lib.KMeansClustering.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=self.config(3))
def test_fit(self):
max_steps = 10 * self.num_points // self.batch_size
self.kmeans.train(input_fn=self.input_fn(), max_steps=max_steps)
centers = normalize(self.kmeans.cluster_centers())
centers = centers[centers[:, 0].argsort()]
true_centers = self.true_centers[self.true_centers[:, 0].argsort()]
self.assertAllClose(centers, true_centers, atol=0.04)
def test_transform(self):
self.kmeans.train(input_fn=self.input_fn(), steps=10)
centers = normalize(self.kmeans.cluster_centers())
true_transform = 1 - cosine_similarity(self.points, centers)
transform = list(
self.kmeans.transform(
input_fn=self.input_fn(batch_size=self.num_points, num_epochs=1)))
self.assertAllClose(transform, true_transform, atol=1e-3)
def test_predict(self):
max_steps = 10 * self.num_points // self.batch_size
self.kmeans.train(input_fn=self.input_fn(), max_steps=max_steps)
centers = normalize(self.kmeans.cluster_centers())
assignments = list(
self.kmeans.predict_cluster_index(
input_fn=self.input_fn(num_epochs=1, batch_size=self.num_points)))
self.assertAllClose(
centers[assignments],
self.true_centers[self.true_assignments],
atol=1e-2)
centers = centers[centers[:, 0].argsort()]
true_centers = self.true_centers[self.true_centers[:, 0].argsort()]
self.assertAllClose(centers, true_centers, atol=0.04)
score = self.kmeans.score(
input_fn=self.input_fn(batch_size=self.num_points))
self.assertAllClose(score, self.true_score, atol=1e-2)
def test_predict_kmeans_plus_plus(self):
    # Most points are concentrated near one center. KMeans++ is likely to find
# the less populated centers.
points = np.array(
[[2.5, 3.5], [2.5, 3.5], [-2, 3], [-2, 3], [-3, -3], [-3.1, -3.2],
[-2.8, -3.], [-2.9, -3.1], [-3., -3.1], [-3., -3.1], [-3.2, -3.],
[-3., -3.]],
dtype=np.float32)
true_centers = np.array(
[
normalize(
np.mean(normalize(points)[0:2, :], axis=0, keepdims=True))[0],
normalize(
np.mean(normalize(points)[2:4, :], axis=0, keepdims=True))[0],
normalize(np.mean(normalize(points)[4:, :], axis=0,
keepdims=True))[0]
],
dtype=np.float32)
true_assignments = [0] * 2 + [1] * 2 + [2] * 8
true_score = len(points) - np.tensordot(
normalize(points), true_centers[true_assignments])
kmeans = kmeans_lib.KMeansClustering(
3,
initial_clusters=self.initial_clusters,
distance_metric=kmeans_lib.KMeansClustering.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=self.config(3))
kmeans.train(
input_fn=lambda: (constant_op.constant(points), None), steps=30)
centers = normalize(kmeans.cluster_centers())
self.assertAllClose(
sorted(centers.tolist()), sorted(true_centers.tolist()), atol=1e-2)
def _input_fn():
return (input_lib.limit_epochs(
constant_op.constant(points), num_epochs=1), None)
assignments = list(kmeans.predict_cluster_index(input_fn=_input_fn))
self.assertAllClose(
centers[assignments], true_centers[true_assignments], atol=1e-2)
score = kmeans.score(input_fn=lambda: (constant_op.constant(points), None))
self.assertAllClose(score, true_score, atol=1e-2)
class MiniBatchKMeansCosineTest(KMeansCosineDistanceTest):
@property
def batch_size(self):
return 2
@property
def use_mini_batch(self):
return True
class FullBatchAsyncKMeansCosineTest(KMeansCosineDistanceTest):
@property
def batch_size(self):
return 2
@property
def use_mini_batch(self):
return True
@property
def mini_batch_steps_per_iteration(self):
return self.num_points // self.batch_size
class KMeansBenchmark(benchmark.Benchmark):
"""Base class for benchmarks."""
def SetUp(self,
dimension=50,
num_clusters=50,
points_per_cluster=10000,
center_norm=500,
cluster_width=20):
np.random.seed(123456)
self.num_clusters = num_clusters
self.num_points = num_clusters * points_per_cluster
self.centers = make_random_centers(
self.num_clusters, dimension, center_norm=center_norm)
self.points, _, scores = make_random_points(
self.centers, self.num_points, max_offset=cluster_width)
self.score = float(np.sum(scores))
def _report(self, num_iters, start, end, scores):
print(scores)
self.report_benchmark(
iters=num_iters,
wall_time=(end - start) / num_iters,
extras={'true_sum_squared_distances': self.score,
'fit_scores': scores})
def _fit(self, num_iters=10):
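    # Overridden by the TensorflowKMeansBenchmark and SklearnKMeansBenchmark
    # subclasses below.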
pass
def benchmark_01_2dim_5center_500point(self):
self.SetUp(dimension=2, num_clusters=5, points_per_cluster=100)
self._fit()
def benchmark_02_20dim_20center_10kpoint(self):
self.SetUp(dimension=20, num_clusters=20, points_per_cluster=500)
self._fit()
def benchmark_03_100dim_50center_50kpoint(self):
self.SetUp(dimension=100, num_clusters=50, points_per_cluster=1000)
self._fit()
def benchmark_03_100dim_50center_50kpoint_unseparated(self):
self.SetUp(
dimension=100,
num_clusters=50,
points_per_cluster=1000,
cluster_width=250)
self._fit()
def benchmark_04_100dim_500center_500kpoint(self):
self.SetUp(dimension=100, num_clusters=500, points_per_cluster=1000)
self._fit(num_iters=4)
def benchmark_05_100dim_500center_500kpoint_unseparated(self):
self.SetUp(
dimension=100,
num_clusters=500,
points_per_cluster=1000,
cluster_width=250)
self._fit(num_iters=4)
class TensorflowKMeansBenchmark(KMeansBenchmark):
def _fit(self, num_iters=10):
scores = []
start = time.time()
for i in range(num_iters):
print('Starting tensorflow KMeans: %d' % i)
tf_kmeans = kmeans_lib.KMeansClustering(
self.num_clusters,
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
kmeans_plus_plus_num_retries=int(math.log(self.num_clusters) + 2),
random_seed=i * 42,
relative_tolerance=1e-6,
config=self.config(3))
tf_kmeans.train(
input_fn=lambda: (constant_op.constant(self.points), None), steps=50)
_ = tf_kmeans.cluster_centers()
scores.append(
tf_kmeans.score(
input_fn=lambda: (constant_op.constant(self.points), None)))
self._report(num_iters, start, time.time(), scores)
class SklearnKMeansBenchmark(KMeansBenchmark):
def _fit(self, num_iters=10):
scores = []
start = time.time()
for i in range(num_iters):
print('Starting sklearn KMeans: %d' % i)
sklearn_kmeans = SklearnKMeans(
n_clusters=self.num_clusters,
init='k-means++',
max_iter=50,
n_init=1,
tol=1e-4,
random_state=i * 42)
      sklearn_kmeans.fit(self.points)
scores.append(sklearn_kmeans.inertia_)
self._report(num_iters, start, time.time(), scores)
class KMeansTestQueues(test.TestCase):
def input_fn(self):
def _fn():
queue = data_flow_ops.FIFOQueue(
capacity=10, dtypes=dtypes.float32, shapes=[10, 3])
enqueue_op = queue.enqueue(array_ops.zeros([10, 3], dtype=dtypes.float32))
queue_runner.add_queue_runner(
queue_runner.QueueRunner(queue, [enqueue_op]))
return queue.dequeue(), None
return _fn
# This test makes sure that there are no deadlocks when using a QueueRunner.
  # Note that since cluster initialization is dependent on inputs, if input
# is generated using a QueueRunner, one has to make sure that these runners
# are started before the initialization.
def test_queues(self):
kmeans = kmeans_lib.KMeansClustering(5)
kmeans.train(input_fn=self.input_fn(), steps=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
JeanKossaifi/scikit-learn | examples/text/mlcomp_sparse_document_classification.py | 292 | 4498 | """
========================================================
Classification of text documents: using a MLComp dataset
========================================================
This is an example showing how scikit-learn can be used to classify
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
The dataset used in this example is the 20 newsgroups dataset and should be
downloaded from http://mlcomp.org (free registration required):
http://mlcomp.org/datasets/379
Once downloaded, unzip the archive somewhere on your filesystem.
For instance in::
% mkdir -p ~/data/mlcomp
% cd ~/data/mlcomp
% unzip /path/to/dataset-379-20news-18828_XXXXX.zip
You should get a folder ``~/data/mlcomp/379`` with a file named ``metadata``
and subfolders ``raw``, ``train`` and ``test`` holding the text documents
organized by newsgroups.
Then set the ``MLCOMP_DATASETS_HOME`` environment variable pointing to
the root folder holding the uncompressed archive::
% export MLCOMP_DATASETS_HOME="~/data/mlcomp"
Then you are ready to run this example using your favorite python shell::
% ipython examples/mlcomp_sparse_document_classification.py
"""
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
import sys
import os
import numpy as np
import scipy.sparse as sp
import pylab as pl
from sklearn.datasets import load_mlcomp
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.naive_bayes import MultinomialNB
print(__doc__)
if 'MLCOMP_DATASETS_HOME' not in os.environ:
print("MLCOMP_DATASETS_HOME not set; please follow the above instructions")
sys.exit(0)
# Load the training set
print("Loading 20 newsgroups training set... ")
news_train = load_mlcomp('20news-18828', 'train')
print(news_train.DESCR)
print("%d documents" % len(news_train.filenames))
print("%d categories" % len(news_train.target_names))
print("Extracting features from the dataset using a sparse vectorizer")
t0 = time()
vectorizer = TfidfVectorizer(encoding='latin1')
X_train = vectorizer.fit_transform((open(f).read()
for f in news_train.filenames))
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_train.shape)
assert sp.issparse(X_train)
y_train = news_train.target
print("Loading 20 newsgroups test set... ")
t0 = time()
news_test = load_mlcomp('20news-18828', 'test')
print("done in %fs" % (time() - t0))
print("Predicting the labels of the test set...")
print("%d documents" % len(news_test.filenames))
print("%d categories" % len(news_test.target_names))
print("Extracting features from the dataset using the same vectorizer")
t0 = time()
X_test = vectorizer.transform((open(f).read() for f in news_test.filenames))
y_test = news_test.target
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_test.shape)
###############################################################################
# Benchmark classifiers
def benchmark(clf_class, params, name):
print("parameters:", params)
t0 = time()
clf = clf_class(**params).fit(X_train, y_train)
print("done in %fs" % (time() - t0))
if hasattr(clf, 'coef_'):
print("Percentage of non zeros coef: %f"
% (np.mean(clf.coef_ != 0) * 100))
print("Predicting the outcomes of the testing set")
t0 = time()
pred = clf.predict(X_test)
print("done in %fs" % (time() - t0))
print("Classification report on test set for classifier:")
print(clf)
print()
print(classification_report(y_test, pred,
target_names=news_test.target_names))
cm = confusion_matrix(y_test, pred)
print("Confusion matrix:")
print(cm)
# Show confusion matrix
pl.matshow(cm)
pl.title('Confusion matrix of the %s classifier' % name)
pl.colorbar()
print("Testbenching a linear classifier...")
parameters = {
'loss': 'hinge',
'penalty': 'l2',
'n_iter': 50,
'alpha': 0.00001,
'fit_intercept': True,
}
benchmark(SGDClassifier, parameters, 'SGD')
print("Testbenching a MultinomialNB classifier...")
parameters = {'alpha': 0.01}
benchmark(MultinomialNB, parameters, 'MultinomialNB')
pl.show()
| bsd-3-clause |
nickgentoo/scikit-learn-graph | scripts/ODDKernel_FeatureExtraction.py | 1 | 2394 | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 13 13:02:41 2015
Copyright 2015 Nicolo' Navarin
This file is part of scikit-learn-graph.
scikit-learn-graph is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
scikit-learn-graph is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with scikit-learn-graph. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import numpy as np
import time
from sklearn import cross_validation
from skgraph.feature_extraction.graph import ODDSTVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.svm import LinearSVC
if __name__=='__main__':
if len(sys.argv)<0:
sys.exit("python ODDKernel_FeatureExtraction.py")
max_radius=4
la=1.4
#load bursi dataset
from skgraph import datasets
dat=datasets.load_graphs_bursi()
g_it=dat.graphs
print "Number of examples:",len(dat.graphs)
y=dat.target
ODDvec=ODDSTVectorizer.ODDSTVectorizer(r=max_radius,l=la)
print "Computing features"
start_time = time.time()
    X=ODDvec.transform(g_it) #instance-features matrix
elapsed_time = time.time() - start_time
print "Took %d s" % (elapsed_time)
print 'Instances: %d Features: %d with an avg of %d features per instance' % (X.shape[0], X.shape[1], X.getnnz()/X.shape[0])
print "Non zero different features %d" % (len(np.unique(X.nonzero()[1])))
#induce a predictive model
    predictor = SGDClassifier(n_iter=150, shuffle=True)
    #predictor = LinearSVC()
print "Training SGD classifier optimizing accuracy"
scores = cross_validation.cross_val_score(predictor, X, y,cv=10, scoring='accuracy')
    print('Accuracy: %.4f +- %.4f' % (np.mean(scores),np.std(scores)))
print "Training SGD classifier optimizing AUROC"
scores = cross_validation.cross_val_score(predictor, X, y,cv=10, scoring='roc_auc')
print('AUC ROC: %.4f +- %.4f' % (np.mean(scores),np.std(scores)))
| gpl-3.0 |
bamueh/dark-matter | dark/orfs.py | 3 | 2745 | import numpy as np
START_CODONS = set(['ATG'])
STOP_CODONS = set(['TAA', 'TAG', 'TGA'])
def findCodons(seq, codons):
"""
Find all instances of the codons in 'codons' in the given sequence.
seq: A Bio.Seq.Seq instance.
codons: A set of codon strings.
Return: a generator yielding matching codon offsets.
"""
seqLen = len(seq)
start = 0
while start < seqLen:
triplet = str(seq[start:start + 3])
if triplet in codons:
yield start
start = start + 3
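# Illustrative usage (an assumed example, not part of the original module):
# for seq = Seq("ATGAAATAG"), list(findCodons(seq, START_CODONS)) == [0] and
# list(findCodons(seq, STOP_CODONS)) == [6], since only in-frame triplets of
# the sequence passed in are inspected.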
def addORFs(fig, seq, minX, maxX, offsetAdjuster):
"""
fig is a matplotlib figure.
seq is a Bio.Seq.Seq.
minX: the smallest x coordinate.
maxX: the largest x coordinate.
offsetAdjuster: a function to adjust feature X axis offsets for plotting.
"""
for frame in range(3):
target = seq[frame:]
for (codons, codonType, color) in (
(START_CODONS, 'start', 'green'),
(STOP_CODONS, 'stop', 'red')):
offsets = list(map(offsetAdjuster, findCodons(target, codons)))
if offsets:
fig.plot(offsets, np.tile(frame, len(offsets)), marker='.',
markersize=4, color=color, linestyle='None')
fig.axis([minX, maxX, -1, 3])
fig.set_yticks(np.arange(3))
fig.set_ylabel('Frame', fontsize=17)
fig.set_title('Subject start (%s) and stop (%s) codons' % (
', '.join(sorted(START_CODONS)), ', '.join(sorted(STOP_CODONS))),
fontsize=20)
def addReversedORFs(fig, seq, minX, maxX, offsetAdjuster):
"""
fig is a matplotlib figure.
seq is a Bio.Seq.Seq (the reverse complement of the sequence we're
plotting against).
minX: the smallest x coordinate.
maxX: the largest x coordinate.
offsetAdjuster: a function to adjust feature X axis offsets for plotting.
"""
for frame in range(3):
target = seq[frame:]
for (codons, codonType, color) in (
(START_CODONS, 'start', 'green'),
(STOP_CODONS, 'stop', 'red')):
offsets = [maxX - offsetAdjuster(offset)
for offset in findCodons(target, codons)]
if offsets:
fig.plot(offsets, np.tile(frame, len(offsets)), marker='.',
markersize=4, color=color, linestyle='None')
fig.axis([minX, maxX, -1, 3])
fig.set_yticks(np.arange(3))
fig.set_ylabel('Frame', fontsize=17)
fig.set_title('Reversed subject start (%s) and stop (%s) codons' %
(', '.join(sorted(START_CODONS)),
', '.join(sorted(STOP_CODONS))), fontsize=20)
| mit |
kmike/scikit-learn | examples/grid_search_text_feature_extraction.py | 3 | 4167 | """
==========================================================
Sample pipeline for text feature extraction and evaluation
==========================================================
The dataset used in this example is the 20 newsgroups dataset which will be
automatically downloaded and then cached and reused for the document
classification example.
You can adjust the number of categories by giving their names to the dataset
loader, or by setting them to None to get all 20 of them.
Here is a sample output of a run on a quad-core machine::
Loading 20 newsgroups dataset for categories:
['alt.atheism', 'talk.religion.misc']
1427 documents
2 categories
Performing grid search...
pipeline: ['vect', 'tfidf', 'clf']
parameters:
{'clf__alpha': (1.0000000000000001e-05, 9.9999999999999995e-07),
'clf__n_iter': (10, 50, 80),
'clf__penalty': ('l2', 'elasticnet'),
'tfidf__use_idf': (True, False),
'vect__max_n': (1, 2),
'vect__max_df': (0.5, 0.75, 1.0),
'vect__max_features': (None, 5000, 10000, 50000)}
done in 1737.030s
Best score: 0.940
Best parameters set:
clf__alpha: 9.9999999999999995e-07
clf__n_iter: 50
clf__penalty: 'elasticnet'
tfidf__use_idf: True
vect__max_n: 2
vect__max_df: 0.75
vect__max_features: 50000
"""
# Author: Olivier Grisel <[email protected]>
# Peter Prettenhofer <[email protected]>
# Mathieu Blondel <[email protected]>
# License: Simplified BSD
from __future__ import print_function
from pprint import pprint
from time import time
import logging
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
data = fetch_20newsgroups(subset='train', categories=categories)
print("%d documents" % len(data.filenames))
print("%d categories" % len(data.target_names))
print()
###############################################################################
# define a pipeline combining a text feature extractor with a simple
# classifier
pipeline = Pipeline([
('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier()),
])
parameters = {
# uncommenting more parameters will give better exploring power but will
# increase processing time in a combinatorial way
'vect__max_df': (0.5, 0.75, 1.0),
#'vect__max_features': (None, 5000, 10000, 50000),
'vect__ngram_range': ((1, 1), (1, 2)), # unigrams or bigrams
#'tfidf__use_idf': (True, False),
#'tfidf__norm': ('l1', 'l2'),
'clf__alpha': (0.00001, 0.000001),
'clf__penalty': ('l2', 'elasticnet'),
#'clf__n_iter': (10, 50, 80),
}
if __name__ == "__main__":
# multiprocessing requires the fork to happen in a __main__ protected
# block
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)
print("Performing grid search...")
print("pipeline:", [name for name, _ in pipeline.steps])
print("parameters:")
pprint(parameters)
t0 = time()
grid_search.fit(data.data, data.target)
print("done in %0.3fs" % (time() - t0))
print()
print("Best score: %0.3f" % grid_search.best_score_)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
| bsd-3-clause |
piotroxp/scibibscan | scib/lib/python3.5/site-packages/numpy/lib/npyio.py | 42 | 71218 | from __future__ import division, absolute_import, print_function
import sys
import os
import re
import itertools
import warnings
import weakref
from operator import itemgetter
import numpy as np
from . import format
from ._datasource import DataSource
from numpy.core.multiarray import packbits, unpackbits
from ._iotools import (
LineSplitter, NameValidator, StringConverter, ConverterError,
ConverterLockError, ConversionWarning, _is_string_like, has_nested_fields,
flatten_dtype, easy_dtype, _bytes_to_name
)
from numpy.compat import (
asbytes, asstr, asbytes_nested, bytes, basestring, unicode
)
if sys.version_info[0] >= 3:
import pickle
else:
import cPickle as pickle
from future_builtins import map
loads = pickle.loads
__all__ = [
'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
]
class BagObj(object):
"""
BagObj(obj)
Convert attribute look-ups to getitems on the object passed in.
Parameters
----------
obj : class instance
Object on which attribute look-up is performed.
Examples
--------
>>> from numpy.lib.npyio import BagObj as BO
>>> class BagDemo(object):
... def __getitem__(self, key): # An instance of BagObj(BagDemo)
... # will call this method when any
... # attribute look-up is required
... result = "Doesn't matter what you want, "
... return result + "you're gonna get this"
...
>>> demo_obj = BagDemo()
>>> bagobj = BO(demo_obj)
>>> bagobj.hello_there
"Doesn't matter what you want, you're gonna get this"
>>> bagobj.I_can_be_anything
"Doesn't matter what you want, you're gonna get this"
"""
def __init__(self, obj):
# Use weakref to make NpzFile objects collectable by refcount
self._obj = weakref.proxy(obj)
def __getattribute__(self, key):
try:
return object.__getattribute__(self, '_obj')[key]
except KeyError:
raise AttributeError(key)
def __dir__(self):
"""
Enables dir(bagobj) to list the files in an NpzFile.
This also enables tab-completion in an interpreter or IPython.
"""
return object.__getattribute__(self, '_obj').keys()
def zipfile_factory(*args, **kwargs):
import zipfile
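    # Force ZIP64 extensions so that archives larger than the classic zip
    # size limit (~4 GiB) can still be written and read.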
kwargs['allowZip64'] = True
return zipfile.ZipFile(*args, **kwargs)
class NpzFile(object):
"""
NpzFile(fid)
A dictionary-like object with lazy-loading of files in the zipped
archive provided on construction.
`NpzFile` is used to load files in the NumPy ``.npz`` data archive
format. It assumes that files in the archive have a ``.npy`` extension,
other files are ignored.
The arrays and file strings are lazily loaded on either
getitem access using ``obj['key']`` or attribute lookup using
``obj.f.key``. A list of all files (without ``.npy`` extensions) can
be obtained with ``obj.files`` and the ZipFile object itself using
``obj.zip``.
Attributes
----------
files : list of str
List of all files in the archive with a ``.npy`` extension.
zip : ZipFile instance
The ZipFile object initialized with the zipped archive.
f : BagObj instance
        An object on which attribute look-up can be performed as an alternative
to getitem access on the `NpzFile` instance itself.
allow_pickle : bool, optional
Allow loading pickled data. Default: True
pickle_kwargs : dict, optional
Additional keyword arguments to pass on to pickle.load.
These are only useful when loading object arrays saved on
Python 2 when using Python 3.
Parameters
----------
fid : file or str
The zipped archive to open. This is either a file-like object
or a string containing the path to the archive.
own_fid : bool, optional
Whether NpzFile should close the file handle.
Requires that `fid` is a file-like object.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npz = np.load(outfile)
>>> isinstance(npz, np.lib.io.NpzFile)
True
>>> npz.files
['y', 'x']
>>> npz['x'] # getitem access
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> npz.f.x # attribute lookup
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
def __init__(self, fid, own_fid=False, allow_pickle=True,
pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an
# optional component of the so-called standard library.
_zip = zipfile_factory(fid)
self._files = _zip.namelist()
self.files = []
self.allow_pickle = allow_pickle
self.pickle_kwargs = pickle_kwargs
for x in self._files:
if x.endswith('.npy'):
self.files.append(x[:-4])
else:
self.files.append(x)
self.zip = _zip
self.f = BagObj(self)
if own_fid:
self.fid = fid
else:
self.fid = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
"""
Close the file.
"""
if self.zip is not None:
self.zip.close()
self.zip = None
if self.fid is not None:
self.fid.close()
self.fid = None
self.f = None # break reference cycle
def __del__(self):
self.close()
def __getitem__(self, key):
# FIXME: This seems like it will copy strings around
# more than is strictly necessary. The zipfile
# will read the string and then
# the format.read_array will copy the string
# to another place in memory.
# It would be better if the zipfile could read
# (or at least uncompress) the data
# directly into the array memory.
member = 0
if key in self._files:
member = 1
elif key in self.files:
member = 1
key += '.npy'
if member:
bytes = self.zip.open(key)
magic = bytes.read(len(format.MAGIC_PREFIX))
bytes.close()
if magic == format.MAGIC_PREFIX:
bytes = self.zip.open(key)
return format.read_array(bytes,
allow_pickle=self.allow_pickle,
pickle_kwargs=self.pickle_kwargs)
else:
return self.zip.read(key)
else:
raise KeyError("%s is not a file in the archive" % key)
def __iter__(self):
return iter(self.files)
def items(self):
"""
Return a list of tuples, with each tuple (filename, array in file).
"""
return [(f, self[f]) for f in self.files]
def iteritems(self):
"""Generator that returns tuples (filename, array in file)."""
for f in self.files:
yield (f, self[f])
def keys(self):
"""Return files in the archive with a ``.npy`` extension."""
return self.files
def iterkeys(self):
"""Return an iterator over the files in the archive."""
return self.__iter__()
def __contains__(self, key):
return self.files.__contains__(key)
def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
encoding='ASCII'):
"""
Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.
Parameters
----------
file : file-like object or string
The file to read. File-like objects must support the
``seek()`` and ``read()`` methods. Pickled files require that the
file-like object support the ``readline()`` method as well.
mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode (see
`numpy.memmap` for a detailed description of the modes). A
memory-mapped array is kept on disk. However, it can be accessed
and sliced like any ndarray. Memory mapping is especially useful
for accessing small fragments of large files without reading the
entire file into memory.
allow_pickle : bool, optional
Allow loading pickled object arrays stored in npy files. Reasons for
disallowing pickles include security, as loading pickled data can
execute arbitrary code. If pickles are disallowed, loading object
arrays will fail.
Default: True
fix_imports : bool, optional
Only useful when loading Python 2 generated pickled files on Python 3,
which includes npy/npz files containing object arrays. If `fix_imports`
is True, pickle will try to map the old Python 2 names to the new names
used in Python 3.
encoding : str, optional
What encoding to use when reading Python 2 strings. Only useful when
loading Python 2 generated pickled files on Python 3, which includes
npy/npz files containing object arrays. Values other than 'latin1',
'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
data. Default: 'ASCII'
Returns
-------
result : array, tuple, dict, etc.
Data stored in the file. For ``.npz`` files, the returned instance
of NpzFile class must be closed to avoid leaking file descriptors.
Raises
------
IOError
If the input file does not exist or cannot be read.
ValueError
The file contains an object array, but allow_pickle=False given.
See Also
--------
save, savez, savez_compressed, loadtxt
memmap : Create a memory-map to an array stored in a file on disk.
Notes
-----
- If the file contains pickle data, then whatever object is stored
in the pickle is returned.
- If the file is a ``.npy`` file, then a single array is returned.
- If the file is a ``.npz`` file, then a dictionary-like object is
returned, containing ``{filename: array}`` key-value pairs, one for
each file in the archive.
- If the file is a ``.npz`` file, the returned value supports the
context manager protocol in a similar fashion to the open function::
with load('foo.npz') as data:
a = data['a']
The underlying file descriptor is closed when exiting the 'with'
block.
Examples
--------
Store data to disk, and load it again:
>>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
>>> np.load('/tmp/123.npy')
array([[1, 2, 3],
[4, 5, 6]])
Store compressed data to disk, and load it again:
>>> a=np.array([[1, 2, 3], [4, 5, 6]])
>>> b=np.array([1, 2])
>>> np.savez('/tmp/123.npz', a=a, b=b)
>>> data = np.load('/tmp/123.npz')
>>> data['a']
array([[1, 2, 3],
[4, 5, 6]])
>>> data['b']
array([1, 2])
>>> data.close()
Mem-map the stored array, and then access the second row
directly from disk:
>>> X = np.load('/tmp/123.npy', mmap_mode='r')
>>> X[1, :]
memmap([4, 5, 6])
"""
import gzip
own_fid = False
if isinstance(file, basestring):
fid = open(file, "rb")
own_fid = True
else:
fid = file
if encoding not in ('ASCII', 'latin1', 'bytes'):
# The 'encoding' value for pickle also affects what encoding
# the serialized binary data of Numpy arrays is loaded
# in. Pickle does not pass on the encoding information to
# Numpy. The unpickling code in numpy.core.multiarray is
# written to assume that unicode data appearing where binary
# should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'.
#
# Other encoding values can corrupt binary data, and we
# purposefully disallow them. For the same reason, the errors=
# argument is not exposed, as values other than 'strict'
# result can similarly silently corrupt numerical data.
raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")
if sys.version_info[0] >= 3:
pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = {}
try:
# Code to distinguish from NumPy binary files and pickles.
_ZIP_PREFIX = asbytes('PK\x03\x04')
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
fid.seek(-N, 1) # back-up
if magic.startswith(_ZIP_PREFIX):
# zip-file (assume .npz)
# Transfer file ownership to NpzFile
tmp = own_fid
own_fid = False
return NpzFile(fid, own_fid=tmp, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
elif magic == format.MAGIC_PREFIX:
# .npy file
if mmap_mode:
return format.open_memmap(file, mode=mmap_mode)
else:
return format.read_array(fid, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
else:
# Try a pickle
if not allow_pickle:
raise ValueError("allow_pickle=False, but file does not contain "
"non-pickled data")
try:
return pickle.load(fid, **pickle_kwargs)
except:
raise IOError(
"Failed to interpret file %s as a pickle" % repr(file))
finally:
if own_fid:
fid.close()
def save(file, arr, allow_pickle=True, fix_imports=True):
"""
Save an array to a binary file in NumPy ``.npy`` format.
Parameters
----------
file : file or str
File or filename to which the data is saved. If file is a file-object,
then the filename is unchanged. If file is a string, a ``.npy``
extension will be appended to the file name if it does not already
have one.
allow_pickle : bool, optional
Allow saving object arrays using Python pickles. Reasons for disallowing
pickles include security (loading pickled data can execute arbitrary
code) and portability (pickled objects may not be loadable on different
Python installations, for example if the stored objects require libraries
that are not available, and not all pickled data is compatible between
Python 2 and Python 3).
Default: True
fix_imports : bool, optional
Only useful in forcing objects in object arrays on Python 3 to be
pickled in a Python 2 compatible way. If `fix_imports` is True, pickle
will try to map the new Python 3 names to the old module names used in
Python 2, so that the pickle data stream is readable with Python 2.
arr : array_like
Array data to be saved.
See Also
--------
savez : Save several arrays into a ``.npz`` archive
savetxt, load
Notes
-----
For a description of the ``.npy`` format, see the module docstring
of `numpy.lib.format` or the Numpy Enhancement Proposal
http://docs.scipy.org/doc/numpy/neps/npy-format.html
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> np.save(outfile, x)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
own_fid = False
if isinstance(file, basestring):
if not file.endswith('.npy'):
file = file + '.npy'
fid = open(file, "wb")
own_fid = True
else:
fid = file
if sys.version_info[0] >= 3:
pickle_kwargs = dict(fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = None
try:
arr = np.asanyarray(arr)
format.write_array(fid, arr, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
finally:
if own_fid:
fid.close()
def savez(file, *args, **kwds):
"""
Save several arrays into a single file in uncompressed ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword
arguments are given, the corresponding variable names, in the ``.npz``
file will match the keyword names.
Parameters
----------
file : str or file
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string, the ``.npz``
extension will be appended to the file name if it is not already there.
args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
save : Save a single array to a binary file in NumPy format.
savetxt : Save an array to a file as plain text.
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
description of the ``.npy`` format, see `numpy.lib.format` or the
Numpy Enhancement Proposal
http://docs.scipy.org/doc/numpy/neps/npy-format.html
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with \\*args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
['arr_1', 'arr_0']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with \\**kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npzfile = np.load(outfile)
>>> npzfile.files
['y', 'x']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
_savez(file, args, kwds, False)
def savez_compressed(file, *args, **kwds):
"""
Save several arrays into a single file in compressed ``.npz`` format.
If keyword arguments are given, then filenames are taken from the keywords.
If arguments are passed in with no keywords, then stored file names are
arr_0, arr_1, etc.
Parameters
----------
file : str
File name of ``.npz`` file.
args : Arguments
Function arguments.
kwds : Keyword arguments
Keywords.
See Also
--------
numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
numpy.load : Load the files created by savez_compressed.
"""
_savez(file, args, kwds, True)
def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
# Import deferred for startup time improvement
import tempfile
if isinstance(file, basestring):
if not file.endswith('.npz'):
file = file + '.npz'
namedict = kwds
for i, val in enumerate(args):
key = 'arr_%d' % i
if key in namedict.keys():
raise ValueError(
"Cannot use un-named variables and keyword %s" % key)
namedict[key] = val
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zipf = zipfile_factory(file, mode="w", compression=compression)
# Stage arrays in a temporary file on disk, before writing to zip.
fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
os.close(fd)
try:
for key, val in namedict.items():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val),
allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
fid.close()
fid = None
zipf.write(tmpfile, arcname=fname)
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
zipf.close()
def _getconv(dtype):
""" Find the correct dtype converter. Adapted from matplotlib """
def floatconv(x):
        x = x.lower()
if b'0x' in x:
return float.fromhex(asstr(x))
return float(x)
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.uint64):
return np.uint64
if issubclass(typ, np.int64):
return np.int64
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.floating):
return floatconv
elif issubclass(typ, np.complex):
return lambda x: complex(asstr(x))
elif issubclass(typ, np.bytes_):
return bytes
else:
return str
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False,
ndmin=0):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file or str
File, filename, or generator to read. If the filename extension is
``.gz`` or ``.bz2``, the file is first decompressed. Note that
generators should return byte strings for Python 3k.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a
structured data-type, the resulting array will be 1-dimensional, and
each row will be interpreted as an element of the array. In this
case, the number of columns used must match the number of fields in
the data-type.
comments : str or sequence, optional
The characters or list of characters used to indicate the start of a
comment;
default: '#'.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will convert
that column to a float. E.g., if column 0 is a date string:
``converters = {0: datestr2num}``. Converters can also be used to
provide a default value for missing data (but see also `genfromtxt`):
``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None.
skiprows : int, optional
Skip the first `skiprows` lines; default: 0.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. When used with a structured
data-type, arrays are returned for each field. Default is False.
ndmin : int, optional
The returned array will have at least `ndmin` dimensions.
Otherwise mono-dimensional axes will be squeezed.
Legal values: 0 (default), 1 or 2.
.. versionadded:: 1.6.0
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads MATLAB data files
Notes
-----
This function aims to be a fast reader for simply formatted files. The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
.. versionadded:: 1.10.0
The strings produced by the Python float.hex method can be used as
input for floats.
Examples
--------
>>> from io import StringIO # StringIO behaves like a file object
>>> c = StringIO("0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
>>> d = StringIO("M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO("1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
>>> y
array([ 2., 4.])
"""
# Type conversions for Py3 convenience
if comments is not None:
if isinstance(comments, (basestring, bytes)):
comments = [asbytes(comments)]
else:
comments = [asbytes(comment) for comment in comments]
# Compile regex for comments beforehand
comments = (re.escape(comment) for comment in comments)
regex_comments = re.compile(asbytes('|').join(comments))
user_converters = converters
if delimiter is not None:
delimiter = asbytes(delimiter)
if usecols is not None:
usecols = list(usecols)
fown = False
try:
if _is_string_like(fname):
fown = True
if fname.endswith('.gz'):
import gzip
fh = iter(gzip.GzipFile(fname))
elif fname.endswith('.bz2'):
import bz2
fh = iter(bz2.BZ2File(fname))
elif sys.version_info[0] == 2:
fh = iter(open(fname, 'U'))
else:
fh = iter(open(fname))
else:
fh = iter(fname)
except TypeError:
raise ValueError('fname must be a string, file handle, or generator')
X = []
def flatten_dtype(dt):
"""Unpack a structured data-type, and produce re-packing info."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
shape = dt.shape
if len(shape) == 0:
return ([dt.base], None)
else:
packing = [(shape[-1], list)]
if len(shape) > 1:
for dim in dt.shape[-2::-1]:
packing = [(dim*packing[0][0], packing*dim)]
return ([dt.base] * int(np.prod(dt.shape)), packing)
else:
types = []
packing = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt, flat_packing = flatten_dtype(tp)
types.extend(flat_dt)
# Avoid extra nesting for subarrays
if len(tp.shape) > 0:
packing.extend(flat_packing)
else:
packing.append((len(flat_dt), flat_packing))
return (types, packing)
def pack_items(items, packing):
"""Pack items into nested lists based on re-packing info."""
if packing is None:
return items[0]
elif packing is tuple:
return tuple(items)
elif packing is list:
return list(items)
else:
start = 0
ret = []
for length, subpacking in packing:
ret.append(pack_items(items[start:start+length], subpacking))
start += length
return tuple(ret)
def split_line(line):
"""Chop off comments, strip, and split at delimiter.
Note that although the file is opened as text, this function
returns bytes.
"""
line = asbytes(line)
if comments is not None:
line = regex_comments.split(asbytes(line), maxsplit=1)[0]
line = line.strip(asbytes('\r\n'))
if line:
return line.split(delimiter)
else:
return []
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in range(skiprows):
next(fh)
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
try:
while not first_vals:
first_line = next(fh)
first_vals = split_line(first_line)
except StopIteration:
# End of lines reached
first_line = ''
first_vals = []
warnings.warn('loadtxt: Empty input file: "%s"' % fname)
N = len(usecols or first_vals)
dtype_types, packing = flatten_dtype(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in range(N)]
if N > 1:
packing = [(N, tuple)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).items():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
converters[i] = conv
# Parse each line, including the first
for i, line in enumerate(itertools.chain([first_line], fh)):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[i] for i in usecols]
if len(vals) != N:
line_num = i + skiprows + 1
raise ValueError("Wrong number of columns at line %d"
% line_num)
# Convert each value according to its column and store
items = [conv(val) for (conv, val) in zip(converters, vals)]
# Then pack it according to the dtype's nesting
items = pack_items(items, packing)
X.append(items)
finally:
if fown:
fh.close()
X = np.array(X, dtype)
# Multicolumn data are returned with shape (1, N, M), i.e.
# (1, 1, M) for a single row - remove the singleton dimension there
if X.ndim == 3 and X.shape[:2] == (1, 1):
X.shape = (1, -1)
# Verify that the array has at least dimensions `ndmin`.
# Check correctness of the values of `ndmin`
if ndmin not in [0, 1, 2]:
raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)
# Tweak the size and shape of the arrays - remove extraneous dimensions
if X.ndim > ndmin:
X = np.squeeze(X)
# and ensure we have the minimum number of dimensions asked for
# - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
if X.ndim < ndmin:
if ndmin == 1:
X = np.atleast_1d(X)
elif ndmin == 2:
X = np.atleast_2d(X).T
if unpack:
if len(dtype_types) > 1:
# For structured arrays, return an array for each field.
return [X[field] for field in dtype.names]
else:
return X.T
else:
return X
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
footer='', comments='# '):
"""
Save an array to a text file.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : array_like
Data to be saved to a text file.
fmt : str or sequence of strs, optional
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored. For complex `X`, the legal options
for `fmt` are:
a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted
like `' (%s+%sj)' % (fmt, fmt)`
b) a full string specifying every real and imaginary part, e.g.
`' %.4e %+.4j %.4e %+.4j %.4e %+.4j'` for 3 columns
c) a list of specifiers, one per column - in this case, the real
and imaginary part must have separate specifiers,
e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
delimiter : str, optional
String or character separating columns.
newline : str, optional
String or character separating lines.
.. versionadded:: 1.5.0
header : str, optional
String that will be written at the beginning of the file.
.. versionadded:: 1.7.0
footer : str, optional
String that will be written at the end of the file.
.. versionadded:: 1.7.0
comments : str, optional
String that will be prepended to the ``header`` and ``footer`` strings,
to mark them as comments. Default: '# ', as expected by e.g.
``numpy.loadtxt``.
.. versionadded:: 1.7.0
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into an uncompressed ``.npz`` archive
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
``+`` : Forces to precede result with + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
- For integer specifiers (eg. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
This explanation of ``fmt`` is not complete, for an exhaustive
specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<http://docs.python.org/library/string.html#
format-specification-mini-language>`_, Python Documentation.
Examples
--------
>>> x = y = z = np.arange(0.0,5.0,1.0)
>>> np.savetxt('test.out', x, delimiter=',') # X is an array
>>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
# Py3 conversions first
if isinstance(fmt, bytes):
fmt = asstr(fmt)
delimiter = asstr(delimiter)
own_fh = False
if _is_string_like(fname):
own_fh = True
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, 'wb')
else:
if sys.version_info[0] >= 3:
fh = open(fname, 'wb')
else:
fh = open(fname, 'w')
elif hasattr(fname, 'write'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
try:
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.descr)
else:
ncol = X.shape[1]
iscomplex_X = np.iscomplexobj(X)
# `fmt` can be a string with multiple insertion points or a
        # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
elif isinstance(fmt, str):
n_fmt_chars = fmt.count('%')
error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
if n_fmt_chars == 1:
if iscomplex_X:
fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
else:
fmt = [fmt, ] * ncol
format = delimiter.join(fmt)
elif iscomplex_X and n_fmt_chars != (2 * ncol):
raise error
elif ((not iscomplex_X) and n_fmt_chars != ncol):
raise error
else:
format = fmt
else:
raise ValueError('invalid fmt: %r' % (fmt,))
if len(header) > 0:
header = header.replace('\n', '\n' + comments)
fh.write(asbytes(comments + header + newline))
if iscomplex_X:
for row in X:
row2 = []
for number in row:
row2.append(number.real)
row2.append(number.imag)
fh.write(asbytes(format % tuple(row2) + newline))
else:
for row in X:
try:
fh.write(asbytes(format % tuple(row) + newline))
except TypeError:
raise TypeError("Mismatch between array dtype ('%s') and "
"format specifier ('%s')"
% (str(X.dtype), format))
if len(footer) > 0:
footer = footer.replace('\n', '\n' + comments)
fh.write(asbytes(comments + footer + newline))
finally:
if own_fh:
fh.close()
def fromregex(file, regexp, dtype):
"""
Construct an array from a text file, using regular expression parsing.
The returned array is always a structured array, and is constructed from
all matches of the regular expression in the file. Groups in the regular
expression are converted to fields of the structured array.
Parameters
----------
file : str or file
File name or file object to read.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
dtype : dtype or list of dtypes
Dtype for the structured array.
Returns
-------
output : ndarray
The output array, containing the part of the content of `file` that
was matched by `regexp`. `output` is always a structured array.
Raises
------
TypeError
When `dtype` is not a valid dtype for a structured array.
See Also
--------
fromstring, loadtxt
Notes
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
`doc.structured_arrays`.
Examples
--------
>>> f = open('test.dat', 'w')
>>> f.write("1312 foo\\n1534 bar\\n444 qux")
>>> f.close()
>>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
>>> output = np.fromregex('test.dat', regexp,
... [('num', np.int64), ('key', 'S3')])
>>> output
array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
dtype=[('num', '<i8'), ('key', '|S3')])
>>> output['num']
array([1312, 1534, 444], dtype=int64)
"""
own_fh = False
if not hasattr(file, "read"):
file = open(file, 'rb')
own_fh = True
try:
if not hasattr(regexp, 'match'):
regexp = re.compile(asbytes(regexp))
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
seq = regexp.findall(file.read())
if seq and not isinstance(seq[0], tuple):
# Only one group is in the regexp.
# Create the new array as a single data-type and then
# re-interpret as a single-field structured array.
newdtype = np.dtype(dtype[dtype.names[0]])
output = np.array(seq, dtype=newdtype)
output.dtype = dtype
else:
output = np.array(seq, dtype=dtype)
return output
finally:
if own_fh:
file.close()
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skip_header=0, skip_footer=0, converters=None,
missing_values=None, filling_values=None, usecols=None,
names=None, excludelist=None, deletechars=None,
replace_space='_', autostrip=False, case_sensitive=True,
defaultfmt="f%i", unpack=None, usemask=False, loose=True,
invalid_raise=True, max_rows=None):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skip_header` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file or str
File, filename, or generator to read. If the filename extension is
`.gz` or `.bz2`, the file is first decompressed. Note that
generators must return byte strings in Python 3k.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
        All the characters occurring on a line after a comment are discarded.
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
skiprows : int, optional
`skiprows` was removed in numpy 1.10. Please use `skip_header` instead.
skip_header : int, optional
The number of lines to skip at the beginning of the file.
skip_footer : int, optional
The number of lines to skip at the end of the file.
converters : variable, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing : variable, optional
`missing` was removed in numpy 1.10. Please use `missing_values`
instead.
missing_values : variable, optional
The set of strings corresponding to missing data.
filling_values : variable, optional
The set of values to be used as default when the data are missing.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first valid line
after the first `skip_header` lines.
If `names` is a sequence or a single-string of comma-separated names,
the names will be used to define the field names in a structured dtype.
If `names` is None, the names of the dtype fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
        ['return','file','print']. Excluded names have an underscore appended:
        for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
replace_space : char, optional
        Character(s) used in replacement of white spaces in the variable
        names. By default, use a '_'.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
        unpacked using ``x, y, z = genfromtxt(...)``
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
loose : bool, optional
If True, do not raise errors for invalid values.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
max_rows : int, optional
The maximum number of rows to read. Must not be used with skip_footer
at the same time. If given, the value must be at least 1. Default is
to read the entire file.
.. versionadded:: 1.10.0
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with `names`),
there must not be any header in the file (else a ValueError
exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
References
----------
.. [1] Numpy User Guide, section `I/O with Numpy
<http://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
Examples
    --------
>>> from io import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO("1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Using dtype = None
>>> s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Specifying dtype and names
>>> s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
An example with fixed-width columns
>>> s = StringIO("11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
array((1, 1.3, 'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
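    A further illustrative sketch (not part of the original examples) of
    `missing_values` and `filling_values`; empty fields are treated as missing
    and replaced by the given filling value:
    >>> s = StringIO("1,,3\\n4,5,6")
    >>> data = np.genfromtxt(s, delimiter=",", missing_values="",
    ...                      filling_values=-999)
    >>> data[0, 1]
    -999.0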
"""
if max_rows is not None:
if skip_footer:
raise ValueError(
"The keywords 'skip_footer' and 'max_rows' can not be "
"specified at the same time.")
if max_rows < 1:
raise ValueError("'max_rows' must be at least 1.")
# Py3 data conversions to bytes, for convenience
if comments is not None:
comments = asbytes(comments)
if isinstance(delimiter, unicode):
delimiter = asbytes(delimiter)
if isinstance(missing_values, (unicode, list, tuple)):
missing_values = asbytes_nested(missing_values)
#
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
raise TypeError(
"The input argument 'converter' should be a valid dictionary "
"(got '%s' instead)" % type(user_converters))
# Initialize the filehandle, the LineSplitter and the NameValidator
own_fhd = False
try:
if isinstance(fname, basestring):
if sys.version_info[0] == 2:
fhd = iter(np.lib._datasource.open(fname, 'rbU'))
else:
fhd = iter(np.lib._datasource.open(fname, 'rb'))
own_fhd = True
else:
fhd = iter(fname)
except TypeError:
raise TypeError(
"fname must be a string, filehandle, or generator. "
"(got %s instead)" % type(fname))
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip)._handyman
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Skip the first `skip_header` rows
for i in range(skip_header):
next(fhd)
# Keep on until we find the first valid values
first_values = None
try:
while not first_values:
first_line = next(fhd)
if names is True:
if comments in first_line:
first_line = (
asbytes('').join(first_line.split(comments)[1:]))
first_values = split_line(first_line)
except StopIteration:
# return an empty array if the datafile is empty
first_line = asbytes('')
first_values = []
warnings.warn('genfromtxt: Empty input file: "%s"' % fname)
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if fval in comments:
del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([_bytes_to_name(_.strip())
for _ in first_values])
first_line = asbytes('')
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names,
excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Make sure the names is a list (for 2.5)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
elif (names is not None) and (dtype is not None):
names = list(dtype.names)
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
# Define the list of missing_values (one column: one list)
missing_values = [list([asbytes('')]) for _ in range(nbcols)]
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, bytes):
user_value = user_missing_values.split(asbytes(","))
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values
if user_filling_values is None:
user_filling_values = []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped,
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
# Initialize the converters ................................
if dtype is None:
        # Note: we can't use "[converter] * nbcols", as that would give nbcols
        # references to one single converter, instead of nbcols distinct converters.
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (j, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(j):
try:
j = names.index(j)
i = j
except ValueError:
continue
elif usecols:
try:
i = usecols.index(j)
except ValueError:
# Unused converter specified
continue
else:
i = j
# Find the value to test - first_line is not filtered by usecols:
if len(first_line):
testing_value = first_values[j]
else:
testing_value = None
converters[i].update(conv, locked=True,
testing_value=testing_value,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
# Fixme: possible error as following variable never used.
#miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
if usecols:
# Select only the columns we need
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values,
missing_values)]))
if len(rows) == max_rows:
break
if own_fhd:
fhd.close()
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = [itemgetter(i)(_m) for _m in rows]
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = map(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
nbinvalid = len(invalid)
if nbinvalid > 0:
nbrows = len(rows) + nbinvalid - skip_footer
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbinvalid_skipped = len([_ for _ in invalid
if _[0] > nbrows + skip_header])
invalid = invalid[:nbinvalid - nbinvalid_skipped]
skip_footer -= nbinvalid_skipped
#
# nbrows -= skip_footer
# errmsg = [template % (i, nb)
# for (i, nb) in invalid if i < nbrows]
# else:
errmsg = [template % (i, nb)
for (i, nb) in invalid]
if len(errmsg):
errmsg.insert(0, "Some errors were detected !")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning)
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
if loose:
rows = list(
zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
else:
rows = list(
zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v in (type('S'), np.string_)]
# ... and take the largest number of chars.
for i in strcolidx:
column_types[i] = "|S%i" % max(len(row[i]) for row in data)
#
if names is None:
# If the dtype is uniform, don't define names, else use ''
base = set([c.type for c in converters if c._checked])
if len(base) == 1:
(ddtype, mdtype) = (list(base)[0], np.bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(column_types)]
if usemask:
mdtype = [(defaultfmt % i, np.bool)
for (i, dt) in enumerate(column_types)]
else:
ddtype = list(zip(names, column_types))
mdtype = list(zip(names, [np.bool] * len(column_types)))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
# Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
raise NotImplementedError(
"Nested fields involving objects are not supported...")
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(
masks, dtype=np.dtype([('', np.bool) for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for i, ttype in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if ttype == np.string_:
ttype = "|S%i" % max(len(row[i]) for row in data)
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names:
mdtype = [(_, np.bool) for _ in dtype.names]
else:
mdtype = np.bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
if usemask and names:
for (name, conv) in zip(names or (), converters):
missing_values = [conv(_) for _ in conv.missing_values
if _ != asbytes('')]
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
def ndfromtxt(fname, **kwargs):
"""
Load ASCII data stored in a file and return it as a single array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function.
"""
kwargs['usemask'] = False
return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
"""
Load ASCII data stored in a text file and return a masked array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
kwargs['usemask'] = True
return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
If ``usemask=False`` a standard `recarray` is returned,
if ``usemask=True`` a MaskedRecords array is returned.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
kwargs.setdefault("dtype", None)
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
def recfromcsv(fname, **kwargs):
"""
Load ASCII data stored in a comma-separated file.
The returned array is a record array (if ``usemask=False``, see
`recarray`) or a masked record array (if ``usemask=True``,
see `ma.mrecords.MaskedRecords`).
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
# Set default kwargs for genfromtxt as relevant to csv import.
kwargs.setdefault("case_sensitive", "lower")
kwargs.setdefault("names", True)
kwargs.setdefault("delimiter", ",")
kwargs.setdefault("dtype", None)
output = genfromtxt(fname, **kwargs)
usemask = kwargs.get("usemask", False)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
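# Illustrative usage sketch for recfromcsv (added; not part of the original
# module -- the dtypes shown are what one would typically expect, since field
# names are taken from the header row and lower-cased by default):
#
#     from io import BytesIO
#     rec = recfromcsv(BytesIO(b"Name,Value\nfoo,1\nbar,2"))
#     rec["name"]   # byte-string array ['foo', 'bar']
#     rec["value"]  # integer array [1, 2]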
| mit |
Xeralux/tensorflow | tensorflow/contrib/losses/python/metric_learning/metric_loss_ops_test.py | 41 | 20535 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for triplet_semihard_loss."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.losses.python import metric_learning as metric_loss_ops
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.platform import test
try:
# pylint: disable=g-import-not-at-top
from sklearn import datasets
from sklearn import metrics
HAS_SKLEARN = True
except ImportError:
HAS_SKLEARN = False
def pairwise_distance_np(feature, squared=False):
"""Computes the pairwise distance matrix in numpy.
Args:
feature: 2-D numpy array of size [number of data, feature dimension]
squared: Boolean. If true, output is the pairwise squared euclidean
distance matrix; else, output is the pairwise euclidean distance matrix.
Returns:
pairwise_distances: 2-D numpy array of size
[number of data, number of data].
"""
triu = np.triu_indices(feature.shape[0], 1)
upper_tri_pdists = np.linalg.norm(feature[triu[1]] - feature[triu[0]], axis=1)
if squared:
upper_tri_pdists **= 2.
num_data = feature.shape[0]
pairwise_distances = np.zeros((num_data, num_data))
pairwise_distances[np.triu_indices(num_data, 1)] = upper_tri_pdists
# Make symmetrical.
pairwise_distances = pairwise_distances + pairwise_distances.T - np.diag(
pairwise_distances.diagonal())
return pairwise_distances
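# Quick sanity sketch (added, not part of the original tests): for two 1-D
# points [0.] and [3.], pairwise_distance_np returns [[0., 3.], [3., 0.]],
# and [[0., 9.], [9., 0.]] when squared=True.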
class ContrastiveLossTest(test.TestCase):
def testContrastive(self):
with self.test_session():
num_data = 10
feat_dim = 6
margin = 1.0
embeddings_anchor = np.random.rand(num_data, feat_dim).astype(np.float32)
embeddings_positive = np.random.rand(num_data, feat_dim).astype(
np.float32)
labels = np.random.randint(0, 2, size=(num_data,)).astype(np.float32)
# Compute the loss in NP
dist = np.sqrt(
np.sum(np.square(embeddings_anchor - embeddings_positive), axis=1))
loss_np = np.mean(
labels * np.square(dist) +
(1.0 - labels) * np.square(np.maximum(margin - dist, 0.0)))
# Compute the loss with TF
loss_tf = metric_loss_ops.contrastive_loss(
labels=ops.convert_to_tensor(labels),
embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
embeddings_positive=ops.convert_to_tensor(embeddings_positive),
margin=margin)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
class TripletSemiHardLossTest(test.TestCase):
def testTripletSemiHard(self):
with self.test_session():
num_data = 10
feat_dim = 6
margin = 1.0
num_classes = 4
embedding = np.random.rand(num_data, feat_dim).astype(np.float32)
labels = np.random.randint(
0, num_classes, size=(num_data)).astype(np.float32)
# Reshape labels to compute adjacency matrix.
labels_reshaped = np.reshape(labels, (labels.shape[0], 1))
# Compute the loss in NP.
adjacency = np.equal(labels_reshaped, labels_reshaped.T)
pdist_matrix = pairwise_distance_np(embedding, squared=True)
loss_np = 0.0
num_positives = 0.0
for i in range(num_data):
for j in range(num_data):
if adjacency[i][j] > 0.0 and i != j:
num_positives += 1.0
pos_distance = pdist_matrix[i][j]
neg_distances = []
for k in range(num_data):
if adjacency[i][k] == 0:
neg_distances.append(pdist_matrix[i][k])
# Sort by distance.
neg_distances.sort()
chosen_neg_distance = neg_distances[0]
for l in range(len(neg_distances)):
chosen_neg_distance = neg_distances[l]
if chosen_neg_distance > pos_distance:
break
loss_np += np.maximum(
0.0, margin - chosen_neg_distance + pos_distance)
loss_np /= num_positives
# Compute the loss in TF.
loss_tf = metric_loss_ops.triplet_semihard_loss(
labels=ops.convert_to_tensor(labels),
embeddings=ops.convert_to_tensor(embedding),
margin=margin)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
class LiftedStructLossTest(test.TestCase):
def testLiftedStruct(self):
with self.test_session():
num_data = 10
feat_dim = 6
margin = 1.0
num_classes = 4
embedding = np.random.rand(num_data, feat_dim).astype(np.float32)
labels = np.random.randint(
0, num_classes, size=(num_data)).astype(np.float32)
# Reshape labels to compute adjacency matrix.
labels_reshaped = np.reshape(labels, (labels.shape[0], 1))
# Compute the loss in NP
adjacency = np.equal(labels_reshaped, labels_reshaped.T)
pdist_matrix = pairwise_distance_np(embedding)
loss_np = 0.0
num_constraints = 0.0
for i in range(num_data):
for j in range(num_data):
if adjacency[i][j] > 0.0 and i != j:
d_pos = pdist_matrix[i][j]
negs = []
for k in range(num_data):
if not adjacency[i][k]:
negs.append(margin - pdist_matrix[i][k])
for l in range(num_data):
if not adjacency[j][l]:
negs.append(margin - pdist_matrix[j][l])
negs = np.array(negs)
max_elem = np.max(negs)
negs -= max_elem
negs = np.exp(negs)
soft_maximum = np.log(np.sum(negs)) + max_elem
num_constraints += 1.0
this_loss = max(soft_maximum + d_pos, 0)
loss_np += this_loss * this_loss
loss_np = loss_np / num_constraints / 2.0
# Compute the loss in TF
loss_tf = metric_loss_ops.lifted_struct_loss(
labels=ops.convert_to_tensor(labels),
embeddings=ops.convert_to_tensor(embedding),
margin=margin)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
def convert_to_list_of_sparse_tensor(np_matrix):
list_of_sparse_tensors = []
nrows, ncols = np_matrix.shape
for i in range(nrows):
sp_indices = []
for j in range(ncols):
if np_matrix[i][j] == 1:
sp_indices.append([j])
num_non_zeros = len(sp_indices)
list_of_sparse_tensors.append(sparse_tensor.SparseTensor(
indices=np.array(sp_indices),
values=np.ones((num_non_zeros,)),
dense_shape=np.array([ncols,])))
return list_of_sparse_tensors
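# Illustration (added): for np_matrix = [[1, 0], [0, 1]] this helper returns two
# SparseTensors, one with indices [[0]] and one with indices [[1]], each with
# values [1.] and dense_shape [2].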
class NpairsLossTest(test.TestCase):
def testNpairs(self):
with self.test_session():
num_data = 15
feat_dim = 6
num_classes = 5
reg_lambda = 0.02
embeddings_anchor = np.random.rand(num_data, feat_dim).astype(np.float32)
embeddings_positive = np.random.rand(num_data, feat_dim).astype(
np.float32)
labels = np.random.randint(
0, num_classes, size=(num_data)).astype(np.float32)
# Reshape labels to compute adjacency matrix.
labels_reshaped = np.reshape(labels, (labels.shape[0], 1))
# Compute the loss in NP
reg_term = np.mean(np.sum(np.square(embeddings_anchor), 1))
reg_term += np.mean(np.sum(np.square(embeddings_positive), 1))
reg_term *= 0.25 * reg_lambda
similarity_matrix = np.matmul(embeddings_anchor, embeddings_positive.T)
labels_remapped = np.equal(
labels_reshaped, labels_reshaped.T).astype(np.float32)
labels_remapped /= np.sum(labels_remapped, axis=1, keepdims=True)
xent_loss = math_ops.reduce_mean(nn.softmax_cross_entropy_with_logits(
logits=ops.convert_to_tensor(similarity_matrix),
labels=ops.convert_to_tensor(labels_remapped))).eval()
loss_np = xent_loss + reg_term
# Compute the loss in TF
loss_tf = metric_loss_ops.npairs_loss(
labels=ops.convert_to_tensor(labels),
embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
embeddings_positive=ops.convert_to_tensor(embeddings_positive),
reg_lambda=reg_lambda)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
class NpairsLossMultiLabelTest(test.TestCase):
def testNpairsMultiLabelLossWithSingleLabelEqualsNpairsLoss(self):
with self.test_session():
num_data = 15
feat_dim = 6
reg_lambda = 0.02
embeddings_anchor = np.random.rand(num_data, feat_dim).astype(np.float32)
embeddings_positive = np.random.rand(num_data, feat_dim).astype(
np.float32)
labels = np.arange(num_data)
labels = np.reshape(labels, -1)
# Compute vanila npairs loss.
loss_npairs = metric_loss_ops.npairs_loss(
labels=ops.convert_to_tensor(labels),
embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
embeddings_positive=ops.convert_to_tensor(embeddings_positive),
reg_lambda=reg_lambda).eval()
# Compute npairs multilabel loss.
labels_one_hot = np.identity(num_data)
loss_npairs_multilabel = metric_loss_ops.npairs_loss_multilabel(
sparse_labels=convert_to_list_of_sparse_tensor(labels_one_hot),
embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
embeddings_positive=ops.convert_to_tensor(embeddings_positive),
reg_lambda=reg_lambda).eval()
self.assertAllClose(loss_npairs, loss_npairs_multilabel)
def testNpairsMultiLabel(self):
with self.test_session():
num_data = 15
feat_dim = 6
num_classes = 10
reg_lambda = 0.02
embeddings_anchor = np.random.rand(num_data, feat_dim).astype(np.float32)
embeddings_positive = np.random.rand(num_data, feat_dim).astype(
np.float32)
labels = np.random.randint(0, 2, (num_data, num_classes))
      # Set the entire last column to one so that each row has at least one bit set.
labels[:, -1] = 1
# Compute the loss in NP
reg_term = np.mean(np.sum(np.square(embeddings_anchor), 1))
reg_term += np.mean(np.sum(np.square(embeddings_positive), 1))
reg_term *= 0.25 * reg_lambda
similarity_matrix = np.matmul(embeddings_anchor, embeddings_positive.T)
labels_remapped = np.dot(labels, labels.T).astype(np.float)
labels_remapped /= np.sum(labels_remapped, 1, keepdims=True)
xent_loss = math_ops.reduce_mean(nn.softmax_cross_entropy_with_logits(
logits=ops.convert_to_tensor(similarity_matrix),
labels=ops.convert_to_tensor(labels_remapped))).eval()
loss_np = xent_loss + reg_term
# Compute the loss in TF
loss_tf = metric_loss_ops.npairs_loss_multilabel(
sparse_labels=convert_to_list_of_sparse_tensor(labels),
embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
embeddings_positive=ops.convert_to_tensor(embeddings_positive),
reg_lambda=reg_lambda)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
def compute_ground_truth_cluster_score(feat, y):
y_unique = np.unique(y)
score_gt_np = 0.0
for c in y_unique:
feat_subset = feat[y == c, :]
pdist_subset = pairwise_distance_np(feat_subset)
score_gt_np += -1.0 * np.min(np.sum(pdist_subset, axis=0))
score_gt_np = score_gt_np.astype(np.float32)
return score_gt_np
def compute_cluster_loss_numpy(feat,
y,
margin_multiplier=1.0,
enable_pam_finetuning=True):
if enable_pam_finetuning:
facility = ForwardGreedyFacility(
n_clusters=np.unique(y).size).pam_augmented_fit(feat, y,
margin_multiplier)
else:
facility = ForwardGreedyFacility(
n_clusters=np.unique(y).size).loss_augmented_fit(feat, y,
margin_multiplier)
score_augmented = facility.score_aug_
score_gt = compute_ground_truth_cluster_score(feat, y)
return np.maximum(np.float32(0.0), score_augmented - score_gt)
class ForwardGreedyFacility(object):
def __init__(self, n_clusters=8):
self.n_clusters = n_clusters
self.center_ics_ = None
def _check_init_args(self):
# Check n_clusters.
if (self.n_clusters is None or self.n_clusters <= 0 or
not isinstance(self.n_clusters, int)):
raise ValueError('n_clusters has to be nonnegative integer.')
def loss_augmented_fit(self, feat, y, loss_mult):
"""Fit K-Medoids to the provided data."""
self._check_init_args()
# Check that the array is good and attempt to convert it to
# Numpy array if possible.
feat = self._check_array(feat)
# Apply distance metric to get the distance matrix.
pdists = pairwise_distance_np(feat)
num_data = feat.shape[0]
candidate_ids = list(range(num_data))
candidate_scores = np.zeros(num_data,)
subset = []
k = 0
while k < self.n_clusters:
candidate_scores = []
for i in candidate_ids:
# push i to subset.
subset.append(i)
marginal_cost = -1.0 * np.sum(np.min(pdists[:, subset], axis=1))
loss = 1.0 - metrics.normalized_mutual_info_score(
y, self._get_cluster_ics(pdists, subset))
candidate_scores.append(marginal_cost + loss_mult * loss)
# remove i from subset.
subset.pop()
# push i_star to subset.
i_star = candidate_ids[np.argmax(candidate_scores)]
subset.append(i_star)
# remove i_star from candidate indices.
candidate_ids.remove(i_star)
k += 1
# Expose labels_ which are the assignments of
# the training data to clusters.
self.labels_ = self._get_cluster_ics(pdists, subset)
# Expose cluster centers, i.e. medoids.
self.cluster_centers_ = feat.take(subset, axis=0)
# Expose indices of chosen cluster centers.
self.center_ics_ = subset
# Expose the score = -\sum_{i \in V} min_{j \in S} || x_i - x_j ||
self.score_ = np.float32(-1.0) * self._get_facility_distance(pdists, subset)
self.score_aug_ = self.score_ + loss_mult * (
1.0 - metrics.normalized_mutual_info_score(
y, self._get_cluster_ics(pdists, subset)))
self.score_aug_ = self.score_aug_.astype(np.float32)
# Expose the chosen cluster indices.
self.subset_ = subset
return self
def _augmented_update_medoid_ics_in_place(self, pdists, y_gt, cluster_ics,
medoid_ics, loss_mult):
for cluster_idx in range(self.n_clusters):
# y_pred = self._get_cluster_ics(D, medoid_ics)
# Don't prematurely do the assignment step.
# Do this after we've updated all cluster medoids.
y_pred = cluster_ics
if sum(y_pred == cluster_idx) == 0:
# Cluster is empty.
continue
curr_score = (
-1.0 * np.sum(
pdists[medoid_ics[cluster_idx], y_pred == cluster_idx]) +
loss_mult * (1.0 - metrics.normalized_mutual_info_score(
y_gt, y_pred)))
pdist_in = pdists[y_pred == cluster_idx, :]
pdist_in = pdist_in[:, y_pred == cluster_idx]
all_scores_fac = np.sum(-1.0 * pdist_in, axis=1)
all_scores_loss = []
for i in range(y_pred.size):
if y_pred[i] != cluster_idx:
continue
# remove this cluster's current centroid
medoid_ics_i = medoid_ics[:cluster_idx] + medoid_ics[cluster_idx + 1:]
# add this new candidate to the centroid list
medoid_ics_i += [i]
y_pred_i = self._get_cluster_ics(pdists, medoid_ics_i)
all_scores_loss.append(loss_mult * (
1.0 - metrics.normalized_mutual_info_score(y_gt, y_pred_i)))
all_scores = all_scores_fac + all_scores_loss
max_score_idx = np.argmax(all_scores)
max_score = all_scores[max_score_idx]
if max_score > curr_score:
medoid_ics[cluster_idx] = np.where(
y_pred == cluster_idx)[0][max_score_idx]
def pam_augmented_fit(self, feat, y, loss_mult):
pam_max_iter = 5
self._check_init_args()
feat = self._check_array(feat)
pdists = pairwise_distance_np(feat)
self.loss_augmented_fit(feat, y, loss_mult)
print('PAM -1 (before PAM): score: %f, score_aug: %f' % (
self.score_, self.score_aug_))
# Initialize from loss augmented facility location
subset = self.center_ics_
for iter_ in range(pam_max_iter):
# update the cluster assignment
cluster_ics = self._get_cluster_ics(pdists, subset)
# update the medoid for each clusters
self._augmented_update_medoid_ics_in_place(pdists, y, cluster_ics, subset,
loss_mult)
self.score_ = np.float32(-1.0) * self._get_facility_distance(
pdists, subset)
self.score_aug_ = self.score_ + loss_mult * (
1.0 - metrics.normalized_mutual_info_score(
y, self._get_cluster_ics(pdists, subset)))
self.score_aug_ = self.score_aug_.astype(np.float32)
print('PAM iter: %d, score: %f, score_aug: %f' % (iter_, self.score_,
self.score_aug_))
self.center_ics_ = subset
self.labels_ = cluster_ics
return self
def _check_array(self, feat):
# Check that the number of clusters is less than or equal to
# the number of samples
if self.n_clusters > feat.shape[0]:
      raise ValueError('The number of medoids ' + '({}) '.format(
          self.n_clusters) + 'must not be larger than the number ' +
                       'of samples ({})'.format(feat.shape[0]))
return feat
def _get_cluster_ics(self, pdists, subset):
"""Returns cluster indices for pdist and current medoid indices."""
# Assign data points to clusters based on
# which cluster assignment yields
    # the smallest distance.
cluster_ics = np.argmin(pdists[subset, :], axis=0)
return cluster_ics
def _get_facility_distance(self, pdists, subset):
return np.sum(np.min(pdists[subset, :], axis=0))
class ClusterLossTest(test.TestCase):
def _genClusters(self, n_samples, n_clusters):
blobs = datasets.make_blobs(
n_samples=n_samples, centers=n_clusters)
embedding, labels = blobs
embedding = (embedding - embedding.mean(axis=0)) / embedding.std(axis=0)
embedding = embedding.astype(np.float32)
return embedding, labels
def testClusteringLossPAMOff(self):
if not HAS_SKLEARN:
return
with self.test_session():
margin_multiplier = 10.0
embeddings, labels = self._genClusters(n_samples=128, n_clusters=64)
loss_np = compute_cluster_loss_numpy(
embeddings, labels, margin_multiplier, enable_pam_finetuning=False)
loss_tf = metric_loss_ops.cluster_loss(
labels=ops.convert_to_tensor(labels),
embeddings=ops.convert_to_tensor(embeddings),
margin_multiplier=margin_multiplier,
enable_pam_finetuning=False)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
def testClusteringLossPAMOn(self):
if not HAS_SKLEARN:
return
with self.test_session():
margin_multiplier = 10.0
embeddings, labels = self._genClusters(n_samples=128, n_clusters=64)
loss_np = compute_cluster_loss_numpy(
embeddings, labels, margin_multiplier, enable_pam_finetuning=True)
loss_tf = metric_loss_ops.cluster_loss(
labels=ops.convert_to_tensor(labels),
embeddings=ops.convert_to_tensor(embeddings),
margin_multiplier=margin_multiplier,
enable_pam_finetuning=True)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
if __name__ == '__main__':
test.main()
| apache-2.0 |
gweidner/incubator-systemml | src/main/python/tests/test_mllearn_df.py | 12 | 5320 | #!/usr/bin/python
#-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
# To run:
# - Python 2: `PYSPARK_PYTHON=python2 spark-submit --master local[*] --driver-class-path SystemML.jar test_mllearn_df.py`
# - Python 3: `PYSPARK_PYTHON=python3 spark-submit --master local[*] --driver-class-path SystemML.jar test_mllearn_df.py`
# Make the `systemml` package importable
import os
import sys
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../")
sys.path.insert(0, path)
import unittest
import numpy as np
from pyspark.ml import Pipeline
from pyspark.ml.feature import HashingTF, Tokenizer
from pyspark.sql import SparkSession
from sklearn import datasets, metrics, neighbors
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn import linear_model
from sklearn.metrics import accuracy_score, r2_score
from systemml.mllearn import LinearRegression, LogisticRegression, NaiveBayes, SVM
sparkSession = SparkSession.builder.getOrCreate()
# Currently not integrated with JUnit test
# ~/spark-1.6.1-scala-2.11/bin/spark-submit --master local[*] --driver-class-path SystemML.jar test.py
class TestMLLearn(unittest.TestCase):
def test_logistic_sk2(self):
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
n_samples = len(X_digits)
X_train = X_digits[:int(.9 * n_samples)]
y_train = y_digits[:int(.9 * n_samples)]
X_test = X_digits[int(.9 * n_samples):]
y_test = y_digits[int(.9 * n_samples):]
# Convert to DataFrame for i/o: current way to transfer data
logistic = LogisticRegression(sparkSession, transferUsingDF=True)
logistic.fit(X_train, y_train)
mllearn_predicted = logistic.predict(X_test)
sklearn_logistic = linear_model.LogisticRegression()
sklearn_logistic.fit(X_train, y_train)
self.failUnless(accuracy_score(sklearn_logistic.predict(X_test), mllearn_predicted) > 0.95) # We are comparable to a similar algorithm in scikit learn
def test_linear_regression(self):
diabetes = datasets.load_diabetes()
diabetes_X = diabetes.data[:, np.newaxis, 2]
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
regr = LinearRegression(sparkSession, solver='direct-solve', transferUsingDF=True)
regr.fit(diabetes_X_train, diabetes_y_train)
mllearn_predicted = regr.predict(diabetes_X_test)
sklearn_regr = linear_model.LinearRegression()
sklearn_regr.fit(diabetes_X_train, diabetes_y_train)
self.failUnless(r2_score(sklearn_regr.predict(diabetes_X_test), mllearn_predicted) > 0.95) # We are comparable to a similar algorithm in scikit learn
def test_linear_regression_cg(self):
diabetes = datasets.load_diabetes()
diabetes_X = diabetes.data[:, np.newaxis, 2]
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
regr = LinearRegression(sparkSession, solver='newton-cg', transferUsingDF=True)
regr.fit(diabetes_X_train, diabetes_y_train)
mllearn_predicted = regr.predict(diabetes_X_test)
sklearn_regr = linear_model.LinearRegression()
sklearn_regr.fit(diabetes_X_train, diabetes_y_train)
self.failUnless(r2_score(sklearn_regr.predict(diabetes_X_test), mllearn_predicted) > 0.95) # We are comparable to a similar algorithm in scikit learn
def test_svm_sk2(self):
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
n_samples = len(X_digits)
X_train = X_digits[:int(.9 * n_samples)]
y_train = y_digits[:int(.9 * n_samples)]
X_test = X_digits[int(.9 * n_samples):]
y_test = y_digits[int(.9 * n_samples):]
svm = SVM(sparkSession, is_multi_class=True, transferUsingDF=True)
mllearn_predicted = svm.fit(X_train, y_train).predict(X_test)
from sklearn import linear_model, svm
clf = svm.LinearSVC()
sklearn_predicted = clf.fit(X_train, y_train).predict(X_test)
self.failUnless(accuracy_score(sklearn_predicted, mllearn_predicted) > 0.95 )
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
BigUtrecht/BigUtrecht | analysis/distribution.py | 1 | 1598 | import matplotlib.pyplot as plt
from constants.spark import Session
from etl import parquet
def createDistributionGraph():
"""
Distribution analysis function
Calculates the distribution of cyclists over the different measurement points
Shows the results in a bar plot
:return: None
"""
with Session() as spark:
flow = parquet.readResults(spark, "flow")
flow.registerTempTable("flow")
locatie = parquet.readLocatie(spark)
locatie.registerTempTable("locatie")
meetpuntcodes = [str(i.MeetpuntCode) for i in
spark.sql("SELECT MeetpuntCode FROM locatie GROUP BY MeetpuntCode").collect()]
        # Note: map() is lazy in Python 3, so build the column-name mapping explicitly
        # rather than relying on map() for its side effects.
        meetpuntcolumns = {code: "flow_%s" % code for code in meetpuntcodes}
avgflow = spark.sql("SELECT Tijd, MeetpuntCode, avg(Flow) Flow "
"FROM flow GROUP BY Tijd, MeetpuntCode "
"ORDER BY max(Timestamp)").toPandas()
groups = avgflow.groupby("MeetpuntCode")
        grouplist = {}
        for code in meetpuntcodes:
            grouplist[code] = groups.get_group(code).rename(
                index=str, columns={"Flow": meetpuntcolumns[code]})
tijden = spark.sql("SELECT Tijd FROM flow GROUP BY Tijd").toPandas()
for code in meetpuntcodes:
tijden = tijden.join(grouplist[code], on="Tijd", rsuffix="_%s" % code)
        tijden.plot(x="Tijd", y=list(meetpuntcolumns.values()), kind="bar", stacked=False)
plt.show()
if __name__ == '__main__':
createDistributionGraph()
| gpl-3.0 |
fossdevil/Assignments | Machine Learning/Assignment4/Assignment4.py | 1 | 4267 | from svmutil import *
import matplotlib.pyplot as plt
import numpy as np
# remove 'q' from variable 'string' if you wish to see the details regarding iterations, etc.
'''
PART I : Two Features
'''
# TwoFeatures
def twoFeature():
ytf,xtf = svm_read_problem('twofeature.txt');
x1 = [];
x2 = [];
colors = [];
for i in range(0,len(xtf)):
x1.append(xtf[i][1]);
x2.append(xtf[i][2]);
if (ytf[i] == -1.0):
colors.append('g');
else:
colors.append('r');
#Trying with various values of C
cvalues = [1,5,10,20,50,100];
for i in range(0,len(cvalues)):
# plotting points
plt.scatter(x1,x2,c=colors);
string = '-s 0 -t 0 -q -c '+ str(cvalues[i]); #remove -q for non-quiet mode
param = svm_parameter(string);
prob = svm_problem(ytf,xtf);
model = svm_train(prob,param);
plotTwoF(model,x1,x2,cvalues[i]);
plt.show();
# Plotting graphs for various C values
# 6 graphs will be opened for different C values.
def plotTwoF(model,x1,x2,c):
svc = model.get_sv_coef();
sv = model.get_SV();
x = [];
y = [];
alpha = []
for i in range(0,len(sv)):
x.append(sv[i][1]);
y.append(sv[i][2]);
alpha.append(svc[i][0]);
w1 = np.dot(x,alpha);
w2 = np.dot(y,alpha);
# w values
w = [w1,w2];
maxneg = -100;
minpos = 10000000;
for i in range(0,len(svc)):
if(svc[i][0] < 0):
if(maxneg < np.dot(w,[x[i],y[i]])):
maxneg = np.dot(w,[x[i],y[i]]);
else:
if(minpos > np.dot(w,[x[i],y[i]])):
minpos = np.dot(w,[x[i],y[i]]);
# b value
b = -1 * (maxneg + minpos)/2.0;
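    # Note (added comment): libsvm's get_sv_coef() returns alpha_i*y_i, so w above is
    # w = sum_i (alpha_i*y_i)*x_i, and b places the decision boundary halfway between
    # the closest negative and positive support vectors along w,
    # i.e. b = -(max_{y=-1} w.x + min_{y=+1} w.x)/2.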
pltx = []
plty = []
minx = min(x1);
maxx = max(x1);
yval = 0;diff = (maxx-minx)/10.0;
for i in range(0,11):
pltx.append(minx + diff*i);
yval = (-1*(w1*(minx+diff*i) + b-1))/w2;
plty.append(yval);
# line plot
plt.plot(pltx,plty);
plt.title("C value = " + str(c));
plt.xlabel("x1");
plt.ylabel("x2");
'''
PART II: Email Classification
'''
# Email Classification
def email_class():
y50,x50 = svm_read_problem('email_train-50.txt');
y100,x100 = svm_read_problem('email_train-100.txt');
y400,x400 = svm_read_problem('email_train-400.txt');
yall,xall = svm_read_problem('email_train-all.txt');
# c = 1
model50 = svm_train(y50,x50, '-s 0 -t 0 -q -c 1');
model100 = svm_train(y100,x100,'-s 0 -t 0 -q -c 1');
model400 = svm_train(y400,x400,'-s 0 -t 0 -q -c 1');
modelall = svm_train(yall,xall,'-s 0 -t 0 -q -c 1');
y,x = svm_read_problem('email_test.txt');
p_labs50, p_acc50, p_vals50 = svm_predict(y, x, model50);
p_labs100, p_acc100, p_vals100 = svm_predict(y, x, model100);
p_labs400, p_acc400, p_vals400 = svm_predict(y, x, model400);
p_labsall, p_accall, p_valsall = svm_predict(y, x, modelall);
return p_acc50[0], p_acc100[0], p_acc400[0], p_accall[0];
# Print solution to file
def printAccuracy(p1,p2,p3,p4):
print("The solution can be seen above. If you wish, you may open Solution.txt for solution");
filename = "Solution.txt";
target = open(filename,'w');
target.write("The following is the accuracy on the test data for the various training data: \n");
target.write("For 50 documents, Accuracy = " + str(p1) + "%\n");
target.write("For 100 documents, Accuracy = " + str(p2) + "%\n");
target.write("For 400 documents, Accuracy = " + str(p3) + "%\n");
target.write("For all documents, Accuracy = " + str(p4) + "%\n");
target.write("Please note that there was no previous assignment on Naive Bayes classification so printed just accuracy through SVM on test data");
target.close();
def main():
print("Two features:");
print("6 graphs will be opened for various values of C(mentioned in the title). Please check and close each graph.\n");
twoFeature();
print("Email Classification(Linear model):"); # to change model change the -t value in svm_train in email_class. 1 for polynomial, 2 for radial, etc.
p1,p2,p3,p4 = email_class();
printAccuracy(p1,p2,p3,p4);
if __name__ == "__main__":
main();
| mit |
nukui-s/sscomdetection | generate_ge.py | 1 | 1096 | import igraph
import os
import numpy as np
import pandas as pd
from visualize import plot_degree_dist
def generate_gn_graph(n_node, n_com, p_in, p_out):
labels = np.zeros(n_node).astype(int)
slice_ = n_node // n_com
    for c in range(n_com):
        start = c * slice_
        stop = (c + 1) * slice_
        labels[start:stop] = c
labels = list(labels)
b_sizes = [labels.count(c) for c in range(n_com)]
pref = np.zeros(shape=[n_com, n_com]) + p_out
for c in range(n_com):
pref[c,c] = p_in
pref = pref.tolist()
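    # For example (illustrative), n_com=3, p_in=0.1, p_out=0.001 yields the block
    # preference matrix [[0.1, 0.001, 0.001], [0.001, 0.1, 0.001], [0.001, 0.001, 0.1]].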
graph = igraph.GraphBase.SBM(n_node, pref, b_sizes)
elist = graph.get_edgelist()
return elist, labels
if __name__=='__main__':
n = 90
c = 3
p_in = 0.1
p_out = 0.001
name = "gn_%s_%s"%(str(n),str(c))
elist_path = os.path.join("data/", name + "_edge.pkl")
label_path = os.path.join("data/", name + "_label.pkl")
elist, labels = generate_gn_graph(n,c,p_in,p_out)
pd.to_pickle(elist, elist_path)
pd.to_pickle(labels, label_path)
print(len(elist))
plot_degree_dist(elist)
| apache-2.0 |
quadflor/Quadflor | Code/lucid_ml/classifying/kneighbour_l2r_classifier.py | 1 | 14671 | from sklearn.neighbors import NearestNeighbors, LSHForest
from classifying.batch_kneighbors import BatchKNeighbors
import numpy as np
from sklearn.base import BaseEstimator
import subprocess
from scipy.sparse.csr import csr_matrix
from nltk.translate import IBMModel1
from nltk.translate import AlignedSent
import sys
import scipy.sparse as sps
from sklearn.externals.joblib import Parallel, delayed
class KNeighborsL2RClassifier(BaseEstimator):
"""
This classifier first examines the labels that appear in the n-nearest documents and then generates
features for each of the labels. Given these features, a list-wise ranking algorithm then ranks the labels.
Finally, the k highest ranked labels are assigned as output.
The features that are generated are the following:
- the sum of the cosine similarities to all the documents in the n-neighborhood that the label is actually assigned to
- the total count of appearances of the label in the document
    - the probability that the label translates to the document's title according to the IBM1 translation model
- the count of how often the label directly appears in the title
    This algorithm uses the implementation from [RankLib] in the background. Therefore, it assumes a Java
    installation to be available and the RankLib-2.5.jar file to be in the same folder as this file.
The whole procedure's main idea was taken from [MLA]. However, as some of their techniques cannot be applied
in the same way to our application, some of the implementation details differ.
Note that, as opposed to the original paper, we need to decide which labels from the ranking to assign to the
document. In our approach, we take the top k elements from each list where k equals the average number of labels
assigned to a document in the training corpus rounded to an integer.
References
----------
[MLA] Huang, Minlie, Aurélie Névéol, and Zhiyong Lu.
"Recommending MeSH terms for annotating biomedical articles."
Journal of the American Medical Informatics Association 18.5 (2011): 660-667.
[RankLib] https://sourceforge.net/p/lemur/wiki/RankLib/
Parameters
----------
n_jobs: int, default=1
Number of jobs to use for extracting the training / test sets for l2r.
n_neighbors: int, default=20
The size of the neighborhood, from which labels are considered as input to the ranking algorithm
max_iterations: int, default=300
Given as input to the RankLib library. Determines the number of iterations to train the ranking classifier with.
count_concepts: boolean, default=False
Must indicate whether the feature vector of a document contains concepts or not.
    number_of_concepts: int, default=0
Must indicate the number of concepts contained in the document's feature vector.
    count_terms: boolean, default=True
Must indicate whether the feature vector of a document contains terms or not.
    training_validation_split: float, default = 0.8
Determines how much of the training data passed to fit() is used for training. Rest is used for validation.
algorithm_id: string, default = '7'
Specifies the id of the (list-wise) L2R algorithm to apply. Must be either '3' for AdaRank, '4' for Coordinate Ascent, '6' for LambdaMART, or '7' for ListNET.
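    Example (an added sketch with hypothetical variable names; it assumes a Java
    runtime and RankLib-2.5.jar in the working directory, as described above)::
        clf = KNeighborsL2RClassifier(n_neighbors=10, count_terms=True)
        clf.fit(X_train, y_train)       # X_train: feature matrix, y_train: binary label matrix
        y_pred = clf.predict(X_test)    # sparse matrix with the top-k ranked labels set to 1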
"""
def __init__(self, use_lsh_forest=False, n_neighbors=20, max_iterations = 300, count_concepts = False, number_of_concepts = 0,
count_terms = False, training_validation_split = 0.8, algorithm_id = '7', l2r_metric = "ERR@k", n_jobs = 1, translation_probability = False, **kwargs ):
self.n_neighbors = n_neighbors
nn = LSHForest(n_neighbors=n_neighbors, **kwargs) if use_lsh_forest else NearestNeighbors(
n_neighbors=n_neighbors, **kwargs)
self.knn = BatchKNeighbors(nn)
self.y = None
self.max_iterations = max_iterations
self.count_concepts = count_concepts
self.count_terms = count_terms
self.number_of_concepts = number_of_concepts
self.training_validation_split = training_validation_split
self.algorithm_id = algorithm_id
self.l2r_metric = l2r_metric
self.n_jobs = n_jobs
self.translation_probability = translation_probability
def fit(self, X, y):
self.y = y
self.knn.fit(X)
if sps.issparse(y):
average_labels = int(np.round(np.mean(y.sum(axis = 1)), 0))
else:
average_labels = int(np.round(np.mean(np.sum(y, axis = 1)), 0))
self.topk = average_labels
        # The neighbors contain the data point itself, so we search for one more neighbor and remove the data
        # point itself, which we may assume to be the nearest neighbor. If some other data point were at the same
        # position but had different labels, this would affect the training negatively anyway.
distances_to_neighbors,neighbor_id_lists = self.knn.kneighbors(X, return_distance=True, n_neighbors=self.n_neighbors + 1)
distances_to_neighbors = distances_to_neighbors[:, 1:]
neighbor_id_lists = neighbor_id_lists[:, 1:]
if self.translation_probability:
self._train_translation_model(X,y)
# create features for each label in a documents neighborhood and write them in a file
self._extract_and_write(X, neighbor_id_lists, distances_to_neighbors, y = y)
# use the RankLib library algorithm to create a ranking model
subprocess.call(["java", "-jar", "RankLib-2.5.jar", "-train", "l2r_train", "-tvs", str(self.training_validation_split),
"-save", "l2r_model", "-ranker" , self.algorithm_id, "-metric2t", self.l2r_metric, "-epoch", str(self.max_iterations),
"-norm", "zscore"])
def _train_translation_model(self, X, y):
translations = []
for row in range(0, X.shape[0]):
title = _recompose_title(X, row)
for label in _get_labels_of_row(row, y):
translations.append(AlignedSent(title, [label]))
ibm1 = IBMModel1(translations, 5)
self.ibm1 = ibm1
def _extract_and_write(self, X, neighbor_id_lists, distances_to_neighbors, fileName = "l2r_train", y = None):
labels_in_neighborhood = Parallel(n_jobs=self.n_jobs)(
delayed(_create_training_samples)(cur_doc, neighbor_list, X, y, cur_doc + 1, distances_to_neighbors,
self.count_concepts, self.count_terms, self.number_of_concepts,
self.ibm1 if self.n_jobs == 1 and self.translation_probability else None) for cur_doc, neighbor_list in enumerate(neighbor_id_lists))
doc_to_neighborhood_dict = self._merge_dicts(labels_in_neighborhood)
filenames = ["samples_" + str(qid + 1) + ".tmp" for qid in range(len(doc_to_neighborhood_dict))]
with open(fileName, 'w') as outfile:
for fname in filenames:
with open(fname) as infile:
for line in infile:
outfile.write(line)
outfile.write('\n')
return doc_to_neighborhood_dict
def _merge_dicts(self, labels_in_neighborhood):
neighborhood_dict = {}
for i, labels in enumerate(labels_in_neighborhood):
neighborhood_dict[str(i + 1)] = labels
return neighborhood_dict
def _intersection_percent(self, all_labels, curr_labels):
return len(all_labels.intersection(curr_labels)) / len(curr_labels)
def _predict_scores(self, X):
distances, neighbor_id_lists = self.knn.kneighbors(X, n_neighbors=self.n_neighbors, return_distance=True)
doc_to_neighborhood_dict = self._extract_and_write(X, neighbor_id_lists, distances, fileName="l2r_test", y = self.y)
# use the created ranking model to get a ranking
subprocess.call(["java", "-jar", "RankLib-2.5.jar", "-rank", "l2r_test", "-score", "test_scores", "-load", "l2r_model", "-norm", "zscore"])
scoresFile = open("test_scores", "r")
results = {}
for line in scoresFile:
qid, score = self._extract_score(line)
if str(qid) in results:
labels = results[str(qid)]
labels.append(score)
results[str(qid)] = labels
else:
results[str(qid)] = [score]
return self._extract_topk_score(doc_to_neighborhood_dict, results)
def predict_proba(self, X):
""" we set the probability of all labels we do not observe in the neighborhood at all to 0
(or rather to the smallest float value possible to be safe, because we don't normalize).
Otherwise, the probability of a label corresponds to the score assigned by the ranking algorithm.
"""
top_k_save = self.topk
self.topk = self.y.shape[1]
doc_to_neighborhood_dict = self._predict_scores(X)
self.topk = top_k_save
probabilities = np.zeros((X.shape[0], self.y.shape[1]))
probabilities.fill(sys.float_info.min)
for i in range(0,X.shape[0]):
for label, score in doc_to_neighborhood_dict[str(i + 1)]:
probabilities[i, label] = score
return probabilities
def predict(self, X):
predictions = csr_matrix((X.shape[0], self.y.shape[1]))
doc_to_neighborhood_dict = self._predict_scores(X)
for i in range(0,X.shape[0]):
for label, _ in doc_to_neighborhood_dict[str(i + 1)]:
predictions[i, label] = 1
return predictions
def _extract_score(self, line):
entries = line.split()
qid = entries[0]
score = entries[2]
return (qid, score)
def _extract_topk_score(self, doc_to_neighborhood_dict, results):
for key, value in results.items():
top_indices = self._get_top_indices(value)
top_values = self._get_top_values(value)
labels = doc_to_neighborhood_dict[key]
toplabels = [(labels[index], top_value) for index, top_value in zip(top_indices, top_values)]
doc_to_neighborhood_dict[key] = toplabels
return doc_to_neighborhood_dict
def _get_top_indices(self, array):
return self._maxEntries(array, self.topk)
def _get_top_values(self, array):
return self._maxValues(array, self.topk)
# this one was stolen here: http://stackoverflow.com/questions/12787650/finding-the-index-of-n-biggest-elements-in-python-array-list-efficiently
def _maxEntries(self,a,N):
return np.argsort(a)[::-1][:N]
def _maxValues(self,a,N):
return np.sort(a)[::-1][:N]
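# Illustrative standalone sketch (not part of the original class) of the argsort-based top-N
# selection used by _maxEntries/_maxValues above: np.argsort sorts ascending, so reversing with
# [::-1] and slicing [:N] yields the indices and values of the N largest scores.
def _topk_selection_example():
    scores = np.array([0.1, 0.7, 0.3, 0.9])
    top_idx = np.argsort(scores)[::-1][:2]   # array([3, 1])
    top_val = np.sort(scores)[::-1][:2]      # array([0.9, 0.7])
    return top_idx, top_val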
def _create_training_samples(curr_doc, neighbor_list, X, y, qid, distances_to_neighbors, count_concepts, count_terms, number_of_concepts, ibm1):
l2r_training_samples = open("samples_" + str(qid) + ".tmp", "w", encoding='utf-8')
labels_in_neighborhood = y[neighbor_list]
labels_per_neighbor = []
for i in range(0,labels_in_neighborhood.shape[0] - 1):
cols = _get_labels_of_row(i,labels_in_neighborhood)
labels_per_neighbor.append(cols)
all_labels_in_neighborhood = [label for labels in labels_per_neighbor for label in labels]
all_labels_in_neighborhood = list(set(all_labels_in_neighborhood))
for label in all_labels_in_neighborhood:
l2r_training_samples.write(_generate_line_for_label(label, qid, X, y, curr_doc, labels_per_neighbor, neighbor_list,
distances_to_neighbors[curr_doc], count_concepts, count_terms, number_of_concepts, ibm1))
return all_labels_in_neighborhood
def _get_labels_of_row(i,matrix):
labels_of_single_neighbor = matrix[i]
rows, cols = labels_of_single_neighbor.nonzero()
return cols
# we are generating a line per label according to the notation given in http://www.cs.cornell.edu/People/tj/svm_light/svm_rank.html
def _generate_line_for_label(label, qid, X, y, curr_doc, labels_per_neighbor, neighbor_list, distances_to_neighbor, count_concepts, count_terms, number_of_concepts, ibm1):
# set the target value to 1 (i.e., the label is ranked high), if the label is actually assigned to the current document, 0 otherwise
    if y is not None:
label_in_curr_doc = 1 if label in _get_labels_of_row(curr_doc, y) else 0
else:
label_in_curr_doc = 0
features = _extract_features(label, curr_doc, labels_per_neighbor, neighbor_list, distances_to_neighbor, X, y, count_concepts, count_terms, number_of_concepts, ibm1)
featureString = " ".join([str(i + 1) + ":" + str(val) for i,val in enumerate(features)])
#if y != None:
line_for_label = str(label_in_curr_doc) + " "
#else:
line_for_label = line_for_label + "qid:" + str(qid) + " " + featureString + "\n"
return line_for_label
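# For reference (illustrative sketch, not part of the original module): _generate_line_for_label
# emits the "<target> qid:<query id> <feature index>:<feature value> ..." format expected by
# RankLib / SVM-rank. With target 1, query id 3 and features [2, 0.57] the joining logic yields:
def _l2r_line_format_example():
    features = [2, 0.57]
    feature_string = " ".join([str(i + 1) + ":" + str(val) for i, val in enumerate(features)])
    return "1 " + "qid:3 " + feature_string + "\n"   # "1 qid:3 1:2 2:0.57\n"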
def _extract_features(label, curr_doc, labels_per_neighbor, neighbor_list, distances_to_neighbor, X, y, count_concepts, count_terms, number_of_concepts, ibm1):
sum_similarities = 0
for i, neighbor_list in enumerate(labels_per_neighbor):
if label in neighbor_list:
sum_similarities += distances_to_neighbor[i]
count_in_neighborhood = [some_label for labels in labels_per_neighbor for some_label in labels].count(label)
title = _recompose_title(X, curr_doc)
    if ibm1 is not None:
translation_probability = 0
for word in title:
translation_probability += ibm1.translation_table[word][str(label)]
features = [count_in_neighborhood, sum_similarities, translation_probability]
else:
features = [count_in_neighborhood, sum_similarities]
if count_concepts and not count_terms:
features.append(1 if X[curr_doc,label] > 0 else 0)
elif count_concepts and count_terms:
features.append(1 if X[curr_doc, X.shape[1] - number_of_concepts + label] > 0 else 0)
return features
def _recompose_title(X, row):
title = []
    # nonzero() returns a (rows, cols) tuple; iterate over the column indices, i.e. the word ids
    for word in X[row].nonzero()[1]:
title.append(str(word))
return title
| bsd-3-clause |
RomainSabathe/kaggle_airbnb2015 | Code/lab.py | 1 | 10237 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pickle
import itertools as it
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.cross_validation import train_test_split, KFold
from sklearn.metrics import confusion_matrix
from score import calc_score
from toolbox import transform_to_submission_format, get_feature_importances
from random import choice
import xgboost as xgb
plt.style.use('ggplot')
""" Recovering the data """
print 'Recovering the data'
store = pd.HDFStore('../Data/enhanced_learning_restricted_data.h5')
data = store['data_users']
data = data.fillna(0)
store.close()
# Deleting a part of the data
#base_size = 30000
#data_country_7 = data[data['country_destination'] == 7]
#_, rows_to_delete = train_test_split(data_country_7, train_size=base_size, random_state=1)
#data = data.drop(rows_to_delete.index, axis=0)
#
#data_country_10 = data[data['country_destination'] == 10]
#_, rows_to_delete = train_test_split(data_country_10, train_size=int(base_size/1.5), random_state=1)
#data = data.drop(rows_to_delete.index, axis=0)
#
#data_country_11 = data[data['country_destination'] == 11]
#_, rows_to_delete = train_test_split(data_country_11, train_size=base_size/8, random_state=1)
#data = data.drop(rows_to_delete.index, axis=0)
data_learn, data_test = train_test_split(data, test_size=0.3, random_state=2)
# split the learning portion (not the full data set) so the validation set does not overlap the test split
data_learn, data_valid = train_test_split(data_learn, test_size=0.7, random_state=2)
X = data
X = X.drop('country_destination', axis=1)
y = data['country_destination']
X_learn = data_learn
X_learn = X_learn.drop('country_destination', axis=1)
y_learn = pd.DataFrame(index=X_learn.index)
y_learn['country'] = data_learn['country_destination']
X_valid = data_valid
X_valid = X_valid.drop('country_destination', axis=1)
y_valid = pd.DataFrame(index=X_valid.index)
y_valid['country'] = data_valid['country_destination']
X_test = data_test
X_test = X_test.drop('country_destination', axis=1)
y_test = pd.DataFrame(index=X_test.index)
y_test['country'] = data_test['country_destination']
kf = KFold(len(data), n_folds=3, random_state=1)
""" Learning """
print 'Learning'
""" #### Test: model parameters #### """
#test_name='model_parameters'
#criterion_choices = ['gini', 'entropy']
#n_estimators_choices = range(1, 750)
#min_samples_split_choices = range(10, 5000)
#max_depth_choices = range(2, 50)
#min_samples_leaf_choices = range(10, 5000)
#
#n_experiments = 1000
#
#criterion_exp = []
#n_estimators_exp = []
#min_samples_split_exp = []
#min_samples_leaf_exp = []
#max_depth_exp = []
#scores = []
#
#for n_experiment in range(n_experiments):
# criterion_exp.append(choice(criterion_choices))
# n_estimators_exp.append(choice(n_estimators_choices))
# min_samples_split_exp.append(choice(max_depth_choices))
# min_samples_leaf_exp.append(choice(min_samples_leaf_choices))
# max_depth_exp.append(choice(max_depth_choices))
#
# classif = RandomForestClassifier(n_estimators=n_estimators_exp[-1],
# criterion=criterion_exp[-1],
# random_state=0,
# min_samples_split=min_samples_split_exp[-1],
# max_depth=max_depth_exp[-1],
# min_samples_leaf=min_samples_leaf_exp[-1],
# n_jobs=-1)
#
# classif.fit(X_learn, y_learn)
#
# """ Converting the proba into 5 best guesses """
# proba_countries = classif.predict_proba(X_valid)
# find_5_best_countries = lambda x: x.argsort()[-5:][::-1]
# best_guesses = np.apply_along_axis(find_5_best_countries, 1, proba_countries)
# predictions = pd.DataFrame(best_guesses, index=y_valid.index)
#
# print '--------------------'
# print 'criterion = %s' % criterion_exp[-1]
# print 'min_samples_split = %s' % min_samples_split_exp[-1]
# print 'max_depth = %s' % max_depth_exp[-1]
# print 'min_samples_leaf = %s' % min_samples_leaf_exp[-1]
# scores.append(calc_score(predictions, y_valid))
# print 'Score = %s' % scores[-1]
#
# if n_experiment % 20 == 0 and n_experiment > 0:
# data_score = pd.DataFrame({'Criterion': criterion_exp,
# 'n_estimators': n_estimators_exp,
# 'min_samples_split': min_samples_split_exp,
# 'max_depth': max_depth_exp,
# 'min_samples_leaf': min_samples_leaf_exp,
# 'score': scores})
#
# data_score.to_csv('../Lab/%s.csv' % test_name)
""" #### Test: number of features #### """
#test_name='number_features'
#scores = []
#
#classif_base = RandomForestClassifier(n_estimators=186,
# criterion='entropy',
# random_state=0,
# min_samples_split=30,
# max_depth=16,
# min_samples_leaf=11,
# n_jobs=-1)
#classif_base.fit(X_learn, y_learn)
#
#fi = [(name, value) for (name,value) in zip(X_learn.columns.values.tolist(),
# classif_base.feature_importances_)]
#fi = sorted(fi, key=lambda x: x[1], reverse=True)
#features = [f[0] for f in fi]
#features_to_keep = features[:200]
#
#""" Plotting figure importances """
##fi_ = [x[1] for x in fi]
##plt.bar(range(len(fi_)), fi_)
##print features[:10]
##plt.show()
#
#with open('../Data/features_to_keep.dt', 'w') as f:
# pickle.dump(features_to_keep, f)
#
#for n_features in range(1, len(features)):
# classif = RandomForestClassifier(**classif_base.get_params())
#
# X_learn_ = X_learn[features[:n_features]]
# X_valid_ = X_valid[features[:n_features]]
# classif.fit(X_learn_, y_learn)
#
# """ Converting the proba into 5 best guesses """
# proba_countries = classif.predict_proba(X_valid_)
# find_5_best_countries = lambda x: x.argsort()[-5:][::-1]
# best_guesses = np.apply_along_axis(find_5_best_countries, 1, proba_countries)
# predictions = pd.DataFrame(best_guesses, index=y_valid.index)
#
# print '--------------------'
# print 'n_features = %s' % n_features
# scores.append(calc_score(predictions, y_valid))
# print 'Score = %s' % scores[-1]
#
# if n_features % 5 == 0:
# data_score = pd.DataFrame({'n_features': range(n_features),
# 'score': scores})
#
# data_score.to_csv('../Lab/%s.csv' % test_name)
""" Test: simple test """
#with open('../Data/features_to_keep.dt', 'r') as f:
# features_to_keep = pickle.load(f)
scores = []
#for train,test in kf:
for _ in range(1):
#X_learn, X_valid, y_learn, y_valid = X.iloc[train], X.iloc[test], \
# y.iloc[train], y.iloc[test]
#y_valid = pd.DataFrame({'country': y_valid})
#y_test = pd.DataFrame({'country': y_test})
""" RANDOM FOREST """
classif_base = RandomForestClassifier(n_estimators=300,
criterion='entropy',
random_state=0,
min_samples_split=1000,
max_depth=10,
min_samples_leaf=100,
n_jobs=-1)
classif = RandomForestClassifier(**classif_base.get_params())
""" GRADIENT BOOSTING """
#classif_base = GradientBoostingClassifier(loss='deviance',
# learning_rate=0.25,
# n_estimators=20,
# max_depth=5,
# min_samples_split=50,
# min_samples_leaf=100,
# random_state=0,
# verbose=True)
#classif = GradientBoostingClassifier(**classif_base.get_params())
""" XGBOOST """
xg_train = xgb.DMatrix(X_learn, label=y_learn)
xg_valid = xgb.DMatrix(X_valid, label=y_valid)
xg_test = xgb.DMatrix(X_test, label=y_test)
param = {}
param['objective'] = 'multi:softprob'
param['eta'] = 0.3
param['gamma'] = 5.
param['max_depth'] = 6
    param['learning_rate'] = 0.1  # note: 'learning_rate' is an alias of 'eta' in xgboost, so this duplicates the setting above
param['subsample'] = 1.
param['colsample_bytree'] = 1.
param['min_child_weight'] = 100
param['silent'] = 1
param['nthread'] = 4
param['num_class'] = 12
watchlist = [ (xg_train, 'train'), (xg_valid, 'test') ]
num_boost_round = 20
early_stopping_rounds = 5
classif = xgb.train(param, xg_train, early_stopping_rounds=early_stopping_rounds,
num_boost_round=num_boost_round,
evals=watchlist)
#proba_countries = classif.predict( xg_test, ntree_limit=classif.best_ntree_limit )
#classif.fit(X_learn, y_learn)
""" Converting the proba into 5 best guesses """
#proba_countries = classif.predict_proba(X_valid_)
def score(X, y):
X = xgb.DMatrix(X, label=y)
proba_countries = classif.predict( X, ntree_limit=classif.best_ntree_limit )
#proba_countries = classif.predict_proba(X)
find_5_best_countries = lambda x: x.argsort()[-5:][::-1]
best_guesses = np.apply_along_axis(find_5_best_countries, 1, proba_countries)
predictions = pd.DataFrame(best_guesses, index=y.index)
print calc_score(predictions, y)
score(X_learn, y_learn)
score(X_valid, y_valid)
score(X_test, y_test)
#print np.array(get_feature_importances(classif, X_learn_)[:20])
#import pdb; pdb.set_trace()
#miss_rows = predictions[predictions[0] != y_valid['country']]
#miss_rows = pd.concat([y_valid.loc[miss_rows.index], miss_rows], axis=1)
#confmat = confusion_matrix(miss_rows.iloc[:,0], miss_rows.iloc[:,1])
#miss_rows_710 = miss_rows[(miss_rows['country']==10) & (miss_rows[0]==7)]
#import pdb; pdb.set_trace()
#print '----------------------'
#print 'Mean score = %s' % np.mean(scores)
#print 'Std score = %s' % np.std(scores)
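# Illustrative sketch (assumed toy values, not from the original experiments) of the
# probabilities-to-top-guesses conversion used inside score() above: each row of the class
# probability matrix is turned into the column indices of its largest values, best first.
def _five_best_guesses_example():
    proba = np.array([[0.05, 0.60, 0.10, 0.25],
                      [0.40, 0.10, 0.30, 0.20]])
    find_5_best_countries = lambda x: x.argsort()[-5:][::-1]
    best_guesses = np.apply_along_axis(find_5_best_countries, 1, proba)
    # with only 4 toy classes, all 4 indices come back, ordered by probability:
    # row 0 -> [1, 3, 2, 0], row 1 -> [0, 2, 3, 1]
    return pd.DataFrame(best_guesses)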
| mit |
danking/hail | hail/python/hail/experimental/ldscsim.py | 2 | 41334 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Simulation framework for testing LDSC
Models for SNP effects:
- Infinitesimal (can simulate n correlated traits)
- Spike & slab (can simulate up to 2 correlated traits)
- Annotation-informed
Features:
- Field aggregation tools for annotation-informed model and
population stratification with many covariates.
- Automatic adjustment of genetic correlation parameters
to allow for the joint simulation of up to 100 randomly
correlated phenotypes.
- Methods for binarizing phenotypes to have a certain prevalence
and for adding ascertainment bias to binarized phenotypes.
@author: nbaya
"""
import hail as hl
from hail.typecheck import typecheck, oneof, nullable
from hail.expr.expressions import expr_float64, expr_int32, expr_array, expr_call
from hail.matrixtable import MatrixTable
from hail.table import Table
from hail.utils.java import Env
import numpy as np
import pandas as pd
import scipy.stats as stats
@typecheck(mt=MatrixTable,
genotype=oneof(expr_int32,
expr_float64,
expr_call),
h2=(oneof(float,
int,
list,
np.ndarray)),
pi=nullable(oneof(float,
int,
list,
np.ndarray)),
rg=nullable(oneof(float,
int,
list,
np.ndarray)),
annot=nullable(oneof(expr_float64,
expr_int32)),
popstrat=nullable(oneof(expr_int32,
expr_float64)),
popstrat_var=nullable(oneof(float,
int)),
exact_h2=bool)
def simulate_phenotypes(mt, genotype, h2, pi=None, rg=None, annot=None, popstrat=None,
popstrat_var=None, exact_h2=False):
r"""Simulate phenotypes for testing LD score regression.
Simulates betas (SNP effects) under the infinitesimal, spike & slab, or
annotation-informed models, depending on parameters passed. Optionally adds
population stratification.
Parameters
----------
mt : :class:`.MatrixTable`
:class:`.MatrixTable` containing genotypes to be used. Also should contain
variant annotations as row fields if running the annotation-informed
model or covariates as column fields if adding population stratification.
genotype : :class:`.Expression` or :class:`.CallExpression`
Entry field containing genotypes of individuals to be used for the
simulation.
h2 : :obj:`float` or :obj:`int` or :obj:`list` or :class:`numpy.ndarray`
SNP-based heritability of simulated trait.
pi : :obj:`float` or :obj:`int` or :obj:`list` or :class:`numpy.ndarray`, optional
Probability of SNP being causal when simulating under the spike & slab
model.
rg : :obj:`float` or :obj:`int` or :obj:`list` or :class:`numpy.ndarray`, optional
Genetic correlation between traits.
annot : :class:`.Expression`, optional
Row field to use as our aggregated annotations.
popstrat: :class:`.Expression`, optional
Column field to use as our aggregated covariates for adding population
stratification.
exact_h2: :obj:`bool`, optional
Whether to exactly simulate ratio of variance of genetic component of
phenotype to variance of phenotype to be h2. If `False`, ratio will be
h2 in expectation. Observed h2 in the simulation will be close to
expected h2 for large-scale simulations.
Returns
-------
:class:`.MatrixTable`
:class:`.MatrixTable` with simulated betas and phenotypes, simulated according
to specified model.
"""
h2 = h2.tolist() if type(h2) is np.ndarray else ([h2] if type(h2) is not list else h2)
pi = pi.tolist() if type(pi) is np.ndarray else pi
uid = Env.get_uid(base=100)
mt = annotate_all(mt=mt,
row_exprs={} if annot is None else {'annot_' + uid: annot},
col_exprs={} if popstrat is None else {'popstrat_' + uid: popstrat},
entry_exprs={'gt_' + uid: genotype.n_alt_alleles() if genotype.dtype is hl.dtype('call') else genotype})
mt, pi, rg = make_betas(mt=mt,
h2=h2,
pi=pi,
annot=None if annot is None else mt['annot_' + uid],
rg=rg)
mt = calculate_phenotypes(mt=mt,
genotype=mt['gt_' + uid],
beta=mt['beta'],
h2=h2,
popstrat=None if popstrat is None else mt['popstrat_' + uid],
popstrat_var=popstrat_var,
exact_h2=exact_h2)
mt = annotate_all(mt=mt,
global_exprs={'ldscsim': hl.struct(**{'h2': h2[0] if len(h2) == 1 else h2,
**({} if pi == [None] else {'pi': pi}),
**({} if rg == [None] else {'rg': rg[0] if len(rg) == 1 else rg}),
**({} if annot is None else {'is_annot_inf': True}),
**({} if popstrat is None else {'is_popstrat_inf': True}),
**({} if popstrat_var is None else {'popstrat_var': popstrat_var}),
'exact_h2': exact_h2
})})
mt = _clean_fields(mt, uid)
return mt
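# Minimal usage sketch (not part of the original module; assumes Hail has been initialized and
# uses the Balding-Nichols simulator purely as a stand-in genotype source):
def _example_simulate_phenotypes():
    mt = hl.balding_nichols_model(n_populations=1, n_samples=100, n_variants=1000)
    # one trait under the infinitesimal model with h2 = 0.5
    sim = simulate_phenotypes(mt, genotype=mt.GT, h2=0.5)
    # two correlated traits with h2 of 0.3 and 0.4 and genetic correlation 0.2
    sim2 = simulate_phenotypes(mt, genotype=mt.GT, h2=[0.3, 0.4], rg=[0.2])
    return sim, sim2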
@typecheck(mt=MatrixTable,
h2=(oneof(float,
int,
list,
np.ndarray)),
pi=nullable(oneof(float,
int,
list,
np.ndarray)),
annot=nullable(oneof(expr_float64,
expr_int32)),
rg=nullable(oneof(float,
int,
list,
np.ndarray)))
def make_betas(mt, h2, pi=None, annot=None, rg=None):
r"""Generates betas under different models.
Simulates betas (SNP effects) under the infinitesimal, spike & slab, or
annotation-informed models, depending on parameters passed.
Parameters
----------
mt : :class:`.MatrixTable`
MatrixTable containing genotypes to be used. Also should contain
variant annotations as row fields if running the annotation-informed
model or covariates as column fields if adding population stratification.
h2 : :obj:`float` or :obj:`int` or :obj:`list` or :class:`numpy.ndarray`
SNP-based heritability of simulated trait(s).
pi : :obj:`float` or :obj:`int` or :obj:`list` or :class:`numpy.ndarray`, optional
Probability of SNP being causal when simulating under the spike & slab
model. If doing two-trait spike & slab `pi` is a list of probabilities for
overlapping causal SNPs (see docstring of :func:`.multitrait_ss`)
annot : :class:`.Expression`, optional
Row field of aggregated annotations for annotation-informed model.
rg : :obj:`float` or :obj:`int` or :obj:`list` or :class:`numpy.ndarray`, optional
Genetic correlation between traits.
Returns
-------
mt : :class:`.MatrixTable`
:class:`.MatrixTable` with betas as a row field, simulated according to specified model.
pi : :obj:`list`
Probability of a SNP being causal for different traits, possibly altered
from input `pi` if covariance matrix for multitrait simulation was not
positive semi-definite.
rg : :obj:`list`
Genetic correlation between traits, possibly altered from input `rg` if
covariance matrix for multitrait simulation was not positive semi-definite.
"""
h2 = h2.tolist() if type(h2) is np.ndarray else (
[h2] if type(h2) is not list else h2)
pi = pi.tolist() if type(pi) is np.ndarray else (
[pi] if type(pi) is not list else pi)
rg = rg.tolist() if type(rg) is np.ndarray else (
[rg] if type(rg) is not list else rg)
assert (all(x >= 0 and x <= 1 for x in h2)
), 'h2 values must be between 0 and 1'
    assert pi == [None] or all(
        x >= 0 and x <= 1 for x in pi), 'pi values for spike & slab must be between 0 and 1'
assert (rg == [None] or all(x >= -1 and x <= 1 for x in rg)
), 'rg values must be between -1 and 1 or None'
if annot is not None: # multi-trait annotation-informed
assert rg == [
None], 'Correlated traits not supported for annotation-informed model'
h2 = h2 if type(h2) is list else [h2]
        M = mt.count_rows()  # number of SNPs, needed to scale the annotation-informed betas
        annot_sum = mt.aggregate_rows(hl.agg.sum(annot))
        mt = mt.annotate_rows(beta=hl.literal(h2).map(
            lambda x: hl.rand_norm(0, hl.sqrt(annot * x / (annot_sum * M)))))
elif len(h2) > 1 and (pi == [None] or pi == [1]): # multi-trait correlated infinitesimal
mt, rg = multitrait_inf(mt=mt,
h2=h2,
rg=rg)
elif len(h2) == 2 and len(pi) > 1 and len(rg) == 1: # two trait correlated spike & slab
print('multitrait ss')
mt, pi, rg = multitrait_ss(mt=mt,
h2=h2,
                                   rg=0 if rg == [None] else rg[0],
pi=pi)
elif len(h2) == 1 and len(pi) == 1: # single trait infinitesimal/spike & slab
M = mt.count_rows()
pi_temp = 1 if pi == [None] else pi[0]
mt = mt.annotate_rows(beta=hl.rand_bool(
pi_temp) * hl.rand_norm(0, hl.sqrt(h2[0] / (M * pi_temp))))
else:
raise ValueError('Parameters passed do not match any models.')
return mt, pi, rg
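# For orientation (informal summary of the branches above, with hypothetical arguments):
#   make_betas(mt, h2=0.3)                                          -> single-trait infinitesimal
#   make_betas(mt, h2=0.3, pi=0.01)                                 -> single-trait spike & slab
#   make_betas(mt, h2=[0.3, 0.4], rg=[0.2])                         -> correlated infinitesimal traits
#   make_betas(mt, h2=[0.3, 0.4], pi=[0.05, 0.02, 0.02], rg=[0.2])  -> two-trait spike & slab
#   make_betas(mt, h2=0.3, annot=mt.some_annot)                     -> annotation-informed ('some_annot' is a hypothetical row field)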
@typecheck(mt=MatrixTable,
h2=nullable(oneof(float,
int,
list,
np.ndarray)),
rg=nullable(oneof(float,
int,
list)),
cov_matrix=nullable(np.ndarray),
seed=nullable(int))
def multitrait_inf(mt, h2=None, rg=None, cov_matrix=None, seed=None):
r"""Generates correlated betas for multi-trait infinitesimal simulations for
any number of phenotypes.
Parameters
----------
mt : :class:`.MatrixTable`
MatrixTable for simulated phenotype.
h2 : :obj:`float` or :obj:`int` or :obj:`list` or :class:`numpy.ndarray`, optional
Desired SNP-based heritability (:math:`h^2`) of simulated traits.
If `h2` is ``None``, :math:`h^2` is based on diagonal of `cov_matrix`.
rg : :obj:`float` or :obj:`int` or :obj:`list` or :class:`numpy.ndarray`, optional
Desired genetic correlation (:math:`r_g`) between simulated traits.
If simulating more than two correlated traits, `rg` should be a list
of :math:`rg` values corresponding to the upper right triangle of the
covariance matrix. If `rg` is ``None`` and `cov_matrix` is ``None``, :math:`r_g`
is assumed to be 0 between traits. If `rg` and `cov_matrix` are both
not None, :math:`r_g` values from `cov_matrix` take precedence.
cov_matrix : :class:`numpy.ndarray`, optional
Covariance matrix for traits, **unscaled by :math:`M`**, the number of SNPs.
Overrides `h2` and `rg` even when `h2` or `rg` are not ``None``.
seed : :obj:`int`, optional
Seed for random number generator. If `seed` is ``None``, `seed` is set randomly.
Returns
-------
mt : :class:`.MatrixTable`
:class:`.MatrixTable` with simulated SNP effects as a row field of arrays.
rg : :obj:`list`
Genetic correlation between traits, possibly altered from input `rg` if
covariance matrix was not positive semi-definite.
"""
uid = Env.get_uid(base=100)
h2 = (h2.tolist() if type(h2) is np.ndarray else ([h2] if type(h2) is not list else h2))
rg = rg.tolist() if type(rg) is np.ndarray else ([rg] if type(rg) is not list else rg)
    assert h2 == [None] or all(x >= 0 and x <= 1 for x in h2), 'h2 values must be between 0 and 1'
    assert h2 != [None] or cov_matrix is not None, 'h2 and cov_matrix cannot both be None'
seed = seed if seed is not None else int(str(Env.next_seed())[:8])
M = mt.count_rows()
if cov_matrix is not None:
n_phens = cov_matrix.shape[0]
else:
n_phens = len(h2)
if rg == [None]:
print(f'Assuming rg=0 for all {n_phens} traits')
rg = [0] * int((n_phens ** 2 - n_phens) / 2)
assert (all(x >= -1 and x <= 1 for x in rg)
            ), 'rg values must be between -1 and 1'
cov, rg = get_cov_matrix(h2, rg)
cov = (1 / M) * cov
# seed random state for replicability
randstate = np.random.RandomState(int(seed))
betas = randstate.multivariate_normal(
mean=np.zeros(n_phens), cov=cov, size=[M, ])
df = pd.DataFrame([0] * M, columns=['beta'])
tb = hl.Table.from_pandas(df)
tb = tb.add_index().key_by('idx')
tb = tb.annotate(beta=hl.literal(betas.tolist())[hl.int32(tb.idx)])
mt = mt.add_row_index(name='row_idx' + uid)
mt = mt.annotate_rows(beta=tb[mt['row_idx' + uid]]['beta'])
mt = _clean_fields(mt, uid)
return mt, rg
@typecheck(mt=MatrixTable,
h2=oneof(list,
np.ndarray),
pi=oneof(list,
np.ndarray),
rg=oneof(float,
int),
seed=nullable(int))
def multitrait_ss(mt, h2, pi, rg=0, seed=None):
r"""Generates spike & slab betas for simulation of two correlated phenotypes.
Parameters
----------
mt : :class:`.MatrixTable`
:class:`.MatrixTable` for simulated phenotype.
h2 : :obj:`list` or :class:`numpy.ndarray`
Desired SNP-based heritability of simulated traits.
pi : :obj:`list` or :class:`numpy.ndarray`
List of proportion of SNPs: :math:`p_{TT}`, :math:`p_{TF}`, :math:`p_{FT}`
:math:`p_{TT}` is the proportion of SNPs that are causal for both traits,
:math:`p_{TF}` is the proportion of SNPs that are causal for trait 1 but not trait 2,
:math:`p_{FT}` is the proportion of SNPs that are causal for trait 2 but not trait 1.
rg : :obj:`float` or :obj:`int`
Genetic correlation between traits.
seed : :obj:`int`, optional
Seed for random number generator. If `seed` is ``None``, `seed` is set randomly.
Warning
-------
May give inaccurate results if chosen parameters make the covariance matrix
not positive semi-definite. Covariance matrix is likely to not be positive
semi-definite when :math:`p_{TT}` is small and rg is large.
Returns
-------
mt : :class:`.MatrixTable`
:class:`.MatrixTable` with simulated SNP effects as a row field of arrays.
pi : :obj:`list` or :class:`numpy.ndarray`
List of proportion of SNPs: :math:`p_{TT}`, :math:`p_{TF}`, :math:`p_{FT}`.
Possibly altered if covariance matrix of traits was not positive semi-definite.
rg : :obj:`list`
Genetic correlation between traits, possibly altered from input `rg` if
covariance matrix was not positive semi-definite.
"""
assert sum(pi) <= 1, "probabilities of being causal must sum to be less than 1"
seed = seed if seed is not None else int(str(Env.next_seed())[:8])
ptt, ptf, pft, pff = pi[0], pi[1], pi[2], 1 - sum(pi)
cov_matrix = np.asarray([[1 / (ptt + ptf), rg / ptt], [rg / ptt, 1 / (ptt + pft)]])
M = mt.count_rows()
# seed random state for replicability
randstate = np.random.RandomState(int(seed))
if np.any(np.linalg.eigvals(cov_matrix) < 0):
print('adjusting parameters to make covariance matrix positive semidefinite')
rg0, ptt0 = rg, ptt
while np.any(np.linalg.eigvals(cov_matrix) < 0): # check positive semidefinite
rg = round(0.99 * rg, 6)
ptt = round(ptt + (pff) * 0.001, 6)
cov_matrix = np.asarray(
[[1 / (ptt + ptf), rg / ptt], [rg / ptt, 1 / (ptt + pft)]])
pff0, pff = pff, 1 - sum([ptt, ptf, pft])
print(f'rg: {rg0} -> {rg}\nptt: {ptt0} -> {ptt}\npff: {pff0} -> {pff}')
pi = [ptt, ptf, pft, pff]
beta = randstate.multivariate_normal(mean=np.zeros(2),
cov=cov_matrix,
size=[int(M), ])
zeros = np.zeros(shape=int(M)).T
beta_matrix = np.stack((beta, np.asarray([beta[:, 0], zeros]).T,
np.asarray([zeros, zeros]).T,
np.asarray([zeros, beta[:, 1]]).T), axis=1)
idx = np.random.choice(a=[0, 1, 2, 3],
size=int(M),
p=[ptt, ptf, pft, pff])
betas = beta_matrix[range(int(M)), idx, :]
betas[:, 0] *= (h2[0] / M) ** (1 / 2)
betas[:, 1] *= (h2[1] / M) ** (1 / 2)
df = pd.DataFrame([0] * M, columns=['beta'])
tb = hl.Table.from_pandas(df)
tb = tb.add_index().key_by('idx')
tb = tb.annotate(beta=hl.literal(betas.tolist())[hl.int32(tb.idx)])
mt = mt.add_row_index()
mt = mt.annotate_rows(beta=tb[mt.row_idx]['beta'])
return mt, pi, [rg]
@typecheck(h2=oneof(list,
np.ndarray),
rg=oneof(list,
np.ndarray),
psd_rg=bool)
def get_cov_matrix(h2, rg, psd_rg=False):
r"""Creates covariance matrix for simulating correlated SNP effects.
Given a list of heritabilities and a list of genetic correlations, :func:`.get_cov_matrix`
constructs the covariance matrix necessary to draw from a multivariate normal
distribution to generate correlated SNP effects.
Examples
--------
Suppose we have three traits enumerated as trait 1, trait 2, and trait 3.
Each trait has a heritability: :math:`h^2_1`,:math:`h^2_2`,:math:`h^2_3`
Traits have the following genetic correlations: :math:`r_{g, 12}`,:math:`r_{g, 13}`, :math:`r_{g, 23}`
The ordering of indices in the subscript is arbitrary (e.g. :math:`r_{g, 12}` = :math:`r_{g, 21}`)
as both values are the genetic correlation between trait 1 and trait 2.
We can calculate :math:`\rho_{g,ab}`, the genetic covariance between two traits :math:`a` and :math:`b`,
as :math:`\rho_{g,ab}=r_{g,ab}\sqrt{h^2_a\cdot h^2_b}`. The covariance matrix is thus:
.. math::
\begin{pmatrix}
h^2_1 & r_{g, 12}\sqrt{h^2_1\cdot h^2_2} & r_{g, 13}\sqrt{h^2_1\cdot h^2_3} \\
r_{g, 12}\sqrt{h^2_1\cdot h^2_2} & h^2_2 & r_{g, 23}\sqrt{h^2_2\cdot h^2_3} \\
        r_{g, 13}\sqrt{h^2_1\cdot h^2_3} & r_{g, 23}\sqrt{h^2_2\cdot h^2_3} & h^2_3
\end{pmatrix}
Now suppose we have four traits with the following heritabilities (:math:`h^2`): 0.1, 0.3, 0.2, 0.6.
That is, trait 1 has an :math:`h^2` of 0.1, trait 2 has an :math:`h^2` of 0.3 and so on.
Suppose the genetic correlations (:math:`r_g`) between traits are the following:
trait 1 & trait 2 :math:`r_g` = 0.4
trait 1 & trait 3 :math:`r_g` = 0.3
trait 1 & trait 4 :math:`r_g` = 0.1
trait 2 & trait 3 :math:`r_g` = 0.2
trait 2 & trait 4 :math:`r_g` = 0.15
trait 3 & trait 4 :math:`r_g` = 0.6
To obtain the covariance matrix corresponding to this scenario :math:`h^2` values are
ordered according to user specification and :math:`r_g` values are ordered by the
order in which the corresponding genetic covariance terms will appear in the
covariance matrix, reading lines in the upper triangular matrix from left to
right, top to bottom (read first row left to right, read second row left to
    right, etc.), excluding the diagonal.
>>> cov_matrix, rg = get_cov_matrix(h2=[0.1, 0.3, 0.2, 0.6], rg=[0.4, 0.3, 0.1, 0.2, 0.15, 0.6])
>>> cov_matrix
array([[0.1 , 0.06928203, 0.04242641, 0.0244949 ],
[0.06928203, 0.3 , 0.04898979, 0.06363961],
[0.04242641, 0.04898979, 0.2 , 0.2078461 ],
[0.0244949 , 0.06363961, 0.2078461 , 0.6 ]])
The diagonal corresponds directly to `h2`, the list of h2 values for all traits.
In the upper triangular matrix, excluding the diagonal, the entry :math:`(a, b)`,
where :math:`a` and :math:`b` are in :math:`{1,2,3,4}`, is the genetic covariance
(:math:`\rho_g`) between traits :math:`a` and :math:`b`.
    Genetic covariance is calculated as :math:`\rho_g = r_g\sqrt{h^2_a\cdot h^2_b}`
where :math:`r_g` is the genetic correlation between traits :math:`a` and
:math:`b` and :math:`h^2_a` and :math:`h^2_b` are heritabilities corresponding
to traits :math:`a` and :math:`b`.
Notes
-----
Covariance matrix is not scaled by number of SNPs.
If the h2 and rg parameters passed cause the resulting covariance matrix to
not be positive semidefinite, this may cause the distribution of SNP effects
generated by this covariance matrix to not have the properties specified by
the h2 and rg parameters. To automatically adjust rg values so that the
covariance matrix is positive semidefinite, set `psd_rg` = True.
Parameters
----------
h2 : :obj:`list` or :class:`numpy.ndarray`
:math:`h^2` values for traits. :math:`h^2` values in list should be
ordered by their order in the diagonal of the covariance array, reading
from top left to bottom right.
rg : :obj:`list` or :class:`numpy.ndarray`
:math:`r_g` values for traits. :math:`r_g` values should be ordered in
the order they appear in the upper triangle of the covariance matrix,
from left to right, top to bottom.
psd_rg : :obj:`bool`
Whether to automatically adjust rg values to get a positive semi-definite
covariance matrix, which ensures that SNP effects simulated with that
covariance matrix have the desired variance and correlation properties
specified by the h2 and rg parameters.
Returns
-------
cov_matrix : :class:`numpy.ndarray`
Covariance matrix calculated using `h2` and (possibly altered) `rg` values.
rg : :obj:`list`
Genetic correlation between traits, possibly altered from input `rg` if
covariance matrix was not positive semi-definite.
"""
assert (all(x >= 0 and x <= 1 for x in h2)
), 'h2 values must be between 0 and 1'
assert (all(x >= -1 and x <= 1 for x in rg)
), 'rg values must be between -1 and 1'
rg = np.asarray(rg) if type(rg) is list else rg
n_rg = len(rg)
n_h2 = len(h2)
# expected number of rg values, given number of traits
exp_n_rg = int((n_h2 ** 2 - n_h2) / 2)
assert n_rg == exp_n_rg, f'The number of rg values given is {n_rg}, expected {exp_n_rg}'
cor = np.zeros(shape=(n_h2, n_h2))
# set upper triangle of correlation matrix to be rg
cor[np.triu_indices(n=n_h2, k=1)] = rg
cor += cor.T
cor[np.diag_indices(n=n_h2)] = 1
if psd_rg:
cor0 = cor
cor = _nearpsd(cor)
idx = np.triu_indices(n=n_h2, k=1)
maxlines = 50
msg = ['Adjusting rg values to make covariance matrix positive semidefinite']
msg += ([(f'{cor0[idx[0][i],idx[1][i]]} -> {cor[idx[0][i],idx[1][i]]}') for i in range(n_rg)] if n_rg <= maxlines
else [(f'{cor0[idx[0][i],idx[1][i]]} -> {cor[idx[0][i],idx[1][i]]}') for i in range(maxlines)]
+ [f'[ printed first {maxlines} rg changes -- omitted {n_rg - maxlines} ]'])
print('\n'.join(msg))
rg = np.ravel(cor[idx])
S = np.diag(h2) ** (1 / 2)
cov_matrix = S @ cor @ S # covariance matrix decomposition
# check positive semidefinite
if not np.all(np.linalg.eigvals(cov_matrix) >= 0) and not psd_rg:
msg = 'WARNING: Covariance matrix is not positive semidefinite.\n'
msg += 'Multivariate Gaussian distributions generated with this \n'
msg += 'covariance matrix may not have the desired h2 and rg.\n'
msg += 'To make the covariance matrix positive semidefinite,\n'
msg += 'adjust h2 and rg values or set psd_rg=True.'
print(msg)
rg = rg.tolist()
return cov_matrix, rg
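# Worked check of the docstring example above (informal): the (1, 2) entry of cov_matrix is
# rho_g = r_g * sqrt(h2_1 * h2_2) = 0.4 * sqrt(0.1 * 0.3) ~ 0.4 * 0.173205 ~ 0.069282,
# which matches cov_matrix[0, 1] = 0.06928203.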
@typecheck(A=np.ndarray)
def _nearpsd(A):
r""" Obtain the "closest" positive semidefinite matrix to A."""
n = A.shape[0]
eigval, eigvec = np.linalg.eig(A)
val = np.matrix(np.maximum(eigval, 0))
vec = np.matrix(eigvec)
T = 1 / (np.multiply(vec, vec) * val.T)
T = np.matrix(np.sqrt(np.diag(np.array(T).reshape((n)))))
B = T * vec * np.diag(np.array(np.sqrt(val)).reshape((n)))
out = np.real(B * B.T)
return out
@typecheck(mt=MatrixTable,
genotype=oneof(expr_int32,
expr_float64,
expr_call),
beta=oneof(expr_float64,
expr_array(expr_float64)),
h2=oneof(float,
int,
list,
np.ndarray),
popstrat=nullable(oneof(expr_int32,
expr_float64)),
popstrat_var=nullable(oneof(float,
int)),
exact_h2=bool)
def calculate_phenotypes(mt, genotype, beta, h2, popstrat=None, popstrat_var=None,
exact_h2=False):
r"""Calculates phenotypes by multiplying genotypes and betas.
Parameters
----------
mt : :class:`.MatrixTable`
:class:`.MatrixTable` with all relevant fields passed as parameters.
genotype : :class:`.Expression` or :class:`.CallExpression`
Entry field of genotypes.
beta : :class:`.Expression`
Row field of SNP effects.
h2 : :obj:`float` or :obj:`int` or :obj:`list` or :class:`numpy.ndarray`
SNP-based heritability (:math:`h^2`) of simulated trait. Can only be
``None`` if running annotation-informed model.
popstrat : :class:`.Expression`, optional
Column field containing population stratification term.
popstrat_var : :obj:`float` or :obj:`int`
Variance of population stratification term.
exact_h2: :obj:`bool`
Whether to exactly simulate ratio of variance of genetic component of
phenotype to variance of phenotype to be h2. If `False`, ratio will be
h2 in expectation. Observed h2 in the simulation will be close to
expected h2 for large-scale simulations.
Returns
-------
:class:`.MatrixTable`
:class:`.MatrixTable` with simulated phenotype as column field.
"""
print('calculating phenotype')
h2 = h2.tolist() if type(h2) is np.ndarray else (
[h2] if type(h2) is not list else h2)
assert popstrat_var is None or (
popstrat_var >= 0), 'popstrat_var must be non-negative'
uid = Env.get_uid(base=100)
mt = annotate_all(mt=mt,
row_exprs={'beta_' + uid: beta},
col_exprs={} if popstrat is None else {'popstrat_' + uid: popstrat},
entry_exprs={'gt_' + uid: genotype.n_alt_alleles() if genotype.dtype is hl.dtype('call') else genotype})
mt = mt.filter_rows(hl.agg.stats(mt['gt_' + uid]).stdev > 0)
mt = normalize_genotypes(mt['gt_' + uid])
if mt['beta_' + uid].dtype == hl.dtype('array<float64>'): # if >1 traits
if exact_h2:
raise ValueError('exact_h2=True not supported for multitrait simulations')
else:
mt = mt.annotate_cols(y_no_noise=hl.agg.array_agg(
lambda beta: hl.agg.sum(beta * mt['norm_gt']), mt['beta_' + uid]))
mt = mt.annotate_cols(
y=mt.y_no_noise + hl.literal(h2).map(lambda x: hl.rand_norm(0, hl.sqrt(1 - x))))
else:
if exact_h2 and min([h2[0], 1 - h2[0]]) != 0:
print('exact h2')
mt = mt.annotate_cols(**{'y_no_noise_' + uid: hl.agg.sum(mt['beta_' + uid] * mt['norm_gt'])})
y_no_noise_stdev = mt.aggregate_cols(hl.agg.stats(mt['y_no_noise_' + uid]).stdev)
mt = mt.annotate_cols(y_no_noise=hl.sqrt(h2[0]) * mt['y_no_noise_' + uid] / y_no_noise_stdev) # normalize genetic component of phenotype to have variance of exactly h2
mt = mt.annotate_cols(**{'noise_' + uid: hl.rand_norm(0, hl.sqrt(1 - h2[0]))})
noise_stdev = mt.aggregate_cols(hl.agg.stats(mt['noise_' + uid]).stdev)
mt = mt.annotate_cols(noise=hl.sqrt(1 - h2[0]) * mt['noise_' + uid] / noise_stdev)
mt = mt.annotate_cols(y=mt.y_no_noise + hl.sqrt(1 - h2[0]) * mt['noise_' + uid] / noise_stdev)
else:
mt = mt.annotate_cols(y_no_noise=hl.agg.sum(mt['beta_' + uid] * mt['norm_gt']))
mt = mt.annotate_cols(y=mt.y_no_noise + hl.rand_norm(0, hl.sqrt(1 - h2[0])))
if popstrat is not None:
var_factor = 1 if popstrat_var is None else (
popstrat_var ** (1 / 2)) / mt.aggregate_cols(hl.agg.stats(mt['popstrat_' + uid])).stdev
mt = mt.rename({'y': 'y_no_popstrat'})
mt = mt.annotate_cols(y=mt.y_no_popstrat
+ mt['popstrat_' + uid] * var_factor)
mt = _clean_fields(mt, uid)
return mt
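# The additive model implemented above, in brief: with normalized genotypes X and SNP effects beta,
# the genetic component is y_no_noise = X @ beta (variance ~ h2 by construction of the betas), and
# y = y_no_noise + e with e ~ N(0, 1 - h2), so Var(y) ~ 1 and the genetic share of variance is ~ h2
# (exactly h2 when exact_h2=True, in expectation otherwise).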
@typecheck(genotype=oneof(expr_int32,
expr_float64,
expr_call))
def normalize_genotypes(genotype):
r"""Normalizes genotypes to have mean 0 and variance 1 at each SNP
Parameters
----------
genotype : :class:`.Expression` or :class:`.CallExpression`
Entry field of genotypes.
Returns
-------
:class:`.MatrixTable`
:class:`.MatrixTable` with normalized genotypes.
"""
uid = Env.get_uid(base=100)
mt = genotype._indices.source
mt = mt.annotate_entries(
**{'gt_' + uid: genotype.n_alt_alleles() if genotype.dtype is hl.dtype('call') else genotype})
mt = mt.annotate_rows(**{'gt_stats_' + uid: hl.agg.stats(mt['gt_' + uid])})
# TODO: Add MAF filter to remove invariant SNPs?
mt = mt.annotate_entries(norm_gt=(mt['gt_' + uid] - mt['gt_stats_' + uid].mean) / mt['gt_stats_' + uid].stdev)
mt = _clean_fields(mt, uid)
return mt
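# Equivalent NumPy sketch of the per-SNP normalization above (illustrative only, toy genotypes):
def _normalize_genotypes_numpy_example():
    g = np.array([[0., 1., 2., 1.],
                  [0., 0., 1., 1.]])  # SNPs x samples
    norm_g = (g - g.mean(axis=1, keepdims=True)) / g.std(axis=1, keepdims=True)
    return norm_g  # each row now has mean 0 and standard deviation 1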
@typecheck(mt=MatrixTable,
str_expr=str)
def _clean_fields(mt, str_expr):
r"""Removes fields with names that have `str_expr` in them.
Parameters
----------
mt : :class:`.MatrixTable`
:class:`.MatrixTable` with fields to be removed.
str_expr : :class:`str`
string to filter names of fields to remove.
Returns
-------
:class:`.MatrixTable`
:class:`.MatrixTable` with specified fields removed.
"""
all_fields = list(mt.col) + list(mt.row) + list(mt.entry) + list(mt.globals)
return mt.drop(*(x for x in all_fields if str_expr in x))
@typecheck(mt=MatrixTable,
row_exprs=dict,
col_exprs=dict,
entry_exprs=dict,
global_exprs=dict)
def annotate_all(mt, row_exprs={}, col_exprs={}, entry_exprs={}, global_exprs={}):
r"""Equivalent of _annotate_all, but checks source MatrixTable of exprs"""
exprs = {**row_exprs, **col_exprs, **entry_exprs, **global_exprs}
for key, value in exprs.items():
if type(value) == expr_float64 or type(value) == expr_int32:
assert value._indices.source == mt, 'Cannot combine expressions from different source objects.'
return mt._annotate_all(row_exprs, col_exprs, entry_exprs, global_exprs)
@typecheck(tb=oneof(MatrixTable,
Table),
coef_dict=nullable(dict),
str_expr=nullable(str),
axis=str)
def agg_fields(tb, coef_dict=None, str_expr=None, axis='rows'):
r"""Aggregates by linear combination fields matching either keys in `coef_dict`
or `str_expr`. Outputs the aggregation in a :class:`.MatrixTable` or :class:`.Table`
as a new row field "agg_annot" or a new column field "agg_cov".
Parameters
----------
tb : :class:`.MatrixTable` or :class:`.Table`
:class:`.MatrixTable` or :class:`.Table` containing fields to be aggregated.
coef_dict : :obj:`dict`, optional
Coefficients to multiply each field. The coefficients are specified by
`coef_dict` value, the row (or col) field name is specified by `coef_dict` key.
If not included, coefficients are assumed to be 1.
str_expr : :class:`str`, optional
String expression to match against row (or col) field names.
axis : :class:`str`
Either 'rows' or 'cols'. If 'rows', this aggregates across row fields.
If 'cols', this aggregates across col fields. If tb is a Table, axis = 'rows'.
Returns
-------
:class:`.MatrixTable` or :class:`.Table`
:class:`.MatrixTable` or :class:`.Table` containing aggregation field.
"""
assert (str_expr is not None or coef_dict is not None), "str_expr and coef_dict cannot both be None"
assert axis == 'rows' or axis == 'cols', "axis must be 'rows' or 'cols'"
coef_dict = get_coef_dict(tb=tb, str_expr=str_expr,
ref_coef_dict=coef_dict, axis=axis)
axis_field = 'annot' if axis == 'rows' else 'cov'
annotate_fn = (MatrixTable.annotate_rows if axis == 'rows' else MatrixTable.annotate_cols) if type(
tb) is MatrixTable else Table.annotate
tb = annotate_fn(self=tb, **{'agg_' + axis_field: 0})
print(
f'Fields and associated coefficients used in {axis_field} aggregation: {coef_dict}')
for field, coef in coef_dict.items():
tb = annotate_fn(
self=tb, **{'agg_' + axis_field: tb['agg_' + axis_field] + coef * tb[field]})
return tb
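# Usage sketch (hypothetical field names): given row fields `annot_coding` and `annot_promoter`,
#   mt = agg_fields(mt, coef_dict={'annot_coding': 1.0, 'annot_promoter': 2.5})
# adds a row field `agg_annot` equal to 1.0 * mt.annot_coding + 2.5 * mt.annot_promoter.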
@typecheck(tb=oneof(MatrixTable,
Table),
str_expr=nullable(str),
ref_coef_dict=nullable(dict),
axis=str)
def get_coef_dict(tb, str_expr=None, ref_coef_dict=None, axis='rows'):
r"""Gets either col or row fields matching `str_expr` and take intersection
with keys in coefficient reference dict.
Parameters
----------
tb : :class:`.MatrixTable` or :class:`.Table`
:class:`.MatrixTable` or :class:`.Table` containing row (or col) for `coef_dict`.
str_expr : :class:`str`, optional
String expression pattern to match against row (or col) fields. If left
unspecified, the intersection of field names is only between existing
row (or col) fields in `mt` and keys of `ref_coef_dict`.
ref_coef_dict : :obj:`dict`, optional
Reference coefficient dictionary with keys that are row (or col) field
names from which to subset. If not included, coefficients are assumed to be 1.
axis : :class:`str`
Field type in which to search for field names. Options: 'rows', 'cols'
Returns
-------
coef_dict : :obj:`dict`
Coefficients to multiply each field. The coefficients are specified by
`coef_dict` value, the row (or col) field name is specified by `coef_dict` key.
"""
assert (str_expr is not None or ref_coef_dict is not None), "str_expr and ref_coef_dict cannot both be None"
assert axis == 'rows' or axis == 'cols', "axis must be 'rows' or 'cols'"
fields_to_search = (tb.row if axis == 'rows' or type(tb)
is Table else tb.col)
# when axis='rows' we're searching for annotations, axis='cols' searching for covariates
axis_field = 'annotation' if axis == 'rows' else 'covariate'
if str_expr is None:
# take all row (or col) fields in mt matching keys in coef_dict
coef_dict = {k: ref_coef_dict[k]
for k in ref_coef_dict.keys() if k in fields_to_search}
# if intersect is empty: return error
assert len(
coef_dict) > 0, f'None of the keys in ref_coef_dict match any {axis[:-1]} fields'
return coef_dict # return subset of ref_coef_dict
else:
# str_expr search in list of row (or col) fields
fields = [rf for rf in list(fields_to_search) if str_expr in rf]
assert len(
fields) > 0, f'No {axis[:-1]} fields matched str_expr search: {str_expr}'
if ref_coef_dict is None:
print(f'Assuming coef = 1 for all {axis_field}s')
return {k: 1 for k in fields}
in_ref_coef_dict = set(fields).intersection(
set(ref_coef_dict.keys())) # fields in ref_coef_dict
# if >0 fields returned by search are not in ref_coef_dict
if in_ref_coef_dict != set(fields):
# if none of the fields returned by search are in ref_coef_dict
assert len(
in_ref_coef_dict) > 0, f'None of the {axis_field} fields in ref_coef_dict match search results'
fields_to_ignore = set(fields).difference(in_ref_coef_dict)
print(
f'Ignored fields from {axis_field} search: {fields_to_ignore}')
print('To include ignored fields, change str_expr to match desired fields')
fields = list(in_ref_coef_dict)
return {k: ref_coef_dict[k] for k in fields}
@typecheck(mt=MatrixTable,
y=expr_int32,
P=oneof(int,
float))
def ascertainment_bias(mt, y, P):
r"""Adds ascertainment bias to a binary phenotype to give it a sample
prevalence of `P` = cases/(cases+controls).
Parameters
----------
mt : :class:`.MatrixTable`
:class:`.MatrixTable` containing binary phenotype to be used.
y : :class:`.Expression`
Column field of binary phenotype.
P : :obj:`int` or :obj:`float`
Desired "sample prevalence" of phenotype.
Returns
-------
:class:`.MatrixTable`
:class:`.MatrixTable` containing binary phenotype with prevalence of approx. P
"""
assert P >= 0 and P <= 1, 'P must be in [0,1]'
uid = Env.get_uid(base=100)
mt = mt.annotate_cols(y_w_asc_bias=y)
y_stats = mt.aggregate_cols(hl.agg.stats(mt.y_w_asc_bias))
K = y_stats.mean
n = y_stats.n
assert abs(
P - K) < 1, 'Specified sample prevalence is incompatible with population prevalence.'
if P < K:
p = (1 - K) * P / (K * (1 - P))
con = mt.filter_cols(mt.y_w_asc_bias == 0)
cas = mt.filter_cols(mt.y_w_asc_bias == 1).add_col_index(
name='col_idx_' + uid)
keep = round(p * n * K) * [1] + round((1 - p) * n * K) * [0]
cas = cas.annotate_cols(
**{'keep_' + uid: hl.literal(keep)[hl.int32(cas['col_idx_' + uid])]})
cas = cas.filter_cols(cas['keep_' + uid] == 1)
cas = _clean_fields(cas, uid)
mt = cas.union_cols(con)
elif P > K:
p = K * (1 - P) / ((1 - K) * P)
cas = mt.filter_cols(mt.y_w_asc_bias == 1)
con = mt.filter_cols(mt.y_w_asc_bias == 0).add_col_index(
name='col_idx_' + uid)
keep = round(p * n * (1 - K)) * [1] + round((1 - p) * n * (1 - K)) * [0]
con = con.annotate_cols(
**{'keep_' + uid: hl.literal(keep)[hl.int32(con['col_idx_' + uid])]})
con = con.filter_cols(con['keep_' + uid] == 1)
con = _clean_fields(con, uid)
mt = con.union_cols(cas)
return mt
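# Worked example of the case-downsampling branch above (P < K, informal): with observed prevalence
# K = 0.5 and target sample prevalence P = 0.2,
#   p = (1 - K) * P / (K * (1 - P)) = 0.5 * 0.2 / (0.5 * 0.8) = 0.25,
# so roughly a quarter of the cases are kept and cases / (cases + controls) ~ 0.125n / 0.625n = 0.2.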
@typecheck(mt=MatrixTable,
y=oneof(expr_int32,
expr_float64),
K=oneof(int,
float),
exact=bool)
def binarize(mt, y, K, exact=False):
r"""Binarize phenotype `y` such that it has prevalence `K` = cases/(cases+controls)
Uses inverse CDF of Gaussian to set binarization threshold when `exact` = False,
otherwise uses ranking to determine threshold.
Parameters
----------
mt : :class:`.MatrixTable`
:class:`.MatrixTable` containing phenotype to be binarized.
y : :class:`.Expression`
Column field of phenotype.
K : :obj:`int` or :obj:`float`
Desired "population prevalence" of phenotype.
exact : :obj:`bool`
Whether to get prevalence as close as possible to `K` (does not use inverse CDF)
Returns
-------
:class:`.MatrixTable`
:class:`.MatrixTable` containing binary phenotype with prevalence of approx. `K`
"""
if exact:
key = list(mt.col_key)
uid = Env.get_uid(base=100)
mt = mt.annotate_cols(**{'y_' + uid: y})
tb = mt.cols().order_by('y_' + uid)
tb = tb.add_index('idx_' + uid)
n = tb.count()
# "+ 1" because of zero indexing
tb = tb.annotate(y_binarized=tb['idx_' + uid] + 1 <= round(n * K))
tb, mt = tb.key_by('y_' + uid), mt.key_cols_by('y_' + uid)
mt = mt.annotate_cols(y_binarized=tb[mt['y_' + uid]].y_binarized)
mt = mt.key_cols_by(*map(lambda x: mt[x], key))
else: # use inverse CDF
y_stats = mt.aggregate_cols(hl.agg.stats(y))
threshold = stats.norm.ppf(1 - K, loc=y_stats.mean, scale=y_stats.stdev)
mt = mt.annotate_cols(y_binarized=y > threshold)
return mt
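# Worked example of the inverse-CDF branch (informal): for a phenotype with mean 0 and stdev 1 and
# desired prevalence K = 0.1, the threshold is stats.norm.ppf(1 - 0.1) ~ 1.2816, so roughly the top
# 10% of phenotype values are labelled cases (y_binarized = True).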
| mit |
18praveenb/toil-rnaseq-sc | src/toil_rnaseq_sc/rnaseq_sc_cgl_plot_functions.py | 1 | 14121 | #!/usr/bin/env python2.7
# This is a modified version of a source file from the repository "scRNA-Seq-tcc-prep" by the Pachter Lab which can be found here: https://github.com/pachterlab/scRNA-Seq-TCC-prep/blob/201469940e138c2f09bcd058a6291b17794f7c88/notebooks/10xResults.ipynb
# The citation for the paper with which this repository is associated is Ntranos, V., Kamath, G. M., Zhang, J. M., Pachter, L. & Tse, D. N. Fast and accurate single-cell RNA-seq analysis by clustering of transcript-compatibility counts. Genome Biology 17, 112 (2016).
# The entire source of "scRNA-Seq-tcc prep" is also used in Dockerized form in this pipeline.
# The original "scRNA-Seq-TCC-prep" repository was released under GPLv3, as is this repository (and thus this source file). For more details, see the 'README.md' of this repository which contains the full text of the GPL.
from __future__ import print_function
import os
import pickle
import sys
from subprocess import CalledProcessError
from urlparse import urlparse
import numpy as np
from bd2k.util.files import mkdir_p
from sklearn import cluster,manifold
from sklearn.decomposition import PCA
from sklearn.preprocessing import normalize
from toil.lib.docker import dockerCall
from toil_lib.files import tarball_files, copy_files
from toil_lib.urls import s3am_upload
from string import lstrip
# Matplotlib backend nonsense
import matplotlib
if sys.platform == 'darwin':
matplotlib.use('TkAgg')
else:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
SC3_OUTPUT_DIRECTORY = "SC3"
MATRIX_TSV_FILENAME = "matrix.tsv"
MATRIX_CELLS_FILENAME = "matrix.cells"
DOCKER_WORK_DIR = "/data"
# TODO: Refactor to use ids
def run_data_analysis(job, config, tcc_matrix_id, pwise_dist_l1_id, nonzero_ec_id, kallisto_matrix_id, matrix_tsv_id, matrix_cells_id):
"""
Generates graphs and plots of results. Uploads images to savedir location.
:param job: toil job
:param config: toil job configuration
:param tcc_matrix_id: jobstore location of TCC matrix (.dat)
:param pwise_dist_l1_id: jobstore location of L1 pairwise distance (.dat)
    :param nonzero_ec_id: jobstore location of nonzero ec (.dat)
:param kallisto_matrix_id: id of kallisto output matrix (.ec)
:param matrix_tsv_id: id of kallisto output matrix (.tsv)
:param matrix_cells_id: id of kallisto output matrix (.cells)
"""
# source: https://github.com/pachterlab/scRNA-Seq-TCC-prep (/blob/master/notebooks/10xResults.ipynb)
# extract output
job.fileStore.logToMaster('Performing data analysis')
# read files
work_dir = job.fileStore.getLocalTempDir()
tcc_matrix = job.fileStore.readGlobalFile(tcc_matrix_id, os.path.join(work_dir, "TCC_matrix.dat"))
pwise_dist_l1 = job.fileStore.readGlobalFile(pwise_dist_l1_id, os.path.join(work_dir, "pwise_dist_L1.dat"))
nonzero_ec = job.fileStore.readGlobalFile(nonzero_ec_id, os.path.join(work_dir, "nonzero_ec.dat"))
kallisto_matrix = job.fileStore.readGlobalFile(kallisto_matrix_id, os.path.join(work_dir, 'kallisto_matrix.ec'))
matrix_tsv = job.fileStore.readGlobalFile(matrix_tsv_id, os.path.join(work_dir, MATRIX_TSV_FILENAME))
matrix_cells = job.fileStore.readGlobalFile(matrix_cells_id, os.path.join(work_dir, MATRIX_CELLS_FILENAME))
##############################################################
# load dataset
with open(os.path.join(work_dir, "TCC_matrix.dat"), 'rb') as f:
tcc_matrix = pickle.load(f)
with open(os.path.join(work_dir, "pwise_dist_L1.dat"), 'rb') as f:
pwise_dist_l1 = pickle.load(f)
with open(os.path.join(work_dir, "nonzero_ec.dat"), 'rb') as f:
nonzero_ec = pickle.load(f)
ecfile_dir = os.path.join(work_dir, 'kallisto_matrix.ec')
eclist = np.loadtxt(ecfile_dir, dtype=str)
tcc = tcc_matrix.T
T_norm = normalize(tcc_matrix, norm='l1', axis=0)
t_normt = T_norm.transpose()
num_of_cells = np.shape(tcc_matrix)[1]
print("NUM_OF_CELLS =", num_of_cells)
print("NUM_OF_nonzero_EC =", np.shape(tcc_matrix)[0])
#################################
EC_dict = {}
for i in range(np.shape(eclist)[0]):
EC_dict[i] = [int(x) for x in eclist[i, 1].split(',')]
union = set()
for i in nonzero_ec:
new = [tx for tx in EC_dict[i] if tx not in union] # filter out previously seen transcripts
union.update(new)
NUM_OF_TX_inTCC = len(union)
print("NUM_OF_Transcripts =", NUM_OF_TX_inTCC) # number of distinct transcripts in nonzero eq. classes
##############################################################
# inspect
# sort eq. classes based on size
size_of_ec = [len(EC_dict[i]) for i in nonzero_ec]
ec_idx = [i[0] for i in sorted(enumerate(size_of_ec), key=lambda x: x[1])]
index_ec = np.array(ec_idx)
ec_sort_map = {}
nonzero_ec_srt = [] # init
for i in range(len(nonzero_ec)):
nonzero_ec_srt += [nonzero_ec[index_ec[i]]]
ec_sort_map[nonzero_ec[index_ec[i]]] = i
sumi = np.array(tcc_matrix.sum(axis=1))
sumi_sorted = sumi[index_ec]
total_num_of_umis = int(sumi_sorted.sum())
total_num_of_umis_per_cell = np.array(tcc_matrix.sum(axis=0))[0, :]
print("Total number of UMIs =", total_num_of_umis)
#################################
fig, ax1 = plt.subplots()
ax1.plot(sorted(total_num_of_umis_per_cell)[::-1], 'b-', linewidth=2.0)
ax1.set_title('UMI counts per cell')
ax1.set_xlabel('cells (sorted by UMI counts)')
ax1.set_ylabel('UMI counts')
ax1.set_yscale("log", nonposy='clip')
ax1.grid(True)
ax1.grid(True, 'minor')
umi_counts_per_cell = os.path.join(work_dir, "UMI_counts_per_cell.png")
plt.savefig(umi_counts_per_cell, format='png')
fig, ax1 = plt.subplots()
ax1.plot(sorted(sumi.reshape(np.shape(sumi)[0]))[::-1], 'r-', linewidth=2.0)
ax1.set_title('UMI counts per eq. class')
ax1.set_xlabel('ECs (sorted by UMI counts)')
ax1.set_ylabel('UMI counts')
ax1.set_yscale("log", nonposy='clip')
ax1.grid(True)
ax1.grid(True, 'minor')
umi_counts_per_class = os.path.join(work_dir, "UMI_counts_per_class.png")
plt.savefig(umi_counts_per_class, format='png')
cell_nonzeros = np.array(((T_norm != 0)).sum(axis=0))[0]
fig, ax1 = plt.subplots()
ax1.plot(total_num_of_umis_per_cell, cell_nonzeros, '.g', linewidth=2.0)
ax1.set_title('UMI counts vs nonzero ECs')
ax1.set_xlabel('total num of umis per cell')
ax1.set_ylabel('total num of nonzero ecs per cell')
ax1.set_yscale("log", nonposy='clip')
ax1.set_xscale("log", nonposy='clip')
ax1.grid(True)
ax1.grid(True, 'minor')
umi_counts_vs_nonzero_ecs = os.path.join(work_dir, "UMI_counts_vs_nonzero_ECs.png")
plt.savefig(umi_counts_vs_nonzero_ecs, format='png')
# TCC MEAN-VARIANCE
#todo verify this works
TCC_var=np.var(tcc.todense(),axis=0)
TCC_mean=np.mean(tcc.todense(),axis=0)
TCC_mean=np.array(TCC_mean)[0]
TCC_var=np.array(TCC_var)[0]
fig = plt.figure()
N=tcc.sum()
C=tcc.shape[0]
ax = plt.gca()
ax.plot(TCC_mean ,TCC_var,'.', c='blue', alpha=0.5, markeredgecolor='none')
xlims=[0.0001,10*TCC_mean.max()]
ax.set_xlim(xlims)
ax.set_ylim([0.0001,10*TCC_var.max()])
ax.set_yscale('symlog')
ax.set_xscale('symlog')
ax.plot(xlims, [(C-1)*(xlims[0])**2, (C-1)*(xlims[1])**2], color='g', linestyle='-', linewidth=2)
ax.plot(xlims, [(xlims[0]), (xlims[1])], color='k', linestyle='--', linewidth=1)
ax.set_title("TCC Mean-Variance ["+str(tcc.shape[1])+" TCCs in "+str(C)+" Cells]")
ax.set_xlabel("mean(TCC)")
ax.set_ylabel("var(TCC)")
tcc_mean_variance = os.path.join(work_dir, "TCC_mean_variance.png")
plt.savefig(tcc_mean_variance, format='png')
##############################################################
# clustering
#################################
# t-SNE
x_tsne = tSNE_pairwise(2, pwise_dist_l1)
#################################
# spectral clustering
n_clusters = config.n_clusters
similarity_mat = pwise_dist_l1.max() - pwise_dist_l1
labels_spectral = spectral(n_clusters, similarity_mat)
spectral_clustering = stain_plot(x_tsne, labels_spectral, [], "TCC -- tSNE, spectral clustering with " + str(n_clusters) + " n_clusters", work_dir=work_dir,
filename="spectral_clustering_tSNE")
#################################
# affinity propagation
pref = -np.median(pwise_dist_l1) * np.ones(num_of_cells)
labels_aff = AffinityProp(-pwise_dist_l1, pref, 0.5)
np.unique(labels_aff)
affinity_propagation_tsne = stain_plot(x_tsne, labels_aff, [], "TCC -- tSNE, affinity propagation", work_dir,
"affinity_propagation_tSNE")
#################################
# pca
pca = PCA(n_components=2)
x_pca = pca.fit_transform(t_normt.todense())
affinity_propagation_pca = stain_plot(x_pca, labels_aff, [], "TCC -- PCA, affinity propagation", work_dir,
"affinity_propagation_PCA")
# SC3
outfilePath = job.fileStore.getLocalTempFile()
SC3OutputPath = os.path.join(work_dir, SC3_OUTPUT_DIRECTORY)
os.mkdir(SC3OutputPath)
shouldUseSC3Output = True
with open(outfilePath, "r+") as outfile:
def dockerPathTo(resource): return os.path.join(DOCKER_WORK_DIR, resource)
def boolForR(aBool): return "TRUE" if aBool else "FALSE"
try:
dockerCall(job, tool='rscript', workDir=work_dir, parameters=map(str, [config.min_k, config.max_k, dockerPathTo(MATRIX_TSV_FILENAME), dockerPathTo(MATRIX_CELLS_FILENAME), dockerPathTo(SC3_OUTPUT_DIRECTORY), boolForR(config.use_estimated_k), boolForR(config.debug)]), outfile=outfile)
pass
except CalledProcessError:
outfile.seek(0, 0)
job.fileStore.logToMaster("Docker failed with the following log: " + str(outfile.read()))
shouldUseSC3Output = False
# build tarfile of output plots
output_files = [umi_counts_per_cell, umi_counts_per_class, umi_counts_vs_nonzero_ecs, tcc_mean_variance,
spectral_clustering, affinity_propagation_tsne, affinity_propagation_pca, outfilePath] + ([os.path.join(work_dir, SC3_OUTPUT_DIRECTORY, x) for x in os.listdir(SC3OutputPath)] if shouldUseSC3Output else [])
tarball_files(tar_name='single_cell_plots.tar.gz', file_paths=output_files, output_dir=work_dir)
# return file id for consolidation
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'single_cell_plots.tar.gz'))
def AffinityProp(D, pref, damp):
"""
    Perform sklearn affinity propagation (clustering) on a precomputed affinity matrix, returning cluster labels.
    :param D: precomputed affinity (similarity) matrix, e.g. negated pairwise distances
    :param pref: preference parameter for the affinity propagation
:param damp: damping parameter for the affinity propagation
:return: labels
"""
aff = cluster.AffinityPropagation(affinity='precomputed',
preference=pref, damping=damp, verbose=True)
labels = aff.fit_predict(D)
return labels
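# Example usage (a sketch; `dist` stands for any precomputed pairwise-distance
# matrix such as pwise_dist_l1 above):
#     pref = -np.median(dist) * np.ones(dist.shape[0])
#     labels = AffinityProp(-dist, pref, 0.5)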
def spectral(n, D):
"""
    Perform spectral clustering on a precomputed similarity matrix.
    :param n: number of clusters to request (the number of labels shown on the stain plot may differ)
    :param D: precomputed similarity (affinity) matrix, e.g. max(distance) - distance
:return: labels from the spectral clustering
"""
spectral = cluster.SpectralClustering(n_clusters=n, affinity='precomputed')
spectral.fit(D)
labels = spectral.labels_
return labels
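# Example usage (sketch): spectral clustering expects similarities, so a distance
# matrix `dist` is first flipped into a similarity matrix as done above:
#     labels = spectral(3, dist.max() - dist)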
def tSNE_pairwise(n, D):
"""
Perform t-SNE dimensionality reduction on the distance matrix D, using n components.
:param n: the number of components to use (passed as n_components to sklearn.manifold.TSNE.__init__)
:param D: Distance matrix to be processed
:return: t-SNE reduced version of D
"""
    tsne = manifold.TSNE(n_components=n, random_state=213, metric='precomputed', n_iter=2000, verbose=1)
    X_tsne = tsne.fit_transform(D)
return X_tsne
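# Example usage (sketch): reduce a precomputed distance matrix `dist` to 2-D
# coordinates for plotting:
#     x_tsne = tSNE_pairwise(2, dist)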
def stain_plot(X, labels, stain, title, work_dir, filename, filetype='png', nc=2, ax_lim=0, marksize=46):
"""
Create a matplotlib plot from the specified parameters, including cluster labels and dimensionally reduced points to plot
:param X: the reduced matrix returned by a dimensionality reduction routine e.g. tSNE or PCA
:param labels: the labels to use to group the points into clusters
:param stain: labels to stain
:param title: plot title
:param work_dir: working directory to create the file
:param filename: name of the file to be saved to work_dir
:param filetype: extension of the created file
:param nc: number of columns in the legend
:param ax_lim: limits of x- and y- axes (e.g. ax_lim = 3 -> [-3, 3] x [-3, 3] bounding box)
    :param marksize: size of the scatter-plot points that are NOT stained (stained points are always drawn at size 146)
    :return: path of the saved plot file
    """
file_location = os.path.join(work_dir, filename + "." + filetype)
unique_labels = np.unique(labels)
N = len(unique_labels)
max_value = 16581375 # 255**3
interval = int(max_value / N)
colors = [hex(I)[2:].zfill(6) for I in range(0, max_value, interval)]
color = [(int(i[:2], 16) / float(255), int(i[2:4], 16) / float(255),
int(i[4:], 16) / float(255)) for i in colors]
    i = 0
plt.figure(figsize=(15, 10))
for label in unique_labels:
ind = np.squeeze(labels == label)
if label in stain:
plt.scatter(X[ind, 0], X[ind, 1], c='red', s=146, edgecolor='black',
lw=0.5, alpha=1, marker='*', label=label)
else:
plt.scatter(X[ind, 0], X[ind, 1], c=color[i], s=marksize, edgecolor='lightgray',
lw=0.5, label=label)
i += 1
plt.title(title)
plt.gray()
plt.legend(loc='upper right', bbox_to_anchor=(1.18, 1.01), ncol=nc)
if ax_lim > 0:
plt.xlim([-ax_lim, ax_lim])
plt.ylim([-ax_lim, ax_lim])
plt.axis('off')
plt.savefig(file_location, format=filetype)
return file_location
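# Example usage (sketch, reusing objects computed above): plot the t-SNE embedding
# colored by the affinity-propagation labels, with no clusters highlighted:
#     stain_plot(x_tsne, labels_aff, [], "TCC -- tSNE", work_dir, "example_plot")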
| gpl-3.0 |
rgommers/statsmodels | statsmodels/regression/tests/test_robustcov.py | 1 | 27218 | # -*- coding: utf-8 -*-
"""Testing OLS robust covariance matrices against STATA
Created on Mon Oct 28 15:25:14 2013
Author: Josef Perktold
"""
import numpy as np
from scipy import stats
from numpy.testing import assert_allclose, assert_equal, assert_warns
from statsmodels.regression.linear_model import OLS, WLS
import statsmodels.stats.sandwich_covariance as sw
from statsmodels.tools.tools import add_constant
from statsmodels.datasets import macrodata
from statsmodels.tools.sm_exceptions import InvalidTestWarning
from .results import results_macro_ols_robust as res
from .results import results_grunfeld_ols_robust_cluster as res2
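# The classes below exercise two equivalent entry points for robust covariances
# and compare them against Stata output. A minimal sketch (not itself a test):
#     res = OLS(endog, exog).fit(cov_type='HC1', use_t=True)
#     res = OLS(endog, exog).fit().get_robustcov_results('HC1', use_t=True)
# Both routes should produce the same bse and cov_params().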
#test_hac_simple():
class CheckOLSRobust(object):
def test_basic(self):
res1 = self.res1
res2 = self.res2
rtol = getattr(self, 'rtol', 1e-10)
assert_allclose(res1.params, res2.params, rtol=rtol)
assert_allclose(self.bse_robust, res2.bse, rtol=rtol)
assert_allclose(self.cov_robust, res2.cov, rtol=rtol)
def test_tests(self):
# Note: differences between small (t-distribution, ddof) and large (normal)
# F statistic has no ddof correction in large, but uses F distribution (?)
res1 = self.res1
res2 = self.res2
rtol = getattr(self, 'rtol', 1e-10)
rtolh = getattr(self, 'rtolh', 1e-12)
mat = np.eye(len(res1.params))
tt = res1.t_test(mat, cov_p=self.cov_robust)
# has 'effect', 'pvalue', 'sd', 'tvalue'
# TODO confint missing
assert_allclose(tt.effect, res2.params, rtol=rtol)
assert_allclose(tt.sd, res2.bse, rtol=rtol)
assert_allclose(tt.tvalue, res2.tvalues, rtol=rtol)
if self.small:
assert_allclose(tt.pvalue, res2.pvalues, rtol=5 * rtol)
else:
pval = stats.norm.sf(np.abs(tt.tvalue)) * 2
assert_allclose(pval, res2.pvalues, rtol=5 * rtol, atol=1e-25)
ft = res1.f_test(mat[:-1], cov_p=self.cov_robust)
if self.small:
#'df_denom', 'df_num', 'fvalue', 'pvalue'
assert_allclose(ft.fvalue, res2.F, rtol=rtol)
# f-pvalue is not directly available in Stata results, but is in ivreg2
if hasattr(res2, 'Fp'):
assert_allclose(ft.pvalue, res2.Fp, rtol=rtol)
else:
if not getattr(self, 'skip_f', False):
dof_corr = res1.df_resid * 1. / res1.nobs
assert_allclose(ft.fvalue * dof_corr, res2.F, rtol=rtol)
if hasattr(res2, 'df_r'):
assert_equal(ft.df_num, res2.df_m)
assert_equal(ft.df_denom, res2.df_r)
else:
# ivreg2
assert_equal(ft.df_num, res2.Fdf1)
assert_equal(ft.df_denom, res2.Fdf2)
# SMOKE
tt.summary()
ft.summary()
tt.summary_frame()
class TestOLSRobust1(CheckOLSRobust):
# compare with regress robust
def setup(self):
res_ols = self.res1
self.bse_robust = res_ols.HC1_se
self.cov_robust = res_ols.cov_HC1
self.small = True
self.res2 = res.results_hc0
@classmethod
def setup_class(cls):
d2 = macrodata.load().data
g_gdp = 400*np.diff(np.log(d2['realgdp']))
g_inv = 400*np.diff(np.log(d2['realinv']))
exogg = add_constant(np.c_[g_gdp, d2['realint'][:-1]], prepend=False)
cls.res1 = res_ols = OLS(g_inv, exogg).fit()
class TestOLSRobust2(TestOLSRobust1):
# compare with ivreg robust small
def setup(self):
res_ols = self.res1
self.bse_robust = res_ols.HC1_se
self.cov_robust = res_ols.cov_HC1
self.small = True
self.res2 = res.results_ivhc0_small
class TestOLSRobust3(TestOLSRobust1):
# compare with ivreg robust (not small)
def setup(self):
res_ols = self.res1
self.bse_robust = res_ols.HC0_se
self.cov_robust = res_ols.cov_HC0
self.small = False
self.res2 = res.results_ivhc0_large
class TestOLSRobustHacSmall(TestOLSRobust1):
# compare with ivreg robust small
def setup(self):
res_ols = self.res1
cov1 = sw.cov_hac_simple(res_ols, nlags=4, use_correction=True)
se1 = sw.se_cov(cov1)
self.bse_robust = se1
self.cov_robust = cov1
self.small = True
self.res2 = res.results_ivhac4_small
class TestOLSRobustHacLarge(TestOLSRobust1):
# compare with ivreg robust (not small)
def setup(self):
res_ols = self.res1
cov1 = sw.cov_hac_simple(res_ols, nlags=4, use_correction=False)
se1 = sw.se_cov(cov1)
self.bse_robust = se1
self.cov_robust = cov1
self.small = False
self.res2 = res.results_ivhac4_large
class CheckOLSRobustNewMixin(object):
# This uses the robust covariance as default covariance
def test_compare(self):
rtol = getattr(self, 'rtol', 1e-10)
assert_allclose(self.cov_robust, self.cov_robust2, rtol=rtol)
assert_allclose(self.bse_robust, self.bse_robust2, rtol=rtol)
def test_fvalue(self):
if not getattr(self, 'skip_f', False):
rtol = getattr(self, 'rtol', 1e-10)
assert_allclose(self.res1.fvalue, self.res2.F, rtol=rtol)
if hasattr(self.res2, 'Fp'):
#only available with ivreg2
assert_allclose(self.res1.f_pvalue, self.res2.Fp, rtol=rtol)
def test_confint(self):
rtol = getattr(self, 'rtol', 1e-10)
ci1 = self.res1.conf_int()
ci2 = self.res2.params_table[:,4:6]
assert_allclose(ci1, ci2, rtol=rtol)
# check critical value
crit1 = np.diff(ci1, 1).ravel() / 2 / self.res1.bse
        crit2 = np.diff(ci2, 1).ravel() / 2 / self.res2.bse
assert_allclose(crit1, crit2, rtol=12)
def test_ttest(self):
res1 = self.res1
res2 = self.res2
rtol = getattr(self, 'rtol', 1e-10)
        rtolh = getattr(self, 'rtolh', 1e-12)
mat = np.eye(len(res1.params))
tt = res1.t_test(mat, cov_p=self.cov_robust)
# has 'effect', 'pvalue', 'sd', 'tvalue'
# TODO confint missing
assert_allclose(tt.effect, res2.params, rtol=rtolh)
assert_allclose(tt.sd, res2.bse, rtol=rtol)
assert_allclose(tt.tvalue, res2.tvalues, rtol=rtolh)
assert_allclose(tt.pvalue, res2.pvalues, rtol=5 * rtol)
ci1 = tt.conf_int()
ci2 = self.res2.params_table[:,4:6]
assert_allclose(ci1, ci2, rtol=rtol)
def test_scale(self):
res1 = self.res1
res2 = self.res2
rtol = 1e-5
# Note we always use df_resid for scale
# Stata uses nobs or df_resid for rmse, not always available in Stata
#assert_allclose(res1.scale, res2.rmse**2 * res2.N / (res2.N - res2.df_m - 1), rtol=rtol)
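        # For reference: res1.scale in statsmodels OLS/WLS is ssr / df_resid; the
        # branches below reconstruct the same quantity from the Stata output
        # (rss / (N - df_m - 1)) or fall back to rmse**2 where rss is unavailable.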
skip = False
if hasattr(res2, 'rss'):
scale = res2.rss / (res2.N - res2.df_m - 1)
elif hasattr(res2, 'rmse'):
scale = res2.rmse**2
else:
skip = True
if isinstance(res1.model, WLS):
skip = True
# Stata uses different scaling and using unweighted resid for rmse
if not skip:
assert_allclose(res1.scale, scale, rtol=rtol)
if not res2.vcetype == 'Newey-West':
# no rsquared in Stata
r2 = res2.r2 if hasattr(res2, 'r2') else res2.r2c
assert_allclose(res1.rsquared, r2, rtol=rtol, err_msg=str(skip))
# consistency checks, not against Stata
df_resid = res1.nobs - res1.df_model - 1
assert_equal(res1.df_resid, df_resid)
# variance of resid_pearson is 1, with ddof, and loc=0
psum = (res1.resid_pearson**2).sum()
assert_allclose(psum, df_resid, rtol=1e-13)
def test_smoke(self):
self.res1.summary()
class TestOLSRobust2SmallNew(TestOLSRobust1, CheckOLSRobustNewMixin):
# compare with ivreg robust small
def setup(self):
res_ols = self.res1.get_robustcov_results('HC1', use_t=True)
self.res3 = self.res1
self.res1 = res_ols
self.bse_robust = res_ols.bse
self.cov_robust = res_ols.cov_params()
self.bse_robust2 = res_ols.HC1_se
self.cov_robust2 = res_ols.cov_HC1
self.small = True
self.res2 = res.results_ivhc0_small
def test_compare(self):
#check that we get a warning using the nested compare methods
res1 = self.res1
endog = res1.model.endog
exog = res1.model.exog[:, [0, 2]] # drop one variable
res_ols2 = OLS(endog, exog).fit()
# results from Stata
r_pval = .0307306938402991
r_chi2 = 4.667944083588736
r_df = 1
assert_warns(InvalidTestWarning, res1.compare_lr_test, res_ols2)
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
chi2, pval, df = res1.compare_lr_test(res_ols2)
assert_allclose(chi2, r_chi2, rtol=1e-11)
assert_allclose(pval, r_pval, rtol=1e-11)
assert_equal(df, r_df)
assert_warns(InvalidTestWarning, res1.compare_f_test, res_ols2)
#fva, pval, df = res1.compare_f_test(res_ols2)
class TestOLSRobustHACSmallNew(TestOLSRobust1, CheckOLSRobustNewMixin):
# compare with ivreg robust small
def setup(self):
res_ols = self.res1.get_robustcov_results('HAC', maxlags=4,
use_correction=True, use_t=True)
self.res3 = self.res1
self.res1 = res_ols
self.bse_robust = res_ols.bse
self.cov_robust = res_ols.cov_params()
cov1 = sw.cov_hac_simple(res_ols, nlags=4, use_correction=True)
se1 = sw.se_cov(cov1)
self.bse_robust2 = se1
self.cov_robust2 = cov1
self.small = True
self.res2 = res.results_ivhac4_small
class TestOLSRobust2LargeNew(TestOLSRobust1, CheckOLSRobustNewMixin):
# compare with ivreg robust small
def setup(self):
res_ols = self.res1.get_robustcov_results('HC0')
res_ols.use_t = False
self.res3 = self.res1
self.res1 = res_ols
self.bse_robust = res_ols.bse
self.cov_robust = res_ols.cov_params()
self.bse_robust2 = res_ols.HC0_se
self.cov_robust2 = res_ols.cov_HC0
self.small = False
self.res2 = res.results_ivhc0_large
# TODO: skipping next two for now, not refactored yet for `large`
def test_fvalue(self):
pass
def test_confint(self):
pass
#######################################################
# cluster robust standard errors
#######################################################
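# A sketch of the cluster-robust API exercised below (`groups` is an integer
# array mapping each observation to its cluster):
#     res = OLS(endog, exog).fit(cov_type='cluster',
#                                cov_kwds=dict(groups=groups,
#                                              use_correction=True, use_t=True))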
class CheckOLSRobustCluster(CheckOLSRobust):
# compare with regress robust
@classmethod
def setup_class(cls):
#import pandas as pa
from statsmodels.datasets import grunfeld
dtapa = grunfeld.data.load_pandas()
#Stata example/data seems to miss last firm
dtapa_endog = dtapa.endog[:200]
dtapa_exog = dtapa.exog[:200]
exog = add_constant(dtapa_exog[['value', 'capital']], prepend=False)
#asserts don't work for pandas
cls.res1 = OLS(dtapa_endog, exog).fit()
firm_names, firm_id = np.unique(np.asarray(dtapa_exog[['firm']], 'S20'),
return_inverse=True)
cls.groups = firm_id
#time indicator in range(max Ti)
time = np.asarray(dtapa_exog[['year']])
time -= time.min()
cls.time = np.squeeze(time).astype(int)
# nw_panel function requires interval bounds
cls.tidx = [(i*20, 20*(i+1)) for i in range(10)]
class TestOLSRobustCluster2(CheckOLSRobustCluster, CheckOLSRobustNewMixin):
# compare with `reg cluster`
def setup(self):
res_ols = self.res1.get_robustcov_results('cluster',
groups=self.groups,
use_correction=True,
use_t=True)
self.res3 = self.res1
self.res1 = res_ols
self.bse_robust = res_ols.bse
self.cov_robust = res_ols.cov_params()
cov1 = sw.cov_cluster(self.res1, self.groups, use_correction=True)
se1 = sw.se_cov(cov1)
self.bse_robust2 = se1
self.cov_robust2 = cov1
self.small = True
self.res2 = res2.results_cluster
self.rtol = 1e-6
self.rtolh = 1e-10
class TestOLSRobustCluster2Fit(CheckOLSRobustCluster, CheckOLSRobustNewMixin):
    # copy-paste of the above, but uses the fit method
# compare with `reg cluster`
def setup(self):
res_ols = self.res1.model.fit(cov_type='cluster',
cov_kwds=dict(
groups=self.groups,
use_correction=True,
use_t=True))
self.res3 = self.res1
self.res1 = res_ols
self.bse_robust = res_ols.bse
self.cov_robust = res_ols.cov_params()
cov1 = sw.cov_cluster(self.res1, self.groups, use_correction=True)
se1 = sw.se_cov(cov1)
self.bse_robust2 = se1
self.cov_robust2 = cov1
self.small = True
self.res2 = res2.results_cluster
self.rtol = 1e-6
self.rtolh = 1e-10
def test_basic_inference(self):
res1 = self.res1
res2 = self.res2
rtol = 1e-7
assert_allclose(res1.params, res2.params, rtol=1e-8)
assert_allclose(res1.bse, res2.bse, rtol=rtol)
assert_allclose(res1.pvalues, res2.pvalues, rtol=rtol, atol=1e-20)
ci = res2.params_table[:, 4:6]
assert_allclose(res1.conf_int(), ci, rtol=5e-7, atol=1e-20)
class TestOLSRobustCluster2Large(CheckOLSRobustCluster, CheckOLSRobustNewMixin):
# compare with `reg cluster`
def setup(self):
res_ols = self.res1.get_robustcov_results('cluster',
groups=self.groups,
use_correction=False,
use_t=False,
df_correction=True)
self.res3 = self.res1
self.res1 = res_ols
self.bse_robust = res_ols.bse
self.cov_robust = res_ols.cov_params()
cov1 = sw.cov_cluster(self.res1, self.groups, use_correction=False)
se1 = sw.se_cov(cov1)
self.bse_robust2 = se1
self.cov_robust2 = cov1
self.small = False
self.res2 = res2.results_cluster_large
self.skip_f = True
self.rtol = 1e-6
self.rtolh = 1e-10
# skipping see https://github.com/statsmodels/statsmodels/pull/1189#issuecomment-29141741
def test_f_value(self):
pass
class TestOLSRobustCluster2LargeFit(CheckOLSRobustCluster, CheckOLSRobustNewMixin):
# compare with `reg cluster`
def setup(self):
model = OLS(self.res1.model.endog, self.res1.model.exog)
#res_ols = self.res1.model.fit(cov_type='cluster',
res_ols = model.fit(cov_type='cluster',
cov_kwds=dict(groups=self.groups,
use_correction=False,
use_t=False,
df_correction=True))
self.res3 = self.res1
self.res1 = res_ols
self.bse_robust = res_ols.bse
self.cov_robust = res_ols.cov_params()
cov1 = sw.cov_cluster(self.res1, self.groups, use_correction=False)
se1 = sw.se_cov(cov1)
self.bse_robust2 = se1
self.cov_robust2 = cov1
self.small = False
self.res2 = res2.results_cluster_large
self.skip_f = True
self.rtol = 1e-6
self.rtolh = 1e-10
# skipping see https://github.com/statsmodels/statsmodels/pull/1189#issuecomment-29141741
def t_est_fvalue(self):
pass
class TestOLSRobustClusterGS(CheckOLSRobustCluster, CheckOLSRobustNewMixin):
# compare with `reg cluster`
def setup(self):
res_ols = self.res1.get_robustcov_results('nw-groupsum',
time=self.time,
maxlags=4,
use_correction=False,
use_t=True)
self.res3 = self.res1
self.res1 = res_ols
self.bse_robust = res_ols.bse
self.cov_robust = res_ols.cov_params()
cov1 = sw.cov_nw_groupsum(self.res1, 4, self.time, use_correction=False)
se1 = sw.se_cov(cov1)
self.bse_robust2 = se1
self.cov_robust2 = cov1
self.small = True
self.res2 = res2.results_nw_groupsum4
self.skip_f = True
self.rtol = 1e-6
self.rtolh = 1e-10
class TestOLSRobustClusterGSFit(CheckOLSRobustCluster, CheckOLSRobustNewMixin):
# compare with `reg cluster`
def setup(self):
res_ols = self.res1.model.fit(cov_type='nw-groupsum',
cov_kwds=dict(time=self.time,
maxlags=4,
use_correction=False,
use_t=True))
self.res3 = self.res1
self.res1 = res_ols
self.bse_robust = res_ols.bse
self.cov_robust = res_ols.cov_params()
cov1 = sw.cov_nw_groupsum(self.res1, 4, self.time, use_correction=False)
se1 = sw.se_cov(cov1)
self.bse_robust2 = se1
self.cov_robust2 = cov1
self.small = True
self.res2 = res2.results_nw_groupsum4
self.skip_f = True
self.rtol = 1e-6
self.rtolh = 1e-10
class TestOLSRobustClusterNWP(CheckOLSRobustCluster, CheckOLSRobustNewMixin):
# compare with `reg cluster`
def setup(self):
res_ols = self.res1.get_robustcov_results('nw-panel',
time=self.time,
maxlags=4,
use_correction='hac',
use_t=True,
df_correction=False)
self.res3 = self.res1
self.res1 = res_ols
self.bse_robust = res_ols.bse
self.cov_robust = res_ols.cov_params()
cov1 = sw.cov_nw_panel(self.res1, 4, self.tidx)
se1 = sw.se_cov(cov1)
self.bse_robust2 = se1
self.cov_robust2 = cov1
self.small = True
self.res2 = res2.results_nw_panel4
self.skip_f = True
self.rtol = 1e-6
self.rtolh = 1e-10
# TODO: low precision/agreement
class TestOLSRobustCluster2G(CheckOLSRobustCluster, CheckOLSRobustNewMixin):
# compare with `reg cluster`
def setup(self):
res_ols = self.res1.get_robustcov_results('cluster',
groups=(self.groups, self.time),
use_correction=True,
use_t=True)
self.res3 = self.res1
self.res1 = res_ols
self.bse_robust = res_ols.bse
self.cov_robust = res_ols.cov_params()
cov1 = sw.cov_cluster_2groups(self.res1, self.groups, group2=self.time,
use_correction=True)[0]
se1 = sw.se_cov(cov1)
self.bse_robust2 = se1
self.cov_robust2 = cov1
self.small = True
self.res2 = res2.results_cluster_2groups_small
self.rtol = 0.35 # only f_pvalue and confint for constant differ >rtol=0.05
self.rtolh = 1e-10
class TestOLSRobustCluster2GLarge(CheckOLSRobustCluster, CheckOLSRobustNewMixin):
# compare with `reg cluster`
def setup(self):
res_ols = self.res1.get_robustcov_results('cluster',
groups=(self.groups, self.time),
use_correction=False, #True,
use_t=False)
self.res3 = self.res1
self.res1 = res_ols
self.bse_robust = res_ols.bse
self.cov_robust = res_ols.cov_params()
cov1 = sw.cov_cluster_2groups(self.res1, self.groups, group2=self.time,
use_correction=False)[0]
se1 = sw.se_cov(cov1)
self.bse_robust2 = se1
self.cov_robust2 = cov1
self.small = False
self.res2 = res2.results_cluster_2groups_large
self.skip_f = True
self.rtol = 1e-7
self.rtolh = 1e-10
######################################
# WLS
######################################
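# WLS results follow the same pattern; a sketch with illustrative weights
# (`some_positive_series` is a placeholder, not a name used in these tests):
#     res = WLS(endog, exog, weights=1.0 / some_positive_series).fit(
#         cov_type='HC1', use_t=True)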
class CheckWLSRobustCluster(CheckOLSRobust):
# compare with regress robust
@classmethod
def setup_class(cls):
#import pandas as pa
from statsmodels.datasets import grunfeld
dtapa = grunfeld.data.load_pandas()
#Stata example/data seems to miss last firm
dtapa_endog = dtapa.endog[:200]
dtapa_exog = dtapa.exog[:200]
exog = add_constant(dtapa_exog[['value', 'capital']], prepend=False)
#asserts don't work for pandas
cls.res1 = WLS(dtapa_endog, exog, weights=1/dtapa_exog['value']).fit()
firm_names, firm_id = np.unique(np.asarray(dtapa_exog[['firm']], 'S20'),
return_inverse=True)
cls.groups = firm_id
#time indicator in range(max Ti)
time = np.asarray(dtapa_exog[['year']])
time -= time.min()
cls.time = np.squeeze(time).astype(int)
# nw_panel function requires interval bounds
cls.tidx = [(i*20, 20*(i+1)) for i in range(10)]
# not available yet for WLS
class T_estWLSRobustCluster2(CheckWLSRobustCluster, CheckOLSRobustNewMixin):
# compare with `reg cluster`
def setup(self):
res_ols = self.res1.get_robustcov_results('cluster',
groups=self.groups,
use_correction=True,
use_t=True)
self.res3 = self.res1
self.res1 = res_ols
self.bse_robust = res_ols.bse
self.cov_robust = res_ols.cov_params()
cov1 = sw.cov_cluster(self.res1, self.groups, use_correction=True)
se1 = sw.se_cov(cov1)
self.bse_robust2 = se1
self.cov_robust2 = cov1
self.small = True
self.res2 = res2.results_cluster_wls_small
self.rtol = 1e-6
self.rtolh = 1e-10
# not available yet for WLS
class T_estWLSRobustCluster2Large(CheckWLSRobustCluster, CheckOLSRobustNewMixin):
# compare with `reg cluster`
def setup(self):
res_ols = self.res1.get_robustcov_results('cluster',
groups=self.groups,
use_correction=False,
use_t=False,
df_correction=True)
self.res3 = self.res1
self.res1 = res_ols
self.bse_robust = res_ols.bse
self.cov_robust = res_ols.cov_params()
cov1 = sw.cov_cluster(self.res1, self.groups, use_correction=False)
se1 = sw.se_cov(cov1)
self.bse_robust2 = se1
self.cov_robust2 = cov1
self.small = False
self.res2 = res2.results_cluster_wls_large
self.skip_f = True
self.rtol = 1e-6
self.rtolh = 1e-10
class TestWLSRobustSmall(CheckWLSRobustCluster, CheckOLSRobustNewMixin):
# compare with `reg cluster`
def setup(self):
res_ols = self.res1.get_robustcov_results('HC1',
use_t=True)
self.res3 = self.res1
self.res1 = res_ols
self.bse_robust = res_ols.bse
self.cov_robust = res_ols.cov_params()
#TODO: check standalone function
#cov1 = sw.cov_cluster(self.res1, self.groups, use_correction=False)
cov1 = res_ols.cov_HC1
se1 = sw.se_cov(cov1)
self.bse_robust2 = se1
self.cov_robust2 = cov1
self.small = True
self.res2 = res2.results_hc1_wls_small
self.skip_f = True
self.rtol = 1e-6
self.rtolh = 1e-10
class TestWLSOLSRobustSmall(object):
@classmethod
def setup_class(cls):
#import pandas as pa
from statsmodels.datasets import grunfeld
dtapa = grunfeld.data.load_pandas()
#Stata example/data seems to miss last firm
dtapa_endog = dtapa.endog[:200]
dtapa_exog = dtapa.exog[:200]
exog = add_constant(dtapa_exog[['value', 'capital']], prepend=False)
#asserts don't work for pandas
cls.res_wls = WLS(dtapa_endog, exog, weights=1/dtapa_exog['value']).fit()
w_sqrt = 1 / np.sqrt(np.asarray(dtapa_exog['value']))
cls.res_ols = OLS(dtapa_endog * w_sqrt,
np.asarray(exog) * w_sqrt[:, None]).fit() # hasconst=True ?
firm_names, firm_id = np.unique(np.asarray(dtapa_exog[['firm']], 'S20'),
return_inverse=True)
cls.groups = firm_id
#time indicator in range(max Ti)
time = np.asarray(dtapa_exog[['year']])
time -= time.min()
cls.time = np.squeeze(time).astype(int)
# nw_panel function requires interval bounds
cls.tidx = [(i*20, 20*(i+1)) for i in range(10)]
def test_all(self):
all_cov = [('HC0', dict(use_t=True)),
('HC1', dict(use_t=True)),
('HC2', dict(use_t=True)),
('HC3', dict(use_t=True))]
# fvalue are not the same
#res_ols = self.res_ols
#res_wls = self.res_wls
#assert_allclose(res_ols.fvalue, res_wls.fvalue, rtol=1e-13)
for cov_type, kwds in all_cov:
res1 = self.res_ols.get_robustcov_results(cov_type, **kwds)
res2 = self.res_wls.get_robustcov_results(cov_type, **kwds)
assert_allclose(res1.params, res2.params, rtol=1e-13)
assert_allclose(res1.cov_params(), res2.cov_params(), rtol=1e-13)
assert_allclose(res1.bse, res2.bse, rtol=1e-13)
assert_allclose(res1.pvalues, res2.pvalues, rtol=1e-13)
#Note: Fvalue doesn't match up, difference in calculation ?
# The only difference should be in the constant detection
#assert_allclose(res1.fvalue, res2.fvalue, rtol=1e-13)
#assert_allclose(res1.f_value, res2.f_pvalue, rtol=1e-13)
mat = np.eye(len(res1.params))
ft1 = res1.f_test(mat)
ft2 = res2.f_test(mat)
assert_allclose(ft1.fvalue, ft2.fvalue, rtol=1e-13)
assert_allclose(ft1.pvalue, ft2.pvalue, rtol=1e-12)
| bsd-3-clause |