repo_name | path | copies | size | content | license
---|---|---|---|---|---|
rbharath/deepchem | examples/binding_pockets/binding_pocket_datasets.py | 9 | 6311 | """
PDBBind binding pocket dataset loader.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import numpy as np
import pandas as pd
import shutil
import time
import re
from rdkit import Chem
import deepchem as dc
def compute_binding_pocket_features(pocket_featurizer, ligand_featurizer,
pdb_subdir, pdb_code, threshold=.3):
"""Compute features for a given complex"""
protein_file = os.path.join(pdb_subdir, "%s_protein.pdb" % pdb_code)
ligand_file = os.path.join(pdb_subdir, "%s_ligand.sdf" % pdb_code)
ligand_mol2 = os.path.join(pdb_subdir, "%s_ligand.mol2" % pdb_code)
# Extract active site
active_site_box, active_site_atoms, active_site_coords = (
dc.dock.binding_pocket.extract_active_site(
protein_file, ligand_file))
# Featurize ligand
mol = Chem.MolFromMol2File(str(ligand_mol2), removeHs=False)
if mol is None:
return None, None
# Default for CircularFingerprint
n_ligand_features = 1024
ligand_features = ligand_featurizer.featurize([mol])
# Featurize pocket
finder = dc.dock.ConvexHullPocketFinder()
pockets, pocket_atoms, pocket_coords = finder.find_pockets(protein_file, ligand_file)
n_pockets = len(pockets)
n_pocket_features = dc.feat.BindingPocketFeaturizer.n_features
features = np.zeros((n_pockets, n_pocket_features+n_ligand_features))
pocket_features = pocket_featurizer.featurize(
protein_file, pockets, pocket_atoms, pocket_coords)
# Note broadcast operation
features[:, :n_pocket_features] = pocket_features
features[:, n_pocket_features:] = ligand_features
# Compute labels for pockets
labels = np.zeros(n_pockets)
pocket_atoms[active_site_box] = active_site_atoms
for ind, pocket in enumerate(pockets):
overlap = dc.dock.binding_pocket.compute_overlap(
pocket_atoms, active_site_box, pocket)
if overlap > threshold:
labels[ind] = 1
else:
labels[ind] = 0
return features, labels
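# Illustrative sketch (not part of the original loader): how the pocket and
# ligand feature blocks are packed side by side in compute_binding_pocket_features
# above. The shapes are toy values; the single ligand row is broadcast across
# every pocket row.
def _example_feature_packing(n_pockets=3, n_pocket_features=6, n_ligand_features=8):
    pocket_features = np.ones((n_pockets, n_pocket_features))
    ligand_features = np.ones((1, n_ligand_features))
    features = np.zeros((n_pockets, n_pocket_features + n_ligand_features))
    features[:, :n_pocket_features] = pocket_features
    # Broadcast: one (1, n_ligand_features) row fills all n_pockets rows.
    features[:, n_pocket_features:] = ligand_features
    return features  # shape (n_pockets, n_pocket_features + n_ligand_features)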
def load_pdbbind_labels(labels_file):
"""Loads pdbbind labels as dataframe"""
# Some complexes have labels but no PDB files. Filter these manually
missing_pdbs = ["1d2v", "1jou", "1s8j", "1cam", "4mlt", "4o7d"]
contents = []
with open(labels_file) as f:
for line in f:
if line.startswith("#"):
continue
else:
# Some of the ligand-names are of form (FMN ox). Use regex
# to merge into form (FMN-ox)
p = re.compile(r'\(([^\)\s]*) ([^\)\s]*)\)')
line = p.sub('(\\1-\\2)', line)
elts = line.split()
# Filter if missing PDB files
if elts[0] in missing_pdbs:
continue
contents.append(elts)
contents_df = pd.DataFrame(
contents,
columns=("PDB code", "resolution", "release year", "-logKd/Ki", "Kd/Ki",
"ignore-this-field", "reference", "ligand name"))
return contents_df
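# Illustrative sketch (not part of the original loader): the ligand-name merge
# performed inside load_pdbbind_labels. The index line below is made up purely
# to show the regex turning "(FMN ox)" into "(FMN-ox)".
def _example_merge_ligand_name():
    p = re.compile(r'\(([^\)\s]*) ([^\)\s]*)\)')
    line = "1xyz  2.00  2007  5.92  Kd=1.2uM  // 1xyz.pdf (FMN ox)"
    return p.sub('(\\1-\\2)', line)  # "... (FMN-ox)"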
def featurize_pdbbind_pockets(data_dir=None, subset="core"):
"""Featurizes pdbbind according to provided featurization"""
tasks = ["active-site"]
current_dir = os.path.dirname(os.path.realpath(__file__))
data_dir = os.path.join(current_dir, "%s_pockets" % (subset))
if os.path.exists(data_dir):
return dc.data.DiskDataset(data_dir), tasks
pdbbind_dir = os.path.join(current_dir, "../pdbbind/v2015")
# Load PDBBind dataset
if subset == "core":
labels_file = os.path.join(pdbbind_dir, "INDEX_core_data.2013")
elif subset == "refined":
labels_file = os.path.join(pdbbind_dir, "INDEX_refined_data.2015")
elif subset == "full":
labels_file = os.path.join(pdbbind_dir, "INDEX_general_PL_data.2015")
else:
raise ValueError("Only core, refined, and full subsets supported.")
print("About to load contents.")
if not os.path.exists(labels_file):
raise ValueError("Run ../pdbbind/get_pdbbind.sh to download dataset.")
contents_df = load_pdbbind_labels(labels_file)
ids = contents_df["PDB code"].values
y = np.array([float(val) for val in contents_df["-logKd/Ki"].values])
# Define featurizers
pocket_featurizer = dc.feat.BindingPocketFeaturizer()
ligand_featurizer = dc.feat.CircularFingerprint(size=1024)
# Featurize Dataset
all_features = []
all_labels = []
missing_pdbs = []
all_ids = []
time1 = time.time()
for ind, pdb_code in enumerate(ids):
print("Processing complex %d, %s" % (ind, str(pdb_code)))
pdb_subdir = os.path.join(pdbbind_dir, pdb_code)
if not os.path.exists(pdb_subdir):
print("%s is missing!" % pdb_subdir)
missing_pdbs.append(pdb_subdir)
continue
features, labels = compute_binding_pocket_features(
pocket_featurizer, ligand_featurizer, pdb_subdir, pdb_code)
if features is None:
print("Featurization failed!")
continue
all_features.append(features)
all_labels.append(labels)
pocket_ids = np.array(["%s%d" % (pdb_code, i) for i in range(len(labels))])
all_ids.append(pocket_ids)
time2 = time.time()
print("TIMING: PDBBind Pocket Featurization took %0.3f s" % (time2-time1))
X = np.vstack(all_features)
y = np.concatenate(all_labels)
w = np.ones_like(y)
ids = np.concatenate(all_ids)
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids, data_dir=data_dir)
return dataset, tasks
def load_pdbbind_pockets(split="index", subset="core"):
"""Load PDBBind datasets. Does not do train/test split"""
dataset, tasks = featurize_pdbbind_pockets(subset=subset)
splitters = {'index': dc.splits.IndexSplitter(),
'random': dc.splits.RandomSplitter()}
splitter = splitters[split]
########################################################### DEBUG
print("dataset.X.shape")
print(dataset.X.shape)
print("dataset.y.shape")
print(dataset.y.shape)
print("dataset.w.shape")
print(dataset.w.shape)
print("dataset.ids.shape")
print(dataset.ids.shape)
########################################################### DEBUG
train, valid, test = splitter.train_valid_test_split(dataset)
transformers = []
for transformer in transformers:
train = transformer.transform(train)
for transformer in transformers:
valid = transformer.transform(valid)
for transformer in transformers:
test = transformer.transform(test)
return tasks, (train, valid, test), transformers
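# Illustrative usage sketch (not part of the original module). It assumes the
# PDBBind data has already been fetched with ../pdbbind/get_pdbbind.sh.
def _example_load_core_pockets():
    tasks, (train, valid, test), transformers = load_pdbbind_pockets(
        split="random", subset="core")
    print("tasks: %s" % tasks)
    print("train X shape: %s" % str(train.X.shape))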
| mit |
ysekky/GPy | GPy/plotting/abstract_plotting_library.py | 6 | 13998 | #===============================================================================
# Copyright (c) 2015, Max Zwiessele
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of GPy.plotting.abstract_plotting_library nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
#===============================================================================
# Make sure that the necessary files and functions are
# defined in the plotting library:
class AbstractPlottingLibrary(object):
def __init__(self):
"""
Set the defaults dictionary in the _defaults variable:
E.g. for matplotlib we define a file defaults.py and
assign its dictionary here:
from . import defaults
_defaults = defaults.__dict__
"""
self._defaults = {}
self.__defaults = None
@property
def defaults(self):
#===============================================================================
if self.__defaults is None:
from collections import defaultdict
class defaultdict(defaultdict):
def __getattr__(self, *args, **kwargs):
return defaultdict.__getitem__(self, *args, **kwargs)
self.__defaults = defaultdict(dict, self._defaults)
return self.__defaults
#===============================================================================
def figure(self, nrows, ncols, **kwargs):
"""
Get a new figure with nrows and ncols subplots.
Does not initialize the canvases yet.
There are individual kwargs for the individual plotting libraries to use.
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def new_canvas(self, figure=None, col=1, row=1, projection='2d', xlabel=None, ylabel=None, zlabel=None, title=None, xlim=None, ylim=None, zlim=None, **kwargs):
"""
Return a canvas and an updated kwargs dict for your plotting library.
if figure is not None, create a canvas in the figure
at subplot position (col, row).
This method does two things, it creates an empty canvas
and updates the kwargs (deletes the unnecessary kwargs)
for further usage in normal plotting.
the kwargs are plotting library specific kwargs!
:param {'2d'|'3d'} projection: The projection to use.
E.g. in matplotlib this means it deletes references to ax, as
plotting is done on the axis itself and is not a kwarg.
:param xlabel: the label to put on the xaxis
:param ylabel: the label to put on the yaxis
:param zlabel: the label to put on the zaxis (if plotting in 3d)
:param title: the title of the plot
:param legend: if True, plot a legend; if an int, use that many rows in the legend
:param (float, float) xlim: the limits for the xaxis
:param (float, float) ylim: the limits for the yaxis
:param (float, float) zlim: the limits for the zaxis (if plotting in 3d)
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def add_to_canvas(self, canvas, plots, legend=True, title=None, **kwargs):
"""
Add plots to the canvas; plots is either a dictionary with the plots as
its items or a list of plots.
The kwargs are plotting library specific kwargs!
E.g. in matplotlib this does not have to do anything to add stuff, but
we set the legend and title.
!This function returns the updated canvas!
:param title: the title of the plot
:param legend: whether to plot a legend or not
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def show_canvas(self, canvas, **kwargs):
"""
Draw/Plot the canvas given.
"""
raise NotImplementedError
def plot(self, cavas, X, Y, Z=None, color=None, label=None, **kwargs):
"""
Make a line plot from for Y on X (Y = f(X)) on the canvas.
If Z is not None, plot in 3d!
the kwargs are plotting library specific kwargs!
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def plot_axis_lines(self, ax, X, color=None, label=None, **kwargs):
"""
Plot lines at the bottom (lower boundary of yaxis) of the axis at input location X.
If X is two dimensional, plot in 3d and connect the axis lines to the bottom of the Z axis.
the kwargs are plotting library specific kwargs!
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def surface(self, canvas, X, Y, Z, color=None, label=None, **kwargs):
"""
Plot a surface for 3d plotting for the inputs (X, Y, Z).
the kwargs are plotting library specific kwargs!
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def scatter(self, canvas, X, Y, Z=None, color=None, vmin=None, vmax=None, label=None, **kwargs):
"""
Make a scatter plot between X and Y on the canvas given.
the kwargs are plotting library specific kwargs!
:param canvas: the plotting library's specific canvas to plot on.
:param array-like X: the inputs to plot.
:param array-like Y: the outputs to plot.
:param array-like Z: the Z level to plot (if plotting 3d).
:param array-like color: the color level for each point.
:param float vmin: minimum colorscale
:param float vmax: maximum colorscale
:param kwargs: the specific kwargs for your plotting library
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def barplot(self, canvas, x, height, width=0.8, bottom=0, color=None, label=None, **kwargs):
"""
Plot a vertical bar plot centered at x with the given height
and width of the bars. The y level is at bottom.
the kwargs are plotting library specific kwargs!
:param array-like x: the center points of the bars
:param array-like height: the height of the bars
:param array-like width: the width of the bars
:param array-like bottom: the start y level of the bars
:param kwargs: kwargs for the specific library you are using.
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def xerrorbar(self, canvas, X, Y, error, color=None, label=None, **kwargs):
"""
Make an errorbar along the xaxis for points at (X,Y) on the canvas.
if error is two dimensional, the lower error is error[:,0] and
the upper error is error[:,1]
the kwargs are plotting library specific kwargs!
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def yerrorbar(self, canvas, X, Y, error, color=None, label=None, **kwargs):
"""
Make errorbars along the yaxis on the canvas given.
if error is two dimensional, the lower error is error[0, :] and
the upper error is error[1, :]
the kwargs are plotting library specific kwargs!
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def imshow(self, canvas, X, extent=None, label=None, vmin=None, vmax=None, **kwargs):
"""
Show the image stored in X on the canvas.
The origin of the image show is (0,0), such that X[0,0] gets plotted at [0,0] of the image!
the kwargs are plotting library specific kwargs!
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def imshow_interact(self, canvas, plot_function, extent=None, label=None, vmin=None, vmax=None, **kwargs):
"""
This function is optional!
Create an imshow controller to stream
the image returned by the plot_function. There is an imshow controller written for
matplotlib, which updates the imshow on changes in axis.
The origin of the image show is (0,0), such that X[0,0] gets plotted at [0,0] of the image!
the kwargs are plotting library specific kwargs!
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def annotation_heatmap(self, canvas, X, annotation, extent, label=None, **kwargs):
"""
Plot an annotation heatmap. That is like an imshow, but
put the text of the annotation inside the cells of the heatmap (centered).
:param canvas: the canvas to plot on
:param array-like annotation: the annotation labels for the heatmap
:param [horizontal_min,horizontal_max,vertical_min,vertical_max] extent: the extent of where to place the heatmap
:param str label: the label for the heatmap
:return: a list of both the heatmap and annotation plots [heatmap, annotation], or the interactive update object (alone)
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def annotation_heatmap_interact(self, canvas, plot_function, extent, label=None, resolution=15, **kwargs):
"""
if plot_function is not None, return an interactive updated
heatmap, which updates on axis events, so that one can zoom in
and out and the heatmap gets updated. See the matplotlib implementation
in matplot_dep.controllers.
the plot_function returns a pair (X, annotation) to plot, when called with
a new input X (which would be the grid, which is visible on the plot
right now)
:param canvas: the canvas to plot on
:param array-like annotation: the annotation labels for the heatmap
:param [horizontal_min,horizontal_max,vertical_min,vertical_max] extent: the extent of where to place the heatmap
:param str label: the label for the heatmap
:return: a list of both the heatmap and annotation plots [heatmap, annotation], or the interactive update object (alone)
:param plot_function: the function, which generates new data for given input locations X
:param int resolution: the resolution of the interactive plot redraw - this is only needed when giving a plot_function
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def contour(self, canvas, X, Y, C, Z=None, color=None, label=None, **kwargs):
"""
Make a contour plot at (X, Y) with heights/colors stored in C on the canvas.
if Z is not None: make 3d contour plot at (X, Y, Z) with heights/colors stored in C on the canvas.
the kwargs are plotting library specific kwargs!
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def fill_between(self, canvas, X, lower, upper, color=None, label=None, **kwargs):
"""
Fill along the xaxis between lower and upper.
the kwargs are plotting library specific kwargs!
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def fill_gradient(self, canvas, X, percentiles, color=None, label=None, **kwargs):
"""
Plot a gradient (in alpha values) for the given percentiles.
the kwargs are plotting library specific kwargs!
"""
print("fill_gradient not implemented in this backend.")
| bsd-3-clause |
Vvucinic/Wander | venv_2_7/lib/python2.7/site-packages/IPython/core/completer.py | 3 | 45462 | # encoding: utf-8
"""Word completion for IPython.
This module is a fork of the rlcompleter module in the Python standard
library. The original enhancements made to rlcompleter have been sent
upstream and were accepted as of Python 2.3, but we need a lot more
functionality specific to IPython, so this module will continue to live as an
IPython-specific utility.
Original rlcompleter documentation:
This requires the latest extension to the readline module (the completer
completes keywords, built-ins and globals in __main__; when completing
NAME.NAME..., it evaluates (!) the expression up to the last dot and
completes its attributes.
It's very cool to do "import string" type "string.", hit the
completion key (twice), and see the list of names defined by the
string module!
Tip: to use the tab key as the completion key, call
readline.parse_and_bind("tab: complete")
Notes:
- Exceptions raised by the completer function are *ignored* (and
generally cause the completion to fail). This is a feature -- since
readline sets the tty device in raw (or cbreak) mode, printing a
traceback wouldn't work well without some complicated hoopla to save,
reset and restore the tty state.
- The evaluation of the NAME.NAME... form may cause arbitrary
application defined code to be executed if an object with a
``__getattr__`` hook is found. Since it is the responsibility of the
application (or the user) to enable this feature, I consider this an
acceptable risk. More complicated expressions (e.g. function calls or
indexing operations) are *not* evaluated.
- GNU readline is also used by the built-in functions input() and
raw_input(), and thus these also benefit/suffer from the completer
features. Clearly an interactive application can benefit by
specifying its own completer function and using raw_input() for all
its input.
- When the original stdin is not a tty device, GNU readline is never
used, and this module (and the readline module) are silently inactive.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
#
# Some of this code originated from rlcompleter in the Python standard library
# Copyright (C) 2001 Python Software Foundation, www.python.org
import __main__
import glob
import inspect
import itertools
import keyword
import os
import re
import sys
import unicodedata
import string
from traitlets.config.configurable import Configurable
from IPython.core.error import TryNext
from IPython.core.inputsplitter import ESC_MAGIC
from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
from IPython.utils import generics
from IPython.utils import io
from IPython.utils.decorators import undoc
from IPython.utils.dir2 import dir2
from IPython.utils.process import arg_split
from IPython.utils.py3compat import builtin_mod, string_types, PY3
from traitlets import CBool, Enum
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# Public API
__all__ = ['Completer','IPCompleter']
if sys.platform == 'win32':
PROTECTABLES = ' '
else:
PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
#-----------------------------------------------------------------------------
# Main functions and classes
#-----------------------------------------------------------------------------
def has_open_quotes(s):
"""Return whether a string has open quotes.
This simply counts whether the number of quote characters of either type in
the string is odd.
Returns
-------
If there is an open quote, the quote character is returned. Else, return
False.
"""
# We check " first, then ', so complex cases with nested quotes will get
# the " to take precedence.
if s.count('"') % 2:
return '"'
elif s.count("'") % 2:
return "'"
else:
return False
def protect_filename(s):
"""Escape a string to protect certain characters."""
return "".join([(ch in PROTECTABLES and '\\' + ch or ch)
for ch in s])
def expand_user(path):
"""Expand '~'-style usernames in strings.
This is similar to :func:`os.path.expanduser`, but it computes and returns
extra information that will be useful if the input was being used in
computing completions, and you wish to return the completions with the
original '~' instead of its expanded value.
Parameters
----------
path : str
String to be expanded. If no ~ is present, the output is the same as the
input.
Returns
-------
newpath : str
Result of ~ expansion in the input path.
tilde_expand : bool
Whether any expansion was performed or not.
tilde_val : str
The value that ~ was replaced with.
"""
# Default values
tilde_expand = False
tilde_val = ''
newpath = path
if path.startswith('~'):
tilde_expand = True
rest = len(path)-1
newpath = os.path.expanduser(path)
if rest:
tilde_val = newpath[:-rest]
else:
tilde_val = newpath
return newpath, tilde_expand, tilde_val
def compress_user(path, tilde_expand, tilde_val):
"""Does the opposite of expand_user, with its outputs.
"""
if tilde_expand:
return path.replace(tilde_val, '~')
else:
return path
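# Illustrative sketch (not part of IPython): round-trip a '~' path. The example
# path and the expansion shown in the comment are made up.
def _example_expand_and_compress_user():
    newpath, tilde_expand, tilde_val = expand_user('~/notebooks/data')
    # e.g. newpath == '/home/alice/notebooks/data', tilde_val == '/home/alice'
    return compress_user(newpath, tilde_expand, tilde_val)  # back to '~/notebooks/data'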
def penalize_magics_key(word):
"""key for sorting that penalizes magic commands in the ordering
Normal words are left alone.
Magic commands have the initial % moved to the end, e.g.
%matplotlib is transformed as follows:
%matplotlib -> matplotlib%
[The choice of the final % is arbitrary.]
Since "matplotlib" < "matplotlib%" as strings,
"timeit" will appear before the magic "%timeit" in the ordering
For consistency, move "%%" to the end, so cell magics appear *after*
line magics with the same name.
A check is performed that there are no other "%" in the string;
if there are, then the string is not a magic command and is left unchanged.
"""
# Move any % signs from start to end of the key
# provided there are no others elsewhere in the string
if word[:2] == "%%":
if not "%" in word[2:]:
return word[2:] + "%%"
if word[:1] == "%":
if not "%" in word[1:]:
return word[1:] + "%"
return word
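# Illustrative sketch (not part of IPython): sorting with the penalizing key
# puts plain names before line magics, and line magics before cell magics.
def _example_penalize_magics_key():
    words = ['%timeit', 'timeit', '%%timeit', 'time']
    return sorted(words, key=penalize_magics_key)
    # -> ['time', 'timeit', '%timeit', '%%timeit']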
@undoc
class Bunch(object): pass
DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
GREEDY_DELIMS = ' =\r\n'
class CompletionSplitter(object):
"""An object to split an input line in a manner similar to readline.
By having our own implementation, we can expose readline-like completion in
a uniform manner to all frontends. This object only needs to be given the
line of text to be split and the cursor position on said line, and it
returns the 'word' to be completed on at the cursor after splitting the
entire line.
What characters are used as splitting delimiters can be controlled by
setting the `delims` attribute (this is a property that internally
automatically builds the necessary regular expression)"""
# Private interface
# A string of delimiter characters. The default value makes sense for
# IPython's most typical usage patterns.
_delims = DELIMS
# The expression (a normal string) to be compiled into a regular expression
# for actual splitting. We store it as an attribute mostly for ease of
# debugging, since this type of code can be so tricky to debug.
_delim_expr = None
# The regular expression that does the actual splitting
_delim_re = None
def __init__(self, delims=None):
delims = CompletionSplitter._delims if delims is None else delims
self.delims = delims
@property
def delims(self):
"""Return the string of delimiter characters."""
return self._delims
@delims.setter
def delims(self, delims):
"""Set the delimiters for line splitting."""
expr = '[' + ''.join('\\'+ c for c in delims) + ']'
self._delim_re = re.compile(expr)
self._delims = delims
self._delim_expr = expr
def split_line(self, line, cursor_pos=None):
"""Split a line of text with a cursor at the given position.
"""
l = line if cursor_pos is None else line[:cursor_pos]
return self._delim_re.split(l)[-1]
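# Illustrative sketch (not part of IPython): only the word under the cursor is
# handed to the matchers; '(', ',' and ' ' are delimiters, '.' is not.
def _example_split_line():
    splitter = CompletionSplitter()
    return splitter.split_line('plot(x, data.mea')  # -> 'data.mea'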
class Completer(Configurable):
greedy = CBool(False, config=True,
help="""Activate greedy completion
This will enable completion on elements of lists, results of function calls, etc.,
but can be unsafe because the code is actually evaluated on TAB.
"""
)
def __init__(self, namespace=None, global_namespace=None, **kwargs):
"""Create a new completer for the command line.
Completer(namespace=ns,global_namespace=ns2) -> completer instance.
If unspecified, the default namespace where completions are performed
is __main__ (technically, __main__.__dict__). Namespaces should be
given as dictionaries.
An optional second namespace can be given. This allows the completer
to handle cases where both the local and global scopes need to be
distinguished.
Completer instances should be used as the completion mechanism of
readline via the set_completer() call:
readline.set_completer(Completer(my_namespace).complete)
"""
# Don't bind to namespace quite yet, but flag whether the user wants a
# specific namespace or to use __main__.__dict__. This will allow us
# to bind to __main__.__dict__ at completion time, not now.
if namespace is None:
self.use_main_ns = 1
else:
self.use_main_ns = 0
self.namespace = namespace
# The global namespace, if given, can be bound directly
if global_namespace is None:
self.global_namespace = {}
else:
self.global_namespace = global_namespace
super(Completer, self).__init__(**kwargs)
def complete(self, text, state):
"""Return the next possible completion for 'text'.
This is called successively with state == 0, 1, 2, ... until it
returns None. The completion should begin with 'text'.
"""
if self.use_main_ns:
self.namespace = __main__.__dict__
if state == 0:
if "." in text:
self.matches = self.attr_matches(text)
else:
self.matches = self.global_matches(text)
try:
return self.matches[state]
except IndexError:
return None
def global_matches(self, text):
"""Compute matches when text is a simple name.
Return a list of all keywords, built-in functions and names currently
defined in self.namespace or self.global_namespace that match.
"""
#print 'Completer->global_matches, txt=%r' % text # dbg
matches = []
match_append = matches.append
n = len(text)
for lst in [keyword.kwlist,
builtin_mod.__dict__.keys(),
self.namespace.keys(),
self.global_namespace.keys()]:
for word in lst:
if word[:n] == text and word != "__builtins__":
match_append(word)
return matches
def attr_matches(self, text):
"""Compute matches when text contains a dot.
Assuming the text is of the form NAME.NAME....[NAME], and is
evaluatable in self.namespace or self.global_namespace, it will be
evaluated and its attributes (as revealed by dir()) are used as
possible completions. (For class instances, class members are
also considered.)
WARNING: this can still invoke arbitrary C code, if an object
with a __getattr__ hook is evaluated.
"""
#io.rprint('Completer->attr_matches, txt=%r' % text) # dbg
# Another option, seems to work great. Catches things like ''.<tab>
m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)
if m:
expr, attr = m.group(1, 3)
elif self.greedy:
m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
if not m2:
return []
expr, attr = m2.group(1,2)
else:
return []
try:
obj = eval(expr, self.namespace)
except:
try:
obj = eval(expr, self.global_namespace)
except:
return []
if self.limit_to__all__ and hasattr(obj, '__all__'):
words = get__all__entries(obj)
else:
words = dir2(obj)
try:
words = generics.complete_object(obj, words)
except TryNext:
pass
except Exception:
# Silence errors from completion function
#raise # dbg
pass
# Build match list to return
n = len(attr)
res = ["%s.%s" % (expr, w) for w in words if w[:n] == attr ]
return res
def get__all__entries(obj):
"""returns the strings in the __all__ attribute"""
try:
words = getattr(obj, '__all__')
except:
return []
return [w for w in words if isinstance(w, string_types)]
def match_dict_keys(keys, prefix, delims):
"""Used by dict_key_matches, matching the prefix to a list of keys"""
if not prefix:
return None, 0, [repr(k) for k in keys
if isinstance(k, (string_types, bytes))]
quote_match = re.search('["\']', prefix)
quote = quote_match.group()
try:
prefix_str = eval(prefix + quote, {})
except Exception:
return None, 0, []
pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
token_match = re.search(pattern, prefix, re.UNICODE)
token_start = token_match.start()
token_prefix = token_match.group()
# TODO: support bytes in Py3k
matched = []
for key in keys:
try:
if not key.startswith(prefix_str):
continue
except (AttributeError, TypeError, UnicodeError):
# Python 3+ TypeError on b'a'.startswith('a') or vice-versa
continue
# reformat remainder of key to begin with prefix
rem = key[len(prefix_str):]
# force repr wrapped in '
rem_repr = repr(rem + '"')
if rem_repr.startswith('u') and prefix[0] not in 'uU':
# Found key is unicode, but prefix is Py2 string.
# Therefore attempt to interpret key as string.
try:
rem_repr = repr(rem.encode('ascii') + '"')
except UnicodeEncodeError:
continue
rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
if quote == '"':
# The entered prefix is quoted with ",
# but the match is quoted with '.
# A contained " hence needs escaping for comparison:
rem_repr = rem_repr.replace('"', '\\"')
# then reinsert prefix from start of token
matched.append('%s%s' % (token_prefix, rem_repr))
return quote, token_start, matched
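# Illustrative sketch (not part of IPython): matching a partially typed,
# quoted dict key. The keys below are made up.
def _example_match_dict_keys():
    quote, token_start, matched = match_dict_keys(['abc', 'abd', 'xyz'], "'ab", DELIMS)
    return quote, token_start, matched  # roughly ("'", 1, ['abc', 'abd'])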
def _safe_isinstance(obj, module, class_name):
"""Checks if obj is an instance of module.class_name if loaded
"""
return (module in sys.modules and
isinstance(obj, getattr(__import__(module), class_name)))
def back_unicode_name_matches(text):
u"""Match unicode characters back to unicode name
This does ☃ -> \\snowman
Note that snowman is not a valid python3 combining character but will be expanded.
Though it will not recombine back to the snowman character by the completion machinery.
This will not either back-complete standard sequences like \\n, \\b ...
Used on Python 3 only.
"""
if len(text)<2:
return u'', ()
maybe_slash = text[-2]
if maybe_slash != '\\':
return u'', ()
char = text[-1]
# no expand on quote for completion in strings.
# nor backcomplete standard ascii keys
if char in string.ascii_letters or char in ['"',"'"]:
return u'', ()
try :
unic = unicodedata.name(char)
return '\\'+char,['\\'+unic]
except KeyError as e:
pass
return u'', ()
def back_latex_name_matches(text):
u"""Match latex characters back to unicode name
This does √ -> \\sqrt
Used on Python 3 only.
"""
if len(text)<2:
return u'', ()
maybe_slash = text[-2]
if maybe_slash != '\\':
return u'', ()
char = text[-1]
# no expand on quote for completion in strings.
# nor backcomplete standard ascii keys
if char in string.ascii_letters or char in ['"',"'"]:
return u'', ()
try :
latex = reverse_latex_symbol[char]
# '\\' replace the \ as well
return '\\'+char,[latex]
except KeyError as e:
pass
return u'', ()
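# Illustrative sketch (not part of IPython): turning an already-typed character
# back into an escape sequence that can be re-completed by name.
def _example_back_name_matches():
    snowman = back_unicode_name_matches(u'\\\u2603')  # -> ('\\☃', ['\\SNOWMAN'])
    alpha = back_latex_name_matches(u'\\\u03b1')      # -> ('\\α', ['\\alpha'])
    return snowman, alpha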
class IPCompleter(Completer):
"""Extension of the completer class with IPython-specific features"""
def _greedy_changed(self, name, old, new):
"""update the splitter and readline delims when greedy is changed"""
if new:
self.splitter.delims = GREEDY_DELIMS
else:
self.splitter.delims = DELIMS
if self.readline:
self.readline.set_completer_delims(self.splitter.delims)
merge_completions = CBool(True, config=True,
help="""Whether to merge completion results into a single list
If False, only the completion results from the first non-empty
completer will be returned.
"""
)
omit__names = Enum((0,1,2), default_value=2, config=True,
help="""Instruct the completer to omit private method names
Specifically, when completing on ``object.<tab>``.
When 2 [default]: all names that start with '_' will be excluded.
When 1: all 'magic' names (``__foo__``) will be excluded.
When 0: nothing will be excluded.
"""
)
limit_to__all__ = CBool(default_value=False, config=True,
help="""Instruct the completer to use __all__ for the completion
Specifically, when completing on ``object.<tab>``.
When True: only those names in obj.__all__ will be included.
When False [default]: the __all__ attribute is ignored
"""
)
def __init__(self, shell=None, namespace=None, global_namespace=None,
use_readline=True, config=None, **kwargs):
"""IPCompleter() -> completer
Return a completer object suitable for use by the readline library
via readline.set_completer().
Inputs:
- shell: a pointer to the ipython shell itself. This is needed
because this completer knows about magic functions, and those can
only be accessed via the ipython instance.
- namespace: an optional dict where completions are performed.
- global_namespace: secondary optional dict for completions, to
handle cases (such as IPython embedded inside functions) where
both Python scopes are visible.
use_readline : bool, optional
If true, use the readline library. This completer can still function
without readline, though in that case callers must provide some extra
information on each call about the current line."""
self.magic_escape = ESC_MAGIC
self.splitter = CompletionSplitter()
# Readline configuration, only used by the rlcompleter method.
if use_readline:
# We store the right version of readline so that later code can use it.
import IPython.utils.rlineimpl as readline
self.readline = readline
else:
self.readline = None
# _greedy_changed() depends on splitter and readline being defined:
Completer.__init__(self, namespace=namespace, global_namespace=global_namespace,
config=config, **kwargs)
# List where completion matches will be stored
self.matches = []
self.shell = shell
# Regexp to split filenames with spaces in them
self.space_name_re = re.compile(r'([^\\] )')
# Hold a local ref. to glob.glob for speed
self.glob = glob.glob
# Determine if we are running on 'dumb' terminals, like (X)Emacs
# buffers, to avoid completion problems.
term = os.environ.get('TERM','xterm')
self.dumb_terminal = term in ['dumb','emacs']
# Special handling of backslashes needed in win32 platforms
if sys.platform == "win32":
self.clean_glob = self._clean_glob_win32
else:
self.clean_glob = self._clean_glob
#regexp to parse docstring for function signature
self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
#use this if positional argument name is also needed
#= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
# All active matcher routines for completion
self.matchers = [self.python_matches,
self.file_matches,
self.magic_matches,
self.python_func_kw_matches,
self.dict_key_matches,
]
def all_completions(self, text):
"""
Wrapper around the complete method for the benefit of emacs
and pydb.
"""
return self.complete(text)[1]
def _clean_glob(self,text):
return self.glob("%s*" % text)
def _clean_glob_win32(self,text):
return [f.replace("\\","/")
for f in self.glob("%s*" % text)]
def file_matches(self, text):
"""Match filenames, expanding ~USER type strings.
Most of the seemingly convoluted logic in this completer is an
attempt to handle filenames with spaces in them. And yet it's not
quite perfect, because Python's readline doesn't expose all of the
GNU readline details needed for this to be done correctly.
For a filename with a space in it, the printed completions will be
only the parts after what's already been typed (instead of the
full completions, as is normally done). I don't think with the
current (as of Python 2.3) Python readline it's possible to do
better."""
#io.rprint('Completer->file_matches: <%r>' % text) # dbg
# chars that require escaping with backslash - i.e. chars
# that readline treats incorrectly as delimiters, but we
# don't want to treat as delimiters in filename matching
# when escaped with backslash
if text.startswith('!'):
text = text[1:]
text_prefix = '!'
else:
text_prefix = ''
text_until_cursor = self.text_until_cursor
# track strings with open quotes
open_quotes = has_open_quotes(text_until_cursor)
if '(' in text_until_cursor or '[' in text_until_cursor:
lsplit = text
else:
try:
# arg_split ~ shlex.split, but with unicode bugs fixed by us
lsplit = arg_split(text_until_cursor)[-1]
except ValueError:
# typically an unmatched ", or backslash without escaped char.
if open_quotes:
lsplit = text_until_cursor.split(open_quotes)[-1]
else:
return []
except IndexError:
# tab pressed on empty line
lsplit = ""
if not open_quotes and lsplit != protect_filename(lsplit):
# if protectables are found, do matching on the whole escaped name
has_protectables = True
text0,text = text,lsplit
else:
has_protectables = False
text = os.path.expanduser(text)
if text == "":
return [text_prefix + protect_filename(f) for f in self.glob("*")]
# Compute the matches from the filesystem
m0 = self.clean_glob(text.replace('\\',''))
if has_protectables:
# If we had protectables, we need to revert our changes to the
# beginning of filename so that we don't double-write the part
# of the filename we have so far
len_lsplit = len(lsplit)
matches = [text_prefix + text0 +
protect_filename(f[len_lsplit:]) for f in m0]
else:
if open_quotes:
# if we have a string with an open quote, we don't need to
# protect the names at all (and we _shouldn't_, as it
# would cause bugs when the filesystem call is made).
matches = m0
else:
matches = [text_prefix +
protect_filename(f) for f in m0]
#io.rprint('mm', matches) # dbg
# Mark directories in input list by appending '/' to their names.
matches = [x+'/' if os.path.isdir(x) else x for x in matches]
return matches
def magic_matches(self, text):
"""Match magics"""
#print 'Completer->magic_matches:',text,'lb',self.text_until_cursor # dbg
# Get all shell magics now rather than statically, so magics loaded at
# runtime show up too.
lsm = self.shell.magics_manager.lsmagic()
line_magics = lsm['line']
cell_magics = lsm['cell']
pre = self.magic_escape
pre2 = pre+pre
# Completion logic:
# - user gives %%: only do cell magics
# - user gives %: do both line and cell magics
# - no prefix: do both
# In other words, line magics are skipped if the user gives %% explicitly
bare_text = text.lstrip(pre)
comp = [ pre2+m for m in cell_magics if m.startswith(bare_text)]
if not text.startswith(pre2):
comp += [ pre+m for m in line_magics if m.startswith(bare_text)]
return comp
def python_matches(self,text):
"""Match attributes or global python names"""
#io.rprint('Completer->python_matches, txt=%r' % text) # dbg
if "." in text:
try:
matches = self.attr_matches(text)
if text.endswith('.') and self.omit__names:
if self.omit__names == 1:
# true if txt is _not_ a __ name, false otherwise:
no__name = (lambda txt:
re.match(r'.*\.__.*?__',txt) is None)
else:
# true if txt is _not_ a _ name, false otherwise:
no__name = (lambda txt:
re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
matches = filter(no__name, matches)
except NameError:
# catches <undefined attributes>.<tab>
matches = []
else:
matches = self.global_matches(text)
return matches
def _default_arguments_from_docstring(self, doc):
"""Parse the first line of docstring for call signature.
Docstring should be of the form 'min(iterable[, key=func])\n'.
It can also parse cython docstring of the form
'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
"""
if doc is None:
return []
#care only about the first line
line = doc.lstrip().splitlines()[0]
#p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
#'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
sig = self.docstring_sig_re.search(line)
if sig is None:
return []
# iterable[, key=func]' -> ['iterable[' ,' key=func]']
sig = sig.groups()[0].split(',')
ret = []
for s in sig:
#re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
ret += self.docstring_kwd_re.findall(s)
return ret
def _default_arguments(self, obj):
"""Return the list of default arguments of obj if it is callable,
or empty list otherwise."""
call_obj = obj
ret = []
if inspect.isbuiltin(obj):
pass
elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
if inspect.isclass(obj):
#for cython embedsignature=True the constructor docstring
#belongs to the object itself not __init__
ret += self._default_arguments_from_docstring(
getattr(obj, '__doc__', ''))
# for classes, check for __init__,__new__
call_obj = (getattr(obj, '__init__', None) or
getattr(obj, '__new__', None))
# for all others, check if they are __call__able
elif hasattr(obj, '__call__'):
call_obj = obj.__call__
ret += self._default_arguments_from_docstring(
getattr(call_obj, '__doc__', ''))
try:
args,_,_1,defaults = inspect.getargspec(call_obj)
if defaults:
ret+=args[-len(defaults):]
except TypeError:
pass
return list(set(ret))
def python_func_kw_matches(self,text):
"""Match named parameters (kwargs) of the last open function"""
if "." in text: # a parameter cannot be dotted
return []
try: regexp = self.__funcParamsRegex
except AttributeError:
regexp = self.__funcParamsRegex = re.compile(r'''
'.*?(?<!\\)' | # single quoted strings or
".*?(?<!\\)" | # double quoted strings or
\w+ | # identifier
\S # other characters
''', re.VERBOSE | re.DOTALL)
# 1. find the nearest identifier that comes before an unclosed
# parenthesis before the cursor
# e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
tokens = regexp.findall(self.text_until_cursor)
tokens.reverse()
iterTokens = iter(tokens); openPar = 0
for token in iterTokens:
if token == ')':
openPar -= 1
elif token == '(':
openPar += 1
if openPar > 0:
# found the last unclosed parenthesis
break
else:
return []
# 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
ids = []
isId = re.compile(r'\w+$').match
while True:
try:
ids.append(next(iterTokens))
if not isId(ids[-1]):
ids.pop(); break
if not next(iterTokens) == '.':
break
except StopIteration:
break
# lookup the candidate callable matches either using global_matches
# or attr_matches for dotted names
if len(ids) == 1:
callableMatches = self.global_matches(ids[0])
else:
callableMatches = self.attr_matches('.'.join(ids[::-1]))
argMatches = []
for callableMatch in callableMatches:
try:
namedArgs = self._default_arguments(eval(callableMatch,
self.namespace))
except:
continue
for namedArg in namedArgs:
if namedArg.startswith(text):
argMatches.append("%s=" %namedArg)
return argMatches
def dict_key_matches(self, text):
"Match string keys in a dictionary, after e.g. 'foo[' "
def get_keys(obj):
# Only allow completion for known in-memory dict-like types
if isinstance(obj, dict) or\
_safe_isinstance(obj, 'pandas', 'DataFrame'):
try:
return list(obj.keys())
except Exception:
return []
elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
_safe_isinstance(obj, 'numpy', 'void'):
return obj.dtype.names or []
return []
try:
regexps = self.__dict_key_regexps
except AttributeError:
dict_key_re_fmt = r'''(?x)
( # match dict-referring expression wrt greedy setting
%s
)
\[ # open bracket
\s* # and optional whitespace
([uUbB]? # string prefix (r not handled)
(?: # unclosed string
'(?:[^']|(?<!\\)\\')*
|
"(?:[^"]|(?<!\\)\\")*
)
)?
$
'''
regexps = self.__dict_key_regexps = {
False: re.compile(dict_key_re_fmt % '''
# identifiers separated by .
(?!\d)\w+
(?:\.(?!\d)\w+)*
'''),
True: re.compile(dict_key_re_fmt % '''
.+
''')
}
match = regexps[self.greedy].search(self.text_until_cursor)
if match is None:
return []
expr, prefix = match.groups()
try:
obj = eval(expr, self.namespace)
except Exception:
try:
obj = eval(expr, self.global_namespace)
except Exception:
return []
keys = get_keys(obj)
if not keys:
return keys
closing_quote, token_offset, matches = match_dict_keys(keys, prefix, self.splitter.delims)
if not matches:
return matches
# get the cursor position of
# - the text being completed
# - the start of the key text
# - the start of the completion
text_start = len(self.text_until_cursor) - len(text)
if prefix:
key_start = match.start(2)
completion_start = key_start + token_offset
else:
key_start = completion_start = match.end()
# grab the leading prefix, to make sure all completions start with `text`
if text_start > key_start:
leading = ''
else:
leading = text[text_start:completion_start]
# the index of the `[` character
bracket_idx = match.end(1)
# append closing quote and bracket as appropriate
# this is *not* appropriate if the opening quote or bracket is outside
# the text given to this method
suf = ''
continuation = self.line_buffer[len(self.text_until_cursor):]
if key_start > text_start and closing_quote:
# quotes were opened inside text, maybe close them
if continuation.startswith(closing_quote):
continuation = continuation[len(closing_quote):]
else:
suf += closing_quote
if bracket_idx > text_start:
# brackets were opened inside text, maybe close them
if not continuation.startswith(']'):
suf += ']'
return [leading + k + suf for k in matches]
def unicode_name_matches(self, text):
u"""Match Latex-like syntax for unicode characters base
on the name of the character.
This does \\GREEK SMALL LETTER ETA -> η
Works only on valid python 3 identifiers, or on combining characters that
will combine to form a valid identifier.
Used on Python 3 only.
"""
slashpos = text.rfind('\\')
if slashpos > -1:
s = text[slashpos+1:]
try :
unic = unicodedata.lookup(s)
# allow combining chars
if ('a'+unic).isidentifier():
return '\\'+s,[unic]
except KeyError as e:
pass
return u'', []
def latex_matches(self, text):
u"""Match Latex syntax for unicode characters.
This does both \\alp -> \\alpha and \\alpha -> α
Used on Python 3 only.
"""
slashpos = text.rfind('\\')
if slashpos > -1:
s = text[slashpos:]
if s in latex_symbols:
# Try to complete a full latex symbol to unicode
# \\alpha -> α
return s, [latex_symbols[s]]
else:
# If a user has partially typed a latex symbol, give them
# a full list of options \al -> [\aleph, \alpha]
matches = [k for k in latex_symbols if k.startswith(s)]
return s, matches
return u'', []
def dispatch_custom_completer(self, text):
#io.rprint("Custom! '%s' %s" % (text, self.custom_completers)) # dbg
line = self.line_buffer
if not line.strip():
return None
# Create a little structure to pass all the relevant information about
# the current completion to any custom completer.
event = Bunch()
event.line = line
event.symbol = text
cmd = line.split(None,1)[0]
event.command = cmd
event.text_until_cursor = self.text_until_cursor
#print "\ncustom:{%s]\n" % event # dbg
# for foo etc, try also to find completer for %foo
if not cmd.startswith(self.magic_escape):
try_magic = self.custom_completers.s_matches(
self.magic_escape + cmd)
else:
try_magic = []
for c in itertools.chain(self.custom_completers.s_matches(cmd),
try_magic,
self.custom_completers.flat_matches(self.text_until_cursor)):
#print "try",c # dbg
try:
res = c(event)
if res:
# first, try case sensitive match
withcase = [r for r in res if r.startswith(text)]
if withcase:
return withcase
# if none, then case insensitive ones are ok too
text_low = text.lower()
return [r for r in res if r.lower().startswith(text_low)]
except TryNext:
pass
return None
def complete(self, text=None, line_buffer=None, cursor_pos=None):
"""Find completions for the given text and line context.
Note that both the text and the line_buffer are optional, but at least
one of them must be given.
Parameters
----------
text : string, optional
Text to perform the completion on. If not given, the line buffer
is split using the instance's CompletionSplitter object.
line_buffer : string, optional
If not given, the completer attempts to obtain the current line
buffer via readline. This keyword allows clients which are
requesting for text completions in non-readline contexts to inform
the completer of the entire text.
cursor_pos : int, optional
Index of the cursor in the full line buffer. Should be provided by
remote frontends where kernel has no access to frontend state.
Returns
-------
text : str
Text that was actually used in the completion.
matches : list
A list of completion matches.
"""
# io.rprint('\nCOMP1 %r %r %r' % (text, line_buffer, cursor_pos)) # dbg
# if the cursor position isn't given, the only sane assumption we can
# make is that it's at the end of the line (the common case)
if cursor_pos is None:
cursor_pos = len(line_buffer) if text is None else len(text)
if PY3:
base_text = text if not line_buffer else line_buffer[:cursor_pos]
latex_text, latex_matches = self.latex_matches(base_text)
if latex_matches:
return latex_text, latex_matches
name_text = ''
name_matches = []
for meth in (self.unicode_name_matches, back_latex_name_matches, back_unicode_name_matches):
name_text, name_matches = meth(base_text)
if name_text:
return name_text, name_matches
# if text is either None or an empty string, rely on the line buffer
if not text:
text = self.splitter.split_line(line_buffer, cursor_pos)
# If no line buffer is given, assume the input text is all there was
if line_buffer is None:
line_buffer = text
self.line_buffer = line_buffer
self.text_until_cursor = self.line_buffer[:cursor_pos]
# io.rprint('COMP2 %r %r %r' % (text, line_buffer, cursor_pos)) # dbg
# Start with a clean slate of completions
self.matches[:] = []
custom_res = self.dispatch_custom_completer(text)
if custom_res is not None:
# did custom completers produce something?
self.matches = custom_res
else:
# Extend the list of completions with the results of each
# matcher, so we return results to the user from all
# namespaces.
if self.merge_completions:
self.matches = []
for matcher in self.matchers:
try:
self.matches.extend(matcher(text))
except:
# Show the ugly traceback if the matcher causes an
# exception, but do NOT crash the kernel!
sys.excepthook(*sys.exc_info())
else:
for matcher in self.matchers:
self.matches = matcher(text)
if self.matches:
break
# FIXME: we should extend our api to return a dict with completions for
# different types of objects. The rlcomplete() method could then
# simply collapse the dict into a list for readline, but we'd have
# richer completion semantics in other environments.
# use penalize_magics_key to put magics after variables with same name
self.matches = sorted(set(self.matches), key=penalize_magics_key)
#io.rprint('COMP TEXT, MATCHES: %r, %r' % (text, self.matches)) # dbg
return text, self.matches
def rlcomplete(self, text, state):
"""Return the state-th possible completion for 'text'.
This is called successively with state == 0, 1, 2, ... until it
returns None. The completion should begin with 'text'.
Parameters
----------
text : string
Text to perform the completion on.
state : int
Counter used by readline.
"""
if state==0:
self.line_buffer = line_buffer = self.readline.get_line_buffer()
cursor_pos = self.readline.get_endidx()
#io.rprint("\nRLCOMPLETE: %r %r %r" %
# (text, line_buffer, cursor_pos) ) # dbg
# if there is only a tab on a line with only whitespace, instead of
# the mostly useless 'do you want to see all million completions'
# message, just do the right thing and give the user his tab!
# Incidentally, this enables pasting of tabbed text from an editor
# (as long as autoindent is off).
# It should be noted that at least pyreadline still shows file
# completions - is there a way around it?
# don't apply this on 'dumb' terminals, such as emacs buffers, so
# we don't interfere with their own tab-completion mechanism.
if not (self.dumb_terminal or line_buffer.strip()):
self.readline.insert_text('\t')
sys.stdout.flush()
return None
# Note: debugging exceptions that may occur in completion is very
# tricky, because readline unconditionally silences them. So if
# during development you suspect a bug in the completion code, turn
# this flag on temporarily by uncommenting the second form (don't
# flip the value in the first line, as the '# dbg' marker can be
# automatically detected and is used elsewhere).
DEBUG = False
#DEBUG = True # dbg
if DEBUG:
try:
self.complete(text, line_buffer, cursor_pos)
except:
import traceback; traceback.print_exc()
else:
# The normal production version is here
# This method computes the self.matches array
self.complete(text, line_buffer, cursor_pos)
try:
return self.matches[state]
except IndexError:
return None
| artistic-2.0 |
crichardson17/starburst_atlas | Low_resolution_sims/Dusty_LowRes/Geneva_cont_NoRot/Geneva_cont_NoRot_4/fullgrid/Optical1.py | 30 | 9342 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
for file in os.listdir('.'):
if file.endswith("1.grd"):
gridfile1 = file
for file in os.listdir('.'):
if file.endswith("2.grd"):
gridfile2 = file
for file in os.listdir('.'):
if file.endswith("3.grd"):
gridfile3 = file
# ------------------------
for file in os.listdir('.'):
if file.endswith("1.txt"):
Elines1 = file
for file in os.listdir('.'):
if file.endswith("2.txt"):
Elines2 = file
for file in os.listdir('.'):
if file.endswith("3.txt"):
Elines3 = file
# ------------------------------------------------------------------------------------------------------
#Patches data
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.), # ignored
]
path = Path(verts, codes)
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.), # ignored
]
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for others peoples' data onto our plots.
def add_patches(ax):
patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
patch = patches.PathPatch(path, facecolor='red', lw=0)
ax1.add_patch(patch3)
ax1.add_patch(patch2)
ax1.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
def add_sub_plot(sub_num):
numplots = 16
plt.subplot(numplots/4.,4,sub_num)
rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
zi = rbf(xi, yi)
contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)
plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
if sub_num == numplots / 2.:
print "half the plots are complete"
#axis limits
yt_min = 8
yt_max = 23
xt_min = 0
xt_max = 12
plt.ylim(yt_min,yt_max)
plt.xlim(xt_min,xt_max)
plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]:
plt.tick_params(labelleft = 'off')
else:
plt.tick_params(labelleft = 'on')
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]:
plt.tick_params(labelbottom = 'off')
else:
plt.tick_params(labelbottom = 'on')
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 1:
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
if sub_num == 13:
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 16 :
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
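#NOTE: add_sub_plot assumes sub_num runs from 1 to 16 (the 4x4 subplot
#positions): column sub_num-1 of z is contoured, y tick labels are kept only
#on the left column (panels 1, 5, 9, 13) and x tick labels only on the bottom
#row (panels 13-16).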
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid1 = [];
grid2 = [];
grid3 = [];
with open(gridfile1, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid1.append(row);
grid1 = asarray(grid1)
with open(gridfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid2.append(row);
grid2 = asarray(grid2)
with open(gridfile3, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid3.append(row);
grid3 = asarray(grid3)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines1 = [];
dataEmissionlines2 = [];
dataEmissionlines3 = [];
with open(Elines1, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines1.append(row);
dataEmissionlines1 = asarray(dataEmissionlines1)
with open(Elines2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers2 = csvReader.next()
for row in csvReader:
dataEmissionlines2.append(row);
dataEmissionlines2 = asarray(dataEmissionlines2)
with open(Elines3, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers3 = csvReader.next()
for row in csvReader:
dataEmissionlines3.append(row);
dataEmissionlines3 = asarray(dataEmissionlines3)
print "import files complete"
# ---------------------------------------------------
#for concatenating grid
#pull the phi and hdens values from each of the runs. exclude header lines
grid1new = zeros((len(grid1[:,0])-1,2))
grid1new[:,0] = grid1[1:,6]
grid1new[:,1] = grid1[1:,7]
grid2new = zeros((len(grid2[:,0])-1,2))
x = array(17.00000)
grid2new[:,0] = repeat(x,len(grid2[:,0])-1)
grid2new[:,1] = grid2[1:,6]
grid3new = zeros((len(grid3[:,0])-1,2))
grid3new[:,0] = grid3[1:,6]
grid3new[:,1] = grid3[1:,7]
grid = concatenate((grid1new,grid2new,grid3new))
hdens_values = grid[:,1]
phi_values = grid[:,0]
# ---------------------------------------------------
#for concatenating Emission lines data
Emissionlines = concatenate((dataEmissionlines1[:,1:],dataEmissionlines2[:,1:],dataEmissionlines3[:,1:]))
#for lines
headers = headers[1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(concatenated_data[0]),4))
# ---------------------------------------------------
#constructing grid by scaling
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = concatenated_data[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
			concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
# ---------------------------------------------------
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
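#max_values column layout: 0 = peak of the scaled line ratio, 1 = row index of
#that peak in concatenated_data, 2 = hdens at the peak, 3 = phi at the peak
#(used for the starred markers and annotations in add_sub_plot).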
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(concatenated_data),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
# ---------------------------------------------------
#change desired lines here!
line = [36, #NE 3 3343A
38, #BA C
39, #3646
40, #3726
41, #3727
42, #3729
43, #3869
44, #3889
45, #3933
46, #4026
47, #4070
48, #4074
49, #4078
50, #4102
51, #4340
52] #4363
#create z array for this plot
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("Dusty Optical Lines", fontsize=14)
# ---------------------------------------------------
for i in range(16):
	add_sub_plot(i+1)
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
print "complete"
plt.savefig('Dusty_optical_lines.pdf')
plt.clf()
print "figure saved"
| gpl-2.0 |
sodafree/backend | build/ipython/build/lib.linux-i686-2.7/IPython/lib/latextools.py | 3 | 5023 | # -*- coding: utf-8 -*-
"""Tools for handling LaTeX.
Authors:
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010 IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from StringIO import StringIO
from base64 import encodestring
import os
import tempfile
import shutil
import subprocess
from IPython.utils.process import find_cmd, FindCmdError
#-----------------------------------------------------------------------------
# Tools
#-----------------------------------------------------------------------------
def latex_to_png(s, encode=False, backend='mpl'):
"""Render a LaTeX string to PNG.
Parameters
----------
s : str
The raw string containing valid inline LaTeX.
encode : bool, optional
        Should the PNG data be base64 encoded to make it JSON'able.
backend : {mpl, dvipng}
Backend for producing PNG data.
None is returned when the backend cannot be used.
"""
if backend == 'mpl':
f = latex_to_png_mpl
elif backend == 'dvipng':
f = latex_to_png_dvipng
else:
raise ValueError('No such backend {0}'.format(backend))
bin_data = f(s)
if encode and bin_data:
bin_data = encodestring(bin_data)
return bin_data
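# Illustrative usage sketch (added for documentation; the helper name below is
# hypothetical, not part of the IPython API). It assumes matplotlib is
# installed so the default 'mpl' backend is available.
def _latex_to_png_example():
    raw = latex_to_png(r'$\alpha^2 + \beta^2$')               # raw PNG bytes, or None
    b64 = latex_to_png(r'$\alpha^2 + \beta^2$', encode=True)  # base64-encoded bytes
    return raw, b64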
def latex_to_png_mpl(s):
try:
from matplotlib import mathtext
except ImportError:
return None
mt = mathtext.MathTextParser('bitmap')
f = StringIO()
mt.to_png(f, s, fontsize=12)
return f.getvalue()
def latex_to_png_dvipng(s):
try:
find_cmd('latex')
find_cmd('dvipng')
except FindCmdError:
return None
try:
workdir = tempfile.mkdtemp()
tmpfile = os.path.join(workdir, "tmp.tex")
dvifile = os.path.join(workdir, "tmp.dvi")
outfile = os.path.join(workdir, "tmp.png")
with open(tmpfile, "w") as f:
f.write(_latex_header)
f.write(s)
f.write(_latex_footer)
subprocess.check_call(
["latex", "-halt-on-errror", tmpfile], cwd=workdir,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
subprocess.check_call(
["dvipng", "-T", "tight", "-x", "1500", "-z", "9",
"-bg", "transparent", "-o", outfile, dvifile], cwd=workdir,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
with open(outfile) as f:
bin_data = f.read()
finally:
shutil.rmtree(workdir)
return bin_data
_latex_header = r'''
\documentclass{article}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{amssymb}
\usepackage{bm}
\pagestyle{empty}
\begin{document}
'''
_latex_footer = r'\end{document}'
_data_uri_template_png = """<img src="data:image/png;base64,%s" alt=%s />"""
def latex_to_html(s, alt='image'):
"""Render LaTeX to HTML with embedded PNG data using data URIs.
Parameters
----------
s : str
        The raw string containing valid inline LaTeX.
alt : str
The alt text to use for the HTML.
"""
base64_data = latex_to_png(s, encode=True)
if base64_data:
return _data_uri_template_png % (base64_data, alt)
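# Illustrative usage: embed a rendered expression in an HTML fragment.
#   html = latex_to_html(r'$e^{i\pi} + 1 = 0$', alt='euler')
# returns the <img> data-URI snippet, or None when no PNG backend is available.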
# From matplotlib, thanks to mdboom. Once this is in matplotlib releases, we
# will remove.
def math_to_image(s, filename_or_obj, prop=None, dpi=None, format=None):
"""
Given a math expression, renders it in a closely-clipped bounding
box to an image file.
*s*
A math expression. The math portion should be enclosed in
dollar signs.
*filename_or_obj*
A filepath or writable file-like object to write the image data
to.
*prop*
If provided, a FontProperties() object describing the size and
style of the text.
*dpi*
Override the output dpi, otherwise use the default associated
with the output format.
*format*
The output format, eg. 'svg', 'pdf', 'ps' or 'png'. If not
provided, will be deduced from the filename.
"""
from matplotlib import figure
# backend_agg supports all of the core output formats
from matplotlib.backends import backend_agg
from matplotlib.font_manager import FontProperties
from matplotlib.mathtext import MathTextParser
if prop is None:
prop = FontProperties()
parser = MathTextParser('path')
width, height, depth, _, _ = parser.parse(s, dpi=72, prop=prop)
fig = figure.Figure(figsize=(width / 72.0, height / 72.0))
fig.text(0, depth/height, s, fontproperties=prop)
backend_agg.FigureCanvasAgg(fig)
fig.savefig(filename_or_obj, dpi=dpi, format=format)
return depth
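# Illustrative usage (hypothetical file name; requires matplotlib):
#   from matplotlib.font_manager import FontProperties
#   depth = math_to_image('$x^2 + y^2$', 'expr.png',
#                         prop=FontProperties(size=14), dpi=120, format='png')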
| bsd-3-clause |
lammy/artisan | setup-mac.py | 9 | 9103 | """
This is a setup.py script generated by py2applet
Usage:
python setup-mac.py py2app
"""
# manually remove sample-data mpl subdirectory from Python installation:
# sudo rm -rf /Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/matplotlib/mpl-data/sample_data
from distutils import sysconfig
their_parse_makefile = sysconfig.parse_makefile
def my_parse_makefile(filename, g):
their_parse_makefile(filename, g)
g['MACOSX_DEPLOYMENT_TARGET'] = '10.6'
sysconfig.parse_makefile = my_parse_makefile
import sys, os
from setuptools import setup
import string
from plistlib import Plist
import artisanlib
# current version of Artisan
VERSION = artisanlib.__version__
LICENSE = 'GNU General Public License (GPL)'
QTDIR = r'/Developer/Applications/Qt/'
APP = ['artisan.py']
DATA_FILES = [
"LICENSE.txt",
("../Resources/qt_plugins/iconengines", [QTDIR + r'/plugins/iconengines/libqsvgicon.dylib']),
("../Resources/qt_plugins/imageformats", [QTDIR + r'/plugins/imageformats/libqsvg.dylib']),
("../Resources/qt_plugins/imageformats", [QTDIR + r'/plugins/imageformats/libqjpeg.dylib']),
("../Resources/qt_plugins/imageformats", [QTDIR + r'/plugins/imageformats/libqgif.dylib']),
("../Resources/qt_plugins/imageformats", [QTDIR + r'/plugins/imageformats/libqtiff.dylib']),
# standard QT translation needed to get the Application menu bar and
# the standard dialog elements translated
("../translations", [QTDIR + r'/translations/qt_de.qm']),
("../translations", [QTDIR + r'/translations/qt_es.qm']),
("../translations", [QTDIR + r'/translations/qt_fr.qm']),
("../translations", [QTDIR + r'/translations/qt_sv.qm']),
("../translations", [QTDIR + r'/translations/qt_zh_CN.qm']),
("../translations", [QTDIR + r'/translations/qt_zh_TW.qm']),
("../translations", [QTDIR + r'/translations/qt_ko.qm']),
("../translations", [QTDIR + r'/translations/qt_pt.qm']),
("../translations", [QTDIR + r'/translations/qt_ru.qm']),
("../translations", [QTDIR + r'/translations/qt_ar.qm']),
("../translations", [QTDIR + r'/translations/qt_ja.qm']),
("../translations", [QTDIR + r'/translations/qt_hu.qm']),
("../translations", [QTDIR + r'/translations/qt_pl.qm']),
("../translations", [r"translations/artisan_de.qm"]),
("../translations", [r"translations/artisan_es.qm"]),
("../translations", [r"translations/artisan_fr.qm"]),
("../translations", [r"translations/artisan_sv.qm"]),
("../translations", [r'translations/artisan_zh_CN.qm']),
("../translations", [r'translations/artisan_zh_TW.qm']),
("../translations", [r'translations/artisan_ko.qm']),
("../translations", [r'translations/artisan_pt.qm']),
("../translations", [r'translations/artisan_ru.qm']),
("../translations", [r'translations/artisan_ar.qm']),
("../translations", [r"translations/artisan_it.qm"]),
("../translations", [r"translations/artisan_el.qm"]),
("../translations", [r"translations/artisan_no.qm"]),
("../translations", [r"translations/artisan_nl.qm"]),
("../translations", [r"translations/artisan_fi.qm"]),
("../translations", [r"translations/artisan_tr.qm"]),
("../translations", [r"translations/artisan_ja.qm"]),
("../translations", [r"translations/artisan_hu.qm"]),
("../translations", [r"translations/artisan_he.qm"]),
("../translations", [r"translations/artisan_pl.qm"]),
("../Resources", [r"qt.conf"]),
("../Resources", [r"artisanProfile.icns"]),
("../Resources", [r"artisanAlarms.icns"]),
("../Resources", [r"artisanPalettes.icns"]),
("../Resources", [r"artisanWheel.icns"]),
("../Resources", [r"includes/alarmclock.eot"]),
("../Resources", [r"includes/alarmclock.svg"]),
("../Resources", [r"includes/alarmclock.ttf"]),
("../Resources", [r"includes/alarmclock.woff"]),
("../Resources", [r"includes/artisan.tpl"]),
("../Resources", [r"includes/bigtext.js"]),
("../Resources", [r"includes/Humor-Sans.ttf"]),
("../Resources", [r"includes/jquery-1.11.1.min.js"]),
]
plist = Plist.fromFile('Info.plist')
plist.update({ 'CFBundleDisplayName': 'Artisan',
'CFBundleGetInfoString' : 'Artisan, Roast Logger',
'CFBundleIdentifier': 'com.google.code.p.Artisan',
'CFBundleShortVersionString': VERSION,
'CFBundleVersion': 'Artisan ' + VERSION,
'LSMinimumSystemVersion': '10.6',
'LSMultipleInstancesProhibited':'false',
'LSPrefersPPC': False,
'LSArchitecturePriority': 'x86_64',
'NSHumanReadableCopyright': LICENSE
})
OPTIONS = {
'strip':True,
    'argv_emulation': False, # this would confuse GUI processing
'semi_standalone': False,
'site_packages': True,
'dylib_excludes': ['phonon','QtDBus','QtDeclarative','QtDesigner',
'QtHelp','QtMultimedia','QtNetwork',
'QtOpenGL','QtScript','QtScriptTools',
'QtSql','QtTest','QtXmlPatterns','QtWebKit'],
'packages': ['yoctopuce','gevent'],
'optimize': 2,
'compressed': True,
'iconfile': 'artisan.icns',
'arch': 'x86_64',
'matplotlib_backends': '-', # '-' for imported or explicit 'qt4agg'
'includes': ['serial',
'PyQt4',
'PyQt4.QtCore',
'PyQt4.QtGui',
'PyQt4.QtSvg',
'PyQt4.QtXml'],
'excludes' : ['_tkagg','_ps','_fltkagg','Tkinter','Tkconstants',
'_agg','_cairo','_gtk','gtkcairo','pydoc','sqlite3',
'bsddb','curses','tcl',
'_wxagg','_gtagg','_cocoaagg','_wx'],
'plist' : plist}
setup(
name='Artisan',
version=VERSION,
author='YOUcouldbeTOO',
author_email='[email protected]',
license=LICENSE,
app=APP,
data_files=DATA_FILES,
options={'py2app': OPTIONS},
setup_requires=['py2app']
)
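# Build sketch (assumes py2app and the Qt/PyQt4 tree referenced above are
# installed; run from the Artisan source directory):
#   python setup-mac.py py2app
# py2app consumes the OPTIONS/DATA_FILES defined above; the post-processing
# below copies the Wheels directories into dist, prunes unused Qt frameworks
# and debug libraries from dist/Artisan.app, and packages the result as a .dmg.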
os.system(r'cp README.txt dist')
os.system(r'cp LICENSE.txt dist')
os.system(r'mkdir dist/Wheels')
os.system(r'mkdir dist/Wheels/Cupping')
os.system(r'mkdir dist/Wheels/Other')
os.system(r'mkdir dist/Wheels/Roasting')
os.system(r'cp Wheels/Cupping/* dist/Wheels/Cupping')
os.system(r'cp Wheels/Other/* dist/Wheels/Other')
os.system(r'cp Wheels/Roasting/* dist/Wheels/Roasting')
os.chdir('./dist')
# delete unused Qt.framework files (py2app exclude does not seem to work)
print '*** Removing unused Qt frameworks ***'
for fw in [
'phonon',
'QtDeclarative',
'QtHelp',
'QtMultimedia',
'QtNetwork',
'QtOpenGL',
'QtScript',
'QtScriptTools',
'QtSql',
'QtDBus',
'QtDesigner',
'QtTest',
'QtWebKit',
'QtXMLPatterns']:
for root,dirs,files in os.walk('./Artisan.app/Contents/Frameworks/' + fw + ".framework"):
for file in files:
print 'Deleting', file
os.remove(os.path.join(root,file))
print '*** Removing Qt debug libs ***'
for root, dirs, files in os.walk('.'):
for file in files:
if 'debug' in file:
print 'Deleting', file
os.remove(os.path.join(root,file))
elif file.startswith('test_'):
print 'Deleting', file
os.remove(os.path.join(root,file))
elif '_tests' in file:
print 'Deleting', file
os.remove(os.path.join(root,file))
elif file.endswith('.pyc') and file != "site.pyc" and os.path.isfile(os.path.join(root,file[:-3] + 'pyo')):
print 'Deleting', file
os.remove(os.path.join(root,file))
# remove also all .h .in .cpp .cc .html files
elif file.endswith('.h') and file != "pyconfig.h":
print 'Deleting', file
os.remove(os.path.join(root,file))
elif file.endswith('.in'):
print 'Deleting', file
os.remove(os.path.join(root,file))
elif file.endswith('.cpp'):
print 'Deleting', file
os.remove(os.path.join(root,file))
elif file.endswith('.cc'):
print 'Deleting', file
os.remove(os.path.join(root,file))
# .afm files should not be removed as without matplotlib will fail on startup
# elif file.endswith('.afm'):
# print 'Deleting', file
# os.remove(os.path.join(root,file))
# remove test files
for dir in dirs:
if 'tests' in dir:
for r,d,f in os.walk(os.path.join(root,dir)):
for fl in f:
print 'Deleting', os.path.join(r,fl)
os.remove(os.path.join(r,fl))
os.chdir('..')
os.system(r"rm artisan-mac-" + VERSION + r".dmg")
os.system(r'hdiutil create artisan-mac-' + VERSION + r'.dmg -volname "Artisan" -fs HFS+ -srcfolder "dist"')
# otool -L dist/Artisan.app/Contents/MacOS/Artisan
| gpl-3.0 |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/tests/series/test_datetime_values.py | 7 | 17543 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, date
import numpy as np
import pandas as pd
from pandas.types.common import is_integer_dtype, is_list_like
from pandas import (Index, Series, DataFrame, bdate_range,
date_range, period_range, timedelta_range)
from pandas.tseries.period import PeriodIndex
from pandas.tseries.index import Timestamp, DatetimeIndex
from pandas.tseries.tdi import TimedeltaIndex
import pandas.core.common as com
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesDatetimeValues(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_dt_namespace_accessor(self):
# GH 7207, 11128
# test .dt namespace accessor
ok_for_base = ['year', 'month', 'day', 'hour', 'minute', 'second',
'weekofyear', 'week', 'dayofweek', 'weekday',
'dayofyear', 'quarter', 'freq', 'days_in_month',
'daysinmonth', 'is_leap_year']
ok_for_period = ok_for_base + ['qyear', 'start_time', 'end_time']
ok_for_period_methods = ['strftime', 'to_timestamp', 'asfreq']
ok_for_dt = ok_for_base + ['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start', 'is_quarter_end',
'is_year_start', 'is_year_end', 'tz',
'weekday_name']
ok_for_dt_methods = ['to_period', 'to_pydatetime', 'tz_localize',
'tz_convert', 'normalize', 'strftime', 'round',
'floor', 'ceil', 'weekday_name']
ok_for_td = ['days', 'seconds', 'microseconds', 'nanoseconds']
ok_for_td_methods = ['components', 'to_pytimedelta', 'total_seconds',
'round', 'floor', 'ceil']
def get_expected(s, name):
result = getattr(Index(s._values), prop)
if isinstance(result, np.ndarray):
if is_integer_dtype(result):
result = result.astype('int64')
elif not is_list_like(result):
return result
return Series(result, index=s.index, name=s.name)
def compare(s, name):
a = getattr(s.dt, prop)
b = get_expected(s, prop)
if not (is_list_like(a) and is_list_like(b)):
self.assertEqual(a, b)
else:
tm.assert_series_equal(a, b)
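        # compare() cross-checks the Series.dt accessor against the same
        # property computed directly on the underlying Index via
        # get_expected(): scalars with assertEqual, list-like results with
        # assert_series_equal.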
# datetimeindex
cases = [Series(date_range('20130101', periods=5), name='xxx'),
Series(date_range('20130101', periods=5, freq='s'),
name='xxx'),
Series(date_range('20130101 00:00:00', periods=5, freq='ms'),
name='xxx')]
for s in cases:
for prop in ok_for_dt:
# we test freq below
if prop != 'freq':
compare(s, prop)
for prop in ok_for_dt_methods:
getattr(s.dt, prop)
result = s.dt.to_pydatetime()
self.assertIsInstance(result, np.ndarray)
self.assertTrue(result.dtype == object)
result = s.dt.tz_localize('US/Eastern')
exp_values = DatetimeIndex(s.values).tz_localize('US/Eastern')
expected = Series(exp_values, index=s.index, name='xxx')
tm.assert_series_equal(result, expected)
tz_result = result.dt.tz
self.assertEqual(str(tz_result), 'US/Eastern')
freq_result = s.dt.freq
self.assertEqual(freq_result, DatetimeIndex(s.values,
freq='infer').freq)
# let's localize, then convert
result = s.dt.tz_localize('UTC').dt.tz_convert('US/Eastern')
exp_values = (DatetimeIndex(s.values).tz_localize('UTC')
.tz_convert('US/Eastern'))
expected = Series(exp_values, index=s.index, name='xxx')
tm.assert_series_equal(result, expected)
# round
s = Series(pd.to_datetime(['2012-01-01 13:00:00',
'2012-01-01 12:01:00',
'2012-01-01 08:00:00']), name='xxx')
result = s.dt.round('D')
expected = Series(pd.to_datetime(['2012-01-02', '2012-01-02',
'2012-01-01']), name='xxx')
tm.assert_series_equal(result, expected)
# round with tz
result = (s.dt.tz_localize('UTC')
.dt.tz_convert('US/Eastern')
.dt.round('D'))
exp_values = pd.to_datetime(['2012-01-01', '2012-01-01',
'2012-01-01']).tz_localize('US/Eastern')
expected = Series(exp_values, name='xxx')
tm.assert_series_equal(result, expected)
# floor
s = Series(pd.to_datetime(['2012-01-01 13:00:00',
'2012-01-01 12:01:00',
'2012-01-01 08:00:00']), name='xxx')
result = s.dt.floor('D')
expected = Series(pd.to_datetime(['2012-01-01', '2012-01-01',
'2012-01-01']), name='xxx')
tm.assert_series_equal(result, expected)
# ceil
s = Series(pd.to_datetime(['2012-01-01 13:00:00',
'2012-01-01 12:01:00',
'2012-01-01 08:00:00']), name='xxx')
result = s.dt.ceil('D')
expected = Series(pd.to_datetime(['2012-01-02', '2012-01-02',
'2012-01-02']), name='xxx')
tm.assert_series_equal(result, expected)
# datetimeindex with tz
s = Series(date_range('20130101', periods=5, tz='US/Eastern'),
name='xxx')
for prop in ok_for_dt:
# we test freq below
if prop != 'freq':
compare(s, prop)
for prop in ok_for_dt_methods:
getattr(s.dt, prop)
result = s.dt.to_pydatetime()
self.assertIsInstance(result, np.ndarray)
self.assertTrue(result.dtype == object)
result = s.dt.tz_convert('CET')
expected = Series(s._values.tz_convert('CET'),
index=s.index, name='xxx')
tm.assert_series_equal(result, expected)
tz_result = result.dt.tz
self.assertEqual(str(tz_result), 'CET')
freq_result = s.dt.freq
self.assertEqual(freq_result, DatetimeIndex(s.values,
freq='infer').freq)
# timedeltaindex
cases = [Series(timedelta_range('1 day', periods=5),
index=list('abcde'), name='xxx'),
Series(timedelta_range('1 day 01:23:45', periods=5,
freq='s'), name='xxx'),
Series(timedelta_range('2 days 01:23:45.012345', periods=5,
freq='ms'), name='xxx')]
for s in cases:
for prop in ok_for_td:
# we test freq below
if prop != 'freq':
compare(s, prop)
for prop in ok_for_td_methods:
getattr(s.dt, prop)
result = s.dt.components
self.assertIsInstance(result, DataFrame)
tm.assert_index_equal(result.index, s.index)
result = s.dt.to_pytimedelta()
self.assertIsInstance(result, np.ndarray)
self.assertTrue(result.dtype == object)
result = s.dt.total_seconds()
self.assertIsInstance(result, pd.Series)
self.assertTrue(result.dtype == 'float64')
freq_result = s.dt.freq
self.assertEqual(freq_result, TimedeltaIndex(s.values,
freq='infer').freq)
# both
index = date_range('20130101', periods=3, freq='D')
s = Series(date_range('20140204', periods=3, freq='s'),
index=index, name='xxx')
exp = Series(np.array([2014, 2014, 2014], dtype='int64'),
index=index, name='xxx')
tm.assert_series_equal(s.dt.year, exp)
exp = Series(np.array([2, 2, 2], dtype='int64'),
index=index, name='xxx')
tm.assert_series_equal(s.dt.month, exp)
exp = Series(np.array([0, 1, 2], dtype='int64'),
index=index, name='xxx')
tm.assert_series_equal(s.dt.second, exp)
exp = pd.Series([s[0]] * 3, index=index, name='xxx')
tm.assert_series_equal(s.dt.normalize(), exp)
# periodindex
cases = [Series(period_range('20130101', periods=5, freq='D'),
name='xxx')]
for s in cases:
for prop in ok_for_period:
# we test freq below
if prop != 'freq':
compare(s, prop)
for prop in ok_for_period_methods:
getattr(s.dt, prop)
freq_result = s.dt.freq
self.assertEqual(freq_result, PeriodIndex(s.values).freq)
# test limited display api
def get_dir(s):
results = [r for r in s.dt.__dir__() if not r.startswith('_')]
return list(sorted(set(results)))
s = Series(date_range('20130101', periods=5, freq='D'), name='xxx')
results = get_dir(s)
tm.assert_almost_equal(
results, list(sorted(set(ok_for_dt + ok_for_dt_methods))))
s = Series(period_range('20130101', periods=5,
freq='D', name='xxx').asobject)
results = get_dir(s)
tm.assert_almost_equal(
results, list(sorted(set(ok_for_period + ok_for_period_methods))))
# 11295
# ambiguous time error on the conversions
s = Series(pd.date_range('2015-01-01', '2016-01-01',
freq='T'), name='xxx')
s = s.dt.tz_localize('UTC').dt.tz_convert('America/Chicago')
results = get_dir(s)
tm.assert_almost_equal(
results, list(sorted(set(ok_for_dt + ok_for_dt_methods))))
exp_values = pd.date_range('2015-01-01', '2016-01-01', freq='T',
tz='UTC').tz_convert('America/Chicago')
expected = Series(exp_values, name='xxx')
tm.assert_series_equal(s, expected)
# no setting allowed
s = Series(date_range('20130101', periods=5, freq='D'), name='xxx')
with tm.assertRaisesRegexp(ValueError, "modifications"):
s.dt.hour = 5
# trying to set a copy
with pd.option_context('chained_assignment', 'raise'):
def f():
s.dt.hour[0] = 5
self.assertRaises(com.SettingWithCopyError, f)
def test_dt_accessor_no_new_attributes(self):
# https://github.com/pandas-dev/pandas/issues/10673
s = Series(date_range('20130101', periods=5, freq='D'))
with tm.assertRaisesRegexp(AttributeError,
"You cannot add any new attribute"):
s.dt.xlabel = "a"
def test_strftime(self):
# GH 10086
s = Series(date_range('20130101', periods=5))
result = s.dt.strftime('%Y/%m/%d')
expected = Series(['2013/01/01', '2013/01/02', '2013/01/03',
'2013/01/04', '2013/01/05'])
tm.assert_series_equal(result, expected)
s = Series(date_range('2015-02-03 11:22:33.4567', periods=5))
result = s.dt.strftime('%Y/%m/%d %H-%M-%S')
expected = Series(['2015/02/03 11-22-33', '2015/02/04 11-22-33',
'2015/02/05 11-22-33', '2015/02/06 11-22-33',
'2015/02/07 11-22-33'])
tm.assert_series_equal(result, expected)
s = Series(period_range('20130101', periods=5))
result = s.dt.strftime('%Y/%m/%d')
expected = Series(['2013/01/01', '2013/01/02', '2013/01/03',
'2013/01/04', '2013/01/05'])
tm.assert_series_equal(result, expected)
s = Series(period_range(
'2015-02-03 11:22:33.4567', periods=5, freq='s'))
result = s.dt.strftime('%Y/%m/%d %H-%M-%S')
expected = Series(['2015/02/03 11-22-33', '2015/02/03 11-22-34',
'2015/02/03 11-22-35', '2015/02/03 11-22-36',
'2015/02/03 11-22-37'])
tm.assert_series_equal(result, expected)
s = Series(date_range('20130101', periods=5))
s.iloc[0] = pd.NaT
result = s.dt.strftime('%Y/%m/%d')
expected = Series(['NaT', '2013/01/02', '2013/01/03', '2013/01/04',
'2013/01/05'])
tm.assert_series_equal(result, expected)
datetime_index = date_range('20150301', periods=5)
result = datetime_index.strftime("%Y/%m/%d")
expected = np.array(['2015/03/01', '2015/03/02', '2015/03/03',
'2015/03/04', '2015/03/05'], dtype=np.object_)
# dtype may be S10 or U10 depending on python version
self.assert_numpy_array_equal(result, expected, check_dtype=False)
period_index = period_range('20150301', periods=5)
result = period_index.strftime("%Y/%m/%d")
expected = np.array(['2015/03/01', '2015/03/02', '2015/03/03',
'2015/03/04', '2015/03/05'], dtype='<U10')
self.assert_numpy_array_equal(result, expected)
s = Series([datetime(2013, 1, 1, 2, 32, 59), datetime(2013, 1, 2, 14,
32, 1)])
result = s.dt.strftime('%Y-%m-%d %H:%M:%S')
expected = Series(["2013-01-01 02:32:59", "2013-01-02 14:32:01"])
tm.assert_series_equal(result, expected)
s = Series(period_range('20130101', periods=4, freq='H'))
result = s.dt.strftime('%Y/%m/%d %H:%M:%S')
expected = Series(["2013/01/01 00:00:00", "2013/01/01 01:00:00",
"2013/01/01 02:00:00", "2013/01/01 03:00:00"])
s = Series(period_range('20130101', periods=4, freq='L'))
result = s.dt.strftime('%Y/%m/%d %H:%M:%S.%l')
expected = Series(["2013/01/01 00:00:00.000",
"2013/01/01 00:00:00.001",
"2013/01/01 00:00:00.002",
"2013/01/01 00:00:00.003"])
tm.assert_series_equal(result, expected)
def test_valid_dt_with_missing_values(self):
from datetime import date, time
# GH 8689
s = Series(date_range('20130101', periods=5, freq='D'))
s.iloc[2] = pd.NaT
for attr in ['microsecond', 'nanosecond', 'second', 'minute', 'hour',
'day']:
expected = getattr(s.dt, attr).copy()
expected.iloc[2] = np.nan
result = getattr(s.dt, attr)
tm.assert_series_equal(result, expected)
result = s.dt.date
expected = Series(
[date(2013, 1, 1), date(2013, 1, 2), np.nan, date(2013, 1, 4),
date(2013, 1, 5)], dtype='object')
tm.assert_series_equal(result, expected)
result = s.dt.time
expected = Series(
[time(0), time(0), np.nan, time(0), time(0)], dtype='object')
tm.assert_series_equal(result, expected)
def test_dt_accessor_api(self):
# GH 9322
from pandas.tseries.common import (CombinedDatetimelikeProperties,
DatetimeProperties)
self.assertIs(Series.dt, CombinedDatetimelikeProperties)
s = Series(date_range('2000-01-01', periods=3))
self.assertIsInstance(s.dt, DatetimeProperties)
for s in [Series(np.arange(5)), Series(list('abcde')),
Series(np.random.randn(5))]:
with tm.assertRaisesRegexp(AttributeError,
"only use .dt accessor"):
s.dt
self.assertFalse(hasattr(s, 'dt'))
def test_sub_of_datetime_from_TimeSeries(self):
from pandas.tseries.timedeltas import to_timedelta
from datetime import datetime
a = Timestamp(datetime(1993, 0o1, 0o7, 13, 30, 00))
b = datetime(1993, 6, 22, 13, 30)
a = Series([a])
result = to_timedelta(np.abs(a - b))
self.assertEqual(result.dtype, 'timedelta64[ns]')
def test_between(self):
s = Series(bdate_range('1/1/2000', periods=20).asobject)
s[::2] = np.nan
result = s[s.between(s[3], s[17])]
expected = s[3:18].dropna()
assert_series_equal(result, expected)
result = s[s.between(s[3], s[17], inclusive=False)]
expected = s[5:16].dropna()
assert_series_equal(result, expected)
def test_date_tz(self):
# GH11757
rng = pd.DatetimeIndex(['2014-04-04 23:56',
'2014-07-18 21:24',
'2015-11-22 22:14'], tz="US/Eastern")
s = Series(rng)
expected = Series([date(2014, 4, 4),
date(2014, 7, 18),
date(2015, 11, 22)])
assert_series_equal(s.dt.date, expected)
assert_series_equal(s.apply(lambda x: x.date()), expected)
| gpl-3.0 |
yunfeilu/scikit-learn | examples/cluster/plot_agglomerative_clustering_metrics.py | 402 | 4492 | """
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vector. Indeed, the difference between metrics is
usually more pronounced in high dimension (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional one to the other. The cosine
distance is invariant to a scaling of the data, as a result, it cannot
distinguish these two waveforms. Thus even with no noise, clustering
using this distance will not separate out waveform 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. As a result, the
l1 norm of this noise (ie "cityblock" distance) is much smaller than it's
l2 norm ("euclidean" distance). This can be seen on the inter-class
distance matrices: the values on the diagonal, that characterize the
spread of the class, are much bigger for the Euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate waveforms 1 and 2 at all,
thus the clustering puts them in the same cluster.
"""
# Author: Gael Varoquaux
# License: BSD 3-Clause or CC-0
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
np.random.seed(0)
# Generate waveform data
n_features = 2000
t = np.pi * np.linspace(0, 1, n_features)
def sqr(x):
return np.sign(np.cos(x))
X = list()
y = list()
for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]):
for _ in range(30):
phase_noise = .01 * np.random.normal()
amplitude_noise = .04 * np.random.normal()
additional_noise = 1 - 2 * np.random.rand(n_features)
# Make the noise sparse
additional_noise[np.abs(additional_noise) < .997] = 0
X.append(12 * ((a + amplitude_noise)
* (sqr(6 * (t + phi + phase_noise)))
+ additional_noise))
y.append(i)
X = np.array(X)
y = np.array(y)
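# X now holds 90 noisy waveforms (30 per class, n_features=2000 samples each);
# y holds the matching class labels 0, 1 and 2.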
n_clusters = 3
labels = ('Waveform 1', 'Waveform 2', 'Waveform 3')
# Plot the ground-truth labelling
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c, n in zip(range(n_clusters), 'rgb',
labels):
lines = plt.plot(X[y == l].T, c=c, alpha=.5)
lines[0].set_label(n)
plt.legend(loc='best')
plt.axis('tight')
plt.axis('off')
plt.suptitle("Ground truth", size=20)
# Plot the distances
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
avg_dist = np.zeros((n_clusters, n_clusters))
plt.figure(figsize=(5, 4.5))
for i in range(n_clusters):
for j in range(n_clusters):
avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j],
metric=metric).mean()
avg_dist /= avg_dist.max()
for i in range(n_clusters):
for j in range(n_clusters):
plt.text(i, j, '%5.3f' % avg_dist[i, j],
verticalalignment='center',
horizontalalignment='center')
plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2,
vmin=0)
plt.xticks(range(n_clusters), labels, rotation=45)
plt.yticks(range(n_clusters), labels)
plt.colorbar()
plt.suptitle("Interclass %s distances" % metric, size=18)
plt.tight_layout()
# Plot clustering results
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
model = AgglomerativeClustering(n_clusters=n_clusters,
linkage="average", affinity=metric)
model.fit(X)
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c in zip(np.arange(model.n_clusters), 'rgbk'):
plt.plot(X[model.labels_ == l].T, c=c, alpha=.5)
plt.axis('tight')
plt.axis('off')
plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20)
plt.show()
| bsd-3-clause |
kevincwright/quagmire | atomic/Li7Data.py | 1 | 4100 | # -*- coding: utf-8 -*-
"""
Properties of Lithium-7
"""
from __future__ import division
from pybec.atomic.core import HeConfig
import pandas as pd
import scipy.constants as spc
# import constants from reference data
from numpy import pi
m_e = spc.physical_constants['electron mass'][0] # in kg
a_0 = spc.physical_constants['Bohr radius'][0] # in meters
gS = spc.physical_constants['electron g factor'][0] # dimensionless
# Lithium Physical Properties
Li7PhysicalData = {
'Z' : 3 , # Atomic Number
'mass' : 1.16503486E-26 , # Atomic Mass
'Inuc' : 3/2 , # Nuclear Spin
'gI' : -0.0011822130 , # Nuclear Lande g-factor
}
# Properties of electronic states:
Li7LevelDataList = pd.Series( [
{'config' : HeConfig + ['2s_{1}'] ,
'term' : '2^2S_{1/2}' , # Term Symbol
'qnums' : (2,0,0.5,0.5) , # (n,L,S,J)
'Ahfs' : 401.7520433 , # Magnetic Dipole Constant (MHz)
} ,
{'config' : HeConfig + ['2p_{1}'] ,
'term' : '2^2P_{1/2}' , # Term Symbol
'qnums' : (2,1,0.5,0.5) , # (n,L,S,J)
'Ahfs' : 45.914 , # Magnetic Dipole Constant (MHz)
} ,
{'config' : HeConfig + ['2p_{1}'] ,
'term' :'2^2P_{3/2}' , # Term Symbol
'qnums' : (2,1,0.5,1.5) , # (n,L,S,J)
'Ahfs' : -3.055 , # Magnetic Dipole Constant (MHz)
'Bhfs' : -0.221 , # Electric Quadrupole Constant(MHz)
} ,
{'config' : HeConfig + ['3s_{1}'] ,
'term' :'3^2S_{1/2}' , # Term Symbol
'qnums' : (3,0,0.5,0.5) , # (n,L,S,J)
'Ahfs' : 0.0 , # Magnetic Dipole Constant (MHz)
'Bhfs' : 0.0 , # Electric Quadrupole Constant(MHz)
} ,
{'config' : HeConfig + ['3p_{1}'] ,
'term' :'3^2P_{1/2}' , # Term Symbol
'qnums' : (3,1,0.5,0.5) , # (n,L,S,J)
'Ahfs' : 13.5 , # Magnetic Dipole Constant (MHz)
'Bhfs' : 0.0 , # Electric Quadrupole Constant(MHz)
} ,
{'config' : HeConfig + ['3p_{1}'] ,
'term' :'3^2P_{3/2}' , # Term Symbol
'qnums' : (3,1,0.5,1.5) , # (n,L,S,J)
'Ahfs' : -0.965 , # Magnetic Dipole Constant (MHz)
'Bhfs' : -0.019 , # Electric Quadrupole Constant(MHz)
}
])
Li7TransitionDataList = pd.Series([
{'label' : 'D1' ,
'levelnumbers' : (0,1) , # numbers of energy levels
'lambda' : 670.976658173E-9 , # wavelength in meters
'frequency' : 446.800129853E12 , # frequency in Hz
'lifetime' : 27.102E-9 , # lifetime in seconds
'Gamma' : 2*pi*5.8724E6 , # nat. linewidth in rad/second
},
{'label' : 'D2' ,
'levelnumbers' : (0,2) , # numbers of energy levels
'lambda' : 670.961560887E-9 , # wavelength in meters
'frequency' : 446.810183289E12 , # frequency in Hz
'lifetime' : 27.102E-9 , # lifetime in seconds
'Gamma' : 2*pi*5.8724E6 , # natural linewidth in radians/second
},
{'label' : 'UVMOT' ,
'levelnumbers' : (0,5) , # numbers of energy levels
'lambda' : 323.3590E-7 , # wavelength in meters
'frequency' : 927.120E12 , # frequency in Hz
'lifetime' : 998.4E-9 , # lifetime in seconds
'Gamma' : 2*pi*159.4E6 , # natural linewidth in radians/second
},
])
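# Example (illustrative): look up the D2 line entry defined above.
#   d2 = Li7TransitionDataList[1]
#   d2['lambda']   # 670.961560887e-9 m
#   d2['Gamma']    # 2*pi*5.8724e6 rad/s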
Li7CollisionData = {
'a_triplet' : -27.6 * a_0 , # triplet scattering length in m
    'a_singlet'  : 33 * a_0 ,      # singlet scattering length in m
} | gpl-3.0 |
micahcochran/geopandas | geopandas/tests/test_types.py | 2 | 2613 | from __future__ import absolute_import
from pandas import Series, DataFrame
from shapely.geometry import Point
from geopandas import GeoSeries, GeoDataFrame
class TestSeries:
def setup_method(self):
N = self.N = 10
r = 0.5
self.pts = GeoSeries([Point(x, y) for x, y in zip(range(N), range(N))])
self.polys = self.pts.buffer(r)
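    # The tests below check that indexing, slicing and grouping a GeoSeries
    # returns a GeoSeries rather than degrading to a plain pandas Series.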
def test_slice(self):
assert type(self.pts[:2]) is GeoSeries
assert type(self.pts[::2]) is GeoSeries
assert type(self.polys[:2]) is GeoSeries
def test_head(self):
assert type(self.pts.head()) is GeoSeries
def test_tail(self):
assert type(self.pts.tail()) is GeoSeries
def test_sort_index(self):
assert type(self.pts.sort_index()) is GeoSeries
def test_loc(self):
assert type(self.pts.loc[5:]) is GeoSeries
def test_iloc(self):
assert type(self.pts.iloc[5:]) is GeoSeries
def test_fancy(self):
idx = (self.pts.index.to_series() % 2).astype(bool)
assert type(self.pts[idx]) is GeoSeries
def test_take(self):
assert type(self.pts.take(list(range(0, self.N, 2)))) is GeoSeries
def test_select(self):
assert type(self.pts.select(lambda x: x % 2 == 0)) is GeoSeries
def test_groupby(self):
for f, s in self.pts.groupby(lambda x: x % 2):
assert type(s) is GeoSeries
class TestDataFrame:
def setup_method(self):
N = 10
self.df = GeoDataFrame([
{'geometry': Point(x, y), 'value1': x + y, 'value2': x*y}
for x, y in zip(range(N), range(N))])
def test_geometry(self):
assert type(self.df.geometry) is GeoSeries
# still GeoSeries if different name
df2 = GeoDataFrame({"coords": [Point(x, y) for x, y in zip(range(5),
range(5))],
"nums": range(5)}, geometry="coords")
assert type(df2.geometry) is GeoSeries
assert type(df2['coords']) is GeoSeries
def test_nongeometry(self):
assert type(self.df['value1']) is Series
def test_geometry_multiple(self):
assert type(self.df[['geometry', 'value1']]) is GeoDataFrame
def test_nongeometry_multiple(self):
assert type(self.df[['value1', 'value2']]) is DataFrame
def test_slice(self):
assert type(self.df[:2]) is GeoDataFrame
assert type(self.df[::2]) is GeoDataFrame
def test_fancy(self):
idx = (self.df.index.to_series() % 2).astype(bool)
assert type(self.df[idx]) is GeoDataFrame
| bsd-3-clause |
chitrangpatel/presto | bin/subband_smearing.py | 2 | 4614 | #!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as num
import psr_utils as pu
def subband_smear(DM, subDM, subBW, fctr):
"""
subband_smear(DM, subDM, subBW, fctr):
Return the smearing in ms caused by subbanding at DM='DM' given
subbands of bandwidth 'subBW' (MHz) at DM='subDM'. All values
are computed at the frequency fctr in MHz.
"""
return 1000.0 * pu.dm_smear(num.fabs(DM-subDM), subBW, fctr)
def chan_smear(DM, chanDM, chanBW, fctr):
"""
chan_smear(DM, chanDM, chanBW, fctr):
Return the smearing in ms caused by a finite channels at DM='DM'
given channels of bandwidth 'chanBW' (MHz) at DM='chanDM'. All
values are computed at the frequency fctr in MHz.
"""
return subband_smear(DM, chanDM, chanBW, fctr)
def orig_smear(DM, nchan, chanDM, BW, fctr, dt):
"""
orig_smear(DM, nchan, chanDM, BW, fctr, dt):
Return the total smearing in ms due to the sampling rate,
and the smearing over each channel.
"""
return num.sqrt((1000.0*dt)**2.0 +
chan_smear(DM, chanDM, BW/nchan, fctr)**2.0)
def total_smear(DM, nchan, chanDM, nsub, subDM,
BW, fctr, dt, downsamp):
"""
total_smear(DM, nchan, chanDM, nsub, subDM,
BW, fctr, dt, downsamp):
Return the total smearing in ms due to the original channel
format and the properties of the subbands.
"""
# the factor of two comes from integer-bin shifts when doing
# the incoherent subbanding
return num.sqrt(2 * (1000.0*dt*downsamp)**2.0 +
chan_smear(DM, chanDM, BW/nchan, fctr)**2.0 +
subband_smear(DM, subDM, BW/nsub, fctr)**2.0)
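# Illustrative helper (not part of the original script); the name and all of
# the numbers below are placeholders, not recommended survey settings.
def _example_smearing_budget():
    """Total effective time resolution (ms) for one example subbanding setup."""
    return total_smear(DM=100.0, nchan=1024, chanDM=0.0, nsub=32, subDM=100.0,
                       BW=400.0, fctr=1400.0, dt=64e-6, downsamp=2)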
def usage():
print """
usage: subband_smearing.py [options]
[-l loDM, --loDM=loDM] : Low DM
[-h hiDM, --hiDM=HIDM] : High DM
[-t dt, --dt=dt] : Sample time (s)
[-s subbands, --nsub=nsub] : Number of subbands
  [-m subdm, --subDM=subDM]     : DM of each subband
[-f fctr, --fctr=fctr] : Center frequency in MHz
[-b BW, --bw=bandwidth] : Bandwidth in MHz
[-n #chan, --nchan=#chan] : Number of channels
[-c chanDM, --chanDM=chanDM] : DM in each channel (default = 0.0)
[-d N, --downsamp=downsamp] : Integer downsample (default = 1)
"""
if __name__=='__main__':
import getopt, sys
try:
opts, args = getopt.getopt(sys.argv[1:], "l:h:t:s:m:f:b:n:c:d:",
["loDM=", "hiDM=", "dt=",
"nsub=", "subDM="
"fctr=", "bw=",
"nchan=", "chanDM=", "downsamp="])
except getopt.GetoptError:
usage()
sys.exit(2)
if len(sys.argv)==1:
usage()
sys.exit(2)
# Defaults
chanDM = 0.0
downsamp = 1
for o, a in opts:
if o in ("-l", "--loDM"):
loDM = float(a)
elif o in ("-h", "--hiDM"):
hiDM = float(a)
elif o in ("-t", "--dt"):
dt = float(a)
elif o in ("-s", "--nsub"):
nsub = int(a)
elif o in ("-m", "--subDM"):
subDM = float(a)
elif o in ("-f", "--fctr"):
fctr = float(a)
elif o in ("-b", "--bw"):
BW = float(a)
elif o in ("-n", "--nchan"):
nchan = int(a)
elif o in ("-c", "--chanDM"):
chanDM = float(a)
elif o in ("-d", "--downsamp"):
downsamp = float(a)
DMs = num.linspace(loDM, hiDM, 1000)
samp = num.ones_like(DMs) * 1000.0 * dt
dsamp = samp * downsamp
chan = chan_smear(DMs, chanDM, BW/nchan, fctr)
subband = subband_smear(DMs, subDM, BW/nsub, fctr)
orig = orig_smear(DMs, nchan, chanDM, BW, fctr, dt)
total = total_smear(DMs, nchan, chanDM, nsub, subDM,
BW, fctr, dt, downsamp)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.semilogy(DMs, samp, 'g:',
DMs, dsamp, 'g--',
DMs, chan, 'r:',
DMs, subband, 'r--',
DMs, orig, 'k:',
DMs, total, 'k')
leg = ax.legend(('Sampling time', 'Downsampling',
'Channel smear', 'Subband smear',
'Original time res', 'Total time res'),
'upper center')
    ax.set_xlabel('Dispersion Measure')
ax.set_ylabel('Smearing (ms)')
ax.set_xlim([DMs.min(), DMs.max()])
ax.set_ylim([0.5*1000.0*dt, 2.0*total.max()])
plt.show()
| gpl-2.0 |
jooolia/oligotyping | Oligotyping/utils/random_colors.py | 2 | 2895 | # -*- coding: utf-8 -*-
# Copyright (C) 2010 - 2012, A. Murat Eren
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
import copy
import random
import matplotlib.cm as cm
#
# all available colormaps in matplotlib can be seen via
# http://wiki.scipy.org/Cookbook/Matplotlib/Show_colormaps
#
def getColor(name, n):
return cm.get_cmap(name, lut=n+2)
def get_hex_color(rgba_color):
hex_color = '#'
for t in rgba_color[0:3]:
h = str(hex(int(t * 255)))[2:]
hex_color += '00' if h == '0' else h
return hex_color + '0' * (7 - len(hex_color))
def random_colors(oligotypes, output_file_path = None, colormap = 'Paired'):
oligotypes_shuffled = copy.deepcopy(oligotypes)
random.shuffle(oligotypes_shuffled)
color_dict = {}
colors = getColor(colormap, len(oligotypes_shuffled))
for i in range(0, len(oligotypes_shuffled)):
color_dict[oligotypes_shuffled[i]] = get_hex_color(colors(i))
if output_file_path:
output_file = open(output_file_path, 'w')
for oligotype in oligotypes:
            output_file.write('%s\t%s\n' % (oligotype, color_dict[oligotype]))
return color_dict
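# Example (illustrative; the exact hex values depend on the colormap and on
# the random shuffle):
#   colors = random_colors(['oligo1', 'oligo2', 'oligo3'], colormap='Paired')
#   # -> {'oligo1': '#...', 'oligo2': '#...', 'oligo3': '#...'}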
def get_list_of_colors(number_of_colors, colormap = 'OrRd'):
color_list = getColor(colormap, number_of_colors)
return [get_hex_color(color_list(i)) for i in range(0, number_of_colors)]
def get_color_shade_dict_for_list_of_values(values, colormap = 'OrRd'):
color_shade_dict = {}
colors = getColor(colormap, len(values) * 1000)
max_val = max(values) if max(values) > 1 else 1
for val in values:
i = float(val) / max_val
color_shade_dict[val] = get_hex_color(colors(i))
return color_shade_dict
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Generate Random Colors')
parser.add_argument('oligotypes', metavar = 'FASTA_FILE', help = 'Oligotype sequences in FASTA format')
parser.add_argument('--output-file', default = None, metavar = 'OUTPUT_FILE',\
help = 'File name to store random colors')
parser.add_argument('--colormap', default = 'Accent', metavar = 'MATPLOTLOB_COLORMAP',\
help = 'see http://www.scipy.org/Cookbook/Matplotlib/Show_colormaps')
args = parser.parse_args()
oligotypes = [line.strip() for line in open(parser.parse_args().oligotypes) if not line.startswith('>')]
colors_dict = random_colors(oligotypes, output_file_path = args.output_file, colormap = args.colormap)
if not args.output_file:
for oligo in colors_dict:
print '%s: %s' % (oligo, colors_dict[oligo])
| gpl-2.0 |
loli/sklearn-ensembletrees | sklearn/ensemble/__init__.py | 1 | 1360 | """
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification and regression.
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import MixedForestClassifier
from .forest import MixedForestRegressor
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .bagging import BaggingClassifier
from .bagging import BaggingRegressor
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from . import bagging
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
__all__ = ["BaseEnsemble", "MixedForestClassifier", "MixedForestRegressor",
"RandomForestClassifier", "RandomForestRegressor",
"RandomTreesEmbedding", "ExtraTreesClassifier",
"ExtraTreesRegressor", "BaggingClassifier",
"BaggingRegressor", "GradientBoostingClassifier",
"GradientBoostingRegressor", "AdaBoostClassifier",
"AdaBoostRegressor", "bagging", "forest", "gradient_boosting",
"partial_dependence", "weight_boosting"]
| bsd-3-clause |
shyamalschandra/scikit-learn | sklearn/tests/test_learning_curve.py | 59 | 10869 | # Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
# training score becomes worse (2 -> 1), test error better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
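# The mocks above stand in for real estimators: MockImprovingEstimator (and its
# partial_fit-capable subclass) report scores that improve linearly with the
# number of training samples, while MockEstimatorWithParameter scores `param`
# on training data and 1 - param elsewhere, making the expected learning and
# validation curves easy to compute in the tests below.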
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n=30, n_folds=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
| bsd-3-clause |
drkjam/pdutils | tests/pdutils/serialize/test_json.py | 1 | 2029 | import datetime as dt
import pytest
import jsonpickle
import numpy as np
import pandas as pd
from pdutils.serialize.json import register_handlers
from pdutils.compare import ndarray_compare, ts_compare, df_compare
from pdutils.assert_funcs import assert_
register_handlers()
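# register_handlers() installs the pdutils jsonpickle handlers so that the
# numpy arrays, pandas time series and data frames used in the tests below
# survive an encode/decode round trip.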
@pytest.mark.parametrize('arr', [
np.array([1, 2, 3]),
np.array([1., 2., 3.]),
np.array(['foo', 'bar', 'baz']),
np.array([dt.datetime(1970, 1, 1, 12, 57), dt.datetime(1970, 1, 1, 12, 58), dt.datetime(1970, 1, 1, 12, 59)]),
np.array([dt.date(1970, 1, 1), dt.date(1970, 1, 2), dt.date(1970, 1, 3)]),
])
def test_numpy_array_handler(arr):
buf = jsonpickle.encode(arr)
arr_after = jsonpickle.decode(buf)
assert_(ndarray_compare(arr, arr_after))
@pytest.mark.parametrize('ts', [
pd.TimeSeries([1, 2, 3], index=[0, 1, 2]),
pd.TimeSeries([1., 2., 3.], pd.date_range('1970-01-01', periods=3, freq='S'))
])
def test_pandas_timeseries_handler(ts):
buf = jsonpickle.encode(ts)
ts_after = jsonpickle.decode(buf)
assert_(ts_compare(ts, ts_after))
@pytest.mark.parametrize('df', [
pd.DataFrame({0: [1, 2, 3]}, index=[0, 1, 2]),
pd.DataFrame({0: [1, 2, 3], 1: [1.1, 2.2, 3.3]}, index=[0, 1, 2]),
pd.DataFrame({0: [1, 2, 3], 1: [1.1, 2.2, 3.3]}, index=pd.date_range('1970-01-01', periods=3, freq='S')),
])
def test_pandas_dataframe_handler(df):
buf = jsonpickle.encode(df)
ts_after = jsonpickle.decode(buf)
assert_(df_compare(df, ts_after))
def test_mixed_python_and_pandas_types():
data = (
np.array([1., 2., 3.]),
pd.TimeSeries([1, 2, 3], index=[0, 1, 2]),
pd.DataFrame({0: [1, 2, 3], 1: [1.1, 2.2, 3.3]}, index=pd.date_range('1970-01-01', periods=3, freq='S'))
)
buf = jsonpickle.encode(data)
data_after = jsonpickle.decode(buf)
assert isinstance(data, tuple)
assert len(data) == 3
assert_(ndarray_compare(data[0], data_after[0]))
assert_(ts_compare(data[1], data_after[1]))
assert_(df_compare(data[2], data_after[2]))
| mit |
darioizzo/optimal_landing | indirect_method/simple_landing_close.py | 1 | 13863 | """
Implements an indirect method to solve the optimal control
problem of a varying mass spacecraft. No attitude is present,
hence the name "simple"
Dario Izzo 2016
"""
from PyGMO.problem._base import base
from numpy.linalg import norm
from math import sqrt, sin, cos, atan2
from scipy.integrate import odeint
from numpy import linspace
from copy import deepcopy
import sys
import numpy as np
class simple_landing(base):
def __init__(
self,
state0 = [0., 1000., 20., -5., 10000.],
statet = [0., 0., 0., 0, 9758.695805],
c1=44000.,
c2 = 311. * 9.81,
g = 1.6229,
homotopy = 0.,
pinpoint = False
):
"""
        USAGE: simple_landing(state0, statet, c1, c2, g, homotopy, pinpoint)
* state0: initial state [x, y, vx, vy, m] in m,m,m/s,m/s,kg
* statet: target state [x, y, vx, vy, m] in m,m,m/s,m/s,kg
* c1: maximum thrusts for the main thruster (N)
* c2: veff, Isp*g0 (m / s)
* g: planet gravity [m/s**2]
        * homotopy: homotopy parameter, 0 -> quadratic control (QC), 1 -> mass-optimal control (MOC)
* pinpoint: if True toggles the final constraint on the landing x
"""
super(simple_landing, self).__init__(6, 0, 1, 6, 0, 1e-5)
# We store the raw inputs for convenience
self.state0_input = state0
self.statet_input = statet
# We define the non dimensional units (will use these from here on)
self.R = 10.
self.V = 1.
self.M = 10000.
self.A = (self.V * self.V) / self.R
self.T = self.R / self.V
self.F = self.M * self.A
# We store the parameters
self.c1 = c1 / self.F
self.c2 = c2 / self.V
self.g = g / self.A
# We compute the initial and final state in the new units
self.state0 = self._non_dim(self.state0_input)
self.statet = self._non_dim(self.statet_input)
# We set the bounds (these will only be used to initialize the population)
self.set_bounds([-1.] * 5 + [1e-04], [1.] * 5 + [100. / self.T])
# Activates a pinpoint landing
self.pinpoint = pinpoint
# Stores the homotopy parameter, 0->QC, 1->MOC
self.homotopy = homotopy
def _objfun_impl(self, x):
return(1.,) # constraint satisfaction, no objfun
def _compute_constraints_impl(self, x):
# Perform one forward shooting
xf, info = self._shoot(x)
# Assembling the equality constraint vector
ceq = list([0]*6)
# Final conditions
if self.pinpoint:
#Pinpoint landing x is fixed lx is free
ceq[0] = (xf[-1][0] - self.statet[0] )
else:
#Transversality condition: x is free lx is 0
ceq[0] = xf[-1][5] ** 2
ceq[1] = (xf[-1][1] - self.statet[1] )
ceq[2] = (xf[-1][2] - self.statet[2] )
ceq[3] = (xf[-1][3] - self.statet[3] )
# Transversality condition on mass (free)
ceq[4] = xf[-1][9] ** 2
# Free time problem, Hamiltonian must be 0
ceq[5] = self._hamiltonian(xf[-1]) ** 2
return ceq
def _hamiltonian(self, full_state):
state = full_state[:5]
costate = full_state[5:]
# Applying Pontryagin minimum principle
controls = self._pontryagin_minimum_principle(full_state)
# Computing the R.H.S. of the state eom
f_vett = self._eom_state(state, controls)
# Assembling the Hamiltonian
H = 0.
for l, f in zip(costate, f_vett):
H += l * f
        # Adding the integral (running) cost term so H is the full Hamiltonian
H += self._cost(state, controls)
return H
def _cost(self,state, controls):
c1 = self.c1
c2 = self.c2
u, stheta, ctheta = controls
retval = self.homotopy * c1 / c2 * u + (1 - self.homotopy) * c1**2 / c2 * u**2
return retval
def _eom_state(self, state, controls):
# Renaming variables
x,y,vx,vy,m = state
c1 = self.c1
c2 = self.c2
g = self.g
u, stheta, ctheta = controls
# Equations for the state
dx = vx
dy = vy
dvx = c1 * u / m * stheta
dvy = c1 * u / m * ctheta - g
dm = - c1 * u / c2
return [dx, dy, dvx, dvy, dm]
def _eom_costate(self, full_state, controls):
# Renaming variables
x,y,vx,vy,m,lx,ly,lvx,lvy,lm = full_state
c1 = self.c1
u, stheta, ctheta = controls
# Equations for the costate
lvdotitheta = lvx * stheta + lvy * ctheta
dlx = 0.
dly = 0.
dlvx = - lx
dlvy = - ly
dlm = c1 * u / m**2 * lvdotitheta
return [dlx, dly, dlvx, dlvy, dlm]
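    # Note on the control law below: the thrust direction is chosen opposite to
    # the velocity costate (lvx, lvy). For homotopy == 1 the throttle is
    # bang-bang, switching on the sign of S = 1 - lm - |lv|*c2/m; for
    # homotopy < 1 it is the saturated quadratic-control expression.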
def _pontryagin_minimum_principle(self, full_state):
# Renaming variables
c1 = self.c1
c2 = self.c2
x,y,vx,vy,m,lx,ly,lvx,lvy,lm = full_state
lv_norm = sqrt(lvx**2 + lvy**2)
stheta = - lvx / lv_norm
ctheta = - lvy / lv_norm
if self.homotopy == 1:
# Minimum mass
S = 1. - lm - lv_norm / m * c2
if S >= 0:
u=0.
if S < 0:
u=1.
else:
u = 1. / 2. / c1 / (1 - self.homotopy) * (lm + lv_norm * c2 / m - self.homotopy)
u = min(u,1.)
u = max(u,0.)
return [u, stheta, ctheta]
def _eom(self, full_state, t):
# Applying Pontryagin minimum principle
state = full_state[:5]
controls = self._pontryagin_minimum_principle(full_state)
# Equations for the state
dstate = self._eom_state(state, controls)
# Equations for the co-states
dcostate = self._eom_costate(full_state, controls)
return dstate + dcostate
def _shoot(self, x):
# Numerical Integration
xf, info = odeint(lambda a,b: self._eom(a,b), self.state0 + list(x[:-1]), linspace(0, x[-1],100), rtol=1e-13, atol=1e-13, full_output=1, mxstep=2000)
return xf, info
def _simulate(self, x, tspan):
# Numerical Integration
xf, info = odeint(lambda a,b: self._eom(a,b), self.state0 + list(x[:-1]), tspan, rtol=1e-13, atol=1e-13, full_output=1, mxstep=2000)
return xf, info
def _non_dim(self, state):
xnd = deepcopy(state)
xnd[0] /= self.R
xnd[1] /= self.R
xnd[2] /= self.V
xnd[3] /= self.V
xnd[4] /= self.M
return xnd
def _dim_back(self, state):
xd = deepcopy(state)
xd[0] *= self.R
xd[1] *= self.R
xd[2] *= self.V
xd[3] *= self.V
xd[4] *= self.M
return xd
def plot(self, x):
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
mpl.rcParams['legend.fontsize'] = 10
# Producing the data
tspan = linspace(0, x[-1], 100)
full_state, info = self._simulate(x, tspan)
# Putting dimensions back
res = list()
controls = list()
ux = list(); uy=list()
for line in full_state:
res.append(self._dim_back(line[:5]))
controls.append(self._pontryagin_minimum_principle(line))
ux.append(controls[-1][0]*controls[-1][1])
uy.append(controls[-1][0]*controls[-1][2])
tspan = [it * self.T for it in tspan]
x = list(); y=list()
vx = list(); vy = list()
m = list()
for state in res:
x.append(state[0])
y.append(state[1])
vx.append(state[2])
vy.append(state[3])
m.append(state[4])
fig = plt.figure()
ax = fig.gca()
ax.plot(x, y, color='r', label='Trajectory')
ax.quiver(x, y, ux, uy, label='Thrust', pivot='tail', width=0.001)
ax.set_ylim(0,self.state0_input[1]+500)
f, axarr = plt.subplots(3, 2)
axarr[0,0].plot(x, y)
axarr[0,0].set_xlabel('x'); axarr[0,0].set_ylabel('y');
axarr[1,0].plot(vx, vy)
axarr[1,0].set_xlabel('vx'); axarr[1,0].set_ylabel('vy');
axarr[2,0].plot(tspan, m)
axarr[0,1].plot(tspan, [controls[ix][0] for ix in range(len(controls))],'r')
axarr[0,1].set_ylabel('u')
axarr[0,1].set_xlabel('t')
axarr[1,1].plot(tspan, [atan2(controls[ix][1], controls[ix][2]) for ix in range(len(controls))],'k')
axarr[1,1].set_ylabel('theta')
axarr[1,1].set_xlabel('t')
axarr[2,1].plot(tspan, [controls[ix][2] for ix in range(len(controls))],'k')
plt.ion()
plt.show()
return axarr
def human_readable_extra(self):
s = "\n\tDimensional inputs:\n"
s = s + "\tStarting state: " + str(self.state0_input) + "\n"
s = s + "\tTarget state: " + str(self.statet_input) + "\n"
s = s + "\tThrusters maximum magnitude [N]: " + str(self.c1 * self.F) + "\n"
s = s + "\tIsp * g0: " + str(self.c2 * self.V) + ", gravity: " + str(self.g * self.A) + "\n"
s = s + "\n\tNon - Dimensional inputs:\n"
s = s + "\tStarting state: " + str(self.state0) + "\n"
s = s + "\tTarget state: " + str(self.statet) + "\n"
s = s + "\tThrusters maximum magnitude [N]: " + str(self.c1) + "\n"
s = s + "\tIsp * g0: " + str(self.c2) + ", gravity: " + str(self.g) + "\n\n"
s = s + "\tHomotopy parameter: " + str(self.homotopy)
s = s + "\tPinpoint?: " + str(self.pinpoint)
return s
def produce_data(self, x, npoints):
# Producing the data
tspan = linspace(0, x[-1], 100)
full_state, info = self._simulate(x, tspan)
# Putting dimensions back
res = list()
controls = list()
u1 = list(); u2 = list()
for line in full_state:
res.append(self._dim_back(line[:5]))
controls.append(self._pontryagin_minimum_principle(line))
u1.append(controls[-1][0])
u2.append(atan2(controls[-1][1], controls[-1][2]))
u1 = np.vstack(u1)
u2 = np.vstack(u2)
tspan = [it * self.T for it in tspan]
x = list(); y=list()
vx = list(); vy = list()
m = list()
for state in res:
x.append(state[0])
y.append(state[1])
vx.append(state[2])
vy.append(state[3])
m.append(state[4])
tspan = np.vstack(tspan)
x = np.vstack(x)
y = np.vstack(y)
vx = np.vstack(vx)
vy = np.vstack(vy)
m = np.vstack(m)
return (np.hstack((tspan, x, y, vx, vy, m)), np.hstack((u1, u2)))
if __name__ == "__main__":
from PyGMO import *
from random import random
# Use SNOPT if possible
algo = algorithm.snopt(200, opt_tol=1e-5, feas_tol=1e-6)
# Alternatively the scipy SQP solver can be used
#algo = algorithm.scipy_slsqp(max_iter = 1000,acc = 1E-8,epsilon = 1.49e-08, screen_output = True)
#algo.screen_output = True
    # Define the starting area (x0 will be irrelevant if pinpoint is not True)
x0b = [-100, 100]
y0b = [500, 2000]
vx0b = [-100, 100]
vy0b = [-30, 10]
m0b = [8000, 12000]
x0 = random() * (x0b[1] - x0b[0]) + x0b[0]
y0 = random() * (y0b[1] - y0b[0]) + y0b[0]
vx0 = random() * (vx0b[1] - vx0b[0]) + vx0b[0]
vy0 = random() * (vy0b[1] - vy0b[0]) + vy0b[0]
m0 = random() * (m0b[1] - m0b[0]) + m0b[0]
state0 = [x0, y0, vx0, vy0, m0]
# We start solving the Quadratic Control
print("Trying I.C. {}".format(state0)),
prob = simple_landing(state0 = state0, homotopy=0., pinpoint=True)
count = 1
for i in range(1, 20):
print("Attempt # {}".format(i), end="")
pop = population(prob,1)
pop = algo.evolve(pop)
pop = algo.evolve(pop)
if (prob.feasibility_x(pop[0].cur_x)):
print(" - Success, violation norm is: {0:.4g}".format(norm(pop[0].cur_c)))
break
else:
print(" - Failed, violation norm is: {0:.4g}".format(norm(pop[0].cur_c)))
print("PaGMO reports: ", end="")
print(prob.feasibility_x(pop[0].cur_x))
if not prob.feasibility_x(pop[0].cur_x):
print("No QC solution! Ending here :(")
sys.exit(0)
else:
print("Found QC solution!! Starting Homotopy")
print("from \t to\t step\t result")
# We proceed to solve by homotopy the mass optimal control
# Minimum and maximum step for the continuation
h_min = 1e-8
h_max = 0.3
# Starting step
h = 0.1
trial_alpha = h
alpha = 0
x = pop[0].cur_x
#algo.screen_output = True
while True:
if trial_alpha > 1:
trial_alpha = 1.
print("{0:.5g}, \t {1:.5g} \t".format(alpha, trial_alpha), end="")
print("({0:.5g})\t".format(h), end="")
prob = simple_landing(state0 = state0, pinpoint=True, homotopy=trial_alpha)
pop = population(prob)
pop.push_back(x)
pop = algo.evolve(pop)
if not (prob.feasibility_x(pop[0].cur_x)):
pop = algo.evolve(pop)
pop = algo.evolve(pop)
pop = algo.evolve(pop)
if (prob.feasibility_x(pop[0].cur_x)):
x = pop.champion.x
if trial_alpha == 1:
print(" Success")
break
print(" Success")
h = h * 2.
h = min(h, h_max)
alpha = trial_alpha
trial_alpha = trial_alpha + h
else:
print(" - Failed, ", end="")
print("norm c: {0:.4g}".format(norm(pop[0].cur_c)))
h = h * 0.5
if h < h_min:
print("\nContinuation step too small aborting :(")
sys.exit(0)
trial_alpha = alpha + h
| lgpl-3.0 |
thaihungle/deepexp | ntm-mann/mimic_prepare_seq.py | 1 | 38598 | import pandas
import matplotlib.pyplot as plt
import numpy as np
import os
import random
import pickle
import datetime
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
import collections
def get_med(fpath2='./data/mimic/PRESCRIPTIONS.csv'):
df2 = pandas.DataFrame.from_csv(fpath2)
adm={}
ls={}
for index, row in df2.iterrows():
id = row['HADM_ID']
if id not in adm:
adm[id]={'date':[],'rep_drug':[]}
ls[id]=0
# if row['STARTDATE'] not in adm[id]['date']:
adm[id]['date'].append(row['STARTDATE'])
adm[id]['rep_drug'].append(row['FORMULARY_DRUG_CD'])
ls[id]+=1
all_ls=[]
for k,v in ls.items():
all_ls.append(v)
c=0
for k,v in adm.items():
if c>10:
break
print('{} vs {}'.format(k,v))
c+=1
pickle.dump(adm, open('./data/mimic/amd2med2.pkl', 'wb'))
print(len(adm))
print(np.max(all_ls))
print(np.min(all_ls))
print(np.average(all_ls))
def build_mimic_med(fpath='./data/mimic/mimic-iii.tsv'):
adm=pickle.load(open('./data/mimic/amd2med2.pkl', 'rb'))
adm2={}
print(len(adm))
df = pandas.DataFrame.from_csv(fpath, sep='\t', index_col=None)
nmc=0
for index, row in df.iterrows():
# print('{} vs {}'.format(row['DIAG'], row['PROC']))
aid = row['HADM_ID']
if aid in adm:
v=adm[aid]['rep_drug']
# if aid==147673:
# print('====')
# # print(row)
# print(v)
# # print(row['SUBJECT_ID'])
# print(index)
# if aid==100003:
# print('+++++')
# # print(row)
# print(v)
# # print(row['SUBJECT_ID'])
# print(index)
adm2[aid]=v
df.set_value(index, 'PROC', str(v))
else:
print('not match {}/{}'.format(nmc,len(adm2)))
df.set_value(index, 'PROC', str([]))
nmc+=1
print(len(adm2))
c=0
for k,v in adm2.items():
if c>10:
break
print('{} vs {}'.format(k,v))
c+=1
all_ls = []
for k,v in adm2.items():
all_ls.append(len(v))
print(np.max(all_ls))
print(np.min(all_ls))
print(np.average(all_ls))
df.to_csv('./data/mimic/mimic-iii-med2.tsv')
def test_content_mimic():
df = pandas.DataFrame.from_csv('./data/mimic/mimic-iii.tsv', sep='\t')
df2 = pandas.DataFrame.from_csv('./data/mimic/mimic-iii-med2.tsv', sep=',')
for index, row in df.iterrows():
# print('{} vs {}'.format(row['DIAG'], row['PROC']))
diag=row['DIAG'][1:-2].split()
proc = row['PROC'][1:-2].split()
print(diag)
print(proc)
break
for index, row in df2.iterrows():
# print('{} vs {}'.format(row['DIAG'], row['PROC']))
diag=row['DIAG'][1:-2].split()
proc = row['PROC'][1:-2].split()
print(diag)
print(proc)
break
def load_all_raw_data(fpath='./data/mimic/mimic-iii.tsv', out_folder='big', top_d=10000, top_p=10000,
adm_range=[2,10], dig_range=[1,40], proc_range=[1,40], sep='\t'):
data_path = os.path.dirname(os.path.abspath(fpath))
out_path = data_path+'/'+out_folder
if not os.path.isdir(out_path):
os.mkdir(out_path)
df = pandas.DataFrame.from_csv(fpath, sep=sep)
patients = {}
count_d={}
count_p={}
count = 0
vcount = 0
for index, row in df.iterrows():
# print('{} vs {}'.format(row['DIAG'], row['PROC']))
diag=row['DIAG'][1:-2].split()
if len(diag)>0:
diag[-1]=diag[-1]+','
for dd in diag:
if dd not in count_d:
count_d[dd]=0
count_d[dd]+=1
proc=row['PROC'][1:-2].split()
if len(proc)>0:
proc[-1] = proc[-1] + ','
for pp in proc:
if pp not in count_p:
count_p[pp]=0
count_p[pp]+=1
if len(proc)>0:
# print(row['ADMITTIME'])
admtime=datetime.datetime.strptime(row['ADMITTIME'].strip(), '%Y-%m-%d %H:%M:%S')
epoch_time = (admtime - datetime.datetime(1970, 1, 1)).total_seconds()
try:
sub_id=row['SUBJECT_ID']
except:
sub_id=row.name
if sub_id not in patients:
patients[sub_id]={"num_adm":0,"ldiags":[],"lprocs":[],"data":[],"time_list":[]}
patients[sub_id]['data'].append((diag, proc,epoch_time))
patients[sub_id]['ldiags'].append(len(diag))
patients[sub_id]['lprocs'].append(len(proc))
patients[sub_id]['num_adm']+=1
vcount+=1
count+=1
print('num patients: {} vs total records: {}'.format(len(patients), count))
print('num patients: {} vs total valid records: {}'.format(len(patients), vcount))
sort_count_d=collections.OrderedDict(sorted(count_d.items(), key=lambda item: -item[1]))
trim_sort_d=[]
tt=0
for k,v in sort_count_d.items():
if tt < 10:
print('{} count {}'.format(k, v))
if tt<top_d:
trim_sort_d.append((k,v))
tt+=1
print('----')
sort_count_p = collections.OrderedDict(sorted(count_p.items(), key=lambda item: -item[1]))
trim_sort_p=[]
tt = 0
for k, v in sort_count_p.items():
if tt < 10:
print('{} count {}'.format(k, v))
if tt<top_p:
trim_sort_p.append((k,v))
tt += 1
sort_count_d = collections.OrderedDict(trim_sort_d)
sort_count_p = collections.OrderedDict(trim_sort_p)
print('len dic')
print(len(sort_count_d))
print(len(sort_count_p))
all_ld=[]
all_lp=[]
all_adm=[]
chosen_patients=[]
nfil = 0
for p, v in patients.items():
if adm_range[0] <= v['num_adm'] <= adm_range[1] and \
min(v['ldiags']) >= dig_range[0] and max(v['ldiags']) <= dig_range[1] and \
min(v['lprocs']) >=proc_range[0] and max(v['lprocs']) <= proc_range[1]:
newv=[]
dlen=[]
plen=[]
for aaa in v['data']:
ndd=[]
npp=[]
dd, pp, tt =aaa
for ddd in dd:
if ddd in sort_count_d:
ndd.append(ddd)
for ppp in pp:
if ppp in sort_count_p:
npp.append(ppp)
if len(ndd)<dig_range[0]:
nfil+=1
continue
# ndd=['rare_code_d']
if len(npp)<proc_range[0]:
nfil+=1
continue
# npp=['rare_code_p']
dlen.append(len(ndd))
plen.append(len(npp))
newv.append((ndd, npp, tt, p))
if len(newv)>=adm_range[0]:
all_ld.extend(dlen)
all_lp.extend(plen)
all_adm.append(len(newv))
chosen_patients.append(newv)
# print(v['data'])
newv.sort(key=lambda x: x[2])
# print(v['data'])
print('num filtered patients: {} vs total: {}'.format(len(chosen_patients), len(patients)))
print('num filtered: {}'.format(nfil))
print('avg all_ld: {} vs max all_ld: {} min {}'.format(sum(all_ld)/len(all_ld), max(all_ld), min(all_ld)))
# plt.hist(all_ld, normed=False, bins=100)
# plt.show()
print('avg all_lp: {} vs max all_lp: {} min {}'.format(sum(all_lp) / len(all_lp), max(all_lp), min(all_lp)))
# plt.hist(all_lp, normed=False, bins=100)
# plt.show()
print('avg all_adm: {} vs max all_adm: {} min {}'.format(sum(all_adm) / len(all_adm), max(all_adm), min(all_adm)))
# plt.hist(all_adm, normed=False, bins=100)
# plt.show()
print('write raw data...')
# random.shuffle(chosen_patients)
pickle.dump(chosen_patients,open(out_path+'/dig_proc_raw.pkl','wb'))
# erm_bow(chosen_patients, adm_range[1], store_path=out_path)
# random.shuffle(chosen_patients)
# erm_seq2seq_noh(chosen_patients, store_path=out_path)
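# PROC_DATA controls output-code de-duplication in the erm_seq2seq_noh* builders:
# 0 de-duplicates each admission's output codes while preserving their order;
# a non-zero value keeps them as-is (erm_seq2seq_noh2 instead uses an unordered set).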
PROC_DATA=0
def prepare_no_adm_from_dump_file(out_path):
chosen_patients=pickle.load(open(out_path + '/dig_proc_raw.pkl', 'rb'))
# erm_bow(chosen_patients, adm_range[1], store_path=out_path)
# random.shuffle(chosen_patients)
erm_seq2seq_noh_no_adm(chosen_patients, store_path=out_path)
def prepare_from_dump_file(out_path):
chosen_patients=pickle.load(open(out_path + '/dig_proc_raw.pkl', 'rb'))
# erm_bow(chosen_patients, adm_range[1], store_path=out_path)
# random.shuffle(chosen_patients)
erm_seq2seq_noh(chosen_patients, store_path=out_path, share_in=False)
def prepare_dual_from_dump_file(out_path):
chosen_patients=pickle.load(open(out_path + '/dig_proc_raw.pkl', 'rb'))
# erm_bow(chosen_patients, adm_range[1], store_path=out_path)
# random.shuffle(chosen_patients)
erm_seq2seq_noh_dual(chosen_patients, store_path=out_path, share_in=False)
def erm_seq2seq_noh_no_adm(dig_proc_list, store_path=''):
all_dig = []
all_proc = []
all_sub_id=[]
patient_adms = {}
pid = 0
aid = 0
for dp in dig_proc_list:
patient_adms[pid] = []
for ad in dp:
patient_adms[pid].append(aid)
aid += 1
# print(ad)
ad=list(ad)
ad[0][0]='dig_'+ad[0][0]
ad[1] = list(sorted(set(ad[1]), key=ad[1].index))
# print(ad[1])
# if PROC_DATA==0:
# ad[1]=list(sorted(set(ad[1]), key=ad[1].index))
# else:
# # ad[1] = list(set(ad[1]))
# # print('ffff')
# pass
# print(ad[1])
# raise False
ad[1][0] = 'proc_' + ad[1][0]
all_dig.append(' dig_'.join(ad[0]).replace(',', ''))
v=' proc_'.join(ad[1]).replace(',', '')
all_proc.append(v)
all_sub_id.append(ad[3])
pid += 1
print('examples of raw diags: {}'.format(all_dig[:5]))
print('examples of raw procs: {}'.format(all_proc[:5]))
str2token={'PAD':0, 'EOS':1}
token2str={0:'PAD',1:'EOS'}
pstr2token = {'PAD': 0, 'EOS': 1}
ptoken2str = {0: 'PAD', 1: 'EOS'}
token_c=2
for dig in all_dig:
strtok=dig.split()
for s in strtok:
if s not in str2token:
str2token[s]=token_c
token2str[token_c]=s
token_c+=1
print('real length dig {}'.format(len(str2token)))
token_c2 = 2
for proc in all_proc:
strtok=proc.split()
for s in strtok:
if s not in pstr2token:
pstr2token[s]=token_c2
ptoken2str[token_c2]=s
token_c2+=1
print('some dict info\n')
print('num token {}'.format(len(str2token)))
print('str2token examples\n')
cc = 0
for k,v in str2token.items():
if cc>10:
break
cc+=1
print('{} vs {}'.format(k, v))
print('num ptoken {}'.format(len(pstr2token)))
print('pstr2token examples\n')
cc = 0
for k, v in pstr2token.items():
if cc > 10:
break
cc += 1
print('{} vs {}'.format(k, v))
patient_records=[]
num_adm=0
all_d_len = []
all_p_len = []
for pid, v in patient_adms.items():
adm_records = []
for i, aid in enumerate(v):
list_dig = all_dig[aid]
list_proc = all_proc[aid]
d = []
for j, s in enumerate(list_dig.split()):
d.append(str2token[s])
p=[]
for j, s in enumerate(list_proc.split()):
p.append(pstr2token[s])
all_d_len.append(len(d))
all_p_len.append(len(p))
adm_records.append([d,p,all_sub_id[aid]])
num_adm+=1
patient_records.append(adm_records)
print('some examples:\n')
for i in range(5):
for t in range(len(patient_records[i])):
print('dig seq {} vs drug_dnc_decode seq {}'.format(patient_records[i][t][0], patient_records[i][t][1]))
for i in range(5):
for t in range(len(patient_records[i])):
print('subject id {}'.format(patient_records[i][t][2]))
print('final num samples {} vs {} adm'.format(len(patient_adms),num_adm))
print('d len max: {} d len min: {} d len avg {}'.format(np.max(all_d_len), np.min(all_d_len), np.mean(all_d_len)))
print('p len max: {} p len min: {} p len avg {}'.format(np.max(all_p_len), np.min(all_p_len), np.mean(all_p_len)))
print('write final input output...')
pickle.dump(str2token, open(store_path + '/dig_str2token_no_adm.pkl', 'wb'))
pickle.dump(token2str, open(store_path + '/dig_token2str_no_adm.pkl', 'wb'))
pickle.dump(pstr2token, open(store_path + '/pro_str2token_no_adm.pkl', 'wb'))
pickle.dump(ptoken2str, open(store_path + '/pro_token2str_no_adm.pkl', 'wb'))
pickle.dump(patient_records, open(store_path + '/patient_records.pkl', 'wb'))
def erm_seq2seq_noh(dig_proc_list, store_path='', share_in=True):
all_dig = []
all_proc = []
patient_adms = {}
pid = 0
aid = 0
for dp in dig_proc_list:
patient_adms[pid] = []
for ad in dp:
patient_adms[pid].append(aid)
aid += 1
# print(ad)
ad=list(ad)
ad[0][0]='dig_'+ad[0][0]
# print(ad[1])
if PROC_DATA==0:
ad[1]=list(sorted(set(ad[1]), key=ad[1].index))
else:
# ad[1] = list(set(ad[1]))
# print('ffff')
pass
# print(ad[1])
# raise False
ad[1][0] = 'proc_' + ad[1][0]
all_dig.append(' dig_'.join(ad[0]).replace(',', ''))
v=' proc_'.join(ad[1]).replace(',', '')
all_proc.append(v)
pid += 1
print('examples of raw diags: {}'.format(all_dig[:5]))
print('examples of raw procs: {}'.format(all_proc[:5]))
str2token={'PAD':0, 'EOS':1}
token2str={0:'PAD',1:'EOS'}
pstr2token = {'PAD': 0, 'EOS': 1}
ptoken2str = {0: 'PAD', 1: 'EOS'}
token_c=2
for dig in all_dig:
strtok=dig.split()
for s in strtok:
if s not in str2token:
str2token[s]=token_c
token2str[token_c]=s
token_c+=1
print('real length dig {}'.format(len(str2token)))
token_c2 = 2
for proc in all_proc:
strtok=proc.split()
for s in strtok:
if s not in pstr2token:
pstr2token[s]=token_c2
ptoken2str[token_c2]=s
token_c2+=1
if share_in:
if s not in str2token:
str2token[s] = token_c
token2str[token_c] = s
token_c += 1
print('some dict info\n')
print('num token {}'.format(len(str2token)))
print('str2token examples\n')
cc = 0
for k,v in str2token.items():
if cc>10:
break
cc+=1
print('{} vs {}'.format(k, v))
print('num ptoken {}'.format(len(pstr2token)))
print('pstr2token examples\n')
cc = 0
for k, v in pstr2token.items():
if cc > 10:
break
cc += 1
print('{} vs {}'.format(k, v))
dig_records = []
pro_records = []
for pid, v in patient_adms.items():
d = []
p_in = []
# if len(v)==1:
# raise False
for i, aid in enumerate(v):
list_dig = all_dig[aid]
list_proc = all_proc[aid]
for j, s in enumerate(list_dig.split()):
d.append(str2token[s])
d.append(str2token['EOS'])
if i>0:
list_proc2 = all_proc[v[i-1]]
for j, s in enumerate(list_proc2.split()):
if share_in:
p_in.append(str2token[s])
else:
p_in.append(pstr2token[s])
p_in.append(str2token['EOS'])
dig_records.append(d.copy()+[str2token['PAD']]+p_in.copy())
else:
dig_records.append(d.copy())
p=[]
for j, s in enumerate(list_proc.split()):
p.append(pstr2token[s])
# if len(p)!=len(set(p)):
# print(list_proc)
# print(p)
# raise False
pro_records.append(p)
# if len(v)>1:
# print(len(v))
# print(dig_records[-1])
# print(pro_records[-1])
# raise False
# label_count = {}
#
# for y in pro_records:
# k = tuple(y)
# if k not in label_count:
# label_count[k] = 0
# label_count[k] += 1
#
# print('before discard num samples {}'.format(len(dig_records)))
# dig_records2 = []
# pro_records2 = []
# num_discard = 0
# for x, y in zip(dig_records, pro_records):
# k = tuple(y)
# if 1 < label_count[k]:
# dig_records2.append(x)
# pro_records2.append(y)
# else:
# num_discard += 1
#
# print('num discard {}'.format(num_discard))
# dig_records = dig_records2
# pro_records = pro_records2
print('some examples:\n')
for i in range(10):
print('dig seq {} vs drug_dnc_decode seq {}'.format(dig_records[i], pro_records[i]))
print('final num samples {}'.format(len(dig_records)))
print('final num samples {}'.format(len(pro_records)))
print('write final input output...')
if not share_in:
pickle.dump(str2token, open(store_path + '/dig_str2token_noshare.pkl', 'wb'))
pickle.dump(token2str, open(store_path + '/dig_token2str_noshare.pkl', 'wb'))
pickle.dump(pstr2token, open(store_path + '/pro_str2token_noshare.pkl', 'wb'))
pickle.dump(ptoken2str, open(store_path + '/pro_token2str_noshare.pkl', 'wb'))
pickle.dump(dig_records, open(store_path + '/dig_records_noshare.pkl', 'wb'))
pickle.dump(pro_records, open(store_path + '/pro_records_noshare.pkl', 'wb'))
else:
pickle.dump(str2token, open(store_path + '/dig_str2token_share.pkl', 'wb'))
pickle.dump(token2str, open(store_path + '/dig_token2str_share.pkl', 'wb'))
pickle.dump(pstr2token, open(store_path + '/pro_str2token_share.pkl', 'wb'))
pickle.dump(ptoken2str, open(store_path + '/pro_token2str_share.pkl', 'wb'))
pickle.dump(dig_records, open(store_path + '/dig_records_share.pkl', 'wb'))
pickle.dump(pro_records, open(store_path + '/pro_records_share.pkl', 'wb'))
ohinput=np.asarray(to_one_hot(dig_records, len(str2token)))
ohoutput = np.asarray(to_one_hot(pro_records, len(pstr2token)))
# pickle.dump(ohinput, open(store_path + '/dig_records_oh.pkl', 'wb'))
# pickle.dump(ohoutput, open(store_path + '/pro_records_oh.pkl', 'wb'))
def erm_seq2seq_noh2(dig_proc_list, store_path='', share_in=True):
all_dig = []
all_proc = []
patient_adms = {}
pid = 0
aid = 0
for dp in dig_proc_list:
patient_adms[pid] = []
for ad in dp:
patient_adms[pid].append(aid)
aid += 1
ad=list(ad)
ad[0][0]='dig_'+ad[0][0]
# print(ad[1])
if PROC_DATA==0:
ad[1]=list(sorted(set(ad[1]), key=ad[1].index))
else:
ad[1] = list(set(ad[1]))
# print(ad[1])
# raise False
ad[1][0] = 'proc_' + ad[1][0]
all_dig.append(' dig_'.join(ad[0]).replace(',', ''))
v=' proc_'.join(ad[1]).replace(',', '')
all_proc.append(v)
pid += 1
print('examples of raw diags: {}'.format(all_dig[:5]))
print('examples of raw procs: {}'.format(all_proc[:5]))
str2token={'PAD':0, 'EOS':1}
token2str={0:'PAD',1:'EOS'}
pstr2token = {'PAD': 0, 'EOS': 1}
ptoken2str = {0: 'PAD', 1: 'EOS'}
token_c=2
for dig in all_dig:
strtok=dig.split()
for s in strtok:
if s not in str2token:
str2token[s]=token_c
token2str[token_c]=s
token_c+=1
print('real length dig {}'.format(len(str2token)))
token_c2 = 2
for proc in all_proc:
strtok=proc.split()
for s in strtok:
if s not in pstr2token:
pstr2token[s]=token_c2
ptoken2str[token_c2]=s
token_c2+=1
if share_in:
if s not in str2token:
str2token[s] = token_c
token2str[token_c] = s
token_c += 1
print('some dict info\n')
print('num token {}'.format(len(str2token)))
print('str2token examples\n')
cc = 0
for k,v in str2token.items():
if cc>10:
break
cc+=1
print('{} vs {}'.format(k, v))
print('num ptoken {}'.format(len(pstr2token)))
print('pstr2token examples\n')
cc = 0
for k, v in pstr2token.items():
if cc > 10:
break
cc += 1
print('{} vs {}'.format(k, v))
dig_records = []
pro_records = []
for pid, v in patient_adms.items():
dp_in=[]
for i, aid in enumerate(v):
d = []
p_in = []
list_dig = all_dig[aid]
list_proc = all_proc[aid]
for j, s in enumerate(list_dig.split()):
d.append(str2token[s])
dp_in+=d
if i>0:
list_proc2 = all_proc[v[i-1]]
for j, s in enumerate(list_proc2.split()):
if share_in:
p_in.append(str2token[s])
else:
p_in.append(pstr2token[s])
dp_in+=[str2token['PAD']]+p_in+[str2token['EOS']]
dig_records.append(dp_in.copy())
p=[]
for j, s in enumerate(list_proc.split()):
p.append(pstr2token[s])
# if len(p)!=len(set(p)):
# print(list_proc)
# print(p)
# raise False
pro_records.append(p)
# if len(v)>1:
# print(len(v))
# print(dig_records[-1])
# print(pro_records[-1])
# raise False
# label_count = {}
#
# for y in pro_records:
# k = tuple(y)
# if k not in label_count:
# label_count[k] = 0
# label_count[k] += 1
#
# print('before discard num samples {}'.format(len(dig_records)))
# dig_records2 = []
# pro_records2 = []
# num_discard = 0
# for x, y in zip(dig_records, pro_records):
# k = tuple(y)
# if 1 < label_count[k]:
# dig_records2.append(x)
# pro_records2.append(y)
# else:
# num_discard += 1
#
# print('num discard {}'.format(num_discard))
# dig_records = dig_records2
# pro_records = pro_records2
print('some examples:\n')
for i in range(10):
print('dig seq {} vs drug_dnc_decode seq {}'.format(dig_records[i], pro_records[i]))
print('final num samples {}'.format(len(dig_records)))
print('final num samples {}'.format(len(pro_records)))
print('write final input output...')
if not share_in:
pickle.dump(str2token, open(store_path + '/dig_str2token_noshare.pkl', 'wb'))
pickle.dump(token2str, open(store_path + '/dig_token2str_noshare.pkl', 'wb'))
pickle.dump(pstr2token, open(store_path + '/pro_str2token_noshare.pkl', 'wb'))
pickle.dump(ptoken2str, open(store_path + '/pro_token2str_noshare.pkl', 'wb'))
pickle.dump(dig_records, open(store_path + '/dig_records_noshare.pkl', 'wb'))
pickle.dump(pro_records, open(store_path + '/pro_records_noshare.pkl', 'wb'))
else:
pickle.dump(str2token, open(store_path + '/dig_str2token_share.pkl', 'wb'))
pickle.dump(token2str, open(store_path + '/dig_token2str_share.pkl', 'wb'))
pickle.dump(pstr2token, open(store_path + '/pro_str2token_share.pkl', 'wb'))
pickle.dump(ptoken2str, open(store_path + '/pro_token2str_share.pkl', 'wb'))
pickle.dump(dig_records, open(store_path + '/dig_records_share.pkl', 'wb'))
pickle.dump(pro_records, open(store_path + '/pro_records_share.pkl', 'wb'))
# ohinput=np.asarray(to_one_hot(dig_records, len(str2token)))
# ohoutput = np.asarray(to_one_hot(pro_records, len(pstr2token)))
# pickle.dump(ohinput, open(store_path + '/dig_records_oh.pkl', 'wb'))
# pickle.dump(ohoutput, open(store_path + '/pro_records_oh.pkl', 'wb'))
def to_one_hot(inputs, num_token):
def onehot(index, size):
# print('-----')
# print(index)
vec = np.zeros(size, dtype=np.float32)
vec[int(index)] = 1.0
return vec
ohinputs=[]
for inp in inputs:
oh = np.zeros(num_token, dtype=np.float32)
for c in inp:
oh+=onehot(c,num_token)
ohinputs.append(oh)
return ohinputs
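# Illustrative example: to_one_hot([[1, 3, 3]], 5) returns [array([0., 1., 0., 2., 0.])],
# i.e. one bag-of-tokens count vector per input sequence -- repeated tokens
# accumulate rather than being clipped to one.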
def erm_seq2seq_noh_dual(dig_proc_list, store_path='', share_in=False):
all_dig = []
all_proc = []
patient_adms = {}
pid = 0
aid = 0
for dp in dig_proc_list:
patient_adms[pid] = []
for ad in dp:
patient_adms[pid].append(aid)
aid += 1
ad=list(ad)
ad[0][0]='dig_'+ad[0][0]
# print(ad[1])
if PROC_DATA==0:
ad[1]=list(sorted(set(ad[1]), key=ad[1].index))
else:
# ad[1] = list(set(ad[1]))
# print('ffff')
pass
# print(ad[1])
# raise False
ad[1][0] = 'proc_' + ad[1][0]
all_dig.append(' dig_'.join(ad[0]).replace(',', ''))
v=' proc_'.join(ad[1]).replace(',', '')
all_proc.append(v)
pid += 1
print('examples of raw diags: {}'.format(all_dig[:5]))
print('examples of raw procs: {}'.format(all_proc[:5]))
str2token={'PAD':0, 'EOS':1}
token2str={0:'PAD',1:'EOS'}
pstr2token = {'PAD': 0, 'EOS': 1}
ptoken2str = {0: 'PAD', 1: 'EOS'}
token_c=2
for dig in all_dig:
strtok=dig.split()
for s in strtok:
if s not in str2token:
str2token[s]=token_c
token2str[token_c]=s
token_c+=1
print('real length dig {}'.format(len(str2token)))
token_c2 = 2
for proc in all_proc:
strtok=proc.split()
for s in strtok:
if s not in pstr2token:
pstr2token[s]=token_c2
ptoken2str[token_c2]=s
token_c2+=1
if share_in:
if s not in str2token:
str2token[s] = token_c
token2str[token_c] = s
token_c += 1
print('some dict info\n')
print('num token {}'.format(len(str2token)))
print('str2token examples\n')
cc = 0
for k,v in str2token.items():
if cc>10:
break
cc+=1
print('{} vs {}'.format(k, v))
print('num ptoken {}'.format(len(pstr2token)))
print('pstr2token examples\n')
cc = 0
for k, v in pstr2token.items():
if cc > 10:
break
cc += 1
print('{} vs {}'.format(k, v))
records = []
for pid, v in patient_adms.items():
d_in = []
p_in = []
        if len(v) == 1:
            raise ValueError("expected at least two admissions per patient at this point")
for i, aid in enumerate(v):
if i>0:
d_out = []
p_out = []
cur_list_dig = all_dig[aid]
cur_list_proc = all_proc[aid]
pre_list_dig = all_dig[v[i-1]]
pre_list_proc = all_proc[v[i-1]]
for j, s in enumerate(pre_list_dig.split()):
d_in.append(str2token[s])
d_in.append(str2token['EOS'])
for j, s in enumerate(pre_list_proc.split()):
if share_in:
p_in.append(str2token[s])
else:
p_in.append(pstr2token[s])
p_in.append(pstr2token['EOS'])
for j, s in enumerate(cur_list_dig.split()):
d_out.append(str2token[s])
d_out.append(str2token['EOS'])
for j, s in enumerate(cur_list_proc.split()):
p_out.append(pstr2token[s])
p_out.append(pstr2token['EOS'])
records.append({'d_in':d_in.copy(),'p_in':p_in.copy(),
'd_out':d_out.copy(),'p_out':p_out.copy(),
'num_adm':i+1, 'aid':aid, 'pid':pid})
print('some examples:\n')
for i in range(10):
print('Example {}'.format(records[i]))
print('final num samples {}'.format(len(records)))
print('write final input output...')
if not share_in:
pickle.dump(str2token, open(store_path + '/dig_str2token_noshare.pkl', 'wb'))
pickle.dump(token2str, open(store_path + '/dig_token2str_noshare.pkl', 'wb'))
pickle.dump(pstr2token, open(store_path + '/pro_str2token_noshare.pkl', 'wb'))
pickle.dump(ptoken2str, open(store_path + '/pro_token2str_noshare.pkl', 'wb'))
pickle.dump(records, open(store_path + '/records_noshare_dual.pkl', 'wb'))
else:
pickle.dump(str2token, open(store_path + '/dig_str2token_share.pkl', 'wb'))
pickle.dump(token2str, open(store_path + '/dig_token2str_share.pkl', 'wb'))
pickle.dump(pstr2token, open(store_path + '/pro_str2token_share.pkl', 'wb'))
pickle.dump(ptoken2str, open(store_path + '/pro_token2str_share.pkl', 'wb'))
pickle.dump(records, open(store_path + '/records_share_dual.pkl', 'wb'))
def make_one_hot_from_file_seq_no_adm(store_path='./data/mimic/drug_raw/'):
patient_records = pickle.load(open(store_path + '/patient_records.pkl', 'rb'))
str2token = pickle.load(open(store_path + '/dig_str2token_no_adm.pkl', 'rb'))
pstr2token = pickle.load(open(store_path + '/pro_str2token_no_adm.pkl', 'rb'))
all_p_dig=[]
all_p_proc = []
for p in patient_records:
dig_records = []
pro_records = []
x2=np.zeros(len(str2token))
for a in p:
x=to_one_hot([a[0]], len(str2token))[0]
# print(x)
# raise False
x2+=x
dig_records.append(x2)
x = to_one_hot([a[1]], len(pstr2token))[0]
pro_records.append(x)
all_p_dig.append(dig_records)
all_p_proc.append(pro_records)
pickle.dump(all_p_dig, open(store_path + '/dig_records_oh.pkl', 'wb'))
pickle.dump(all_p_proc, open(store_path + '/pro_records_oh.pkl', 'wb'))
def make_one_hot_from_file_seq(store_path='./data/mimic/drug_raw/'):
dig_records = pickle.load(open(store_path + '/dig_records_share.pkl', 'rb'))
pro_records = pickle.load(open(store_path + '/pro_records_share.pkl', 'rb'))
str2token = pickle.load(open(store_path + '/dig_str2token_share.pkl', 'rb'))
pstr2token = pickle.load(open(store_path + '/pro_str2token_share.pkl', 'rb'))
ohinput=np.asarray(to_one_hot(dig_records, len(str2token)))
ohoutput = np.asarray(to_one_hot(pro_records, len(pstr2token)))
pickle.dump(ohinput, open(store_path + '/dig_records_oh.pkl', 'wb'))
pickle.dump(ohoutput, open(store_path + '/pro_records_oh.pkl', 'wb'))
def make_one_hot_from_file_seq_dual(store_path='./data/mimic/drug_raw/'):
records=pickle.load(open(store_path+'/records_noshare_dual.pkl','rb'))
str2tok = pickle.load(open(store_path+'/dig_str2token_noshare.pkl', 'rb'))
pstr2tok = pickle.load(open(store_path+'/pro_str2token_noshare.pkl', 'rb'))
dins=[]
pins=[]
douts=[]
pouts=[]
for d in records:
dins.append(d['d_in'])
pins.append(d['p_in'])
douts.append(d['d_out'])
pouts.append(d['p_out'])
print('num records {}'.format(len(records)))
dinoh=np.asarray(to_one_hot(dins,len(str2tok)))
pinoh = np.asarray(to_one_hot(pins,len(pstr2tok)))
doutoh = np.asarray(to_one_hot(douts, len(str2tok)))
poutoh = np.asarray(to_one_hot(pouts, len(pstr2tok)))
pickle.dump(dinoh, open(store_path + '/din_oh.pkl', 'wb'))
pickle.dump(pinoh, open(store_path + '/pin_oh.pkl', 'wb'))
pickle.dump(doutoh, open(store_path + '/dout_oh.pkl', 'wb'))
pickle.dump(poutoh, open(store_path + '/pout_oh.pkl', 'wb'))
def check_two_seq_no_adm(name1, name2, store_path='./data/mimic/trim_diag_drug_proc_no_adm', out_t='proc'):
patient_records1 = pickle.load(open('./data/mimic/{}/patient_records.pkl'.format(name1), 'rb'))
# str2tok1 = pickle.load(open('./data/mimic/{}/dig_str2token_no_adm.pkl'.format(name1), 'rb'))
# pstr2tok1 = pickle.load(open('./data/mimic/{}/pro_str2token_no_adm.pkl'.format(name1), 'rb'))
patient_records2 = pickle.load(open('./data/mimic/{}/patient_records.pkl'.format(name2), 'rb'))
# str2tok2 = pickle.load(open('./data/mimic/{}/dig_str2token_no_adm.pkl'.format(name2), 'rb'))
# pstr2tok2 = pickle.load(open('./data/mimic/{}/pro_str2token_no_adm.pkl'.format(name2), 'rb'))
if out_t=='drug':
tmp = patient_records1
patient_records1 = patient_records2
patient_records2 = tmp
p1_sub_ids=set()
for p1 in patient_records1:
for adm in p1:
p1_sub_ids.add(adm[2])
print(len(p1_sub_ids))
new_patient_records2=[]
p2_sub_ids = set()
for p2 in patient_records2:
new_adm=[]
for adm in p2:
if adm[2] in p1_sub_ids:
new_adm.append(adm)
p2_sub_ids.add(adm[2])
if len(new_adm)==len(p2):
new_patient_records2.append(new_adm)
print('{} vs {}'.format(len(new_patient_records2), len(patient_records2)))
new_patient_records1 = []
for p1 in patient_records1:
new_adm = []
for adm in p1:
if adm[2] in p2_sub_ids:
new_adm.append(adm)
if len(new_adm)==len(p1):
new_patient_records1.append(new_adm)
print('{} vs {}'.format(len(new_patient_records1), len(patient_records1)))
new_patient_records1=sorted(new_patient_records1, key=lambda x: x[0][2])
new_patient_records2 = sorted(new_patient_records2, key=lambda x: x[0][2])
new_patient_records=[]
c=0
diag_str2token={"PAD":0,"EOS":1}
diag_token2str={0:"PAD",1:"EOS"}
drug_str2token = {"PAD": 0, "EOS": 1}
drug_token2str = {0: "PAD", 1: "EOS"}
proc_str2token = {"PAD": 0, "EOS": 1}
proc_token2str = {0: "PAD", 1: "EOS"}
list_dict_str2token=[diag_str2token, drug_str2token, proc_str2token]
list_dict_token2str=[diag_token2str, drug_token2str, proc_token2str]
num_adm=0
for p1,p2 in zip(new_patient_records1, new_patient_records2):
if len(p1)==len(p2):
all_adms=[]
for adm1, adm2 in zip(p1,p2):
all_rets=[]
for ii,li in enumerate([adm1[0], adm2[1], adm1[1]]):
ret=[]
for p in li:
if str(p) not in list_dict_str2token[ii]:
list_dict_str2token[ii][str(p)]=len(list_dict_str2token[ii])
list_dict_token2str[ii][list_dict_str2token[ii][str(p)]]=str(p)
ret.append(list_dict_str2token[ii][str(p)])
all_rets.append(ret)
all_adms.append(all_rets)
new_patient_records.append(all_adms)
num_adm+=len(all_adms)
if c<10:
print(all_adms)
print('----')
c+=1
print(len(new_patient_records))
print(num_adm)
print(len(diag_token2str))
print(len(drug_str2token))
print(len(proc_str2token))
if not os.path.isdir(store_path):
os.mkdir(store_path)
pickle.dump(list_dict_str2token, open(store_path + '/list_dict_str2token_no_adm.pkl', 'wb'))
pickle.dump(list_dict_token2str, open(store_path + '/list_dict_token2str_no_adm.pkl', 'wb'))
print('dump sequence')
pickle.dump(new_patient_records, open(store_path + '/patient_records.pkl', 'wb'))
print('dump one hot')
all_p_dig = []
all_p_proc = []
for p in new_patient_records:
dig_records = []
pro_records = []
in_dim=len(diag_str2token)+len(drug_str2token)
x2 = np.zeros(in_dim)
for a in p:
x = to_one_hot([a[0]], len(diag_str2token))[0]
x1 = to_one_hot([a[1]], len(drug_str2token))[0]
x =np.concatenate([x,x1], axis=-1)
# print(x)
# raise False
x2 += x
dig_records.append(x2)
x = to_one_hot([a[2]], len(proc_str2token))[0]
pro_records.append(x)
all_p_dig.append(dig_records)
all_p_proc.append(pro_records)
pickle.dump(all_p_dig, open(store_path + '/dig_records_oh.pkl', 'wb'))
pickle.dump(all_p_proc, open(store_path + '/pro_records_oh.pkl', 'wb'))
if __name__ == '__main__':
# test_content_mimic()
# load_all_raw_data(adm_range=[1, 150], dig_range=[1, 150], proc_range=[1,150], top_d=10000, top_p=10000,
# out_folder='trim_proc_diag_no_adm4',sep='\t')
# # get_med()
# build_mimic_med()
# load_all_raw_data(fpath='./data/mimic/mimic-iii-med2.tsv',
# adm_range=[1, 150], dig_range=[1, 150], proc_range=[1, 10000], top_d=10000, top_p=300,
# out_folder='trim_drug_diag_no_adm4',sep=',')
# prepare_from_dump_file('./data/mimic/trim_diag_next/')
# prepare_dual_from_dump_file('./data/mimic/trim_drug_diag_next/')
# make_one_hot_from_file_seq_dual(store_path='./data/mimic/trim_drug_diag_next/')
# make_one_hot_from_file_seq(store_path='./data/mimic/trim_proc_raw_no_adm/')
prepare_no_adm_from_dump_file('./data/mimic/trim_proc_diag_no_adm4/')
prepare_no_adm_from_dump_file('./data/mimic/trim_drug_diag_no_adm4/')
# make_one_hot_from_file_seq_no_adm('./data/mimic/trim_drug_diag_no_adm/')
check_two_seq_no_adm('trim_proc_diag_no_adm4','trim_drug_diag_no_adm4',
store_path='./data/mimic/trim_diag_proc_drug_no_adm', out_t='drug') | mit |
rgommers/statsmodels | statsmodels/examples/try_polytrend.py | 33 | 1477 |
from __future__ import print_function
import numpy as np
#import statsmodels.linear_model.regression as smreg
from scipy import special
import statsmodels.api as sm
from statsmodels.datasets.macrodata import data
dta = data.load()
gdp = np.log(dta.data['realgdp'])
from numpy import polynomial
from scipy import special
maxorder = 20
polybase = special.chebyt
polybase = special.legendre
t = np.linspace(-1,1,len(gdp))
exog = np.column_stack([polybase(i)(t) for i in range(maxorder)])
fitted = [sm.OLS(gdp, exog[:, :maxr]).fit().fittedvalues for maxr in
range(2,maxorder)]
print((np.corrcoef(exog[:,1:6], rowvar=0)*10000).astype(int))
import matplotlib.pyplot as plt
plt.figure()
plt.plot(gdp, 'o')
for i in range(maxorder-2):
plt.plot(fitted[i])
plt.figure()
#plt.plot(gdp, 'o')
for i in range(maxorder-4, maxorder-2):
#plt.figure()
plt.plot(gdp - fitted[i])
plt.title(str(i+2))
plt.figure()
plt.plot(gdp, '.')
plt.plot(fitted[-1], lw=2, color='r')
plt.plot(fitted[0], lw=2, color='g')
plt.title('GDP and Polynomial Trend')
plt.figure()
plt.plot(gdp - fitted[-1], lw=2, color='r')
plt.plot(gdp - fitted[0], lw=2, color='g')
plt.title('Residual GDP minus Polynomial Trend (green: linear, red: legendre(20))')
#orthonormalize an exog using QR
ex2 = t[:,None]**np.arange(6) #np.vander has columns reversed
q2,r2 = np.linalg.qr(ex2, mode='full')
np.max(np.abs(np.dot(q2.T, q2)-np.eye(6)))
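# The expression above should be close to machine precision (~1e-15),
# confirming that the columns of q2 form an orthonormal basis.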
plt.figure()
plt.plot(q2, lw=2)
plt.show()
| bsd-3-clause |
dfci/pancanmet_analysis | oldcode/pancan/comparemets.py | 1 | 4633 | # A script to compare differential abundance patterns in metabolites across cancer types
import os, sys, pandas as pd, numpy as np, scipy as sp, csv, scipy.stats as st, matplotlib.pyplot as plt, matplotlib.cm as cm, itertools as it, pdb, seaborn as sns
sns.set_style('ticks')
sys.path.append('..')
import reportertools as rt
norm = rt.MidpointNormalize(midpoint=0) # Normalize colormaps
plt.close('all')
plt.ion()
#########################################################################################
# Input parameters
#########################################################################################
dropna = int(sys.argv[1])
#########################################################################################
# Read in the pancan metabolite file
alldata = pd.io.parsers.read_csv('../../data/merged_metabolomics/alldata.csv', index_col = 0, header = 0)
# Make sure to drop first two rows
studylist = alldata.loc['Study']
tissuelist = alldata.loc['TissueType']
if dropna == 1:
alldata = alldata.dropna()
# Define the studies you are interested in, or leave as 'all' to do all
#uqstudy = ['KIRC','ccpap']
uqstudy = 'all'
if uqstudy == 'all':
# Find the unique kinds of studies
uqstudy = np.unique( studylist )
uqstudy = [item for item in uqstudy if item !='nan']
alldata = alldata.ix[2:,:]
else:
colidx = [item for item in range(len(studylist)) if studylist[item] in uqstudy]
alldata = alldata[colidx]
studylist = [studylist[item] for item in colidx]
tissuelist = [tissuelist[item] for item in colidx]
alldata = alldata.ix[2:,:]
# Create a numpy array to store the results
res_t = np.zeros(( alldata.shape[0], len(uqstudy) ))
res_p = np.ones(( alldata.shape[0], len(uqstudy) ))
res_t[:] = np.nan
# For each study, trim the data and then test differential abundance
studyctr = 0
for study in uqstudy:
idx = [item for item in range(len(studylist)) if studylist[item] == study]
    # Select this study's columns (per-metabolite NA handling happens below)
d = alldata.ix[:,idx]
# Find tumor and normal indices
temptissue = [tissuelist[item] for item in idx]
tumoridx = [item for item in range(len(temptissue)) if (temptissue[item].upper() == 'TUMOR' or temptissue[item].upper() == 'MET')]
normalidx = [item for item in range(len(temptissue)) if temptissue[item].upper() == 'NORMAL']
    # For each metabolite, check that enough non-missing samples exist; if so, run a Mann-Whitney U test and keep the t-statistic for significant hits
for met in range(d.shape[0]):
currdata = d.ix[met,:]
numdata = np.where( currdata.isnull() )[0]
tumornna = np.intersect1d( tumoridx, np.where( currdata.notnull() )[0] )
normalnna = np.intersect1d( normalidx, np.where( currdata.notnull() )[0] )
if (len(numdata) > 0.5*len(idx) or len(tumornna)<2 or len(normalnna)<2 ):
res_t[met,studyctr] = 0
res_p[met,studyctr] = 1
else:
temp1 = [float(item) for item in currdata[tumornna]]
temp2 = [float(item) for item in currdata[normalnna]]
stdtumor = np.std( temp1 )
stdnormal = np.std( temp2 )
if (stdtumor > 1e-10 and stdnormal > 1e-10):
res_p[met,studyctr] = -np.log10( st.mannwhitneyu( temp1,temp2 )[1] )
if res_p[met,studyctr] > -np.log10(0.05):
res_t[met,studyctr] = st.ttest_ind( temp1,temp2 )[0]
else:
res_t[met,studyctr] = 0
#if np.isnan( res_t[met,studyctr] ):
# pdb.set_trace()
else:
res_t[met,studyctr] = 0
res_p[met,studyctr] = 1
studyctr += 1
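# At this point res_p holds -log10 Mann-Whitney p-values and res_t holds the
# tumor-vs-normal t-statistic, zeroed wherever the test was not significant at
# p < 0.05 or there were too few usable samples.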
# If we are not dropping NA's, at least filter out rows which don't have a non-zero test statistic in at least 5 studies
if dropna == 0:
tsums = np.sum( np.abs( np.sign( res_t ) ), axis = 1 )
nzt = np.where( tsums >= 5 )[0]
res_p = res_p[nzt,:]
res_t = res_t[nzt,:]
metnames = [alldata.index[item] for item in nzt]
# Write the results to files
fcfile = pd.DataFrame(res_t,index = metnames, columns = uqstudy)
pfile = pd.DataFrame(res_p,index = metnames, columns = uqstudy)
#fcfile.to_csv('../../data/merged_metabolomics/fc.csv')
#pfile.to_csv('../../data/merged_metabolomics/res_p.csv')
else:
metnames = alldata.index
# Plot results
plt.figure( 1 )
plt.imshow( res_p, interpolation = 'nearest',cmap=cm.seismic, aspect='auto' )
plt.xticks( range(len(uqstudy)), uqstudy, fontsize=10 )
plt.yticks( range(len(metnames)), metnames , fontsize = 6 )
plt.colorbar()
# Plot results
order = np.argsort( np.average( np.sign(res_t), axis = 1 ) )
plt.figure( 2 )
plt.imshow( res_t[order], norm = norm, interpolation = 'nearest',cmap=cm.seismic, aspect='auto' )
plt.xticks( range(len(uqstudy)), uqstudy, fontsize=10 )
plt.yticks( range(len(metnames)), [metnames[item] for item in order] , fontsize = 6 )
plt.colorbar() | lgpl-3.0 |
samzhang111/scikit-learn | examples/neighbors/plot_kde_1d.py | 347 | 5100 | """
===================================
Simple 1D Kernel Density Estimation
===================================
This example uses the :class:`sklearn.neighbors.KernelDensity` class to
demonstrate the principles of Kernel Density Estimation in one dimension.
The first plot shows one of the problems with using histograms to visualize
the density of points in 1D. Intuitively, a histogram can be thought of as a
scheme in which a unit "block" is stacked above each point on a regular grid.
As the top two panels show, however, the choice of gridding for these blocks
can lead to wildly divergent ideas about the underlying shape of the density
distribution. If we instead center each block on the point it represents, we
get the estimate shown in the bottom left panel. This is a kernel density
estimation with a "top hat" kernel. This idea can be generalized to other
kernel shapes: the bottom-right panel of the first figure shows a Gaussian
kernel density estimate over the same distribution.
Scikit-learn implements efficient kernel density estimation using either
a Ball Tree or KD Tree structure, through the
:class:`sklearn.neighbors.KernelDensity` estimator. The available kernels
are shown in the second figure of this example.
The third figure compares kernel density estimates for a distribution of 100
samples in 1 dimension. Though this example uses 1D distributions, kernel
density estimation is easily and efficiently extensible to higher dimensions
as well.
"""
# Author: Jake Vanderplas <[email protected]>
#
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
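# The estimator pattern used throughout this example is, in sketch form:
#   kde = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(X)
#   log_dens = kde.score_samples(X_plot)   # score_samples returns log-density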
#----------------------------------------------------------------------
# Plot the progression of histograms to kernels
np.random.seed(1)
N = 20
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
                    np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
bins = np.linspace(-5, 10, 10)
fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
fig.subplots_adjust(hspace=0.05, wspace=0.05)
# histogram 1
ax[0, 0].hist(X[:, 0], bins=bins, fc='#AAAAFF', normed=True)
ax[0, 0].text(-3.5, 0.31, "Histogram")
# histogram 2
ax[0, 1].hist(X[:, 0], bins=bins + 0.75, fc='#AAAAFF', normed=True)
ax[0, 1].text(-3.5, 0.31, "Histogram, bins shifted")
# tophat KDE
kde = KernelDensity(kernel='tophat', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 0].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 0].text(-3.5, 0.31, "Tophat Kernel Density")
# Gaussian KDE
kde = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 1].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 1].text(-3.5, 0.31, "Gaussian Kernel Density")
for axi in ax.ravel():
axi.plot(X[:, 0], np.zeros(X.shape[0]) - 0.01, '+k')
axi.set_xlim(-4, 9)
axi.set_ylim(-0.02, 0.34)
for axi in ax[:, 0]:
axi.set_ylabel('Normalized Density')
for axi in ax[1, :]:
axi.set_xlabel('x')
#----------------------------------------------------------------------
# Plot all available kernels
X_plot = np.linspace(-6, 6, 1000)[:, None]
X_src = np.zeros((1, 1))
fig, ax = plt.subplots(2, 3, sharex=True, sharey=True)
fig.subplots_adjust(left=0.05, right=0.95, hspace=0.05, wspace=0.05)
def format_func(x, loc):
if x == 0:
return '0'
elif x == 1:
return 'h'
elif x == -1:
return '-h'
else:
return '%ih' % x
for i, kernel in enumerate(['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']):
axi = ax.ravel()[i]
log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot)
axi.fill(X_plot[:, 0], np.exp(log_dens), '-k', fc='#AAAAFF')
axi.text(-2.6, 0.95, kernel)
axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
axi.xaxis.set_major_locator(plt.MultipleLocator(1))
axi.yaxis.set_major_locator(plt.NullLocator())
axi.set_ylim(0, 1.05)
axi.set_xlim(-2.9, 2.9)
ax[0, 1].set_title('Available Kernels')
#----------------------------------------------------------------------
# Plot a 1D density example
N = 100
np.random.seed(1)
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
                    np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
true_dens = (0.3 * norm(0, 1).pdf(X_plot[:, 0])
+ 0.7 * norm(5, 1).pdf(X_plot[:, 0]))
fig, ax = plt.subplots()
ax.fill(X_plot[:, 0], true_dens, fc='black', alpha=0.2,
label='input distribution')
for kernel in ['gaussian', 'tophat', 'epanechnikov']:
kde = KernelDensity(kernel=kernel, bandwidth=0.5).fit(X)
log_dens = kde.score_samples(X_plot)
ax.plot(X_plot[:, 0], np.exp(log_dens), '-',
label="kernel = '{0}'".format(kernel))
ax.text(6, 0.38, "N={0} points".format(N))
ax.legend(loc='upper left')
ax.plot(X[:, 0], -0.005 - 0.01 * np.random.random(X.shape[0]), '+k')
ax.set_xlim(-4, 9)
ax.set_ylim(-0.02, 0.4)
plt.show()
| bsd-3-clause |
gticket/scikit-neuralnetwork | sknn/ae.py | 8 | 5851 | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, unicode_literals, print_function)
__all__ = ['AutoEncoder', 'Layer']
import time
import logging
import itertools
log = logging.getLogger('sknn')
import sklearn
from . import nn, backend
class Layer(nn.Layer):
"""
Specification for a layer to be passed to the auto-encoder during construction. This
includes a variety of parameters to configure each layer based on its activation type.
Parameters
----------
activation: str
Select which activation function this layer should use, as a string. Specifically,
options are ``Sigmoid`` and ``Tanh`` only for such auto-encoders.
type: str, optional
The type of encoding and decoding layer to use, specifically ``denoising`` for randomly
corrupting data, and a more traditional ``autoencoder`` which is used by default.
name: str, optional
You optionally can specify a name for this layer, and its parameters
will then be accessible to scikit-learn via a nested sub-object. For example,
if name is set to ``layer1``, then the parameter ``layer1__units`` from the network
is bound to this layer's ``units`` variable.
The name defaults to ``hiddenN`` where N is the integer index of that layer, and the
final layer is always ``output`` without an index.
units: int
The number of units (also known as neurons) in this layer. This applies to all
layer types except for convolution.
cost: string, optional
What type of cost function to use during the layerwise pre-training. This can be either
        ``msre`` for mean-squared reconstruction error (default), or ``mbce`` for mean binary
cross entropy.
tied_weights: bool, optional
Whether to use the same weights for the encoding and decoding phases of the simulation
and training. Default is ``True``.
corruption_level: float, optional
The ratio of inputs to corrupt in this layer; ``0.25`` means that 25% of the inputs will be
corrupted during the training. The default is ``0.5``.
warning: None
        You should use keyword arguments after `activation` when initializing this object. If not,
the code will raise an AssertionError.
"""
def __init__(self,
activation,
warning=None,
type='autoencoder',
name=None,
units=None,
cost='msre',
tied_weights=True,
corruption_level=0.5):
assert warning is None, \
"Specify layer parameters as keyword arguments, not positional arguments."
if type not in ['denoising', 'autoencoder']:
raise NotImplementedError("AutoEncoder layer type `%s` is not implemented." % type)
if cost not in ['msre', 'mbce']:
raise NotImplementedError("Error type '%s' is not implemented." % cost)
if activation not in ['Sigmoid', 'Tanh']:
raise NotImplementedError("Activation type '%s' is not implemented." % activation)
self.activation = activation
self.type = type
self.name = name
self.units = units
self.cost = cost
self.tied_weights = tied_weights
self.corruption_level = corruption_level
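# Illustrative construction (values are hypothetical, not taken from this module):
# a denoising layer with 300 hidden units that corrupts 25% of its inputs during
# layerwise pre-training.
#
#   hidden = Layer('Sigmoid', type='denoising', units=300, corruption_level=0.25)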
class AutoEncoder(nn.NeuralNetwork, sklearn.base.TransformerMixin):
def _setup(self):
assert not self.is_initialized,\
"This auto-encoder has already been initialized."
backend.setup()
self._backend = backend.AutoEncoderBackend(self)
def fit(self, X):
"""Fit the auto-encoder to the given data using layerwise training.
Parameters
----------
X : array-like, shape (n_samples, n_inputs)
Training vectors as real numbers, where ``n_samples`` is the number of
samples and ``n_inputs`` is the number of input features.
Returns
-------
self : object
Returns this instance.
"""
num_samples, data_size = X.shape[0], X.size
log.info("Training on dataset of {:,} samples with {:,} total size.".format(num_samples, data_size))
if self.n_iter:
log.debug(" - Terminating loop after {} total iterations.".format(self.n_iter))
if self.n_stable:
log.debug(" - Early termination after {} stable iterations.".format(self.n_stable))
if self.verbose:
log.debug("\nEpoch Validation Error Time"
"\n-------------------------------------")
self._backend._fit_impl(X)
return self
def transform(self, X):
"""Encode the data via the neural network, as an upward pass simulation to
generate high-level features from the low-level inputs.
Parameters
----------
X : array-like, shape (n_samples, n_inputs)
Input data to be passed through the auto-encoder upward.
Returns
-------
y : numpy array, shape (n_samples, n_features)
Transformed output array from the auto-encoder.
"""
return self._backend._transform_impl(X)
def transfer(self, nn):
assert not nn.is_initialized,\
"Target multi-layer perceptron has already been initialized."
for a, l in zip(self.layers, nn.layers):
assert a.activation == l.type,\
"Mismatch in activation types in target MLP; expected `%s` but found `%s`."\
% (a.activation, l.type)
assert a.units == l.units,\
"Different number of units in target MLP; expected `%i` but found `%i`."\
% (a.units, l.units)
self._backend._transfer_impl(nn)
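# Illustrative usage sketch. The constructor keywords shown (``layers``, ``n_iter``)
# are assumed to be inherited from ``nn.NeuralNetwork`` and are not defined in this file.
#
#   ae = AutoEncoder(layers=[Layer('Sigmoid', units=128)], n_iter=10)
#   ae.fit(X_train)                   # layerwise pre-training
#   features = ae.transform(X_train)  # encoded (high-level) representation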
| bsd-3-clause |
andaag/scikit-learn | examples/ensemble/plot_voting_probas.py | 316 | 2824 | """
===========================================================
Plot class probabilities calculated by the VotingClassifier
===========================================================
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`LogisticRegression`,
`GaussianNB`, and `RandomForestClassifier`) and used to initialize a
soft-voting `VotingClassifier` with weights `[1, 1, 5]`, which means that
the predicted probabilities of the `RandomForestClassifier` are weighted 5 times
as heavily as those of the other classifiers when the averaged probability
is calculated.
To visualize the probability weighting, we fit each classifier on the training
set and plot the predicted class probabilities for the first sample in this
example dataset.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.0, -1.0], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 5])
# predict class probabilities for all classifiers
probas = [c.fit(X, y).predict_proba(X) for c in (clf1, clf2, clf3, eclf)]
# get class probabilities for the first sample in the dataset
class1_1 = [pr[0, 0] for pr in probas]
class2_1 = [pr[0, 1] for pr in probas]
# plotting
N = 4 # number of groups
ind = np.arange(N) # group positions
width = 0.35 # bar width
fig, ax = plt.subplots()
# bars for classifier 1-3
p1 = ax.bar(ind, np.hstack(([class1_1[:-1], [0]])), width, color='green')
p2 = ax.bar(ind + width, np.hstack(([class2_1[:-1], [0]])), width, color='lightgreen')
# bars for VotingClassifier
p3 = ax.bar(ind, [0, 0, 0, class1_1[-1]], width, color='blue')
p4 = ax.bar(ind + width, [0, 0, 0, class2_1[-1]], width, color='steelblue')
# plot annotations
plt.axvline(2.8, color='k', linestyle='dashed')
ax.set_xticks(ind + width)
ax.set_xticklabels(['LogisticRegression\nweight 1',
'GaussianNB\nweight 1',
'RandomForestClassifier\nweight 5',
'VotingClassifier\n(average probabilities)'],
rotation=40,
ha='right')
plt.ylim([0, 1])
plt.title('Class probabilities for sample 1 by different classifiers')
plt.legend([p1[0], p2[0]], ['class 1', 'class 2'], loc='upper left')
plt.show()
| bsd-3-clause |
pypot/scikit-learn | examples/linear_model/plot_ard.py | 248 | 2622 | """
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit regression model with Bayesian Ridge Regression.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, as a sparsity-inducing
prior is implied on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the ARD Regression
clf = ARDRegression(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot the true weights, the estimated weights and the histogram of the
# weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="ARD estimate")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
zmlabe/IceVarFigs | Scripts/SeaIce/plot_AMSR2_SIC_Ant.py | 1 | 4124 | """
Plots JAXA AMSR2 3.125 km (UHH-Processed) Sea Ice Concentration Data
Source : http://osisaf.met.no/p/ice/
Author : Zachary Labe
Date : 27 February 2017
"""
from netCDF4 import Dataset
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import urllib.request as UL
import numpy as np
import datetime
import gzip
import cmocean
### Directory and time
directory = './Data/'
directoryfigure = './Figures/'
now = datetime.datetime.now()
currentyr = str(now.year)
for i in range(14,31): ### enter days
currentdy = str(i+1)
currentmn = '08'
if int(currentdy) < 10:
currentdy = '0' + currentdy
currentyr = str(now.year)
currenttime = currentmn + '_' + str(currentdy) + '_' + currentyr
titletime = currentmn + '/' + str(currentdy) + '/' + currentyr
print('\n' 'Current Time = %s' '\n' % titletime)
### Pick data set
url = 'ftp://ftp-projects.cen.uni-hamburg.de/seaice/AMSR2/3.125km/'
filename = 'Ant_%s%s%s_res3.125_pyres.nc.gz' % (currentyr,currentmn,currentdy)
    filenameout = 'Ant_AMSR2_SIC.nc'
UL.urlretrieve(url+filename, directory + filename)
inF = gzip.open(directory + filename, 'rb')
outF = open(directory + filenameout, 'wb')
outF.write( inF.read() )
inF.close()
outF.close()
data = Dataset(directory + filenameout)
ice = data.variables['sea_ice_concentration'][:]
lat = data.variables['latitude'][:]
lon = data.variables['longitude'][:]
data.close()
ice = np.asarray(np.squeeze(ice/100.))
print('Completed: Data read!')
ice[np.where(ice <= 0.20)] = np.nan
ice[np.where((ice >= 0.999) & (ice <= 1))] = 0.999
ice[np.where(ice > 1)] = np.nan
ice = ice*100.
print('Completed: Ice masked!')
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
plt.rc('savefig',facecolor='black')
plt.rc('axes',edgecolor='darkgrey')
plt.rc('xtick',color='darkgrey')
plt.rc('ytick',color='darkgrey')
plt.rc('axes',labelcolor='darkgrey')
plt.rc('axes',facecolor='black')
def setcolor(x, color):
for m in x:
for t in x[m][1]:
t.set_color(color)
fig = plt.figure()
ax = fig.add_subplot(111)
### Enter lat/lon
m = Basemap(projection='spstere',boundinglat=-56,lon_0=180,resolution='l',
round=True,area_thresh=10000)
m.drawcoastlines(color = 'r',linewidth=2.5)
m.drawmapboundary(color='k')
m.drawlsmask(land_color='k',ocean_color='k')
cs = m.contourf(lon,lat,ice[:,:],np.arange(20,100.01,2),extend='min',latlon=True)
cmap = cmocean.cm.ice
cs.set_cmap(cmap)
m.fillcontinents(color='k')
cbar = m.colorbar(cs,location='right',pad = 0.2)
cbar.outline.set_edgecolor('k')
barlim = np.arange(20,101,10)
cbar.set_ticks(barlim)
cbar.set_ticklabels(list(map(str,barlim)) )
cbar.set_label(r'\textbf{Concentration (\%)}',fontsize=13,
alpha=1,color='darkgrey')
cbar.ax.tick_params(axis='y', size=.01)
fig.suptitle(r'\textbf{ANTARCTIC SEA ICE -- %s}' % titletime,
fontsize=22,color='darkgrey',alpha=1)
plt.annotate(r'\textbf{DATA:} AMSR2 3.125 km (JAXA/Uni Hamburg-Processing)',xy=(250,100),
xycoords='figure pixels',color='darkgrey',fontsize=6,
alpha=1,rotation=0)
plt.annotate(r'\textbf{SOURCE:} http://icdc.cen.uni-hamburg.de/daten/cryosphere.html',xy=(250,80),
xycoords='figure pixels',color='darkgrey',fontsize=6,
alpha=1,rotation=0)
plt.annotate(r'\textbf{GRAPHIC:} Zachary Labe (@ZLabe)',xy=(250,60),
xycoords='figure pixels',color='darkgrey',fontsize=6,
alpha=1,rotation=0)
plt.tight_layout()
fig.subplots_adjust(top=0.89)
print('Completed: Figure plotted!')
plt.savefig(directoryfigure + 'seaiceconc_%s.png' % currenttime, dpi=300)
print('Completed: Script done!') | mit |
tobgu/qcache | qcache/qframe/pandas_filter.py | 1 | 4697 | from __future__ import unicode_literals
import operator
import numpy
from qcache.qframe.common import assert_list, raise_malformed, is_quoted, unquote, assert_len
from qcache.qframe.constants import COMPARISON_OPERATORS
from qcache.qframe.context import get_current_qframe
JOINING_OPERATORS = {'&': operator.and_,
'|': operator.or_}
def _leaf_node(df, q):
if isinstance(q, basestring):
if is_quoted(q):
return q[1:-1].encode('utf-8')
try:
return df[q]
except KeyError:
raise_malformed("Unknown column", q)
return q
def _bitwise_filter(df, q):
assert_len(q, 3)
op, column, arg = q
if not isinstance(arg, (int, long)):
        raise_malformed('Invalid argument type, must be an integer: {t}'.format(t=type(arg)), q)
try:
series = df[column] & arg
if op == "any_bits":
return series > 0
return series == arg
except TypeError:
raise_malformed("Invalid column type, must be an integer", q)
def _not_filter(df, q):
assert_len(q, 2, "! is a single arity operator, invalid number of arguments")
return ~_do_pandas_filter(df, q[1])
def _isnull_filter(df, q):
assert_len(q, 2, "isnull is a single arity operator, invalid number of arguments")
# Slightly hacky but the only way I've come up with so far.
return df[q[1]] != df[q[1]]
def _comparison_filter(df, q):
assert_len(q, 3)
op, col_name, arg = q
return COMPARISON_OPERATORS[op](df[col_name], _do_pandas_filter(df, arg))
def _join_filter(df, q):
result = None
if len(q) < 2:
raise_malformed("Invalid number of arguments", q)
elif len(q) == 2:
# Conjunctions and disjunctions with only one clause are OK
result = _do_pandas_filter(df, q[1])
else:
result = reduce(lambda l, r: JOINING_OPERATORS[q[0]](l, _do_pandas_filter(df, r)),
q[2:], _do_pandas_filter(df, q[1]))
return result
def prepare_in_clause(q):
"""
The arguments to an in expression may be either a list of values or
a sub query which is then executed to produce a list of values.
"""
assert_len(q, 3)
_, col_name, args = q
if isinstance(args, dict):
# Sub query, circular dependency on query by nature so need to keep the import local
from qcache.qframe import query
current_qframe = get_current_qframe()
sub_df, _ = query(current_qframe.df, args)
try:
args = sub_df[col_name].values
except KeyError:
raise_malformed('Unknown column "{}"'.format(col_name), q)
if not isinstance(args, (list, numpy.ndarray)):
raise_malformed("Second argument must be a list", q)
return col_name, args
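# Illustrative 'in' clauses (column names are hypothetical):
#   ['in', 'user_id', [1, 2, 3]]   # explicit list of values
#   ['in', 'user_id', {...}]       # a dict is executed as a sub-query against the current frame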
def _in_filter(df, q):
col_name, args = prepare_in_clause(q)
return df[col_name].isin(args)
def _like_filter(df, q):
assert_len(q, 3)
op, column, raw_expr = q
if not is_quoted(raw_expr):
raise_malformed("like expects a quoted string as second argument", q)
regexp = unquote(raw_expr)
if not regexp.startswith('%'):
regexp = '^' + regexp
else:
regexp = regexp[1:]
if not regexp.endswith('%'):
regexp += '$'
else:
regexp = regexp[:-1]
# 'like' is case sensitive, 'ilike' is case insensitive
case = op == 'like'
try:
return df[column].str.contains(regexp, case=case, na=False)
except AttributeError:
raise_malformed("Invalid column type for (i)like", q)
def _do_pandas_filter(df, q):
if not isinstance(q, list):
return _leaf_node(df, q)
if not q:
raise_malformed("Empty expression not allowed", q)
result = None
op = q[0]
try:
if op in ('any_bits', 'all_bits'):
result = _bitwise_filter(df, q)
elif op == "!":
result = _not_filter(df, q)
elif op == "isnull":
result = _isnull_filter(df, q)
elif op in COMPARISON_OPERATORS:
result = _comparison_filter(df, q)
elif op in JOINING_OPERATORS:
result = _join_filter(df, q)
elif op == 'in':
result = _in_filter(df, q)
elif op in ('like', 'ilike'):
result = _like_filter(df, q)
else:
raise_malformed("Unknown operator", q)
except KeyError:
raise_malformed("Column is not defined", q)
except TypeError:
raise_malformed("Invalid type in argument", q)
return result
def pandas_filter(df, filter_q):
if filter_q:
assert_list('where', filter_q)
return df[_do_pandas_filter(df, filter_q)]
return df
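# Illustrative use (column names are hypothetical, and '>' is assumed to be one
# of the configured COMPARISON_OPERATORS):
#   q = ['&', ['>', 'price', 10], ['in', 'category', [1, 2]]]
#   filtered = pandas_filter(df, q)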
| mit |
princeofdarkness76/libcmaes | python/surrptest.py | 2 | 2842 | import lcmaes
import numpy as np
import sys
import math
# input parameters for a 10-D problem
x0 = 2
dim = 10
x = [x0]*dim
lambda_ = 10 # lambda is a reserved keyword in python, using lambda_ instead.
seed = 0 # 0 for seed auto-generated within the lib.
sigma = 0.1
p = lcmaes.make_simple_parameters(x,sigma,lambda_,seed)
p.set_str_algo("cmaes")
#p.set_max_iter(20)
p.set_ftarget(1e-3)
p.set_quiet(False)
# surrogate
tree_max_depth = 100000
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.svm import NuSVR
#clf = DecisionTreeRegressor(max_depth=tree_max_depth)
#clf = RandomForestRegressor(n_estimators=100)
#clf = GradientBoostingRegressor()
clf = NuSVR(C=1.0)
def encode(x,Cinv):
xmean = np.mean(x,axis=0)
xxmean = []
for xi in x:
xxmean.append(list(xi - xmean))
nx = []
for xi in xxmean:
nx.append(list(Cinv.dot(xi)))
return nx
def traindt(x,y):
global clf
#print "training surrogate"
#clft = DecisionTreeRegressor(max_depth=tree_max_depth,splitter='random')
#clft = RandomForestRegressor()
#clft = GradientBoostingRegressor(loss='lad',n_estimators=50,learning_rate=0.3,max_depth=2)
clft = NuSVR(C=1e6)
clf = clft.fit(x,y)
def predict(x):
global clf
# print "predicting from surrogate"
y = clf.predict(x)
return y
# objective function.
def fsphere(x,n):
val = 0.0
for i in range(0,n):
val += x[i]*x[i]
return val
def rosen(x,n):
val = 0.0
for i in range(0,n-1):
val += 100.0*pow(x[i+1]-x[i]*x[i],2) + pow(x[i]-1.0,2)
return val
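# Rosenbrock function: f(x) = sum_{i=0}^{n-2} [100*(x_{i+1} - x_i**2)**2 + (x_i - 1)**2],
# with global minimum f(x) = 0 at x = (1, ..., 1).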
# generate a function object
objfunc = lcmaes.fitfunc_pbf.from_callable(rosen)
# surrogate
def ncsurrfunc(c,m):
x = []
y = []
for ci in c:
x.append(lcmaes.get_candidate_x(ci))
y.append(ci.get_fvalue())
#nx = encode(x,m)
traindt(x,y)
return 1
trainfunc = lcmaes.csurrfunc_pbf.from_callable(ncsurrfunc)
def nsurrfunc(c,m):
x = []
for ci in c:
x.append(lcmaes.get_candidate_x(ci))
#nx = encode(x,m)
y = predict(x)
i = 0
for ci in c:
ci.set_fvalue(y[i])
i = i + 1
return 1
predictfunc = lcmaes.surrfunc_pbf.from_callable(nsurrfunc)
# pass the function and parameter to ACM surrogate cmaes, run optimization with 200 training points and collect solution object.
cmasols = lcmaes.surrpcmaes(objfunc,trainfunc,predictfunc,p,True,200)
# collect and inspect results
bcand = cmasols.best_candidate()
bx = lcmaes.get_candidate_x(bcand)
print("best x=",bx," / fevals=",cmasols.fevals())
print("distribution mean=",lcmaes.get_solution_xmean(cmasols))
cov = lcmaes.get_solution_cov(cmasols) # numpy array
#print "cov=",cov
print("elapsed time=",cmasols.elapsed_time(),"ms")
| lgpl-3.0 |
DTMilodowski/EOlab | src/prepare_EOlab_layers.py | 1 | 22185 | import numpy as np
from matplotlib import pyplot as plt
from osgeo import gdal
import os
from osgeo import osr
import sys
from netCDF4 import Dataset
import matplotlib as mpl
import matplotlib.cm as cm
from matplotlib import ticker
import colour_tools as clt
import scipy
from scipy import ndimage, signal
import xarray as xr
from matplotlib import rcParams
# Set up some basic parameters for the plots
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['arial']
rcParams['font.size'] = 9
rcParams['legend.numpoints'] = 1
axis_size = rcParams['font.size']
# This is a super simple function that loads in a NetCDF file and pulls out the important coordinate
# system info that is needed for writing georeferenced GeoTIFFs. Since NetCDF files will vary in
# terms of their dimensionality and number of variables, subsequent processing to GeoTIFF layers has
# been separated out into a separate function
def load_NetCDF(NetCDF_file,lat_var = 'lat', lon_var = 'lon'):
dataset = Dataset(NetCDF_file)
# Get the spatial information from the layer
Lat = dataset.variables[lat_var][:]
Long = dataset.variables[lon_var][:]
DataResX = np.abs(Long[0]-Long[1])
DataResY = np.abs(Lat[0]-Lat[1])
XMinimum = np.min(Long)-DataResX/2.
YMinimum = np.min(Lat)-DataResY/2.
YMaximum = np.max(Lat)+DataResY/2.
#geoTransform = [ XMinimum, DataResX, 0, YMinimum, 0, DataResY ]
geoTransform = [ XMinimum, DataResX, 0, YMaximum, 0, -DataResY ]
return dataset, geoTransform
# A function to resample an array to a higher resolution. The resampling is specified using an scalar,
# which should be an integer, and represents the number of divisions each cell is to be split into.
# This only resamples to higher resolutions, and does not deal with aggregating to lower resolutions.
# The main reason for using this is to increase the accuracy of area based queries using polygon
# shapefiles in EO lab applications. vars is a list of variables which you'd like to resample
def resample_dataset(dataset,geoTransform,vars,resampling_scalar):
ds = {}
for vv in range(0,len(vars)):
print(vars[vv])
ds[vars[vv]], geoTrans = resample_array(np.asarray(dataset.variables[vars[vv]]),geoTransform,resampling_scalar)
return ds, geoTrans
def resample_array(array,geoTransform,resampling_scalar):
rs = resampling_scalar
rows,cols = array.shape
array_temp = np.zeros((rows*rs,cols*rs))
# Fill the new array with the original values
array_temp[::rs,::rs] = array
# Define the convolution kernel
kernel_1d = scipy.signal.boxcar(rs)
kernel_2d = np.outer(kernel_1d, kernel_1d)
    # Apply the kernel in a single 2D convolution to spread each coarse value across its block
array_out = scipy.signal.convolve(array_temp, kernel_2d, mode="same")
#for ii in range(0,rows):
# for jj in range(0,cols):
# array_out[ii*rs:(ii+1)*rs,jj*rs:(jj+1)*rs]=array[ii,jj]
geoTrans = [geoTransform[0], geoTransform[1]/float(rs), geoTransform[2], geoTransform[3], geoTransform[4], geoTransform[5]/float(rs)]
return array_out, geoTrans
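# Illustrative behaviour (variable names are hypothetical): resampling a 2x2 grid
# with resampling_scalar=2 yields a 4x4 grid in which each coarse cell's value is
# spread across a 2x2 block, and the pixel size in the returned geoTransform is halved.
#   fine_array, fine_geoTrans = resample_array(coarse_array, geoTrans, 2)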
# Function to load a GeoTIFF band plus georeferencing information. Only loads one band,
# which is band 1 by default
def load_GeoTIFF_band_and_georeferencing(File,band_number=1):
driver = gdal.GetDriverByName('GTiff')
driver.Register()
try:
ds = gdal.Open(File)
    except RuntimeError as e:
print ('unable to open ' + File)
print (e)
sys.exit(1)
source_band = ds.GetRasterBand(band_number)
if source_band is None:
print( "BAND MISSING")
sys.exit(1)
array = np.array(ds.GetRasterBand(band_number).ReadAsArray(),dtype=np.float64)
geoTrans = ds.GetGeoTransform()
coord_sys = ds.GetProjectionRef()
return array, geoTrans, coord_sys
# Convert a python array with float variables into a three-band RGB array with the colours specified
# according to a given colormap and upper and lower limits to the colormap
def convert_array_to_rgb(array, cmap, ulim, llim, nodatavalue=-9999):
norm = mpl.colors.Normalize(vmin=llim, vmax=ulim)
rgb_array= cm.ScalarMappable(norm=norm,cmap=cmap).to_rgba(array)[:,:,:-1]*255
mask = np.any((~np.isfinite(array),array==nodatavalue),axis=0)
rgb_array[mask,:]=np.array([255.,0.,255.])
return rgb_array
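# Illustrative use (variable names are hypothetical): values are mapped onto the
# colormap between llim and ulim, and NaN / nodata cells are painted magenta
# (255, 0, 255) so they stand out on the display layer.
#   rgb = convert_array_to_rgb(biomass, cm.viridis, ulim=250., llim=0.)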
# Function to write an EO lab data layer from an array
def write_array_to_data_layer_GeoTiff(array,geoTrans, OUTFILE_prefix, EPSG_CODE='4326', north_up=True):
NBands = 1
NRows = 0
NCols = 0
if north_up:
# for north_up array, need the n-s resolution (element 5) to be negative
if geoTrans[5]>0:
geoTrans[5]*=-1
geoTrans[3] = geoTrans[3]-(array.shape[0]+1.)*geoTrans[5]
# Get array dimensions and flip so that it plots in the correct orientation on GIS platforms
if len(array.shape) < 2:
print('array has less than two dimensions! Unable to write to raster')
sys.exit(1)
elif len(array.shape) == 2:
(NRows,NCols) = array.shape
array = np.flipud(array)
elif len(array.shape) == 3:
(NRows,NCols,NBands) = array.shape
for i in range(0,NBands):
array[:,:,i] = np.flipud(array[:,:,i])
else:
print('array has too many dimensions! Unable to write to raster')
sys.exit(1)
else:
# for north_up array, need the n-s resolution (element 5) to be positive
if geoTrans[5]<0:
geoTrans[5]*=-1
geoTrans[3] = geoTrans[3]-(array.shape[0]+1.)*geoTrans[5]
# Get array dimensions and flip so that it plots in the correct orientation on GIS platforms
if len(array.shape) < 2:
print('array has less than two dimensions! Unable to write to raster')
sys.exit(1)
elif len(array.shape) == 2:
(NRows,NCols) = array.shape
array = np.flipud(array)
elif len(array.shape) == 3:
(NRows,NCols,NBands) = array.shape
for i in range(0,NBands):
array[:,:,i] = np.flipud(array[:,:,i])
else:
print ('array has too many dimensions! Unable to write to raster')
sys.exit(1)
# Write GeoTiff
driver = gdal.GetDriverByName('GTiff')
driver.Register()
# set all the relevant geospatial information
dataset = driver.Create( OUTFILE_prefix+'_data.tif', NCols, NRows, NBands, gdal.GDT_Float32 )
dataset.SetGeoTransform( geoTrans )
srs = osr.SpatialReference()
srs.SetWellKnownGeogCS( 'EPSG:'+EPSG_CODE )
dataset.SetProjection( srs.ExportToWkt() )
# write array
dataset.GetRasterBand(1).SetNoDataValue( -9999 )
dataset.GetRasterBand(1).WriteArray( array )
dataset = None
return 0
# This function is similar to before, except that now it writes two GeoTIFFs - a data layer and a
# display layer. For the moment, this can only accept a 2D input array
def write_array_to_display_layer_GeoTiff(array, geoTrans, OUTFILE_prefix, cmap, ulim, llim, EPSG_CODE_DATA='4326', EPSG_CODE_DISPLAY='3857', north_up=True):
NBands = 1
NBands_RGB = 3
NRows = 0
NCols = 0
if north_up:
# for north_up array, need the n-s resolution (element 5) to be negative
if geoTrans[5]>0:
geoTrans[5]*=-1
geoTrans[3] = geoTrans[3]-(array.shape[0]+1.)*geoTrans[5]
# Get array dimensions and flip so that it plots in the correct orientation on GIS platforms
if len(array.shape) < 2:
print ('array has less than two dimensions! Unable to write to raster')
sys.exit(1)
elif len(array.shape) == 2:
(NRows,NCols) = array.shape
array = np.flipud(array)
else:
print ('array has too many dimensions! Unable to write to raster')
sys.exit(1)
else:
# for north_up array, need the n-s resolution (element 5) to be positive
if geoTrans[5]<0:
geoTrans[5]*=-1
geoTrans[3] = geoTrans[3]-(array.shape[0]+1.)*geoTrans[5]
# Get array dimensions and flip so that it plots in the correct orientation on GIS platforms
if len(array.shape) < 2:
print('array has less than two dimensions! Unable to write to raster')
sys.exit(1)
elif len(array.shape) == 2:
(NRows,NCols) = array.shape
array = np.flipud(array)
else:
print ('array has too many dimensions! Unable to write to raster')
sys.exit(1)
# Convert RGB array
rgb_array = convert_array_to_rgb(array,cmap,ulim,llim)
# Write Data Layer GeoTiff
driver = gdal.GetDriverByName('GTiff')
driver.Register()
# set all the relevant geospatial information
dataset = driver.Create( OUTFILE_prefix+'_data.tif', NCols, NRows, NBands, gdal.GDT_Float32 )
dataset.SetGeoTransform( geoTrans )
srs = osr.SpatialReference()
srs.SetWellKnownGeogCS( 'EPSG:'+EPSG_CODE_DATA )
dataset.SetProjection( srs.ExportToWkt() )
# write array
dataset.GetRasterBand(1).SetNoDataValue( -9999 )
dataset.GetRasterBand(1).WriteArray( array )
dataset = None
# Write Display Layer GeoTiff
driver = gdal.GetDriverByName('GTiff')
driver.Register()
# set all the relevant geospatial information
temp_file = "temp_%.0f.tif" % (np.random.random()*10**9)
dataset = driver.Create( temp_file, NCols, NRows, NBands_RGB, gdal.GDT_Byte )
dataset.SetGeoTransform( geoTrans )
srs = osr.SpatialReference()
srs.SetWellKnownGeogCS( 'EPSG:'+EPSG_CODE_DATA )
dataset.SetProjection( srs.ExportToWkt() )
# write array
for bb in range(0,3):
dataset.GetRasterBand(bb+1).WriteArray( rgb_array[:,:,bb] )
dataset = None
# now use gdalwarp to reproject
os.system("gdalwarp -t_srs EPSG:" + EPSG_CODE_DISPLAY + " " + temp_file + " " + OUTFILE_prefix+'_display.tif')
os.system("rm %s" % temp_file)
return 0
# A function to produce a simple map legend for quantitative data layers
def plot_legend(cmap,ulim,llim,axis_label, OUTFILE_prefix,extend='neither'):
norm = mpl.colors.Normalize(vmin=llim, vmax=ulim)
#plt.figure(1, facecolor='White',figsize=[2, 1])
fig,ax = plt.subplots(facecolor='White',figsize=[2, 1])
ax = plt.subplot2grid((1,1),(0,0))
cb = mpl.colorbar.ColorbarBase(ax,cmap=cmap,norm=norm,orientation='horizontal',extend=extend)
tick_locator = ticker.MaxNLocator(nbins=5)
cb.locator = tick_locator
cb.update_ticks()
cb.set_label(axis_label,fontsize = axis_size)
plt.tight_layout()
plt.savefig(OUTFILE_prefix+'_legend.png')
#plt.show()
return 0
def plot_legend_listed(cmap,labels,axis_label, OUTFILE_prefix,figsize=[1.5,1]):
bounds = np.arange(len(labels)+1)
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
#plt.figure(1, facecolor='White',figsize=[1.5, 1])
fig,ax = plt.subplots(facecolor='White',figsize=figsize)
ax = plt.subplot2grid((1,1),(0,0))
cb = mpl.colorbar.ColorbarBase(ax,cmap=cmap,norm=norm,
orientation='vertical')
n_class = labels.size
loc = np.arange(0,n_class)+0.5
cb.set_ticks(loc)
cb.set_ticklabels(labels)
cb.update_ticks()
ax.set_title(axis_label,fontsize = axis_size)
plt.tight_layout()
plt.savefig(OUTFILE_prefix+'_legend.png')
#plt.show()
return 0
"""
DATA IO VARIANTS FOR XARRAY
"""
"""
load_geotiff_to_xarray
A very simple function that reads a geotiff and returns it as an xarray. Nodata
values are converted to the numpy nodata value.
The input arguments are:
- filename (this should include the full path to the file)
Optional arguments are:
- band (default = 1)
- x_name (default = 'longitude')
- y_name (default = 'latitude')
    - option (default = 0).
0: use value in dataset metadata. This is usually fine, except if
there is an issue with the precision, and is applied in all
cases.
1: arbitrary cutoff for nodata to account for precision problems
with float32. Other similar options could be added if necessary.
2: set all negative values as nodata
"""
def load_geotiff_to_xarray(filename, band = 1,x_name='longitude',y_name='latitude',option=0):
xarr = xr.open_rasterio(filename).sel(band=band)
if(option==0):
xarr.values[xarr.values==xarr.nodatavals[0]]=np.nan
if(option==1):
xarr.values[xarr.values<-3*10**38]=np.nan
if(option==2):
xarr.values[xarr.values<0]=np.nan
return xarr #return the xarray object
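# Illustrative use (file path is hypothetical): treat all negative values as nodata.
#   agb = load_geotiff_to_xarray('/path/to/agb_map.tif', option=2)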
"""
copy_xarray_template to create a new array
"""
def copy_xarray_template(xarr):
xarr_new = xarr.copy()
xarr_new.values = np.zeros(xarr.values.shape)*np.nan
return xarr_new
"""
create a geoTransform object from xarray info
"""
def create_geoTrans(array,x_name='x',y_name='y'):
lat = array.coords[y_name].values
lon = array.coords[x_name].values
dlat = lat[1]-lat[0]
dlon = lon[1]-lon[0]
geoTrans = [0,dlon,0,0,0,dlat]
geoTrans[0] = np.min(lon)-dlon/2.
if geoTrans[5]>0:
geoTrans[3]=np.min(lat)-dlat/2.
else:
geoTrans[3]=np.max(lat)-dlat/2.
return geoTrans
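# The returned list follows the GDAL geotransform convention:
#   [x_min, x_resolution, 0, y_origin, 0, y_resolution]
# where a negative y_resolution indicates a north-up raster (y_origin at the top edge).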
"""
check array orientation based on metadata and flip so that it will display
correctly on the EOlab platform
"""
def check_array_orientation(array,geoTrans,north_up=True):
if north_up:
# for north_up array, need the n-s resolution (element 5) to be negative
if geoTrans[5]>0:
geoTrans[5]*=-1
geoTrans[3] = geoTrans[3]-(array.shape[0]+1.)*geoTrans[5]
# Get array dimensions and flip so that it plots in the correct orientation on GIS platforms
if len(array.shape) < 2:
print('array has less than two dimensions! Unable to write to raster')
sys.exit(1)
elif len(array.shape) == 2:
array = np.flipud(array)
elif len(array.shape) == 3:
(NRows,NCols,NBands) = array.shape
for i in range(0,NBands):
array[:,:,i] = np.flipud(array[:,:,i])
else:
print('array has too many dimensions! Unable to write to raster')
sys.exit(1)
else:
# for north_up array, need the n-s resolution (element 5) to be positive
if geoTrans[5]<0:
geoTrans[5]*=-1
geoTrans[3] = geoTrans[3]-(array.shape[0]+1.)*geoTrans[5]
# Get array dimensions and flip so that it plots in the correct orientation on GIS platforms
if len(array.shape) < 2:
print('array has less than two dimensions! Unable to write to raster')
sys.exit(1)
elif len(array.shape) == 2:
array = np.flipud(array)
elif len(array.shape) == 3:
(NRows,NCols,NBands) = array.shape
for i in range(0,NBands):
array[:,:,i] = np.flipud(array[:,:,i])
else:
print ('array has too many dimensions! Unable to write to raster')
sys.exit(1)
return array,geoTrans
"""
# Write xarray to data layer geotiff
"""
def write_xarray_to_data_layer_GeoTiff(array, OUTFILE_prefix,EPSG_CODE='4326',north_up=True):
NBands = 1
NRows,NCols = array.values.shape
# create geotrans object
geoTrans = create_geoTrans(array,x_name=array.dims[1],y_name=array.dims[0])
# check orientation
array.values,geoTrans = check_array_orientation(array.values,geoTrans,north_up=north_up)
# set nodatavalue
array.values[np.isnan(array.values)] = -9999
# Write GeoTiff
driver = gdal.GetDriverByName('GTiff'); driver.Register()
# set all the relevant geospatial information
dataset = driver.Create( OUTFILE_prefix+'.tif', NCols, NRows, NBands, gdal.GDT_Float32 )
dataset.SetGeoTransform( geoTrans )
srs = osr.SpatialReference()
srs.SetWellKnownGeogCS( 'EPSG:'+EPSG_CODE )
dataset.SetProjection( srs.ExportToWkt() )
# write array
dataset.GetRasterBand(1).SetNoDataValue( -9999 )
dataset.GetRasterBand(1).WriteArray( array.values )
dataset = None
return 0
"""
# Write xarray to data and display layer geotiffs
"""
def write_xarray_to_display_layer_GeoTiff(array, OUTFILE_prefix, cmap, ulim, llim, EPSG_CODE_DATA='4326', EPSG_CODE_DISPLAY='3857', north_up=True):
NBands = 1
NBands_RGB = 3
NRows,NCols = array.values.shape
# create geotrans object
geoTrans = create_geoTrans(array,x_name=array.dims[1],y_name=array.dims[0])
# check orientation
array.values,geoTrans = check_array_orientation(array.values,geoTrans,north_up=north_up)
# set nodatavalue
array.values[np.isnan(array.values)] = -9999
# Convert RGB array
rgb_array = convert_array_to_rgb(array.values,cmap,ulim,llim)
# Write Data Layer GeoTiff
driver = gdal.GetDriverByName('GTiff'); driver.Register()
# set all the relevant geospatial information
dataset = driver.Create( OUTFILE_prefix+'_data.tif', NCols, NRows, NBands, gdal.GDT_Float32 )
dataset.SetGeoTransform( geoTrans )
srs = osr.SpatialReference()
srs.SetWellKnownGeogCS( 'EPSG:'+EPSG_CODE_DATA )
dataset.SetProjection( srs.ExportToWkt() )
dataset.GetRasterBand(1).SetNoDataValue( -9999 )
dataset.GetRasterBand(1).WriteArray( array.values )
dataset = None
# Write Display Layer GeoTiff
driver = gdal.GetDriverByName('GTiff')
driver.Register()
# set all the relevant geospatial information
temp_file = "temp_%.0f.tif" % (np.random.random()*10**9)
dataset = driver.Create( temp_file, NCols, NRows, NBands_RGB, gdal.GDT_Byte )
dataset.SetGeoTransform( geoTrans )
srs = osr.SpatialReference()
srs.SetWellKnownGeogCS( 'EPSG:'+EPSG_CODE_DATA )
dataset.SetProjection( srs.ExportToWkt() )
for bb in range(0,3):
dataset.GetRasterBand(bb+1).WriteArray( rgb_array[:,:,bb] )
dataset = None
# now use gdalwarp to reproject via the command line
os.system("gdalwarp -t_srs EPSG:" + EPSG_CODE_DISPLAY + " " + temp_file + " " + OUTFILE_prefix+'_display.tif')
os.system("rm %s" % temp_file)
return 0
"""
# Write xarray to data and display layer geotiffs
# Applies greyscale (perceived luminosity) to areas defined by specified mask
"""
def write_xarray_to_display_layer_confidence_levels_GeoTiff(array, mask, OUTFILE_prefix, cmap, ulim, llim, EPSG_CODE_DATA='4326', EPSG_CODE_DISPLAY='3857', north_up=True):
NBands = 1
NBands_RGB = 3
NRows,NCols = array.values.shape
# create geotrans object
geoTrans = create_geoTrans(array,x_name=array.dims[1],y_name=array.dims[0])
# check orientation
array.values,geoTrans = check_array_orientation(array.values,geoTrans,north_up=north_up)
# set nodatavalue
array.values[np.isnan(array.values)] = -9999
# Convert RGB array
rgb_array = convert_array_to_rgb(array.values,cmap,ulim,llim)
rgb_luminosity_array = convert_array_to_rgb(array.values,clt.cmap_to_perceived_luminosity(cmap),ulim,llim)
mask = mask.reshape(mask.shape[0],mask.shape[1],1)
rgb_mask = np.concatenate((mask,mask,mask),axis=2)
rgb_array[rgb_mask] = rgb_luminosity_array[rgb_mask]
# Write Data Layer GeoTiff
driver = gdal.GetDriverByName('GTiff'); driver.Register()
# set all the relevant geospatial information
dataset = driver.Create( OUTFILE_prefix+'_confidence_data.tif', NCols, NRows, NBands, gdal.GDT_Float32 )
dataset.SetGeoTransform( geoTrans )
srs = osr.SpatialReference()
srs.SetWellKnownGeogCS( 'EPSG:'+EPSG_CODE_DATA )
dataset.SetProjection( srs.ExportToWkt() )
dataset.GetRasterBand(1).SetNoDataValue( -9999 )
dataset.GetRasterBand(1).WriteArray( array.values )
dataset = None
# Write Display Layer GeoTiff
driver = gdal.GetDriverByName('GTiff')
driver.Register()
# set all the relevant geospatial information
temp_file = "temp_%.0f.tif" % (np.random.random()*10**9)
dataset = driver.Create( temp_file, NCols, NRows, NBands_RGB, gdal.GDT_Byte )
dataset.SetGeoTransform( geoTrans )
srs = osr.SpatialReference()
srs.SetWellKnownGeogCS( 'EPSG:'+EPSG_CODE_DATA )
dataset.SetProjection( srs.ExportToWkt() )
for bb in range(0,3):
dataset.GetRasterBand(bb+1).WriteArray( rgb_array[:,:,bb] )
dataset = None
# now use gdalwarp to reproject via the command line
os.system("gdalwarp -t_srs EPSG:" + EPSG_CODE_DISPLAY + " " + temp_file + " " + OUTFILE_prefix+'_confidence_display.tif')
os.system("rm %s" % temp_file)
return 0
| gpl-3.0 |
cybernet14/scikit-learn | sklearn/utils/validation.py | 29 | 24630 | """Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from ..utils.fixes import signature
FLOAT_DTYPES = (np.float64, np.float32, np.float16)
class DataConversionWarning(UserWarning):
"""A warning on implicit data conversions happening in the code"""
pass
warnings.simplefilter("always", DataConversionWarning)
class NonBLASDotWarning(UserWarning):
"""A warning on implicit dispatch to numpy.dot"""
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
"""
# Silenced by default to reduce verbosity. Turn on at runtime for
# performance profiling.
warnings.simplefilter('ignore', NonBLASDotWarning)
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def assert_all_finite(X):
"""Throw a ValueError if X contains NaN or infinity.
Input MUST be an np.ndarray instance or a scipy.sparse matrix."""
_assert_all_finite(X.data if sp.issparse(X) else X)
def as_float_array(X, copy=True, force_all_finite=True):
"""Converts an array-like to an array of floats
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
copy : bool, optional
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
XT : {array, sparse matrix}
An array of type np.float
"""
if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
and not sp.issparse(X)):
return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
copy=copy, force_all_finite=force_all_finite,
ensure_2d=False)
elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X
else:
return X.astype(np.float32 if X.dtype == np.int32 else np.float64)
def _is_arraylike(x):
"""Returns whether the input is array-like"""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit'):
# Don't get num_samples from an ensembles length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
def _shape_repr(shape):
"""Return a platform independent reprensentation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def check_consistent_length(*arrays):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
*arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
"""
uniques = np.unique([_num_samples(X) for X in arrays if X is not None])
if len(uniques) > 1:
raise ValueError("Found arrays with inconsistent numbers of samples: "
"%s" % str(uniques))
def indexable(*iterables):
"""Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
    non-iterable objects to arrays.
Parameters
----------
*iterables : lists, dataframes, arrays, sparse matrices
List of objects to ensure sliceability.
"""
result = []
for X in iterables:
if sp.issparse(X):
result.append(X.tocsr())
elif hasattr(X, "__getitem__") or hasattr(X, "iloc"):
result.append(X)
elif X is None:
result.append(X)
else:
result.append(np.array(X))
check_consistent_length(*result)
return result
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, copy,
force_all_finite):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
matrix input will raise an error. If the input is sparse but not in
the allowed format, it will be converted to the first listed format.
dtype : string, type or None (default=none)
Data type of result. If None, the dtype of the input is preserved.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
"""
if accept_sparse in [None, False]:
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
if dtype is None:
dtype = spmatrix.dtype
changed_format = False
if (isinstance(accept_sparse, (list, tuple))
and spmatrix.format not in accept_sparse):
# create new with correct sparse
spmatrix = spmatrix.asformat(accept_sparse[0])
changed_format = True
if dtype != spmatrix.dtype:
# convert dtype
spmatrix = spmatrix.astype(dtype)
elif copy and not changed_format:
# force copy
spmatrix = spmatrix.copy()
if force_all_finite:
if not hasattr(spmatrix, "data"):
warnings.warn("Can't check %s sparse matrix for nan or inf."
% spmatrix.format)
else:
_assert_all_finite(spmatrix.data)
return spmatrix
def check_array(array, accept_sparse=None, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, ensure_min_samples=1, ensure_min_features=1,
warn_on_dtype=False, estimator=None):
"""Input validation on an array, list, sparse matrix or similar.
By default, the input is converted to an at least 2nd numpy array.
If the dtype of the array is object, attempt converting to float,
raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
ensure_min_samples : int (default=1)
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
"""
if isinstance(accept_sparse, str):
accept_sparse = [accept_sparse]
# store whether originally we wanted numeric dtype
dtype_numeric = dtype == "numeric"
dtype_orig = getattr(array, "dtype", None)
if not hasattr(dtype_orig, 'kind'):
# not a data type (e.g. a column named dtype in a pandas DataFrame)
dtype_orig = None
if dtype_numeric:
if dtype_orig is not None and dtype_orig.kind == "O":
# if input is object, convert to float.
dtype = np.float64
else:
dtype = None
if isinstance(dtype, (list, tuple)):
if dtype_orig is not None and dtype_orig in dtype:
# no dtype conversion required
dtype = None
else:
# dtype conversion required. Let's select the first element of the
# list of accepted types.
dtype = dtype[0]
if sp.issparse(array):
array = _ensure_sparse_format(array, accept_sparse, dtype, copy,
force_all_finite)
else:
array = np.array(array, dtype=dtype, order=order, copy=copy)
if ensure_2d:
if array.ndim == 1:
warnings.warn(
"Passing 1d arrays as data is deprecated in 0.17 and will"
"raise ValueError in 0.19. Reshape your data either using "
"X.reshape(-1, 1) if your data has a single feature or "
"X.reshape(1, -1) if it contains a single sample.",
DeprecationWarning)
array = np.atleast_2d(array)
# To ensure that array flags are maintained
array = np.array(array, dtype=dtype, order=order, copy=copy)
        # make sure we actually converted to numeric:
if dtype_numeric and array.dtype.kind == "O":
array = array.astype(np.float64)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. Expected <= 2" %
array.ndim)
if force_all_finite:
_assert_all_finite(array)
shape_repr = _shape_repr(array.shape)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError("Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required."
% (n_samples, shape_repr, ensure_min_samples))
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError("Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required."
% (n_features, shape_repr, ensure_min_features))
if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig:
msg = ("Data with input dtype %s was converted to %s"
% (dtype_orig, array.dtype))
if estimator is not None:
if not isinstance(estimator, six.string_types):
estimator = estimator.__class__.__name__
msg += " by %s" % estimator
warnings.warn(msg, DataConversionWarning)
return array
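# Illustrative behaviour (toy inputs):
#   check_array([[1., 2.], [3., 4.]])   # returns a validated 2D ndarray
#   check_array(X_sparse)               # raises TypeError unless accept_sparse is given
#   check_array([[1., np.nan]])         # raises ValueError (non-finite values)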
def check_X_y(X, y, accept_sparse=None, dtype="numeric", order=None, copy=False,
force_all_finite=True, ensure_2d=True, allow_nd=False,
multi_output=False, ensure_min_samples=1,
ensure_min_features=1, y_numeric=False,
warn_on_dtype=False, estimator=None):
"""Input validation for standard estimators.
Checks X and y for consistent length, enforces X 2d and y 1d.
Standard input checks are only applied to y. For multi-label y,
set multi_output=True to allow 2d and sparse y.
If the dtype of X is object, attempt converting to float,
raising on failure.
Parameters
----------
X : nd-array, list or sparse matrix
Input data.
y : nd-array, list or sparse matrix
Labels.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
multi_output : boolean (default=False)
Whether to allow 2-d y (array or sparse matrix). If false, y will be
validated as a vector.
ensure_min_samples : int (default=1)
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when X has effectively 2 dimensions or
is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
this check.
y_numeric : boolean (default=False)
Whether to ensure that y has a numeric type. If dtype of y is object,
it is converted to float64. Should only be used for regression
algorithms.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
y_converted : object
The converted and validated y.
"""
X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite,
ensure_2d, allow_nd, ensure_min_samples,
ensure_min_features, warn_on_dtype, estimator)
if multi_output:
y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False,
dtype=None)
else:
y = column_or_1d(y, warn=True)
_assert_all_finite(y)
if y_numeric and y.dtype.kind == 'O':
y = y.astype(np.float64)
check_consistent_length(X, y)
return X, y
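# Illustrative behaviour (toy inputs):
#   X, y = check_X_y([[1.], [2.], [3.]], [0, 1, 0])   # X -> (3, 1) array, y -> (3,) array
#   check_X_y([[1.], [2.]], [0, 1, 0])                # raises ValueError (inconsistent lengths)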
def column_or_1d(y, warn=False):
""" Ravel column or 1d numpy array, else raises an error
Parameters
----------
y : array-like
warn : boolean, default False
To control display of warnings.
Returns
-------
y : array
"""
shape = np.shape(y)
if len(shape) == 1:
return np.ravel(y)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
return np.ravel(y)
raise ValueError("bad input shape {0}".format(shape))
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
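# Illustrative behaviour:
#   check_random_state(0)       # returns a new RandomState seeded with 0
#   check_random_state(None)    # returns the global numpy RandomState singleton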
def has_fit_parameter(estimator, parameter):
"""Checks whether the estimator's fit method supports the given parameter.
Examples
--------
>>> from sklearn.svm import SVC
>>> has_fit_parameter(SVC(), "sample_weight")
True
"""
return parameter in signature(estimator.fit).parameters
def check_symmetric(array, tol=1E-10, raise_warning=True,
raise_exception=False):
"""Make sure that array is 2D, square and symmetric.
If the array is not symmetric, then a symmetrized version is returned.
Optionally, a warning or exception is raised if the matrix is not
symmetric.
Parameters
----------
array : nd-array or sparse matrix
Input object to check / convert. Must be two-dimensional and square,
otherwise a ValueError will be raised.
tol : float
Absolute tolerance for equivalence of arrays. Default = 1E-10.
raise_warning : boolean (default=True)
If True then raise a warning if conversion is required.
raise_exception : boolean (default=False)
If True then raise an exception if array is not symmetric.
Returns
-------
array_sym : ndarray or sparse matrix
Symmetrized version of the input array, i.e. the average of array
and array.transpose(). If sparse, then duplicate entries are first
summed and zeros are eliminated.
"""
if (array.ndim != 2) or (array.shape[0] != array.shape[1]):
raise ValueError("array must be 2-dimensional and square. "
"shape = {0}".format(array.shape))
if sp.issparse(array):
diff = array - array.T
# only csr, csc, and coo have `data` attribute
if diff.format not in ['csr', 'csc', 'coo']:
diff = diff.tocsr()
symmetric = np.all(abs(diff.data) < tol)
else:
symmetric = np.allclose(array, array.T, atol=tol)
if not symmetric:
if raise_exception:
raise ValueError("Array must be symmetric")
if raise_warning:
warnings.warn("Array is not symmetric, and will be converted "
"to symmetric by average with its transpose.")
if sp.issparse(array):
conversion = 'to' + array.format
array = getattr(0.5 * (array + array.T), conversion)()
else:
array = 0.5 * (array + array.T)
return array
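# Illustrative behaviour (toy matrix):
#   A = np.array([[1., 2.], [2.5, 1.]])
#   check_symmetric(A)   # warns and returns 0.5 * (A + A.T)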
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
"""Perform is_fitted validation for estimator.
Checks if the estimator is fitted by verifying the presence of
"all_or_any" of the passed attributes and raises a NotFittedError with the
given message.
Parameters
----------
estimator : estimator instance.
estimator instance for which the check is performed.
attributes : attribute name(s) given as string or a list/tuple of strings
Eg. : ["coef_", "estimator_", ...], "coef_"
msg : string
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this method."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default all
Specify whether all or any of the given attributes must exist.
"""
if msg is None:
msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this method.")
if not hasattr(estimator, 'fit'):
raise TypeError("%s is not an estimator instance." % (estimator))
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
if not all_or_any([hasattr(estimator, attr) for attr in attributes]):
raise NotFittedError(msg % {'name': type(estimator).__name__})
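# Illustrative sketch (not part of the original module): the intended usage
# pattern is to call check_is_fitted at the top of predict()/transform() so a
# NotFittedError is raised before any attribute access. The class name is
# hypothetical.
class _FittedCheckDemo:
    def fit(self, X, y=None):
        self.coef_ = 0.0
        return self

    def predict(self, X):
        check_is_fitted(self, "coef_")
        return [self.coef_ for _ in X]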
def check_non_negative(X, whom):
"""
Check if there is any negative value in an array.
Parameters
----------
X : array-like or sparse matrix
Input data.
whom : string
Who passed X to this function.
"""
X = X.data if sp.issparse(X) else X
if (X < 0).any():
raise ValueError("Negative values in data passed to %s" % whom)
| bsd-3-clause |
TomAugspurger/pandas | asv_bench/benchmarks/groupby.py | 1 | 20710 | from functools import partial
from itertools import product
from string import ascii_letters
import numpy as np
from pandas import (
Categorical,
DataFrame,
MultiIndex,
Series,
Timestamp,
date_range,
period_range,
)
from .pandas_vb_common import tm
method_blacklist = {
"object": {
"median",
"prod",
"sem",
"cumsum",
"sum",
"cummin",
"mean",
"max",
"skew",
"cumprod",
"cummax",
"rank",
"pct_change",
"min",
"var",
"mad",
"describe",
"std",
"quantile",
},
"datetime": {
"median",
"prod",
"sem",
"cumsum",
"sum",
"mean",
"skew",
"cumprod",
"cummax",
"pct_change",
"var",
"mad",
"describe",
"std",
},
}
class ApplyDictReturn:
def setup(self):
self.labels = np.arange(1000).repeat(10)
self.data = Series(np.random.randn(len(self.labels)))
def time_groupby_apply_dict_return(self):
self.data.groupby(self.labels).apply(
lambda x: {"first": x.values[0], "last": x.values[-1]}
)
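# Illustrative sketch (not part of the benchmark suite): on a tiny Series the
# pattern benchmarked above returns one dict per group, which pandas wraps in
# a Series indexed by group label. The helper name is hypothetical.
def _apply_dict_return_example():
    labels = np.array([1, 1, 2, 2])
    data = Series([10.0, 11.0, 20.0, 21.0])
    return data.groupby(labels).apply(
        lambda x: {"first": x.values[0], "last": x.values[-1]}
    )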
class Apply:
def setup_cache(self):
N = 10 ** 4
labels = np.random.randint(0, 2000, size=N)
labels2 = np.random.randint(0, 3, size=N)
df = DataFrame(
{
"key": labels,
"key2": labels2,
"value1": np.random.randn(N),
"value2": ["foo", "bar", "baz", "qux"] * (N // 4),
}
)
return df
def time_scalar_function_multi_col(self, df):
df.groupby(["key", "key2"]).apply(lambda x: 1)
def time_scalar_function_single_col(self, df):
df.groupby("key").apply(lambda x: 1)
@staticmethod
def df_copy_function(g):
# ensure that the group name is available (see GH #15062)
g.name
return g.copy()
def time_copy_function_multi_col(self, df):
df.groupby(["key", "key2"]).apply(self.df_copy_function)
def time_copy_overhead_single_col(self, df):
df.groupby("key").apply(self.df_copy_function)
class Groups:
param_names = ["key"]
params = ["int64_small", "int64_large", "object_small", "object_large"]
def setup_cache(self):
size = 10 ** 6
data = {
"int64_small": Series(np.random.randint(0, 100, size=size)),
"int64_large": Series(np.random.randint(0, 10000, size=size)),
"object_small": Series(
tm.makeStringIndex(100).take(np.random.randint(0, 100, size=size))
),
"object_large": Series(
tm.makeStringIndex(10000).take(np.random.randint(0, 10000, size=size))
),
}
return data
def setup(self, data, key):
self.ser = data[key]
def time_series_groups(self, data, key):
self.ser.groupby(self.ser).groups
class GroupManyLabels:
params = [1, 1000]
param_names = ["ncols"]
def setup(self, ncols):
N = 1000
data = np.random.randn(N, ncols)
self.labels = np.random.randint(0, 100, size=N)
self.df = DataFrame(data)
def time_sum(self, ncols):
self.df.groupby(self.labels).sum()
class Nth:
param_names = ["dtype"]
params = ["float32", "float64", "datetime", "object"]
def setup(self, dtype):
N = 10 ** 5
# with datetimes (GH7555)
if dtype == "datetime":
values = date_range("1/1/2011", periods=N, freq="s")
elif dtype == "object":
values = ["foo"] * N
else:
values = np.arange(N).astype(dtype)
key = np.arange(N)
self.df = DataFrame({"key": key, "values": values})
self.df.iloc[1, 1] = np.nan # insert missing data
def time_frame_nth_any(self, dtype):
self.df.groupby("key").nth(0, dropna="any")
def time_groupby_nth_all(self, dtype):
self.df.groupby("key").nth(0, dropna="all")
def time_frame_nth(self, dtype):
self.df.groupby("key").nth(0)
def time_series_nth_any(self, dtype):
self.df["values"].groupby(self.df["key"]).nth(0, dropna="any")
def time_series_nth_all(self, dtype):
self.df["values"].groupby(self.df["key"]).nth(0, dropna="all")
def time_series_nth(self, dtype):
self.df["values"].groupby(self.df["key"]).nth(0)
class DateAttributes:
def setup(self):
rng = date_range("1/1/2000", "12/31/2005", freq="H")
self.year, self.month, self.day = rng.year, rng.month, rng.day
self.ts = Series(np.random.randn(len(rng)), index=rng)
def time_len_groupby_object(self):
len(self.ts.groupby([self.year, self.month, self.day]))
class Int64:
def setup(self):
arr = np.random.randint(-1 << 12, 1 << 12, (1 << 17, 5))
i = np.random.choice(len(arr), len(arr) * 5)
arr = np.vstack((arr, arr[i]))
i = np.random.permutation(len(arr))
arr = arr[i]
self.cols = list("abcde")
self.df = DataFrame(arr, columns=self.cols)
self.df["jim"], self.df["joe"] = np.random.randn(2, len(self.df)) * 10
def time_overflow(self):
self.df.groupby(self.cols).max()
class CountMultiDtype:
def setup_cache(self):
n = 10000
offsets = np.random.randint(n, size=n).astype("timedelta64[ns]")
dates = np.datetime64("now") + offsets
dates[np.random.rand(n) > 0.5] = np.datetime64("nat")
offsets[np.random.rand(n) > 0.5] = np.timedelta64("nat")
value2 = np.random.randn(n)
value2[np.random.rand(n) > 0.5] = np.nan
obj = np.random.choice(list("ab"), size=n).astype(object)
obj[np.random.randn(n) > 0.5] = np.nan
df = DataFrame(
{
"key1": np.random.randint(0, 500, size=n),
"key2": np.random.randint(0, 100, size=n),
"dates": dates,
"value2": value2,
"value3": np.random.randn(n),
"ints": np.random.randint(0, 1000, size=n),
"obj": obj,
"offsets": offsets,
}
)
return df
def time_multi_count(self, df):
df.groupby(["key1", "key2"]).count()
class CountMultiInt:
def setup_cache(self):
n = 10000
df = DataFrame(
{
"key1": np.random.randint(0, 500, size=n),
"key2": np.random.randint(0, 100, size=n),
"ints": np.random.randint(0, 1000, size=n),
"ints2": np.random.randint(0, 1000, size=n),
}
)
return df
def time_multi_int_count(self, df):
df.groupby(["key1", "key2"]).count()
def time_multi_int_nunique(self, df):
df.groupby(["key1", "key2"]).nunique()
class AggFunctions:
def setup_cache(self):
N = 10 ** 5
fac1 = np.array(["A", "B", "C"], dtype="O")
fac2 = np.array(["one", "two"], dtype="O")
df = DataFrame(
{
"key1": fac1.take(np.random.randint(0, 3, size=N)),
"key2": fac2.take(np.random.randint(0, 2, size=N)),
"value1": np.random.randn(N),
"value2": np.random.randn(N),
"value3": np.random.randn(N),
}
)
return df
def time_different_str_functions(self, df):
df.groupby(["key1", "key2"]).agg(
{"value1": "mean", "value2": "var", "value3": "sum"}
)
def time_different_numpy_functions(self, df):
df.groupby(["key1", "key2"]).agg(
{"value1": np.mean, "value2": np.var, "value3": np.sum}
)
def time_different_python_functions_multicol(self, df):
df.groupby(["key1", "key2"]).agg([sum, min, max])
def time_different_python_functions_singlecol(self, df):
df.groupby("key1").agg([sum, min, max])
class GroupStrings:
def setup(self):
n = 2 * 10 ** 5
alpha = list(map("".join, product(ascii_letters, repeat=4)))
data = np.random.choice(alpha, (n // 5, 4), replace=False)
data = np.repeat(data, 5, axis=0)
self.df = DataFrame(data, columns=list("abcd"))
self.df["joe"] = (np.random.randn(len(self.df)) * 10).round(3)
self.df = self.df.sample(frac=1).reset_index(drop=True)
def time_multi_columns(self):
self.df.groupby(list("abcd")).max()
class MultiColumn:
def setup_cache(self):
N = 10 ** 5
key1 = np.tile(np.arange(100, dtype=object), 1000)
key2 = key1.copy()
np.random.shuffle(key1)
np.random.shuffle(key2)
df = DataFrame(
{
"key1": key1,
"key2": key2,
"data1": np.random.randn(N),
"data2": np.random.randn(N),
}
)
return df
def time_lambda_sum(self, df):
df.groupby(["key1", "key2"]).agg(lambda x: x.values.sum())
def time_cython_sum(self, df):
df.groupby(["key1", "key2"]).sum()
def time_col_select_lambda_sum(self, df):
df.groupby(["key1", "key2"])["data1"].agg(lambda x: x.values.sum())
def time_col_select_numpy_sum(self, df):
df.groupby(["key1", "key2"])["data1"].agg(np.sum)
class Size:
def setup(self):
n = 10 ** 5
offsets = np.random.randint(n, size=n).astype("timedelta64[ns]")
dates = np.datetime64("now") + offsets
self.df = DataFrame(
{
"key1": np.random.randint(0, 500, size=n),
"key2": np.random.randint(0, 100, size=n),
"value1": np.random.randn(n),
"value2": np.random.randn(n),
"value3": np.random.randn(n),
"dates": dates,
}
)
self.draws = Series(np.random.randn(n))
labels = Series(["foo", "bar", "baz", "qux"] * (n // 4))
self.cats = labels.astype("category")
def time_multi_size(self):
self.df.groupby(["key1", "key2"]).size()
def time_category_size(self):
self.draws.groupby(self.cats).size()
class GroupByMethods:
param_names = ["dtype", "method", "application"]
params = [
["int", "float", "object", "datetime"],
[
"all",
"any",
"bfill",
"count",
"cumcount",
"cummax",
"cummin",
"cumprod",
"cumsum",
"describe",
"ffill",
"first",
"head",
"last",
"mad",
"max",
"min",
"median",
"mean",
"nunique",
"pct_change",
"prod",
"quantile",
"rank",
"sem",
"shift",
"size",
"skew",
"std",
"sum",
"tail",
"unique",
"value_counts",
"var",
],
["direct", "transformation"],
]
def setup(self, dtype, method, application):
if method in method_blacklist.get(dtype, {}):
raise NotImplementedError # skip benchmark
ngroups = 1000
size = ngroups * 2
rng = np.arange(ngroups)
values = rng.take(np.random.randint(0, ngroups, size=size))
if dtype == "int":
key = np.random.randint(0, size, size=size)
elif dtype == "float":
key = np.concatenate(
[np.random.random(ngroups) * 0.1, np.random.random(ngroups) * 10.0]
)
elif dtype == "object":
key = ["foo"] * size
elif dtype == "datetime":
key = date_range("1/1/2011", periods=size, freq="s")
df = DataFrame({"values": values, "key": key})
if application == "transform":
if method == "describe":
raise NotImplementedError
self.as_group_method = lambda: df.groupby("key")["values"].transform(method)
self.as_field_method = lambda: df.groupby("values")["key"].transform(method)
else:
self.as_group_method = getattr(df.groupby("key")["values"], method)
self.as_field_method = getattr(df.groupby("values")["key"], method)
def time_dtype_as_group(self, dtype, method, application):
self.as_group_method()
def time_dtype_as_field(self, dtype, method, application):
self.as_field_method()
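# Illustrative sketch (not part of the benchmark suite): one parameter
# combination of GroupByMethods written out by hand, to make the indirection
# through setup()/getattr easier to follow. The helper name is hypothetical.
def _groupby_methods_example():
    bench = GroupByMethods()
    bench.setup("int", "sum", "direct")
    bench.time_dtype_as_group("int", "sum", "direct")  # df.groupby("key")["values"].sum()
    bench.time_dtype_as_field("int", "sum", "direct")  # df.groupby("values")["key"].sum()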
class RankWithTies:
# GH 21237
param_names = ["dtype", "tie_method"]
params = [
["float64", "float32", "int64", "datetime64"],
["first", "average", "dense", "min", "max"],
]
def setup(self, dtype, tie_method):
N = 10 ** 4
if dtype == "datetime64":
data = np.array([Timestamp("2011/01/01")] * N, dtype=dtype)
else:
data = np.array([1] * N, dtype=dtype)
self.df = DataFrame({"values": data, "key": ["foo"] * N})
def time_rank_ties(self, dtype, tie_method):
self.df.groupby("key").rank(method=tie_method)
class Float32:
# GH 13335
def setup(self):
tmp1 = (np.random.random(10000) * 0.1).astype(np.float32)
tmp2 = (np.random.random(10000) * 10.0).astype(np.float32)
tmp = np.concatenate((tmp1, tmp2))
arr = np.repeat(tmp, 10)
self.df = DataFrame(dict(a=arr, b=arr))
def time_sum(self):
self.df.groupby(["a"])["b"].sum()
class Categories:
def setup(self):
N = 10 ** 5
arr = np.random.random(N)
data = {"a": Categorical(np.random.randint(10000, size=N)), "b": arr}
self.df = DataFrame(data)
data = {
"a": Categorical(np.random.randint(10000, size=N), ordered=True),
"b": arr,
}
self.df_ordered = DataFrame(data)
data = {
"a": Categorical(
np.random.randint(100, size=N), categories=np.arange(10000)
),
"b": arr,
}
self.df_extra_cat = DataFrame(data)
def time_groupby_sort(self):
self.df.groupby("a")["b"].count()
def time_groupby_nosort(self):
self.df.groupby("a", sort=False)["b"].count()
def time_groupby_ordered_sort(self):
self.df_ordered.groupby("a")["b"].count()
def time_groupby_ordered_nosort(self):
self.df_ordered.groupby("a", sort=False)["b"].count()
def time_groupby_extra_cat_sort(self):
self.df_extra_cat.groupby("a")["b"].count()
def time_groupby_extra_cat_nosort(self):
self.df_extra_cat.groupby("a", sort=False)["b"].count()
class Datelike:
# GH 14338
params = ["period_range", "date_range", "date_range_tz"]
param_names = ["grouper"]
def setup(self, grouper):
N = 10 ** 4
rng_map = {
"period_range": period_range,
"date_range": date_range,
"date_range_tz": partial(date_range, tz="US/Central"),
}
self.grouper = rng_map[grouper]("1900-01-01", freq="D", periods=N)
self.df = DataFrame(np.random.randn(10 ** 4, 2))
def time_sum(self, grouper):
self.df.groupby(self.grouper).sum()
class SumBools:
# GH 2692
def setup(self):
N = 500
self.df = DataFrame({"ii": range(N), "bb": [True] * N})
def time_groupby_sum_booleans(self):
self.df.groupby("ii").sum()
class SumMultiLevel:
# GH 9049
timeout = 120.0
def setup(self):
N = 50
self.df = DataFrame(
{"A": list(range(N)) * 2, "B": range(N * 2), "C": 1}
).set_index(["A", "B"])
def time_groupby_sum_multiindex(self):
self.df.groupby(level=[0, 1]).sum()
class Transform:
def setup(self):
n1 = 400
n2 = 250
index = MultiIndex(
levels=[np.arange(n1), tm.makeStringIndex(n2)],
codes=[np.repeat(range(n1), n2).tolist(), list(range(n2)) * n1],
names=["lev1", "lev2"],
)
arr = np.random.randn(n1 * n2, 3)
arr[::10000, 0] = np.nan
arr[1::10000, 1] = np.nan
arr[2::10000, 2] = np.nan
data = DataFrame(arr, index=index, columns=["col1", "col20", "col3"])
self.df = data
n = 20000
self.df1 = DataFrame(
np.random.randint(1, n, (n, 3)), columns=["jim", "joe", "jolie"]
)
self.df2 = self.df1.copy()
self.df2["jim"] = self.df2["joe"]
self.df3 = DataFrame(
np.random.randint(1, (n / 10), (n, 3)), columns=["jim", "joe", "jolie"]
)
self.df4 = self.df3.copy()
self.df4["jim"] = self.df4["joe"]
def time_transform_lambda_max(self):
self.df.groupby(level="lev1").transform(lambda x: max(x))
def time_transform_ufunc_max(self):
self.df.groupby(level="lev1").transform(np.max)
def time_transform_multi_key1(self):
self.df1.groupby(["jim", "joe"])["jolie"].transform("max")
def time_transform_multi_key2(self):
self.df2.groupby(["jim", "joe"])["jolie"].transform("max")
def time_transform_multi_key3(self):
self.df3.groupby(["jim", "joe"])["jolie"].transform("max")
def time_transform_multi_key4(self):
self.df4.groupby(["jim", "joe"])["jolie"].transform("max")
class TransformBools:
def setup(self):
N = 120000
transition_points = np.sort(np.random.choice(np.arange(N), 1400))
transitions = np.zeros(N, dtype=np.bool)
transitions[transition_points] = True
self.g = transitions.cumsum()
self.df = DataFrame({"signal": np.random.rand(N)})
def time_transform_mean(self):
self.df["signal"].groupby(self.g).transform(np.mean)
class TransformNaN:
# GH 12737
def setup(self):
self.df_nans = DataFrame(
{"key": np.repeat(np.arange(1000), 10), "B": np.nan, "C": np.nan}
)
self.df_nans.loc[4::10, "B":"C"] = 5
def time_first(self):
self.df_nans.groupby("key").transform("first")
class TransformEngine:
def setup(self):
N = 10 ** 3
data = DataFrame(
{0: [str(i) for i in range(100)] * N, 1: list(range(100)) * N},
columns=[0, 1],
)
self.grouper = data.groupby(0)
def time_series_numba(self):
def function(values, index):
return values * 5
self.grouper[1].transform(function, engine="numba")
def time_series_cython(self):
def function(values):
return values * 5
self.grouper[1].transform(function, engine="cython")
def time_dataframe_numba(self):
def function(values, index):
return values * 5
self.grouper.transform(function, engine="numba")
def time_dataframe_cython(self):
def function(values):
return values * 5
self.grouper.transform(function, engine="cython")
class AggEngine:
def setup(self):
N = 10 ** 3
data = DataFrame(
{0: [str(i) for i in range(100)] * N, 1: list(range(100)) * N},
columns=[0, 1],
)
self.grouper = data.groupby(0)
def time_series_numba(self):
def function(values, index):
total = 0
for i, value in enumerate(values):
if i % 2:
total += value + 5
else:
total += value * 2
return total
self.grouper[1].agg(function, engine="numba")
def time_series_cython(self):
def function(values):
total = 0
for i, value in enumerate(values):
if i % 2:
total += value + 5
else:
total += value * 2
return total
self.grouper[1].agg(function, engine="cython")
def time_dataframe_numba(self):
def function(values, index):
total = 0
for i, value in enumerate(values):
if i % 2:
total += value + 5
else:
total += value * 2
return total
self.grouper.agg(function, engine="numba")
def time_dataframe_cython(self):
def function(values):
total = 0
for i, value in enumerate(values):
if i % 2:
total += value + 5
else:
total += value * 2
return total
self.grouper.agg(function, engine="cython")
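# Illustrative sketch (not part of the benchmark suite): the engine benchmarks
# above can be smoke-tested outside asv by calling setup() and one time_*
# method directly. The numba variants additionally require numba to be
# installed; the helper name is hypothetical.
def _agg_engine_smoke_test():
    bench = AggEngine()
    bench.setup()
    bench.time_series_cython()     # UDF aggregation on a single column
    bench.time_dataframe_cython()  # same UDF applied frame-wide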
from .pandas_vb_common import setup # noqa: F401 isort:skip
| bsd-3-clause |
pllim/astropy | astropy/io/misc/pandas/connect.py | 2 | 3733 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file connects the readers/writers to the astropy.table.Table class
import functools
import os.path
from astropy.table import Table
import astropy.io.registry as io_registry
__all__ = ['PANDAS_FMTS']
# Astropy users normally expect to not have an index, so default to turn
# off writing the index. This structure allows for astropy-specific
# customization of all options.
PANDAS_FMTS = {'csv': {'read': {},
'write': {'index': False}},
'fwf': {'read': {}}, # No writer
'html': {'read': {},
'write': {'index': False}},
'json': {'read': {},
'write': {}}}
PANDAS_PREFIX = 'pandas.'
# Imports for reading HTML
_IMPORTS = False
_HAS_BS4 = False
_HAS_LXML = False
_HAS_HTML5LIB = False
def import_html_libs():
"""Try importing dependencies for reading HTML.
This is copied from pandas.io.html
"""
# import things we need
# but make this done on a first use basis
global _IMPORTS
if _IMPORTS:
return
global _HAS_BS4, _HAS_LXML, _HAS_HTML5LIB
from astropy.utils.compat.optional_deps import (
HAS_BS4 as _HAS_BS4,
HAS_LXML as _HAS_LXML,
HAS_HTML5LIB as _HAS_HTML5LIB
)
_IMPORTS = True
def _pandas_read(fmt, filespec, **kwargs):
"""Provide io Table connector to read table using pandas.
"""
try:
import pandas
except ImportError:
raise ImportError('pandas must be installed to use pandas table reader')
pandas_fmt = fmt[len(PANDAS_PREFIX):] # chop the 'pandas.' in front
read_func = getattr(pandas, 'read_' + pandas_fmt)
# Get defaults and then override with user-supplied values
read_kwargs = PANDAS_FMTS[pandas_fmt]['read'].copy()
read_kwargs.update(kwargs)
# Special case: pandas defaults to HTML lxml for reading, but does not attempt
# to fall back to bs4 + html5lib. So do that now for convenience if user has
# not specifically selected a flavor. If things go wrong the pandas exception
# with instruction to install a library will come up.
if pandas_fmt == 'html' and 'flavor' not in kwargs:
import_html_libs()
if (not _HAS_LXML and _HAS_HTML5LIB and _HAS_BS4):
read_kwargs['flavor'] = 'bs4'
df = read_func(filespec, **read_kwargs)
# Special case for HTML
if pandas_fmt == 'html':
df = df[0]
return Table.from_pandas(df)
def _pandas_write(fmt, tbl, filespec, overwrite=False, **kwargs):
"""Provide io Table connector to write table using pandas.
"""
pandas_fmt = fmt[len(PANDAS_PREFIX):] # chop the 'pandas.' in front
# Get defaults and then override with user-supplied values
write_kwargs = PANDAS_FMTS[pandas_fmt]['write'].copy()
write_kwargs.update(kwargs)
df = tbl.to_pandas()
write_method = getattr(df, 'to_' + pandas_fmt)
if not overwrite:
try: # filespec is not always a path-like
exists = os.path.exists(filespec)
except TypeError: # skip invalid arguments
pass
else:
if exists: # only error if file already exists
raise OSError(f"{filespec} already exists")
return write_method(filespec, **write_kwargs)
for pandas_fmt, defaults in PANDAS_FMTS.items():
fmt = PANDAS_PREFIX + pandas_fmt # Full format specifier
if 'read' in defaults:
func = functools.partial(_pandas_read, fmt)
io_registry.register_reader(fmt, Table, func)
if 'write' in defaults:
func = functools.partial(_pandas_write, fmt)
io_registry.register_writer(fmt, Table, func)
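# Illustrative sketch (not part of the original module): once the loop above
# has run, the pandas-backed formats are available through the normal Table
# I/O interface. The function name and file name are hypothetical.
def _pandas_csv_round_trip(path="example_table.csv"):
    t = Table({"a": [1, 2, 3], "b": ["x", "y", "z"]})
    t.write(path, format="pandas.csv", overwrite=True)
    return Table.read(path, format="pandas.csv")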
| bsd-3-clause |
katsumio/test | rs_base_glu.py | 1 | 2719 | # -*- coding: utf-8 -*-
# Writing and reading text files
#
# variable = open(filename, mode)
# with statement (with open(file) as variable): no explicit close() needed
# for variable in object:
#     do something with each element
# substring extraction with slices, e.g. [0:6]
#
# Create 2017/06/30
# update 2017/06/30
# Author Katsumi.Oshiro
import csv           # csv module (reading and writing CSV files)
import glob          # glob module (filename pattern matching)
import pandas as pd  # pandas module
print('RS_Base lab-test CSV data extraction (START)')
# Initialize the dictionary (patient ID -> date of birth)
birth = {}
# Read the patient master file (name.csv)
with open('../data/name.csv', 'r')as f:
reader = csv.reader(f)
for row in reader:
# print(row[0],row[1],row[2],row[3])
birth.update({row[0]:row[3]})
# Dictionary (birth): date-of-birth lookup test (patient ID: 679)
print('Date of birth for patient ID 679 ->', birth["679"])
# Age calculation test (patient ID: 679)
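# The age is computed by comparing dates as YYYYMMDD integers: dividing the
# difference by 10000 keeps only the year part, e.g.
# 20170630 - 19800701 = 369929 -> int(369929 / 10000) = 36 (years).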
today = int(pd.to_datetime('today').strftime('%Y%m%d'))
birthday = int(pd.to_datetime(birth["679"]).strftime('%Y%m%d'))
print('Age of patient ID 679 ->', int((today - birthday) / 10000))
# Get the lab-test file names in the folder (wildcards may be used)
txt_file = glob.glob('../data/labo/*.txt')
# Output the test results
# Source data: low[0] date, low[1] patient ID, low[2] name, low[3] sex, low[5] test item, low[6] judgment, low[10] measured value
with open("../data/labo/GLU-P.csv", "w") as f:
for file_name in txt_file:
with open(file_name, 'r')as f2:
reader = csv.reader(f2)
for low in reader:
if low[5] == "GLU-P":
writer = csv.writer(f, lineterminator='\n')
listdata = []
listdata.append(low[1])
today = int(pd.to_datetime(low[0]).strftime('%Y%m%d'))
birthday = int(pd.to_datetime(birth[low[1]]).strftime('%Y%m%d'))
listdata.append(int((today - birthday)/ 10000))
if low[1] == "679":
listdata.append('')
listdata.append('')
listdata.append('')
listdata.append(low[10])
elif low[6] == "H":
listdata.append('')
listdata.append('')
listdata.append(low[10])
elif low[6] == "L":
listdata.append('')
listdata.append(low[10])
else:
listdata.append(low[10])
listdata.append('')
writer.writerow(listdata)
print('RS_Base lab-test CSV data extraction (END)')
| mit |
allisongpatterson/SoftwareSystems | hw03/thinkplot.py | 88 | 12565 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import math
import matplotlib
import matplotlib.pyplot as pyplot
import numpy as np
# customize some matplotlib attributes
#matplotlib.rc('figure', figsize=(4, 3))
#matplotlib.rc('font', size=14.0)
#matplotlib.rc('axes', labelsize=22.0, titlesize=22.0)
#matplotlib.rc('legend', fontsize=20.0)
#matplotlib.rc('xtick.major', size=6.0)
#matplotlib.rc('xtick.minor', size=3.0)
#matplotlib.rc('ytick.major', size=6.0)
#matplotlib.rc('ytick.minor', size=3.0)
class Brewer(object):
"""Encapsulates a nice sequence of colors.
Shades of blue that look good in color and can be distinguished
in grayscale (up to a point).
Borrowed from http://colorbrewer2.org/
"""
color_iter = None
colors = ['#081D58',
'#253494',
'#225EA8',
'#1D91C0',
'#41B6C4',
'#7FCDBB',
'#C7E9B4',
'#EDF8B1',
'#FFFFD9']
# lists that indicate which colors to use depending on how many are used
which_colors = [[],
[1],
[1, 3],
[0, 2, 4],
[0, 2, 4, 6],
[0, 2, 3, 5, 6],
[0, 2, 3, 4, 5, 6],
[0, 1, 2, 3, 4, 5, 6],
]
@classmethod
def Colors(cls):
"""Returns the list of colors.
"""
return cls.colors
@classmethod
def ColorGenerator(cls, n):
"""Returns an iterator of color strings.
n: how many colors will be used
"""
for i in cls.which_colors[n]:
yield cls.colors[i]
raise StopIteration('Ran out of colors in Brewer.ColorGenerator')
@classmethod
def InitializeIter(cls, num):
"""Initializes the color iterator with the given number of colors."""
cls.color_iter = cls.ColorGenerator(num)
@classmethod
def ClearIter(cls):
"""Sets the color iterator to None."""
cls.color_iter = None
@classmethod
def GetIter(cls):
"""Gets the color iterator."""
return cls.color_iter
def PrePlot(num=None, rows=1, cols=1):
"""Takes hints about what's coming.
num: number of lines that will be plotted
"""
if num:
Brewer.InitializeIter(num)
# TODO: get sharey and sharex working. probably means switching
# to subplots instead of subplot.
# also, get rid of the gray background.
if rows > 1 or cols > 1:
pyplot.subplots(rows, cols, sharey=True)
global SUBPLOT_ROWS, SUBPLOT_COLS
SUBPLOT_ROWS = rows
SUBPLOT_COLS = cols
def SubPlot(rows, cols, plot_number):
"""Configures the number of subplots and changes the current plot.
rows: int
cols: int
plot_number: int
"""
pyplot.subplot(rows, cols, plot_number)
class InfiniteList(list):
"""A list that returns the same value for all indices."""
def __init__(self, val):
"""Initializes the list.
val: value to be stored
"""
list.__init__(self)
self.val = val
def __getitem__(self, index):
"""Gets the item with the given index.
index: int
returns: the stored value
"""
return self.val
def Underride(d, **options):
"""Add key-value pairs to d only if key is not in d.
If d is None, create a new dictionary.
d: dictionary
options: keyword args to add to d
"""
if d is None:
d = {}
for key, val in options.iteritems():
d.setdefault(key, val)
return d
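# Illustrative sketch (not part of the original module): Underride only fills
# in keys the caller did not supply, which is how the wrappers below layer
# their defaults under user-provided options. The helper name is hypothetical.
def _underride_example():
    user_options = dict(color='red')
    merged = Underride(user_options, color='blue', linewidth=3)
    # color stays 'red' (caller wins); linewidth is filled in with 3
    return merged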
def Clf():
"""Clears the figure and any hints that have been set."""
Brewer.ClearIter()
pyplot.clf()
def Figure(**options):
"""Sets options for the current figure."""
Underride(options, figsize=(6, 8))
pyplot.figure(**options)
def Plot(xs, ys, style='', **options):
"""Plots a line.
Args:
xs: sequence of x values
ys: sequence of y values
style: style string passed along to pyplot.plot
options: keyword args passed to pyplot.plot
"""
color_iter = Brewer.GetIter()
if color_iter:
try:
options = Underride(options, color=color_iter.next())
except StopIteration:
print 'Warning: Brewer ran out of colors.'
Brewer.ClearIter()
options = Underride(options, linewidth=3, alpha=0.8)
pyplot.plot(xs, ys, style, **options)
def Scatter(xs, ys, **options):
"""Makes a scatter plot.
xs: x values
ys: y values
options: options passed to pyplot.scatter
"""
options = Underride(options, color='blue', alpha=0.2,
s=30, edgecolors='none')
pyplot.scatter(xs, ys, **options)
def Pmf(pmf, **options):
"""Plots a Pmf or Hist as a line.
Args:
pmf: Hist or Pmf object
options: keyword args passed to pyplot.plot
"""
xs, ps = pmf.Render()
if pmf.name:
options = Underride(options, label=pmf.name)
Plot(xs, ps, **options)
def Pmfs(pmfs, **options):
"""Plots a sequence of PMFs.
Options are passed along for all PMFs. If you want different
options for each pmf, make multiple calls to Pmf.
Args:
pmfs: sequence of PMF objects
options: keyword args passed to pyplot.plot
"""
for pmf in pmfs:
Pmf(pmf, **options)
def Hist(hist, **options):
"""Plots a Pmf or Hist with a bar plot.
The default width of the bars is based on the minimum difference
between values in the Hist. If that's too small, you can override
it by providing a width keyword argument, in the same units
as the values.
Args:
hist: Hist or Pmf object
options: keyword args passed to pyplot.bar
"""
# find the minimum distance between adjacent values
xs, fs = hist.Render()
width = min(Diff(xs))
if hist.name:
options = Underride(options, label=hist.name)
options = Underride(options,
align='center',
linewidth=0,
width=width)
pyplot.bar(xs, fs, **options)
def Hists(hists, **options):
"""Plots two histograms as interleaved bar plots.
Options are passed along for all PMFs. If you want different
options for each pmf, make multiple calls to Pmf.
Args:
hists: list of two Hist or Pmf objects
options: keyword args passed to pyplot.plot
"""
for hist in hists:
Hist(hist, **options)
def Diff(t):
"""Compute the differences between adjacent elements in a sequence.
Args:
t: sequence of number
Returns:
sequence of differences (length one less than t)
"""
diffs = [t[i+1] - t[i] for i in range(len(t)-1)]
return diffs
def Cdf(cdf, complement=False, transform=None, **options):
"""Plots a CDF as a line.
Args:
cdf: Cdf object
complement: boolean, whether to plot the complementary CDF
transform: string, one of 'exponential', 'pareto', 'weibull', 'gumbel'
options: keyword args passed to pyplot.plot
Returns:
dictionary with the scale options that should be passed to
Config, Show or Save.
"""
xs, ps = cdf.Render()
scale = dict(xscale='linear', yscale='linear')
for s in ['xscale', 'yscale']:
if s in options:
scale[s] = options.pop(s)
if transform == 'exponential':
complement = True
scale['yscale'] = 'log'
if transform == 'pareto':
complement = True
scale['yscale'] = 'log'
scale['xscale'] = 'log'
if complement:
ps = [1.0-p for p in ps]
if transform == 'weibull':
xs.pop()
ps.pop()
ps = [-math.log(1.0-p) for p in ps]
scale['xscale'] = 'log'
scale['yscale'] = 'log'
if transform == 'gumbel':
xs.pop(0)
ps.pop(0)
ps = [-math.log(p) for p in ps]
scale['yscale'] = 'log'
if cdf.name:
options = Underride(options, label=cdf.name)
Plot(xs, ps, **options)
return scale
def Cdfs(cdfs, complement=False, transform=None, **options):
"""Plots a sequence of CDFs.
cdfs: sequence of CDF objects
complement: boolean, whether to plot the complementary CDF
transform: string, one of 'exponential', 'pareto', 'weibull', 'gumbel'
options: keyword args passed to pyplot.plot
"""
for cdf in cdfs:
Cdf(cdf, complement, transform, **options)
def Contour(obj, pcolor=False, contour=True, imshow=False, **options):
"""Makes a contour plot.
d: map from (x, y) to z, or object that provides GetDict
pcolor: boolean, whether to make a pseudocolor plot
contour: boolean, whether to make a contour plot
imshow: boolean, whether to use pyplot.imshow
options: keyword args passed to pyplot.pcolor and/or pyplot.contour
"""
try:
d = obj.GetDict()
except AttributeError:
d = obj
Underride(options, linewidth=3, cmap=matplotlib.cm.Blues)
xs, ys = zip(*d.iterkeys())
xs = sorted(set(xs))
ys = sorted(set(ys))
X, Y = np.meshgrid(xs, ys)
func = lambda x, y: d.get((x, y), 0)
func = np.vectorize(func)
Z = func(X, Y)
x_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
axes = pyplot.gca()
axes.xaxis.set_major_formatter(x_formatter)
if pcolor:
pyplot.pcolormesh(X, Y, Z, **options)
if contour:
cs = pyplot.contour(X, Y, Z, **options)
pyplot.clabel(cs, inline=1, fontsize=10)
if imshow:
extent = xs[0], xs[-1], ys[0], ys[-1]
pyplot.imshow(Z, extent=extent, **options)
def Pcolor(xs, ys, zs, pcolor=True, contour=False, **options):
"""Makes a pseudocolor plot.
xs:
ys:
zs:
pcolor: boolean, whether to make a pseudocolor plot
contour: boolean, whether to make a contour plot
options: keyword args passed to pyplot.pcolor and/or pyplot.contour
"""
Underride(options, linewidth=3, cmap=matplotlib.cm.Blues)
X, Y = np.meshgrid(xs, ys)
Z = zs
x_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
axes = pyplot.gca()
axes.xaxis.set_major_formatter(x_formatter)
if pcolor:
pyplot.pcolormesh(X, Y, Z, **options)
if contour:
cs = pyplot.contour(X, Y, Z, **options)
pyplot.clabel(cs, inline=1, fontsize=10)
def Config(**options):
"""Configures the plot.
Pulls options out of the option dictionary and passes them to
the corresponding pyplot functions.
"""
names = ['title', 'xlabel', 'ylabel', 'xscale', 'yscale',
'xticks', 'yticks', 'axis']
for name in names:
if name in options:
getattr(pyplot, name)(options[name])
loc = options.get('loc', 0)
legend = options.get('legend', True)
if legend:
pyplot.legend(loc=loc)
def Show(**options):
"""Shows the plot.
For options, see Config.
options: keyword args used to invoke various pyplot functions
"""
# TODO: figure out how to show more than one plot
Config(**options)
pyplot.show()
def Save(root=None, formats=None, **options):
"""Saves the plot in the given formats.
For options, see Config.
Args:
root: string filename root
formats: list of string formats
options: keyword args used to invoke various pyplot functions
"""
Config(**options)
if formats is None:
formats = ['pdf', 'eps']
if root:
for fmt in formats:
SaveFormat(root, fmt)
Clf()
def SaveFormat(root, fmt='eps'):
"""Writes the current figure to a file in the given format.
Args:
root: string filename root
fmt: string format
"""
filename = '%s.%s' % (root, fmt)
print 'Writing', filename
pyplot.savefig(filename, format=fmt, dpi=300)
# provide aliases for calling functons with lower-case names
preplot = PrePlot
subplot = SubPlot
clf = Clf
figure = Figure
plot = Plot
scatter = Scatter
pmf = Pmf
pmfs = Pmfs
hist = Hist
hists = Hists
diff = Diff
cdf = Cdf
cdfs = Cdfs
contour = Contour
pcolor = Pcolor
config = Config
show = Show
save = Save
def main():
color_iter = Brewer.ColorGenerator(7)
for color in color_iter:
print color
if __name__ == '__main__':
main()
| gpl-3.0 |
carrillo/scikit-learn | examples/svm/plot_svm_regression.py | 249 | 1451 | """
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
plt.scatter(X, y, c='k', label='data')
plt.hold('on')
plt.plot(X, y_rbf, c='g', label='RBF model')
plt.plot(X, y_lin, c='r', label='Linear model')
plt.plot(X, y_poly, c='b', label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
| bsd-3-clause |
fnielsen/brede | brede/data/neurosynth.py | 1 | 7374 | """brede.data.neurosynth - Interface to Neurosynth data.
Usage:
brede.data.neurosynth <command>
Options:
-h --help Help
Examples:
$ python -m brede.data.neurosynth featurenames
If command=redownload, the database file is redownloaded from the Git
repository. This command will also unpack the new data and overwrite the old.
command=featurenames will return a comma-separated list of feature names, i.e.
the first row of the features.txt file.
Otherwise outputs the Neurosynth database as comma-separated values
(This is a pretty long listing).
"""
from __future__ import absolute_import, print_function
import logging
import tarfile
from os import chdir, getcwd, makedirs
from os.path import exists, expanduser, join
from urllib import urlretrieve
from nltk.tokenize.punkt import PunktSentenceTokenizer
import pandas as pd
from .core import Data
from .pubmed import Pubmed
from ..config import config
NEUROSYNTH_DATABASE_URL = "http://old.neurosynth.org/data/current_data.tar.gz"
NEUROSYNTH_DATABASE_URL = ("https://github.com/neurosynth/neurosynth-data/"
"blob/master/current_data.tar.gz?raw=true")
class NeurosynthDatabase(Data):
"""Interface to dump of Neurosynth.
Data from the Neurosynth website will be downloaded to a local directory.
Data is read from the local directory. Coordinates and 'features' (words)
are available from the database.
Example
-------
>>> nd = NeurosynthDatabase()
>>> database = nd.database()
>>> 'MNI' in database.space.values
True
"""
def __init__(self):
"""Setup directories and filenames."""
        self.logger = logging.getLogger(__name__ + '.NeurosynthDatabase')
self.logger.addHandler(logging.NullHandler())
self.data_dir = expanduser(config.get('data', 'data_dir'))
self.logger.info('Data directory: {}'.format(self.data_dir))
self.neurosynth_dir = join(self.data_dir, 'neurosynth')
self.neurosynth_database_filename = join(self.neurosynth_dir,
'database.txt')
self.neurosynth_features_filename = join(self.neurosynth_dir,
'features.txt')
self.neurosynth_download_filename = join(self.neurosynth_dir,
'current_data.tar.gz')
self.neurosynth_database_url = NEUROSYNTH_DATABASE_URL
@property
def name(self):
"""Return short name for database."""
return "Neurosynth"
@property
def description(self):
"""Return a descriptive string about the data."""
return ("Neurosynth is a database setup by Tal Yarkoni and "
"contains stereotaxic coordinates from functional "
"neuroimaging studies.")
def __str__(self):
"""Return descriptive string."""
string = "<NeurosynthDatabase({}x{})>"
df = self.database()
return string.format(*df.shape)
def make_dir(self):
"""Make Neurosynth data directory."""
if not exists(self.neurosynth_dir):
makedirs(self.neurosynth_dir)
def download(self):
"""Download Neurosynth database file."""
self.make_dir()
urlretrieve(self.neurosynth_database_url,
self.neurosynth_download_filename)
def unpack(self, reunpack=False):
"""Extract the downloaded compressed Neurosynth dump file."""
if reunpack or ((not exists(self.neurosynth_database_filename) and
not exists(self.neurosynth_features_filename))):
if not exists(self.neurosynth_download_filename):
self.download()
cwd = getcwd()
chdir(self.neurosynth_dir)
try:
with tarfile.open(self.neurosynth_download_filename,
'r:gz') as fid:
fid.extractall()
finally:
chdir(cwd)
def database(self):
"""Return database as dataframe.
Returns
-------
database : pandas.DataFrame
Dataframe with data from database.txt.
"""
self.unpack()
self.logger.info('Reading {}'.format(
self.neurosynth_database_filename))
database = pd.read_csv(self.neurosynth_database_filename,
sep='\t', low_memory=False)
return database
def features(self):
"""Return Neurosynth features as dataframe.
Returns
-------
features : pandas.DataFrame
Dataframe with features
Examples
--------
>>> nd = NeurosynthDatabase()
>>> features = nd.features()
>>> 23400116 in features.index
True
"""
self.unpack()
features = pd.read_csv(self.neurosynth_features_filename,
sep='\t', low_memory=False,
index_col=0)
return features
def feature_names(self):
"""Return list of feature names.
Returns
-------
feature_names : list of strings
Words and phrases from first line of features.txt
Examples
--------
>>> nd = NeurosynthDatabase()
>>> 'attention' in nd.feature_names()
True
"""
self.unpack()
features = pd.read_csv(self.neurosynth_features_filename,
sep='\t', low_memory=False, nrows=2,
index_col=0)
return features.columns.tolist()
def medlines(self):
"""Return list of Medline structures for papers in Neurosynth.
Returns
-------
medlines : list of Bio.Medline.Record
List of Medline strutures
Examples
--------
>>> nd = NeurosynthDatabase()
>>> medlines = nd.medlines()
>>> authors = [m['FAU'] for m in medlines if m['PMID'] == '15238438']
>>> 'Nielsen, Finn A' in authors[0]
True
"""
nd_database = self.database()
pubmed = Pubmed()
medlines = pubmed.get_medlines(set(nd_database.id))
return medlines
def sentences(self):
"""Yield sentences from abstracts.
Yields
------
sentences : str
Yields sentences from abstract.
"""
tokenizer = PunktSentenceTokenizer()
for medline in self.medlines():
if 'AB' not in medline:
continue
abstract = medline['AB']
sentences = tokenizer.tokenize(abstract)
for sentence in sentences:
yield sentence
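# Illustrative sketch (not part of the original module): typical use of the
# class above. Download and unpacking happen lazily on first access, and the
# dump is large. The helper and variable names are hypothetical.
def _neurosynth_example():
    nd = NeurosynthDatabase()
    coordinates = nd.database()   # coordinate table parsed from database.txt
    words = nd.feature_names()    # list of term/phrase features
    return coordinates.shape, len(words)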
def main(args):
"""Handle command-line interface."""
command = args['<command>']
if command == 'redownload':
nd = NeurosynthDatabase()
nd.download()
nd.unpack(reunpack=True)
elif command == 'featurenames':
nd = NeurosynthDatabase()
print(",".join(nd.feature_names()))
elif command == 'sentences':
nd = NeurosynthDatabase()
for sentence in nd.sentences():
print(sentence)
else:
nd = NeurosynthDatabase()
print(nd.database().to_csv())
if __name__ == '__main__':
from docopt import docopt
main(docopt(__doc__))
| gpl-3.0 |
mattgiguere/scikit-learn | examples/linear_model/plot_lasso_lars.py | 363 | 1080 | #!/usr/bin/env python
"""
=====================
Lasso path using LARS
=====================
Computes Lasso Path along the regularization parameter using the LARS
algorithm on the diabetes dataset. Each color represents a different
feature of the coefficient vector, and this is displayed as a function
of the regularization parameter.
"""
print(__doc__)
# Author: Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
print("Computing regularization path using the LARS ...")
alphas, _, coefs = linear_model.lars_path(X, y, method='lasso', verbose=True)
xx = np.sum(np.abs(coefs.T), axis=1)
xx /= xx[-1]
plt.plot(xx, coefs.T)
ymin, ymax = plt.ylim()
plt.vlines(xx, ymin, ymax, linestyle='dashed')
plt.xlabel('|coef| / max|coef|')
plt.ylabel('Coefficients')
plt.title('LASSO Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
detrout/debian-statsmodels | docs/sphinxext/ipython_directive.py | 30 | 27623 | # -*- coding: utf-8 -*-
"""Sphinx directive to support embedded IPython code.
This directive allows pasting of entire interactive IPython sessions, prompts
and all, and their code will actually get re-executed at doc build time, with
all prompts renumbered sequentially. It also allows you to input code as a pure
python input by giving the argument python to the directive. The output looks
like an interactive ipython section.
To enable this directive, simply list it in your Sphinx ``conf.py`` file
(making sure the directory where you placed it is visible to sphinx, as is
needed for all Sphinx directives).
By default this directive assumes that your prompts are unchanged IPython ones,
but this can be customized. The configurable options that can be placed in
conf.py are
ipython_savefig_dir:
The directory in which to save the figures. This is relative to the
Sphinx source directory. The default is `html_static_path`.
ipython_rgxin:
The compiled regular expression to denote the start of IPython input
lines. The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_rgxout:
The compiled regular expression to denote the start of IPython output
lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_promptin:
The string to represent the IPython input prompt in the generated ReST.
The default is 'In [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_promptout:
The string to represent the IPython prompt in the generated ReST. The
default is 'Out [%d]:'. This expects that the line numbers are used
in the prompt.
ToDo
----
- Turn the ad-hoc test() function into a real test suite.
- Break up ipython-specific functionality from matplotlib stuff into better
separated code.
Authors
-------
- John D Hunter: original author.
- Fernando Perez: refactoring, documentation, cleanups, port to 0.11.
- Václav Šmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
- Skipper Seabold, refactoring, cleanups, pure python addition
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import cStringIO
import os
import re
import sys
import tempfile
import ast
import time
# To keep compatibility with various python versions
try:
from hashlib import md5
except ImportError:
from md5 import md5
# Third-party
import matplotlib
import sphinx
from docutils.parsers.rst import directives
from docutils import nodes
from sphinx.util.compat import Directive
matplotlib.use('Agg')
# Our own
from IPython import Config, InteractiveShell
from IPython.core.profiledir import ProfileDir
from IPython.utils import io
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# for tokenizing blocks
COMMENT, INPUT, OUTPUT = range(3)
#-----------------------------------------------------------------------------
# Functions and class declarations
#-----------------------------------------------------------------------------
def block_parser(part, rgxin, rgxout, fmtin, fmtout):
"""
part is a string of ipython text, comprised of at most one
    input, one output, comments, and blank lines. The block parser
parses the text into a list of::
blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
data is, depending on the type of token::
COMMENT : the comment string
INPUT: the (DECORATOR, INPUT_LINE, REST) where
DECORATOR: the input decorator (or None)
INPUT_LINE: the input as string (possibly multi-line)
REST : any stdout generated by the input line (not OUTPUT)
OUTPUT: the output string, possibly multi-line
"""
block = []
lines = part.split('\n')
N = len(lines)
i = 0
decorator = None
while 1:
if i==N:
# nothing left to parse -- the last line
break
line = lines[i]
i += 1
line_stripped = line.strip()
if line_stripped.startswith('#'):
block.append((COMMENT, line))
continue
if line_stripped.startswith('@'):
# we're assuming at most one decorator -- may need to
# rethink
decorator = line_stripped
continue
# does this look like an input line?
matchin = rgxin.match(line)
if matchin:
lineno, inputline = int(matchin.group(1)), matchin.group(2)
# the ....: continuation string
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
# input lines can continue on for more than one line, if
# we have a '\' line continuation char or a function call
# echo line 'print'. The input line can only be
# terminated by the end of the block or an output line, so
# we parse out the rest of the input line if it is
# multiline as well as any echo text
rest = []
while i<N:
# look ahead; if the next line is blank, or a comment, or
# an output line, we're done
nextline = lines[i]
matchout = rgxout.match(nextline)
#print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
if matchout or nextline.startswith('#'):
break
elif nextline.startswith(continuation):
#handle try/except blocks. only catch outer except
if re.match(continuation + '\sexcept:', nextline):
inputline += '\n' + nextline[Nc+1:]
else:
inputline += '\n' + nextline[Nc:]
else:
rest.append(nextline)
i+= 1
block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
continue
# if it looks like an output line grab all the text to the end
# of the block
matchout = rgxout.match(line)
if matchout:
lineno, output = int(matchout.group(1)), matchout.group(2)
if i<N-1:
output = '\n'.join([output] + lines[i:])
block.append((OUTPUT, output))
break
return block
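# Illustrative sketch (not part of the original module): block_parser turns a
# small pasted session into (token, data) tuples. The regexes mirror the
# defaults documented at the top of this file; the helper name is
# hypothetical.
def _block_parser_example():
    rgxin = re.compile('In \[(\d+)\]:\s?(.*)\s*')
    rgxout = re.compile('Out\[(\d+)\]:\s?(.*)\s*')
    part = "# a comment\nIn [1]: 1 + 1\nOut[1]: 2"
    return block_parser(part, rgxin, rgxout, 'In [%d]:', 'Out[%d]:')
    # -> [(COMMENT, '# a comment'),
    #     (INPUT, (None, '1 + 1', '')),
    #     (OUTPUT, '2')]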
class EmbeddedSphinxShell(object):
"""An embedded IPython instance to run inside Sphinx"""
def __init__(self):
self.cout = cStringIO.StringIO()
# Create config object for IPython
config = Config()
config.Global.display_banner = False
config.Global.exec_lines = ['import numpy as np',
'from pylab import *'
]
config.InteractiveShell.autocall = False
config.InteractiveShell.autoindent = False
config.InteractiveShell.colors = 'NoColor'
# create a profile so instance history isn't saved
tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
profname = 'auto_profile_sphinx_build'
pdir = os.path.join(tmp_profile_dir,profname)
profile = ProfileDir.create_profile_dir(pdir)
# Create and initialize ipython, but don't start its mainloop
IP = InteractiveShell.instance(config=config, profile_dir=profile)
# io.stdout redirect must be done *after* instantiating InteractiveShell
io.stdout = self.cout
io.stderr = self.cout
# For debugging, so we can see normal output, use this:
#from IPython.utils.io import Tee
#io.stdout = Tee(self.cout, channel='stdout') # dbg
#io.stderr = Tee(self.cout, channel='stderr') # dbg
# Store a few parts of IPython we'll need.
self.IP = IP
self.user_ns = self.IP.user_ns
self.user_global_ns = self.IP.user_global_ns
self.input = ''
self.output = ''
self.is_verbatim = False
self.is_doctest = False
self.is_suppress = False
# on the first call to the savefig decorator, we'll import
# pyplot as plt so we can make a call to the plt.gcf().savefig
self._pyplot_imported = False
def clear_cout(self):
self.cout.seek(0)
self.cout.truncate(0)
def process_input_line(self, line, store_history=True):
"""process the input, capturing stdout"""
#print "input='%s'"%self.input
stdout = sys.stdout
splitter = self.IP.input_splitter
try:
sys.stdout = self.cout
splitter.push(line)
more = splitter.push_accepts_more()
if not more:
source_raw = splitter.source_raw_reset()[1]
self.IP.run_cell(source_raw, store_history=store_history)
finally:
sys.stdout = stdout
def process_image(self, decorator):
"""
# build out an image directive like
# .. image:: somefile.png
# :width 4in
#
# from an input like
# savefig somefile.png width=4in
"""
savefig_dir = self.savefig_dir
source_dir = self.source_dir
saveargs = decorator.split(' ')
filename = saveargs[1]
# insert relative path to image file in source
outfile = os.path.relpath(os.path.join(savefig_dir,filename),
source_dir)
imagerows = ['.. image:: %s'%outfile]
for kwarg in saveargs[2:]:
arg, val = kwarg.split('=')
arg = arg.strip()
val = val.strip()
imagerows.append(' :%s: %s'%(arg, val))
image_file = os.path.basename(outfile) # only return file name
image_directive = '\n'.join(imagerows)
return image_file, image_directive
# Callbacks for each type of token
def process_input(self, data, input_prompt, lineno):
"""Process data block for INPUT token."""
decorator, input, rest = data
image_file = None
image_directive = None
#print 'INPUT:', data # dbg
is_verbatim = decorator=='@verbatim' or self.is_verbatim
is_doctest = decorator=='@doctest' or self.is_doctest
is_suppress = decorator=='@suppress' or self.is_suppress
is_savefig = decorator is not None and \
decorator.startswith('@savefig')
input_lines = input.split('\n')
if len(input_lines) > 1:
if input_lines[-1] != "":
input_lines.append('') # make sure there's a blank line
# so splitter buffer gets reset
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
if is_savefig:
image_file, image_directive = self.process_image(decorator)
ret = []
is_semicolon = False
for i, line in enumerate(input_lines):
if line.endswith(';'):
is_semicolon = True
if i==0:
# process the first input line
if is_verbatim:
self.process_input_line('')
self.IP.execution_count += 1 # increment it anyway
else:
# only submit the line in non-verbatim mode
self.process_input_line(line, store_history=True)
formatted_line = '%s %s'%(input_prompt, line)
else:
# process a continuation line
if not is_verbatim:
self.process_input_line(line, store_history=True)
formatted_line = '%s %s'%(continuation, line)
if not is_suppress:
ret.append(formatted_line)
if not is_suppress and len(rest.strip()) and is_verbatim:
# the "rest" is the standard output of the
# input, which needs to be added in
# verbatim mode
ret.append(rest)
self.cout.seek(0)
output = self.cout.read()
if not is_suppress and not is_semicolon:
ret.append(output)
elif is_semicolon: # get spacing right
ret.append('')
self.cout.truncate(0)
return (ret, input_lines, output, is_doctest, image_file,
image_directive)
#print 'OUTPUT', output # dbg
def process_output(self, data, output_prompt,
input_lines, output, is_doctest, image_file):
"""Process data block for OUTPUT token."""
if is_doctest:
submitted = data.strip()
found = output
if found is not None:
found = found.strip()
# XXX - fperez: in 0.11, 'output' never comes with the prompt
# in it, just the actual output text. So I think all this code
# can be nuked...
# the above comment does not appear to be accurate... (minrk)
ind = found.find(output_prompt)
if ind<0:
e='output prompt="%s" does not match out line=%s' % \
(output_prompt, found)
raise RuntimeError(e)
found = found[len(output_prompt):].strip()
if found!=submitted:
e = ('doctest failure for input_lines="%s" with '
'found_output="%s" and submitted output="%s"' %
(input_lines, found, submitted) )
raise RuntimeError(e)
#print 'doctest PASSED for input_lines="%s" with found_output="%s" and submitted output="%s"'%(input_lines, found, submitted)
def process_comment(self, data):
"""Process data fPblock for COMMENT token."""
if not self.is_suppress:
return [data]
def save_image(self, image_file):
"""
Saves the image file to disk.
"""
self.ensure_pyplot()
command = 'plt.gcf().savefig("%s")'%image_file
#print 'SAVEFIG', command # dbg
self.process_input_line('bookmark ipy_thisdir', store_history=False)
self.process_input_line('cd -b ipy_savedir', store_history=False)
self.process_input_line(command, store_history=False)
self.process_input_line('cd -b ipy_thisdir', store_history=False)
self.process_input_line('bookmark -d ipy_thisdir', store_history=False)
self.clear_cout()
def process_block(self, block):
"""
process block from the block_parser and return a list of processed lines
"""
ret = []
output = None
input_lines = None
lineno = self.IP.execution_count
input_prompt = self.promptin%lineno
output_prompt = self.promptout%lineno
image_file = None
image_directive = None
for token, data in block:
if token==COMMENT:
out_data = self.process_comment(data)
elif token==INPUT:
(out_data, input_lines, output, is_doctest, image_file,
image_directive) = \
self.process_input(data, input_prompt, lineno)
elif token==OUTPUT:
out_data = \
self.process_output(data, output_prompt,
input_lines, output, is_doctest,
image_file)
if out_data:
ret.extend(out_data)
# save the image files
if image_file is not None:
self.save_image(image_file)
return ret, image_directive
def ensure_pyplot(self):
if self._pyplot_imported:
return
self.process_input_line('import matplotlib.pyplot as plt',
store_history=False)
def process_pure_python(self, content):
"""
        content is a list of strings. it is unedited directive content
This runs it line by line in the InteractiveShell, prepends
prompts as needed capturing stderr and stdout, then returns
the content as a list as if it were ipython code
"""
output = []
savefig = False # keep up with this to clear figure
multiline = False # to handle line continuation
multiline_start = None
fmtin = self.promptin
block = '\n'.join(content)
# remove blank lines
block = re.sub('\n+', '\n', block)
content = block.split('\n')
# if any figures, make sure you can handle them and no other figures exist
if re.search('^\s*@savefig', block, flags=re.MULTILINE):
self.ensure_pyplot()
self.process_input_line('plt.clf()', store_history=False)
self.clear_cout()
# sub out the pseudo-decorators so we can parse
block = re.sub('@(?=[savefig|suppress|verbatim|doctest])', '#@', block)
# this is going to raise an error if there's problems
# in the python. if you want errors, make an ipython block
parsed_block = ast.parse(block)
in_lines = [i.lineno for i in parsed_block.body]
output = []
ct = 1
for lineno, line in enumerate(content):
line_stripped = line.strip('\n')
if lineno + 1 in in_lines: # this is an input line
modified = u"%s %s" % (fmtin % ct, line_stripped)
ct += 1
elif line.startswith('@'): # is it a decorator?
modified = line
else: # this is something else
continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2))
modified = u'%s %s' % (continuation, line)
output.append(modified)
output = re.sub('#@(?=[savefig|suppress|verbatim|doctest])', '@',
'\n'.join(output)).split('\n')
# put blank lines after input lines
for i in in_lines[1:][::-1]:
output.insert(i-1, u'')
# fix the spacing for decorators
# might be a cleaner regex for
# \n@savefig name.png\n\n -> \n\n@savefig name.png\n
decpat1 = '(?<=@[savefig|suppress|verbatim|doctest])(?P<options>.+)\n\n'
output = re.sub(decpat1, '\g<options>\n','\n'.join(output))
decpat2 = '\n(?=@[savefig|suppress|verbatim|doctest])'
output = re.sub(decpat2, '\n\n', output).split('\n')
return output
class IpythonDirective(Directive):
has_content = True
required_arguments = 0
optional_arguments = 4 # python, suppress, verbatim, doctest
    final_argument_whitespace = True
option_spec = { 'python': directives.unchanged,
'suppress' : directives.flag,
'verbatim' : directives.flag,
'doctest' : directives.flag,
}
shell = EmbeddedSphinxShell()
def get_config_options(self):
# contains sphinx configuration variables
config = self.state.document.settings.env.config
# get config variables to set figure output directory
confdir = self.state.document.settings.env.app.confdir
savefig_dir = config.ipython_savefig_dir
source_dir = os.path.dirname(self.state.document.current_source)
if savefig_dir is None:
savefig_dir = config.html_static_path
if isinstance(savefig_dir, list):
savefig_dir = savefig_dir[0] # safe to assume only one path?
savefig_dir = os.path.join(confdir, savefig_dir)
# get regex and prompt stuff
rgxin = config.ipython_rgxin
rgxout = config.ipython_rgxout
promptin = config.ipython_promptin
promptout = config.ipython_promptout
return savefig_dir, source_dir, rgxin, rgxout, promptin, promptout
def setup(self):
        # make a temp file in this directory; if one already exists
        # and is older than 5 minutes, delete it
        # (this needs a more robust solution)
cur_dir = os.path.normpath(
os.path.join(self.state.document.settings.env.srcdir,
'..'))
tmp_file = os.path.join(cur_dir, 'seen_docs.temp')
if os.path.exists(tmp_file):
file_t = os.path.getmtime(tmp_file)
now_t = time.time()
if (now_t - file_t)/60. >= 5:
docs = []
os.remove(tmp_file)
else:
docs = open(tmp_file, 'r').read().split('\n')
if not self.state.document.current_source in docs:
self.shell.IP.history_manager.reset()
self.shell.IP.execution_count = 1
else: # haven't processed any docs yet
docs = []
# get config values
(savefig_dir, source_dir, rgxin,
rgxout, promptin, promptout) = self.get_config_options()
# and attach to shell so we don't have to pass them around
self.shell.rgxin = rgxin
self.shell.rgxout = rgxout
self.shell.promptin = promptin
self.shell.promptout = promptout
self.shell.savefig_dir = savefig_dir
self.shell.source_dir = source_dir
# setup bookmark for saving figures directory
self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir,
store_history=False)
self.shell.clear_cout()
# write the filename to a tempfile because it's been "seen" now
if not self.state.document.current_source in docs:
fout = open(tmp_file, 'a')
fout.write(self.state.document.current_source+'\n')
fout.close()
return rgxin, rgxout, promptin, promptout
def teardown(self):
# delete last bookmark
self.shell.process_input_line('bookmark -d ipy_savedir',
store_history=False)
self.shell.clear_cout()
def run(self):
debug = False
#TODO, any reason block_parser can't be a method of embeddable shell
# then we wouldn't have to carry these around
rgxin, rgxout, promptin, promptout = self.setup()
options = self.options
self.shell.is_suppress = 'suppress' in options
self.shell.is_doctest = 'doctest' in options
self.shell.is_verbatim = 'verbatim' in options
# handle pure python code
if 'python' in self.arguments:
content = self.content
self.content = self.shell.process_pure_python(content)
parts = '\n'.join(self.content).split('\n\n')
lines = ['.. code-block:: ipython','']
figures = []
for part in parts:
block = block_parser(part, rgxin, rgxout, promptin, promptout)
if len(block):
rows, figure = self.shell.process_block(block)
for row in rows:
lines.extend([' %s'%line for line in row.split('\n')])
if figure is not None:
figures.append(figure)
#text = '\n'.join(lines)
#figs = '\n'.join(figures)
for figure in figures:
lines.append('')
lines.extend(figure.split('\n'))
lines.append('')
#print lines
if len(lines)>2:
if debug:
                print('\n'.join(lines))
else: #NOTE: this raises some errors, what's it for?
#print 'INSERTING %d lines'%len(lines)
self.state_machine.insert_input(
lines, self.state_machine.input_lines.source(0))
text = '\n'.join(lines)
txtnode = nodes.literal_block(text, text)
txtnode['language'] = 'ipython'
#imgnode = nodes.image(figs)
# cleanup
#self.teardown() # this gets called on _every_ exit from a block
return []#, imgnode]
# Enable as a proper Sphinx directive
def setup(app):
setup.app = app
app.add_directive('ipython', IpythonDirective)
app.add_config_value('ipython_savefig_dir', None, True)
app.add_config_value('ipython_rgxin',
re.compile('In \[(\d+)\]:\s?(.*)\s*'), True)
app.add_config_value('ipython_rgxout',
re.compile('Out\[(\d+)\]:\s?(.*)\s*'), True)
app.add_config_value('ipython_promptin', 'In [%d]:', True)
app.add_config_value('ipython_promptout', 'Out[%d]:', True)
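# Hedged configuration sketch (added): a project's conf.py would typically add
# this module to `extensions` and may override the values registered above,
# for example:
#
#   extensions = [..., 'ipython_directive']   # module name is an assumption
#   ipython_savefig_dir = '_static'
#   ipython_promptin = 'In [%d]:'
#   ipython_promptout = 'Out[%d]:'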
# Simple smoke test, needs to be converted to a proper automatic test.
def test():
examples = [
r"""
In [9]: pwd
Out[9]: '/home/jdhunter/py4science/book'
In [10]: cd bookdata/
/home/jdhunter/py4science/book/bookdata
In [2]: from pylab import *
In [2]: ion()
In [3]: im = imread('stinkbug.png')
@savefig mystinkbug.png width=4in
In [4]: imshow(im)
Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
""",
r"""
In [1]: x = 'hello world'
# string methods can be
# used to alter the string
@doctest
In [2]: x.upper()
Out[2]: 'HELLO WORLD'
@verbatim
In [3]: x.st<TAB>
x.startswith x.strip
""",
r"""
In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
.....: &d=9&e=22&f=2009&g=d&a=1&br=8&c=2006&ignore=.csv'
In [131]: print url.split('&')
['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
In [60]: import urllib
""",
r"""\
In [133]: import numpy.random
@suppress
In [134]: numpy.random.seed(2358)
@doctest
In [135]: numpy.random.rand(10,2)
Out[135]:
array([[ 0.64524308, 0.59943846],
[ 0.47102322, 0.8715456 ],
[ 0.29370834, 0.74776844],
[ 0.99539577, 0.1313423 ],
[ 0.16250302, 0.21103583],
[ 0.81626524, 0.1312433 ],
[ 0.67338089, 0.72302393],
[ 0.7566368 , 0.07033696],
[ 0.22591016, 0.77731835],
[ 0.0072729 , 0.34273127]])
""",
r"""
In [106]: print x
jdh
In [109]: for i in range(10):
.....: print i
.....:
.....:
0
1
2
3
4
5
6
7
8
9
""",
r"""
In [144]: from pylab import *
In [145]: ion()
# use a semicolon to suppress the output
@savefig test_hist.png width=4in
In [151]: hist(np.random.randn(10000), 100);
@savefig test_plot.png width=4in
In [151]: plot(np.random.randn(10000), 'o');
""",
r"""
# use a semicolon to suppress the output
In [151]: plt.clf()
@savefig plot_simple.png width=4in
In [151]: plot([1,2,3])
@savefig hist_simple.png width=4in
In [151]: hist(np.random.randn(10000), 100);
""",
r"""
# update the current fig
In [151]: ylabel('number')
In [152]: title('normal distribution')
@savefig hist_with_text.png
In [153]: grid(True)
""",
]
    # skip the first example, which depends on a local file:
examples = examples[1:]
#ipython_directive.DEBUG = True # dbg
#options = dict(suppress=True) # dbg
options = dict()
for example in examples:
content = example.split('\n')
ipython_directive('debug', arguments=None, options=options,
content=content, lineno=0,
content_offset=None, block_text=None,
state=None, state_machine=None,
)
# Run test suite as a script
if __name__=='__main__':
if not os.path.isdir('_static'):
os.mkdir('_static')
test()
    print('All OK? Check figures in _static/')
| bsd-3-clause |
muku42/bokeh | bokeh/tests/test_protocol.py | 42 | 3959 | from __future__ import absolute_import
import unittest
from unittest import skipIf
import numpy as np
try:
import pandas as pd
is_pandas = True
except ImportError as e:
is_pandas = False
class TestBokehJSONEncoder(unittest.TestCase):
def setUp(self):
from bokeh.protocol import BokehJSONEncoder
self.encoder = BokehJSONEncoder()
def test_fail(self):
self.assertRaises(TypeError, self.encoder.default, {'testing': 1})
@skipIf(not is_pandas, "pandas does not work in PyPy.")
def test_panda_series(self):
s = pd.Series([1, 3, 5, 6, 8])
self.assertEqual(self.encoder.default(s), [1, 3, 5, 6, 8])
def test_numpyarray(self):
a = np.arange(5)
self.assertEqual(self.encoder.default(a), [0, 1, 2, 3, 4])
def test_numpyint(self):
npint = np.asscalar(np.int64(1))
self.assertEqual(self.encoder.default(npint), 1)
self.assertIsInstance(self.encoder.default(npint), int)
def test_numpyfloat(self):
npfloat = np.float64(1.33)
self.assertEqual(self.encoder.default(npfloat), 1.33)
self.assertIsInstance(self.encoder.default(npfloat), float)
def test_numpybool_(self):
nptrue = np.bool_(True)
self.assertEqual(self.encoder.default(nptrue), True)
self.assertIsInstance(self.encoder.default(nptrue), bool)
@skipIf(not is_pandas, "pandas does not work in PyPy.")
def test_pd_timestamp(self):
ts = pd.tslib.Timestamp('April 28, 1948')
self.assertEqual(self.encoder.default(ts), -684115200000)
class TestSerializeJson(unittest.TestCase):
def setUp(self):
from bokeh.protocol import serialize_json, deserialize_json
self.serialize = serialize_json
self.deserialize = deserialize_json
def test_with_basic(self):
self.assertEqual(self.serialize({'test': [1, 2, 3]}), '{"test": [1, 2, 3]}')
def test_with_np_array(self):
a = np.arange(5)
self.assertEqual(self.serialize(a), '[0, 1, 2, 3, 4]')
@skipIf(not is_pandas, "pandas does not work in PyPy.")
def test_with_pd_series(self):
s = pd.Series([0, 1, 2, 3, 4])
self.assertEqual(self.serialize(s), '[0, 1, 2, 3, 4]')
def test_nans_and_infs(self):
arr = np.array([np.nan, np.inf, -np.inf, 0])
serialized = self.serialize(arr)
deserialized = self.deserialize(serialized)
assert deserialized[0] == 'NaN'
assert deserialized[1] == 'Infinity'
assert deserialized[2] == '-Infinity'
assert deserialized[3] == 0
@skipIf(not is_pandas, "pandas does not work in PyPy.")
def test_nans_and_infs_pandas(self):
arr = pd.Series(np.array([np.nan, np.inf, -np.inf, 0]))
serialized = self.serialize(arr)
deserialized = self.deserialize(serialized)
assert deserialized[0] == 'NaN'
assert deserialized[1] == 'Infinity'
assert deserialized[2] == '-Infinity'
assert deserialized[3] == 0
@skipIf(not is_pandas, "pandas does not work in PyPy.")
def test_datetime_types(self):
"""should convert to millis
"""
idx = pd.date_range('2001-1-1', '2001-1-5')
df = pd.DataFrame({'vals' :idx}, index=idx)
serialized = self.serialize({'vals' : df.vals,
'idx' : df.index})
deserialized = self.deserialize(serialized)
baseline = {u'vals': [978307200000,
978393600000,
978480000000,
978566400000,
978652800000],
u'idx': [978307200000,
978393600000,
978480000000,
978566400000,
978652800000]
}
assert deserialized == baseline
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/skimage/viewer/canvastools/linetool.py | 43 | 6911 | import numpy as np
from matplotlib import lines
from ...viewer.canvastools.base import CanvasToolBase, ToolHandles
__all__ = ['LineTool', 'ThickLineTool']
class LineTool(CanvasToolBase):
"""Widget for line selection in a plot.
Parameters
----------
manager : Viewer or PlotPlugin.
Skimage viewer or plot plugin object.
on_move : function
Function called whenever a control handle is moved.
        This function must accept the end points of the line as the only argument.
on_release : function
Function called whenever the control handle is released.
on_enter : function
Function called whenever the "enter" key is pressed.
maxdist : float
Maximum pixel distance allowed when selecting control handle.
line_props : dict
Properties for :class:`matplotlib.lines.Line2D`.
handle_props : dict
Marker properties for the handles (also see
:class:`matplotlib.lines.Line2D`).
Attributes
----------
end_points : 2D array
End points of line ((x1, y1), (x2, y2)).
"""
def __init__(self, manager, on_move=None, on_release=None, on_enter=None,
maxdist=10, line_props=None, handle_props=None,
**kwargs):
super(LineTool, self).__init__(manager, on_move=on_move,
on_enter=on_enter,
on_release=on_release, **kwargs)
props = dict(color='r', linewidth=1, alpha=0.4, solid_capstyle='butt')
props.update(line_props if line_props is not None else {})
self.linewidth = props['linewidth']
self.maxdist = maxdist
self._active_pt = None
x = (0, 0)
y = (0, 0)
self._end_pts = np.transpose([x, y])
self._line = lines.Line2D(x, y, visible=False, animated=True, **props)
self.ax.add_line(self._line)
self._handles = ToolHandles(self.ax, x, y,
marker_props=handle_props)
self._handles.set_visible(False)
self.artists = [self._line, self._handles.artist]
if on_enter is None:
def on_enter(pts):
x, y = np.transpose(pts)
print("length = %0.2f" %
np.sqrt(np.diff(x)**2 + np.diff(y)**2))
self.callback_on_enter = on_enter
self.manager.add_tool(self)
@property
def end_points(self):
return self._end_pts.astype(int)
@end_points.setter
def end_points(self, pts):
self._end_pts = np.asarray(pts)
self._line.set_data(np.transpose(pts))
self._handles.set_data(np.transpose(pts))
self._line.set_linewidth(self.linewidth)
self.set_visible(True)
self.redraw()
def hit_test(self, event):
if event.button != 1 or not self.ax.in_axes(event):
return False
idx, px_dist = self._handles.closest(event.x, event.y)
if px_dist < self.maxdist:
self._active_pt = idx
return True
else:
self._active_pt = None
return False
def on_mouse_press(self, event):
self.set_visible(True)
if self._active_pt is None:
self._active_pt = 0
x, y = event.xdata, event.ydata
self._end_pts = np.array([[x, y], [x, y]])
def on_mouse_release(self, event):
if event.button != 1:
return
self._active_pt = None
self.callback_on_release(self.geometry)
self.redraw()
def on_move(self, event):
if event.button != 1 or self._active_pt is None:
return
if not self.ax.in_axes(event):
return
self.update(event.xdata, event.ydata)
self.callback_on_move(self.geometry)
def update(self, x=None, y=None):
if x is not None:
self._end_pts[self._active_pt, :] = x, y
self.end_points = self._end_pts
@property
def geometry(self):
return self.end_points
class ThickLineTool(LineTool):
"""Widget for line selection in a plot.
The thickness of the line can be varied using the mouse scroll wheel, or
with the '+' and '-' keys.
Parameters
----------
manager : Viewer or PlotPlugin.
Skimage viewer or plot plugin object.
on_move : function
Function called whenever a control handle is moved.
        This function must accept the end points of the line as the only argument.
on_release : function
Function called whenever the control handle is released.
on_enter : function
Function called whenever the "enter" key is pressed.
on_change : function
Function called whenever the line thickness is changed.
maxdist : float
Maximum pixel distance allowed when selecting control handle.
line_props : dict
Properties for :class:`matplotlib.lines.Line2D`.
handle_props : dict
Marker properties for the handles (also see
:class:`matplotlib.lines.Line2D`).
Attributes
----------
end_points : 2D array
End points of line ((x1, y1), (x2, y2)).
"""
def __init__(self, manager, on_move=None, on_enter=None, on_release=None,
on_change=None, maxdist=10, line_props=None, handle_props=None):
super(ThickLineTool, self).__init__(manager,
on_move=on_move,
on_enter=on_enter,
on_release=on_release,
maxdist=maxdist,
line_props=line_props,
handle_props=handle_props)
if on_change is None:
def on_change(*args):
pass
self.callback_on_change = on_change
def on_scroll(self, event):
if not event.inaxes:
return
if event.button == 'up':
self._thicken_scan_line()
elif event.button == 'down':
self._shrink_scan_line()
def on_key_press(self, event):
if event.key == '+':
self._thicken_scan_line()
elif event.key == '-':
self._shrink_scan_line()
def _thicken_scan_line(self):
self.linewidth += 1
self.update()
self.callback_on_change(self.geometry)
def _shrink_scan_line(self):
if self.linewidth > 1:
self.linewidth -= 1
self.update()
self.callback_on_change(self.geometry)
if __name__ == '__main__': # pragma: no cover
from ... import data
from ...viewer import ImageViewer
image = data.camera()
viewer = ImageViewer(image)
h, w = image.shape
line_tool = ThickLineTool(viewer)
line_tool.end_points = ([w/3, h/2], [2*w/3, h/2])
viewer.show()
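    # Hedged sketch (added): the plain LineTool is driven the same way, e.g.
    #   line_tool = LineTool(viewer)
    #   line_tool.end_points = ([w/3, h/2], [2*w/3, h/2])
    # before calling viewer.show().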
| gpl-3.0 |
macks22/scikit-learn | sklearn/decomposition/tests/test_fastica.py | 272 | 7798 | """
Test the fastica algorithm.
"""
import itertools
import warnings
import numpy as np
from scipy import stats
from nose.tools import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
Parameters
    ----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
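# Illustrative note (added): for a (2, n_samples) array s, center_and_norm(s)
# with the default axis=-1 leaves each of the two rows zero-mean and
# unit-variance, modifying s in place through the rolled view.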
def test_gs():
# Test gram schmidt orthonormalization
# generate a random orthogonal matrix
rng = np.random.RandomState(0)
W, _, _ = np.linalg.svd(rng.randn(10, 10))
w = rng.randn(10)
_gs_decorrelation(w, W, 10)
assert_less((w ** 2).sum(), 1.e-10)
w = rng.randn(10)
u = _gs_decorrelation(w, W, 5)
tmp = np.dot(u, W.T)
assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 1000
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(2, 1000)
center_and_norm(m)
# function as fun arg
def g_test(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
algos = ['parallel', 'deflation']
nls = ['logcosh', 'exp', 'cube', g_test]
whitening = [True, False]
for algo, nl, whiten in itertools.product(algos, nls, whitening):
if whiten:
k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
assert_raises(ValueError, fastica, m.T, fun=np.tanh,
algorithm=algo)
else:
X = PCA(n_components=2, whiten=True).fit_transform(m.T)
k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
assert_raises(ValueError, fastica, X, fun=np.tanh,
algorithm=algo)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
if whiten:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
else:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
# Test FastICA class
_, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
ica = FastICA(fun=nl, algorithm=algo, random_state=0)
sources = ica.fit_transform(m.T)
assert_equal(ica.components_.shape, (2, 2))
assert_equal(sources.shape, (1000, 2))
assert_array_almost_equal(sources_fun, sources)
assert_array_almost_equal(sources, ica.transform(m.T))
assert_equal(ica.mixing_.shape, (2, 2))
for fn in [np.tanh, "exp(-.5(x^2))"]:
ica = FastICA(fun=fn, algorithm=algo, random_state=0)
assert_raises(ValueError, ica.fit, m.T)
assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
m = [[0, 1], [1, 0]]
# test for issue #697
ica = FastICA(n_components=1, whiten=False, random_state=0)
assert_warns(UserWarning, ica.fit, m)
assert_true(hasattr(ica, 'mixing_'))
def test_non_square_fastica(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
n_samples = 1000
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(6, n_samples)
center_and_norm(m)
k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
# Test FastICA.fit_transform
rng = np.random.RandomState(0)
X = rng.random_sample((100, 10))
for whiten, n_components in [[True, 5], [False, None]]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
Xt = ica.fit_transform(X)
assert_equal(ica.components_.shape, (n_components_, 10))
assert_equal(Xt.shape, (100, n_components_))
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
ica.fit(X)
assert_equal(ica.components_.shape, (n_components_, 10))
Xt2 = ica.transform(X)
assert_array_almost_equal(Xt, Xt2)
def test_inverse_transform():
# Test FastICA.inverse_transform
n_features = 10
n_samples = 100
n1, n2 = 5, 10
rng = np.random.RandomState(0)
X = rng.random_sample((n_samples, n_features))
expected = {(True, n1): (n_features, n1),
(True, n2): (n_features, n2),
(False, n1): (n_features, n2),
(False, n2): (n_features, n2)}
for whiten in [True, False]:
for n_components in [n1, n2]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, random_state=rng,
whiten=whiten)
with warnings.catch_warnings(record=True):
# catch "n_components ignored" warning
Xt = ica.fit_transform(X)
expected_shape = expected[(whiten, n_components_)]
assert_equal(ica.mixing_.shape, expected_shape)
X2 = ica.inverse_transform(Xt)
assert_equal(X.shape, X2.shape)
# reversibility test in non-reduction case
if n_components == X.shape[1]:
assert_array_almost_equal(X, X2)
| bsd-3-clause |
abimannans/scikit-learn | sklearn/__init__.py | 154 | 3014 | """
Machine learning module for Python
==================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.org for complete documentation.
"""
import sys
import re
import warnings
# Make sure that DeprecationWarning within this package always gets printed
warnings.filterwarnings('always', category=DeprecationWarning,
module='^{0}\.'.format(re.escape(__name__)))
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.17.dev0'
try:
# This variable is injected in the __builtins__ by the build
# process. It used to enable importing subpackages of sklearn when
# the binaries are not built
__SKLEARN_SETUP__
except NameError:
__SKLEARN_SETUP__ = False
if __SKLEARN_SETUP__:
sys.stderr.write('Partial import of sklearn during the build process.\n')
# We are not importing the rest of the scikit during the build
# process, as it may not be compiled yet
else:
from . import __check_build
from .base import clone
__check_build # avoid flakes unused variable error
__all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition',
'cross_validation', 'datasets', 'decomposition', 'dummy',
'ensemble', 'externals', 'feature_extraction',
'feature_selection', 'gaussian_process', 'grid_search',
'isotonic', 'kernel_approximation', 'kernel_ridge',
'lda', 'learning_curve',
'linear_model', 'manifold', 'metrics', 'mixture', 'multiclass',
'naive_bayes', 'neighbors', 'neural_network', 'pipeline',
'preprocessing', 'qda', 'random_projection', 'semi_supervised',
'svm', 'tree',
# Non-modules:
'clone']
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs"""
import os
import numpy as np
import random
# It could have been provided in the environment
_random_seed = os.environ.get('SKLEARN_SEED', None)
if _random_seed is None:
_random_seed = np.random.uniform() * (2 ** 31 - 1)
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
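# Usage note (added): the seed consumed above can be pinned from the
# environment before running the tests, e.g. by exporting SKLEARN_SEED=42;
# otherwise a random seed is drawn and printed for reproducibility.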
| bsd-3-clause |
liam2/larray | larray/__init__.py | 1 | 4707 | from __future__ import absolute_import, division, print_function
__version__ = '0.32.1'
from larray.core.axis import Axis, AxisCollection, X
from larray.core.group import Group, LGroup, LSet, IGroup, union
from larray.core.array import (Array, zeros, zeros_like, ones, ones_like, empty, empty_like, full,
full_like, sequence, labels_array, ndtest, asarray, identity, diag,
eye, all, any, sum, prod, cumsum, cumprod, min, max, mean, ptp, var,
std, median, percentile, stack, zip_array_values, zip_array_items)
from larray.core.session import Session, local_arrays, global_arrays, arrays
from larray.core.constants import nan, inf, pi, e, euler_gamma
from larray.core.metadata import Metadata
from larray.core.ufuncs import wrap_elementwise_array_func, maximum, minimum, where
from larray.core.npufuncs import (sin, cos, tan, arcsin, arccos, arctan, hypot, arctan2, degrees,
radians, unwrap, sinh, cosh, tanh, arcsinh, arccosh, arctanh,
angle, real, imag, conj,
round, around, round_, rint, fix, floor, ceil, trunc,
exp, expm1, exp2, log, log10, log2, log1p, logaddexp, logaddexp2,
i0, sinc, signbit, copysign, frexp, ldexp,
convolve, clip, sqrt, absolute, fabs, sign, fmax, fmin, nan_to_num,
real_if_close, interp, isnan, isinf, inverse)
from larray.inout.misc import from_lists, from_string
from larray.inout.pandas import from_frame, from_series
from larray.inout.csv import read_csv, read_tsv, read_eurostat
from larray.inout.excel import read_excel
from larray.inout.hdf import read_hdf
from larray.inout.sas import read_sas
from larray.inout.stata import read_stata
from larray.inout.xw_excel import open_excel, Workbook
from larray.inout.xw_reporting import ExcelReport, ReportSheet
# just make sure handlers for .pkl and .pickle are initialized
import larray.inout.pickle as _pkl
del _pkl
from larray.util.options import get_options, set_options
from larray.viewer import view, edit, debug, compare, run_editor_on_exception
from larray.extra.ipfp import ipfp
from larray.example import get_example_filepath, load_example_data, EXAMPLE_EXCEL_TEMPLATES_DIR
import larray.random
__all__ = [
# axis
'Axis', 'AxisCollection', 'X',
# group
'Group', 'LGroup', 'LSet', 'IGroup', 'union',
# array
'Array', 'zeros', 'zeros_like', 'ones', 'ones_like', 'empty', 'empty_like', 'full',
'full_like', 'sequence', 'labels_array', 'ndtest', 'asarray', 'identity', 'diag', 'eye',
'all', 'any', 'sum', 'prod', 'cumsum', 'cumprod', 'min', 'max', 'mean', 'ptp', 'var', 'std',
'median', 'percentile', 'stack', 'zip_array_values', 'zip_array_items',
# session
'Session', 'local_arrays', 'global_arrays', 'arrays',
# constants
'nan', 'inf', 'pi', 'e', 'euler_gamma',
# metadata
'Metadata',
# ufuncs
'wrap_elementwise_array_func',
'maximum', 'minimum', 'where',
'sin', 'cos', 'tan', 'arcsin', 'arccos', 'arctan', 'hypot', 'arctan2', 'degrees', 'radians',
'unwrap', 'sinh', 'cosh', 'tanh', 'arcsinh', 'arccosh', 'arctanh',
'angle', 'real', 'imag', 'conj',
'round', 'around', 'round_', 'rint', 'fix', 'floor', 'ceil', 'trunc',
'exp', 'expm1', 'exp2', 'log', 'log10', 'log2', 'log1p', 'logaddexp', 'logaddexp2',
'i0', 'sinc', 'signbit', 'copysign', 'frexp', 'ldexp',
'convolve', 'clip', 'sqrt', 'absolute', 'fabs', 'sign', 'fmax', 'fmin', 'nan_to_num',
'real_if_close', 'interp', 'isnan', 'isinf', 'inverse',
# inout
'from_lists', 'from_string', 'from_frame', 'from_series', 'read_csv', 'read_tsv',
'read_eurostat', 'read_excel', 'read_hdf', 'read_sas', 'read_stata',
'open_excel', 'Workbook', 'ExcelReport', 'ReportSheet',
# utils
'get_options', 'set_options',
# viewer
'view', 'edit', 'debug', 'compare', 'run_editor_on_exception',
# ipfp
'ipfp',
# example
'get_example_filepath', 'load_example_data', 'EXAMPLE_EXCEL_TEMPLATES_DIR',
]
# ==== DEPRECATED API ====
from larray.core.axis import x
from larray.core.group import PGroup
from larray.core.array import (LArray, aslarray, create_sequential, ndrange, larray_equal,
larray_nan_equal, nan_equal, element_equal)
_deprecated = [
# axis
'x',
# group
'PGroup',
# array
'LArray', 'aslarray',
'create_sequential', 'ndrange',
'larray_equal', 'larray_nan_equal', 'nan_equal', 'element_equal',
]
__all__ += _deprecated
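# Note (added): the deprecated names above remain importable for backward
# compatibility; the current equivalents exported earlier in this module
# (e.g. Array for LArray, asarray for aslarray, X for x) are preferred.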
| gpl-3.0 |
jorik041/scikit-learn | examples/applications/plot_prediction_latency.py | 234 | 11277 | """
==================
Prediction Latency
==================
This is an example showing the prediction latency of various scikit-learn
estimators.
The goal is to measure the latency one can expect when doing predictions
either in bulk or atomic (i.e. one by one) mode.
The plots represent the distribution of the prediction latency as boxplots.
"""
# Authors: Eustache Diemert <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import time
import gc
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import scoreatpercentile
from sklearn.datasets.samples_generator import make_regression
from sklearn.ensemble.forest import RandomForestRegressor
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.svm.classes import SVR
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
def atomic_benchmark_estimator(estimator, X_test, verbose=False):
    """Measure the prediction runtime of each instance, one at a time."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_instances, dtype=np.float)
for i in range(n_instances):
instance = X_test[i, :]
start = time.time()
estimator.predict(instance)
runtimes[i] = time.time() - start
if verbose:
print("atomic_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose):
    """Measure the per-instance runtime when predicting the whole input in one call."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_bulk_repeats, dtype=np.float)
for i in range(n_bulk_repeats):
start = time.time()
estimator.predict(X_test)
runtimes[i] = time.time() - start
runtimes = np.array(list(map(lambda x: x / float(n_instances), runtimes)))
if verbose:
print("bulk_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def benchmark_estimator(estimator, X_test, n_bulk_repeats=30, verbose=False):
"""
Measure runtimes of prediction in both atomic and bulk mode.
Parameters
----------
estimator : already trained estimator supporting `predict()`
X_test : test input
n_bulk_repeats : how many times to repeat when evaluating bulk mode
Returns
-------
atomic_runtimes, bulk_runtimes : a pair of `np.array` which contain the
runtimes in seconds.
"""
atomic_runtimes = atomic_benchmark_estimator(estimator, X_test, verbose)
bulk_runtimes = bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats,
verbose)
return atomic_runtimes, bulk_runtimes
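# Hedged usage sketch (added, not in the original example), assuming data from
# generate_dataset() below and the Ridge estimator imported above:
#
#   atomic, bulk = benchmark_estimator(Ridge().fit(X_train, y_train), X_test)
#   print(atomic.mean(), bulk.mean())   # per-instance runtimes in seconds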
def generate_dataset(n_train, n_test, n_features, noise=0.1, verbose=False):
"""Generate a regression dataset with the given parameters."""
if verbose:
print("generating dataset...")
X, y, coef = make_regression(n_samples=n_train + n_test,
n_features=n_features, noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
if verbose:
print("ok")
return X_train, y_train, X_test, y_test
def boxplot_runtimes(runtimes, pred_type, configuration):
"""
Plot a new `Figure` with boxplots of prediction runtimes.
Parameters
----------
runtimes : list of `np.array` of latencies in micro-seconds
    pred_type : 'bulk' or 'atomic'
    configuration : benchmark configuration dict (describes the estimators being compared)
"""
fig, ax1 = plt.subplots(figsize=(10, 6))
bp = plt.boxplot(runtimes, )
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
plt.setp(ax1, xticklabels=cls_infos)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Prediction Time per Instance - %s, %d feats.' % (
pred_type.capitalize(),
configuration['n_features']))
ax1.set_ylabel('Prediction Time (us)')
plt.show()
def benchmark(configuration):
"""Run the whole benchmark."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
stats = {}
for estimator_conf in configuration['estimators']:
print("Benchmarking", estimator_conf['instance'])
estimator_conf['instance'].fit(X_train, y_train)
gc.collect()
a, b = benchmark_estimator(estimator_conf['instance'], X_test)
stats[estimator_conf['name']] = {'atomic': a, 'bulk': b}
cls_names = [estimator_conf['name'] for estimator_conf in configuration[
'estimators']]
runtimes = [1e6 * stats[clf_name]['atomic'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'atomic', configuration)
runtimes = [1e6 * stats[clf_name]['bulk'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'bulk (%d)' % configuration['n_test'],
configuration)
def n_feature_influence(estimators, n_train, n_test, n_features, percentile):
"""
Estimate influence of the number of features on prediction time.
Parameters
----------
estimators : dict of (name (str), estimator) to benchmark
    n_train : number of training instances (int)
    n_test : number of testing instances (int)
    n_features : list of feature-space dimensionalities to test (int)
    percentile : percentile at which to measure the speed (int [0-100])
    Returns
    -------
percentiles : dict(estimator_name,
dict(n_features, percentile_perf_in_us))
"""
percentiles = defaultdict(defaultdict)
for n in n_features:
print("benchmarking with %d features" % n)
X_train, y_train, X_test, y_test = generate_dataset(n_train, n_test, n)
for cls_name, estimator in estimators.items():
estimator.fit(X_train, y_train)
gc.collect()
runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False)
percentiles[cls_name][n] = 1e6 * scoreatpercentile(runtimes,
percentile)
return percentiles
def plot_n_features_influence(percentiles, percentile):
fig, ax1 = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
for i, cls_name in enumerate(percentiles.keys()):
x = np.array(sorted([n for n in percentiles[cls_name].keys()]))
y = np.array([percentiles[cls_name][n] for n in x])
plt.plot(x, y, color=colors[i], )
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Evolution of Prediction Time with #Features')
ax1.set_xlabel('#Features')
ax1.set_ylabel('Prediction Time at %d%%-ile (us)' % percentile)
plt.show()
def benchmark_throughputs(configuration, duration_secs=0.1):
"""benchmark throughput for different estimators."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
throughputs = dict()
for estimator_config in configuration['estimators']:
estimator_config['instance'].fit(X_train, y_train)
start_time = time.time()
n_predictions = 0
while (time.time() - start_time) < duration_secs:
estimator_config['instance'].predict(X_test[0])
n_predictions += 1
throughputs[estimator_config['name']] = n_predictions / duration_secs
return throughputs
def plot_benchmark_throughput(throughputs, configuration):
fig, ax = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
cls_values = [throughputs[estimator_conf['name']] for estimator_conf in
configuration['estimators']]
plt.bar(range(len(throughputs)), cls_values, width=0.5, color=colors)
ax.set_xticks(np.linspace(0.25, len(throughputs) - 0.75, len(throughputs)))
ax.set_xticklabels(cls_infos, fontsize=10)
ymax = max(cls_values) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('Throughput (predictions/sec)')
ax.set_title('Prediction Throughput for different estimators (%d '
'features)' % configuration['n_features'])
plt.show()
###############################################################################
# main code
start_time = time.time()
# benchmark bulk/atomic prediction speed for various regressors
configuration = {
'n_train': int(1e3),
'n_test': int(1e2),
'n_features': int(1e2),
'estimators': [
{'name': 'Linear Model',
'instance': SGDRegressor(penalty='elasticnet', alpha=0.01,
l1_ratio=0.25, fit_intercept=True),
'complexity_label': 'non-zero coefficients',
'complexity_computer': lambda clf: np.count_nonzero(clf.coef_)},
{'name': 'RandomForest',
'instance': RandomForestRegressor(),
'complexity_label': 'estimators',
'complexity_computer': lambda clf: clf.n_estimators},
{'name': 'SVR',
'instance': SVR(kernel='rbf'),
'complexity_label': 'support vectors',
'complexity_computer': lambda clf: len(clf.support_vectors_)},
]
}
benchmark(configuration)
# benchmark n_features influence on prediction speed
percentile = 90
percentiles = n_feature_influence({'ridge': Ridge()},
configuration['n_train'],
configuration['n_test'],
[100, 250, 500], percentile)
plot_n_features_influence(percentiles, percentile)
# benchmark throughput
throughputs = benchmark_throughputs(configuration)
plot_benchmark_throughput(throughputs, configuration)
stop_time = time.time()
print("example run in %.2fs" % (stop_time - start_time))
| bsd-3-clause |
btabibian/scikit-learn | examples/text/hashing_vs_dict_vectorizer.py | 93 | 3243 | """
===========================================
FeatureHasher and DictVectorizer Comparison
===========================================
Compares FeatureHasher and DictVectorizer by using both to vectorize
text documents.
The example demonstrates syntax and speed only; it doesn't actually do
anything useful with the extracted vectors. See the example scripts
{document_classification_20newsgroups,clustering}.py for actual learning
on text documents.
A discrepancy between the number of terms reported for DictVectorizer and
for FeatureHasher is to be expected due to hash collisions.
"""
# Author: Lars Buitinck
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import re
import sys
from time import time
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
def n_nonzero_columns(X):
"""Returns the number of non-zero columns in a CSR matrix X."""
return len(np.unique(X.nonzero()[1]))
def tokens(doc):
"""Extract tokens from doc.
This uses a simple regex to break strings into tokens. For a more
principled approach, see CountVectorizer or TfidfVectorizer.
"""
return (tok.lower() for tok in re.findall(r"\w+", doc))
def token_freqs(doc):
"""Extract a dict mapping tokens from doc to their frequencies."""
freq = defaultdict(int)
for tok in tokens(doc):
freq[tok] += 1
return freq
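# Illustrative note (added):
#   token_freqs("To be or not to be") -> {'to': 2, 'be': 2, 'or': 1, 'not': 1}
# (tokens() lower-cases, so 'To' and 'to' are counted together).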
categories = [
'alt.atheism',
'comp.graphics',
'comp.sys.ibm.pc.hardware',
'misc.forsale',
'rec.autos',
'sci.space',
'talk.religion.misc',
]
# Uncomment the following line to use a larger set (11k+ documents)
#categories = None
print(__doc__)
print("Usage: %s [n_features_for_hashing]" % sys.argv[0])
print(" The default number of features is 2**18.")
print()
try:
n_features = int(sys.argv[1])
except IndexError:
n_features = 2 ** 18
except ValueError:
print("not a valid number of features: %r" % sys.argv[1])
sys.exit(1)
print("Loading 20 newsgroups training data")
raw_data = fetch_20newsgroups(subset='train', categories=categories).data
data_size_mb = sum(len(s.encode('utf-8')) for s in raw_data) / 1e6
print("%d documents - %0.3fMB" % (len(raw_data), data_size_mb))
print()
print("DictVectorizer")
t0 = time()
vectorizer = DictVectorizer()
vectorizer.fit_transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % len(vectorizer.get_feature_names()))
print()
print("FeatureHasher on frequency dicts")
t0 = time()
hasher = FeatureHasher(n_features=n_features)
X = hasher.transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
print()
print("FeatureHasher on raw tokens")
t0 = time()
hasher = FeatureHasher(n_features=n_features, input_type="string")
X = hasher.transform(tokens(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
| bsd-3-clause |
andreashorn/lead_dbs | ext_libs/OSS-DBS/OSS_platform/Math_module_hybrid.py | 1 | 21531 | # -*- coding: utf-8 -*-
"""
Created on Sun Aug 19 13:23:54 2018
@author: butenko
"""
from dolfin import *
from pandas import read_csv
import numpy as np
import os
import time as tm
from tissue_dielectrics import DielectricProperties
parameters["allow_extrapolation"]=True
parameters['linear_algebra_backend']='PETSc'
set_log_active(False) #turns off debugging info
def choose_solver_for_me(EQS_mode,float_conductors):
if float_conductors != -1: #that means we have floating conductors
if EQS_mode=='EQS':
return('MUMPS') # maybe for QS with only one floating conductor we could use GMRES
else:
return('GMRES')
else:
if EQS_mode=='EQS':
return('BiCGSTAB')
else:
return('GMRES')
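# Summary of the choice above (added comment): with floating conductors,
# EQS -> MUMPS (direct) and QS -> GMRES; without them, EQS -> BiCGSTAB and
# QS -> GMRES.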
def get_current_density(mesh,element_order,EQS_mode,kappa,Cond_tensor,E_field_real,E_field_imag):
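    # (Added comment) Projects the current density onto a DG vector space of
    # order element_order-1 (or element_order when the order is 1):
    # real part J_re = kappa_re*E_re - kappa_im*E_im and, for EQS only,
    # imaginary part J_im = kappa_re*E_im + kappa_im*E_re; the real
    # conductivity factor is replaced by Cond_tensor when one is provided.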
if element_order>1:
W =VectorFunctionSpace(mesh,'DG',element_order-1)
W_i =VectorFunctionSpace(mesh,'DG',element_order-1)
else:
W =VectorFunctionSpace(mesh,'DG',element_order)
W_i =VectorFunctionSpace(mesh,'DG',element_order)
w = TestFunction(W)
Pv = TrialFunction(W)
j_dens_real = Function(W)
a_local = inner(w, Pv) * dx
if EQS_mode == 'EQS':
w_i = TestFunction(W_i)
Pv_i = TrialFunction(W_i)
j_dens_im = Function(W_i)
a_local_imag = inner(w_i, Pv_i) * dx
if Cond_tensor!=False:
L_local = inner(w, (Cond_tensor*E_field_real-kappa[1]*E_field_imag)) * dx
L_local_imag = inner(w_i, (Cond_tensor*E_field_imag+kappa[1]*E_field_real)) * dx
else:
L_local = inner(w, (kappa[0]*E_field_real-kappa[1]*E_field_imag)) * dx
L_local_imag = inner(w_i, (kappa[0]*E_field_imag+kappa[1]*E_field_real)) * dx
A_local_imag, b_local_imag = assemble_system(a_local_imag, L_local_imag, bcs=[])
local_solver = PETScKrylovSolver('bicgstab')
local_solver.solve(A_local_imag,j_dens_im.vector(),b_local_imag)
else:
j_dens_im=Function(W)
j_dens_im.vector()[:] = 0.0
if Cond_tensor!=False:
L_local = inner(w, (Cond_tensor*E_field_real)) * dx
else:
L_local = inner(w, (kappa[0]*E_field_real)) * dx
A_local, b_local = assemble_system(a_local, L_local, bcs=[])
local_solver = PETScKrylovSolver('bicgstab')
local_solver.solve(A_local,j_dens_real.vector(),b_local)
return j_dens_real,j_dens_im
def get_field(mesh_sol,Domains,subdomains,boundaries_sol,Field_calc_param):
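    # Overview (added comment): map the dielectric properties onto the mesh
    # subdomains, solve the QS/EQS Laplace formulation for the potential,
    # compute the current through the grounded contact and the E-field, then,
    # if requested, repeat with CPE-corrected or current-controlled (scaled)
    # boundary conditions, and finally probe the potential at the
    # neuron-model vertices and store it to Results_adaptive.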
set_log_active(False) #turns off debugging info
print("_________________________")
parameters['linear_algebra_backend']='PETSc'
[cond_GM, perm_GM]=DielectricProperties(3).get_dielectrics(Field_calc_param.frequenc) #3 for grey matter and so on (numeration as in voxel_data)
[cond_WM, perm_WM]=DielectricProperties(2).get_dielectrics(Field_calc_param.frequenc)
[cond_CSF, perm_CSF]=DielectricProperties(1).get_dielectrics(Field_calc_param.frequenc)
[cond_default,perm_default]=DielectricProperties(Field_calc_param.default_material).get_dielectrics(Field_calc_param.frequenc)
from GUI_inp_dict import d as d_encap
[cond_encap, perm_encap]=DielectricProperties(d_encap['encap_tissue_type']).get_dielectrics(Field_calc_param.frequenc)
cond_encap=cond_encap*d_encap['encap_scaling_cond']
perm_encap=perm_encap*d_encap['encap_scaling_perm']
if Field_calc_param.Solver_type=='Default':
        Solver_type=choose_solver_for_me(Field_calc_param.EQS_mode,Domains.Float_contacts) #chooses the solver based on the Laplace formulation and whether floating conductors are used
else:
Solver_type=Field_calc_param.Solver_type # just get the solver directly
conductivities=[cond_default,cond_GM,cond_WM,cond_CSF,cond_encap]
rel_permittivities=[perm_default,perm_GM,perm_WM,perm_CSF,perm_encap]
    # to get conductivity (and permittivity if EQS formulation) mapped according to the subdomains. k_val_r is just a list of conductivities (S/mm!) in a specific order to scale the cond. tensor
from FEM_in_spectrum import get_dielectric_properties_from_subdomains
kappa,k_val_r=get_dielectric_properties_from_subdomains(mesh_sol,subdomains,Field_calc_param.EQS_mode,Domains.Float_contacts,conductivities,rel_permittivities,Field_calc_param.frequenc)
file=File(os.environ['PATIENTDIR']+'/Results_adaptive/Last_subdomains_map.pvd')
file<<subdomains
file=File(os.environ['PATIENTDIR']+'/Results_adaptive/Last_conductivity_map.pvd')
file<<kappa[0]
if Field_calc_param.EQS_mode == 'EQS':
file=File(os.environ['PATIENTDIR']+'/Results_adaptive/Last_permittivity_map.pvd')
file<<kappa[1]
if Field_calc_param.anisotropy==1:
# order xx,xy,xz,yy,yz,zz
c00 = MeshFunction("double", mesh_sol, 3, 0.0)
c01 = MeshFunction("double", mesh_sol, 3, 0.0)
c02 = MeshFunction("double", mesh_sol, 3, 0.0)
c11 = MeshFunction("double", mesh_sol, 3, 0.0)
c12 = MeshFunction("double", mesh_sol, 3, 0.0)
c22 = MeshFunction("double", mesh_sol, 3, 0.0)
# load the diffusion tensor (should be normalized beforehand)
hdf = HDF5File(mesh_sol.mpi_comm(), os.environ['PATIENTDIR']+"/Results_adaptive/Tensors_to_solve_num_el_"+str(mesh_sol.num_cells())+".h5", "r")
hdf.read(c00, "/c00")
hdf.read(c01, "/c01")
hdf.read(c02, "/c02")
hdf.read(c11, "/c11")
hdf.read(c12, "/c12")
hdf.read(c22, "/c22")
hdf.close()
unscaled_tensor=[c00,c01,c02,c11,c12,c22]
# to get tensor scaled by the conductivity map (twice send Field_calc_param.frequenc to always get unscaled ellipsoid tensor for visualization)
from FEM_in_spectrum import get_scaled_cond_tensor
Cond_tensor=get_scaled_cond_tensor(mesh_sol,subdomains,Field_calc_param.frequenc,Field_calc_param.frequenc,unscaled_tensor,k_val_r,plot_tensors=True)
else:
Cond_tensor=False #just to initialize
#In case of current-controlled stimulation, Dirichlet_bc or the whole potential distribution will be scaled afterwards (due to the system's linearity)
from FEM_in_spectrum import get_solution_space_and_Dirichlet_BC
V_space,Dirichlet_bc,ground_index,facets=get_solution_space_and_Dirichlet_BC(Field_calc_param.external_grounding,Field_calc_param.c_c,mesh_sol,subdomains,boundaries_sol,Field_calc_param.element_order,Field_calc_param.EQS_mode,Domains.Contacts,Domains.fi)
#ground index refers to the ground in .med/.msh file
print("dofs: ",(max(V_space.dofmap().dofs())+1))
print("N of elements: ",mesh_sol.num_cells())
#facets = MeshFunction('size_t',mesh_sol,2)
#facets.set_all(0)
if Field_calc_param.external_grounding==False: # otherwise we have it already from get_solution_space_and_Dirichlet_BC()
facets.array()[boundaries_sol.array()==Domains.Contacts[ground_index]]=1
dsS=Measure("ds",domain=mesh_sol,subdomain_data=facets)
Ground_surface_size=assemble(1.0*dsS(1))
dx = Measure("dx",domain=mesh_sol)
# to solve the Laplace equation div(kappa*grad(phi))=0 (variational form: a(u,v)=L(v))
start_math=tm.time()
from FEM_in_spectrum import define_variational_form_and_solve
phi_sol=define_variational_form_and_solve(V_space,Dirichlet_bc,kappa,Field_calc_param.EQS_mode,Cond_tensor,Solver_type)
minutes=int((tm.time() - start_math)/60)
secnds=int(tm.time() - start_math)-minutes*60
print("--- assembled and solved in ",minutes," min ",secnds," s ---")
if Field_calc_param.EQS_mode == 'EQS':
(phi_r_sol,phi_i_sol)=phi_sol.split(deepcopy=True)
else:
phi_r_sol=phi_sol
phi_i_sol=Function(V_space)
phi_i_sol.vector()[:] = 0.0
# get current flowing through the grounded contact and the electric field in the whole domain
from FEM_in_spectrum import get_current
J_ground,E_field,E_field_im = get_current(mesh_sol,facets,boundaries_sol,Field_calc_param.element_order,Field_calc_param.EQS_mode,Domains.Contacts,kappa,Cond_tensor,phi_r_sol,phi_i_sol,ground_index,get_E_field=True)
#print("J_ground_unscaled: ",J_ground)
# If EQS, J_ground is a complex number. If QS, E_field_im is a null function
# to get current density function which is required for mesh refinement when checking current convergence
j_dens_real,j_dens_im = get_current_density(mesh_sol,Field_calc_param.element_order,Field_calc_param.EQS_mode,kappa,Cond_tensor,E_field,E_field_im)
# If QS, j_dens_im is null function
# will be used for mesh refinement
j_dens_real_unscaled=j_dens_real.copy(deepcopy=True)
j_dens_im_unscaled=j_dens_im.copy(deepcopy=True) # null function if QS
import copy
J_real_unscaled=copy.deepcopy(np.real(J_ground))
J_im_unscaled=copy.deepcopy(np.imag(J_ground)) # 0 if QS
# to project the E-field magnitude
if Field_calc_param.element_order>1:
V_normE=FunctionSpace(mesh_sol,"CG",Field_calc_param.element_order-1)
else:
V_normE=FunctionSpace(mesh_sol,"CG",Field_calc_param.element_order)
#V_across=max(Domains.fi[:], key=abs) #actually, not across, but against ground!!!
if Field_calc_param.external_grounding==True and (Field_calc_param.c_c==1 or len(Domains.fi)==1):
V_max=max(Domains.fi[:], key=abs)
V_min=0.0
elif -1*Domains.fi[0]==Domains.fi[1]: # V_across is needed only for 2 active contact systems
V_min=-1*abs(Domains.fi[0])
V_max=abs(Domains.fi[0])
else:
V_min=min(Domains.fi[:], key=abs)
V_max=max(Domains.fi[:], key=abs)
V_across=V_max-V_min # this can be negative
Vertices_get=read_csv(os.environ['PATIENTDIR']+'/Neuron_model_arrays/Vert_of_Neural_model_NEURON.csv', delimiter=' ', header=None)
Vertices_array=Vertices_get.values
Phi_ROI=np.zeros((Vertices_array.shape[0],4),float)
for inx in range(Vertices_array.shape[0]):
pnt=Point(Vertices_array[inx,0],Vertices_array[inx,1],Vertices_array[inx,2])
Phi_ROI[inx,0]=Vertices_array[inx,0]
Phi_ROI[inx,1]=Vertices_array[inx,1]
Phi_ROI[inx,2]=Vertices_array[inx,2]
if Field_calc_param.c_c==1:
phi_r_sol_scaled_on_point=V_across*np.real((phi_r_sol(pnt)+1j*phi_i_sol(pnt))/J_ground)
phi_i_sol_scaled_on_point=V_across*np.imag((phi_r_sol(pnt)+1j*phi_i_sol(pnt))/J_ground)
Phi_ROI[inx,3]=np.sqrt(phi_r_sol_scaled_on_point*phi_r_sol_scaled_on_point+phi_i_sol_scaled_on_point*phi_i_sol_scaled_on_point)
else:
Phi_ROI[inx,3]=np.sqrt(phi_r_sol(pnt)*phi_r_sol(pnt)+phi_i_sol(pnt)*phi_i_sol(pnt))
np.savetxt(os.environ['PATIENTDIR']+'/Results_adaptive/Phi_'+str(Field_calc_param.frequenc)+'.csv', Phi_ROI, delimiter=" ") # this is amplitude, actually
# #Probe_of_potential
# probe_z=np.zeros((100,4),float)
# for inx in range(100):
# pnt=Point(75.5,78.5,27.865+inx/10.0)
# probe_z[inx,0]=75.5
# probe_z[inx,1]=78.5
# probe_z[inx,2]=27.865+inx/10.0
# if Field_calc_param.c_c==1:
# phi_r_sol_scaled_on_point=V_across*np.real((phi_r_sol(pnt)+1j*phi_i_sol(pnt))/(J_real_unscaled+1j*J_im_unscaled))
# phi_i_sol_scaled_on_point=V_across*np.imag((phi_r_sol(pnt)+1j*phi_i_sol(pnt))/(J_real_unscaled+1j*J_im_unscaled))
# probe_z[inx,3]=np.sqrt(phi_r_sol_scaled_on_point*phi_r_sol_scaled_on_point+phi_i_sol_scaled_on_point*phi_i_sol_scaled_on_point)
# else:
# probe_z[inx,3]=np.sqrt(phi_r_sol(pnt)*phi_r_sol(pnt)+phi_i_sol(pnt)*phi_i_sol(pnt))
# np.savetxt('Results_adaptive/Phi_Zprobe'+str(Field_calc_param.frequenc)+'.csv', probe_z, delimiter=" ")
#print("Tissue impedance: ", Z_tis)
#=============================================================================#
if Field_calc_param.c_c==1 or Field_calc_param.CPE==1:
Z_tissue = V_across/J_ground # Tissue impedance
print("Tissue impedance: ", Z_tissue)
if Field_calc_param.CPE==1:
if len(Domains.fi)>2:
print("Currently, CPE can be used only for simulations with two contacts. Please, assign the rest to 'None'")
raise SystemExit
from GUI_inp_dict import d as d_cpe
CPE_param=[d_cpe["K_A"],d_cpe["beta"],d_cpe["K_A_ground"],d_cpe["beta_ground"]]
from FEM_in_spectrum import get_CPE_corrected_Dirichlet_BC
Dirichlet_bc_with_CPE,total_impedance=get_CPE_corrected_Dirichlet_BC(Field_calc_param.external_grounding,facets,boundaries_sol,CPE_param,Field_calc_param.EQS_mode,Field_calc_param.frequenc,Field_calc_param.frequenc,Domains.Contacts,Domains.fi,V_across,Z_tissue,V_space)
print("Solving for an adjusted potential on contacts to account for CPE")
start_math=tm.time()
# to solve the Laplace equation for the adjusted Dirichlet
phi_sol_CPE=define_variational_form_and_solve(V_space,Dirichlet_bc_with_CPE,kappa,Field_calc_param.EQS_mode,Cond_tensor,Solver_type)
minutes=int((tm.time() - start_math)/60)
secnds=int(tm.time() - start_math)-minutes*60
print("--- assembled and solved in ",minutes," min ",secnds," s ")
if Field_calc_param.EQS_mode=='EQS':
(phi_r_CPE,phi_i_CPE)=phi_sol_CPE.split(deepcopy=True)
else:
phi_r_CPE=phi_sol_CPE
phi_i_CPE=Function(V_space)
phi_i_CPE.vector()[:] = 0.0
# get current flowing through the grounded contact and the electric field in the whole domain
J_ground_CPE,E_field_CPE,E_field_im_CPE = get_current(mesh_sol,facets,boundaries_sol,Field_calc_param.element_order,Field_calc_param.EQS_mode,Domains.Contacts,kappa,Cond_tensor,phi_r_CPE,phi_i_CPE,ground_index,get_E_field=True)
# If EQS, J_ground is a complex number. If QS, E_field_CPE is a null function
# to get current density function which is required for mesh refinement when checking current convergence
j_dens_real_CPE,j_dens_im_CPE = get_current_density(mesh_sol,Field_calc_param.element_order,Field_calc_param.EQS_mode,kappa,Cond_tensor,E_field_CPE,E_field_im_CPE)
# If QS, j_dens_im is null function
# will be used for mesh refinement
j_dens_real_unscaled=j_dens_real_CPE.copy(deepcopy=True)
j_dens_im_unscaled=j_dens_im_CPE.copy(deepcopy=True)
J_real_unscaled=copy.deepcopy(np.real(J_ground))
J_im_unscaled=copy.deepcopy(np.imag(J_ground))
E_norm=project(sqrt(inner(E_field_CPE,E_field_CPE)+inner(E_field_im_CPE,E_field_im_CPE)),V_normE,solver_type="cg", preconditioner_type="amg")
max_E=E_norm.vector().max()
file=File(os.environ['PATIENTDIR']+'/Results_adaptive/E_ampl_'+str(Field_calc_param.EQS_mode)+'.pvd')
file<<E_norm,mesh_sol
file=File(os.environ['PATIENTDIR']+'/Results_adaptive/Last_Phi_r_field_'+str(Field_calc_param.EQS_mode)+'.pvd')
file<<phi_r_CPE,mesh_sol
if Field_calc_param.EQS_mode=='EQS':
file=File(os.environ['PATIENTDIR']+'/Results_adaptive/Last_Phi_im_field_'+str(Field_calc_param.EQS_mode)+'.pvd')
file<<phi_i_CPE,mesh_sol
return phi_r_CPE,phi_i_CPE,E_field_CPE,E_field_im_CPE,max_E,J_real_unscaled,J_im_unscaled,j_dens_real_unscaled,j_dens_im_unscaled
if Field_calc_param.c_c==1:
if Field_calc_param.EQS_mode=='EQS': # For EQS, we need to scale the potential on boundaries (because the error is absolute) and recompute field, etc. Maybe we can scale them also directly?
Dirichlet_bc_scaled=[]
            for bc_i in range(len(Domains.Contacts)): #CPE estimation is valid only for one active and one ground contact configuration
if Field_calc_param.EQS_mode=='EQS':
if Domains.fi[bc_i]!=0.0:
Active_with_CC=V_across*V_across/J_ground #(impedance * current through the contact (V_across coincides with the assigned current magnitude))
Dirichlet_bc_scaled.append(DirichletBC(V_space.sub(0), np.real(Active_with_CC), boundaries_sol,Domains.Contacts[bc_i]))
Dirichlet_bc_scaled.append(DirichletBC(V_space.sub(1), np.imag(Active_with_CC), boundaries_sol,Domains.Contacts[bc_i]))
else:
Dirichlet_bc_scaled.append(DirichletBC(V_space.sub(0), Constant(0.0), boundaries_sol,Domains.Contacts[bc_i]))
Dirichlet_bc_scaled.append(DirichletBC(V_space.sub(1), Constant(0.0), boundaries_sol,Domains.Contacts[bc_i]))
if Field_calc_param.external_grounding==True:
if Field_calc_param.EQS_mode == 'EQS':
Dirichlet_bc_scaled.append(DirichletBC(V_space.sub(0),0.0,facets,1))
Dirichlet_bc_scaled.append(DirichletBC(V_space.sub(1),0.0,facets,1))
else:
Dirichlet_bc_scaled.append(DirichletBC(V_space,0.0,facets,1))
print("Solving for a scaled potential on contacts (to match the desired current)")
start_math=tm.time()
# to solve the Laplace equation for the adjusted Dirichlet
phi_sol_scaled=define_variational_form_and_solve(V_space,Dirichlet_bc_scaled,kappa,Field_calc_param.EQS_mode,Cond_tensor,Solver_type)
minutes=int((tm.time() - start_math)/60)
secnds=int(tm.time() - start_math)-minutes*60
print("--- assembled and solved in ",minutes," min ",secnds," s ---")
(phi_r_sol_scaled,phi_i_sol_scaled)=phi_sol_scaled.split(deepcopy=True)
# get current flowing through the grounded contact and the electric field in the whole domain
J_ground_scaled,E_field_scaled,E_field_im_scaled = get_current(mesh_sol,facets,boundaries_sol,Field_calc_param.element_order,Field_calc_param.EQS_mode,Domains.Contacts,kappa,Cond_tensor,phi_r_sol_scaled,phi_i_sol_scaled,ground_index,get_E_field=True)
# If EQS, J_ground is a complex number. If QS, E_field_im is 0
else: # here we can simply scale the potential in the domain and recompute the E-field
phi_i_sol_scaled=Function(V_space)
phi_i_sol_scaled.vector()[:] = 0.0
phi_r_sol_scaled=Function(V_space)
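            # QS case is linear, so rescale the unscaled potential directly by
            # (requested current / computed current); in current-controlled
            # mode V_across holds the requested current magnitude.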
phi_r_sol_scaled.vector()[:]=V_across*phi_r_sol.vector()[:]/J_ground
J_ground_scaled,E_field_scaled,E_field_im_scaled = get_current(mesh_sol,facets,boundaries_sol,Field_calc_param.element_order,Field_calc_param.EQS_mode,Domains.Contacts,kappa,Cond_tensor,phi_r_sol_scaled,phi_i_sol_scaled,ground_index,get_E_field=True)
#E_field_im_scale is a null function
E_norm=project(sqrt(inner(E_field_scaled,E_field_scaled)+inner(E_field_im_scaled,E_field_im_scaled)),V_normE,solver_type="cg", preconditioner_type="amg")
max_E=E_norm.vector().max()
file=File(os.environ['PATIENTDIR']+'/Results_adaptive/E_ampl_'+str(Field_calc_param.EQS_mode)+'.pvd')
file<<E_norm,mesh_sol
file=File(os.environ['PATIENTDIR']+'/Results_adaptive/Last_Phi_r_field_'+str(Field_calc_param.EQS_mode)+'.pvd')
file<<phi_r_sol_scaled,mesh_sol
if Field_calc_param.EQS_mode=='EQS':
file=File(os.environ['PATIENTDIR']+'/Results_adaptive/Last_Phi_im_field_'+str(Field_calc_param.EQS_mode)+'.pvd')
file<<phi_i_sol_scaled,mesh_sol
return phi_r_sol_scaled,phi_i_sol_scaled,E_field_scaled,E_field_im_scaled,max_E,J_real_unscaled,J_im_unscaled,j_dens_real_unscaled,j_dens_im_unscaled
else:
E_norm=project(sqrt(inner(E_field,E_field)+inner(E_field_im,E_field_im)),V_normE,solver_type="cg", preconditioner_type="amg")
max_E=E_norm.vector().max()
file=File(os.environ['PATIENTDIR']+'/Results_adaptive/E_ampl_'+str(Field_calc_param.EQS_mode)+'.pvd')
file<<E_norm,mesh_sol
file=File(os.environ['PATIENTDIR']+'/Results_adaptive/Last_Phi_r_field_'+str(Field_calc_param.EQS_mode)+'.pvd')
file<<phi_r_sol,mesh_sol
if Field_calc_param.EQS_mode=='EQS':
file=File(os.environ['PATIENTDIR']+'/Results_adaptive/Last_Phi_im_field_'+str(Field_calc_param.EQS_mode)+'.pvd')
file<<phi_i_sol,mesh_sol
return phi_r_sol,phi_i_sol,E_field,E_field_im,max_E,J_real_unscaled,J_im_unscaled,j_dens_real_unscaled,j_dens_im_unscaled
def get_field_on_points(phi_r,phi_i,c_c,J_r,J_i):
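    # Sample the potential amplitude sqrt(phi_r^2 + phi_i^2) at the neuron
    # model vertices loaded from Vert_of_Neural_model_NEURON.csv and return
    # rows of [x, y, z, amplitude]; all-zero rows are dropped.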
Vertices_neur_get=read_csv(os.environ['PATIENTDIR']+'/Neuron_model_arrays/Vert_of_Neural_model_NEURON.csv', delimiter=' ', header=None)
Vertices_neur=Vertices_neur_get.values
Ampl_ROI=np.zeros((Vertices_neur.shape[0],4),float)
for inx in range(Vertices_neur.shape[0]):
pnt=Point(Vertices_neur[inx,0],Vertices_neur[inx,1],Vertices_neur[inx,2])
Ampl_ROI[inx,3]=sqrt(phi_r(pnt)*phi_r(pnt)+phi_i(pnt)*phi_i(pnt))
Ampl_ROI[inx,0]=Vertices_neur[inx,0]
Ampl_ROI[inx,1]=Vertices_neur[inx,1]
Ampl_ROI[inx,2]=Vertices_neur[inx,2]
    Ampl_ROI=Ampl_ROI[~np.all(Ampl_ROI==0.0,axis=1)] # delete all-zero entries
return Ampl_ROI
| gpl-3.0 |
technologiescollege/Blockly-rduino-communication | scripts_XP/Lib/site-packages/winpython/disthelpers.py | 4 | 32030 | # -*- coding: utf-8 -*-
#
# Copyright © 2009-2011 CEA
# Pierre Raybaut
# Licensed under the terms of the CECILL License
# (see guidata/__init__.py for details)
# pylint: disable=W0613
"""
disthelpers
-----------
The ``guidata.disthelpers`` module provides helper functions for Python
package distribution on Microsoft Windows platforms with ``py2exe`` or on
all platforms thanks to ``cx_Freeze``.
"""
from __future__ import print_function
import sys
import os
import os.path as osp
import shutil
import traceback
import atexit
import imp
from subprocess import Popen, PIPE
import warnings
#==============================================================================
# Module, scripts, programs
#==============================================================================
def get_module_path(modname):
"""Return module *modname* base path"""
module = sys.modules.get(modname, __import__(modname))
return osp.abspath(osp.dirname(module.__file__))
#==============================================================================
# Dependency management
#==============================================================================
def get_changeset(path, rev=None):
"""Return Mercurial repository *path* revision number"""
args = ['hg', 'parent']
if rev is not None:
args += ['--rev', str(rev)]
process = Popen(args, stdout=PIPE, stderr=PIPE, cwd=path, shell=True)
try:
return process.stdout.read().splitlines()[0].split()[1]
except IndexError:
raise RuntimeError(process.stderr.read())
def prepend_module_to_path(module_path):
"""
    Prepend to sys.path the module located in *module_path*
Return string with module infos: name, revision, changeset
Use this function:
1) In your application to import local frozen copies of internal libraries
2) In your py2exe distributed package to add a text file containing the returned string
"""
if not osp.isdir(module_path):
# Assuming py2exe distribution
return
sys.path.insert(0, osp.abspath(module_path))
changeset = get_changeset(module_path)
name = osp.basename(module_path)
prefix = "Prepending module to sys.path"
message = prefix + ("%s [revision %s]" % (name, changeset)
).rjust(80 - len(prefix), ".")
print(message, file=sys.stderr)
if name in sys.modules:
sys.modules.pop(name)
nbsp = 0
for modname in sys.modules.keys():
if modname.startswith(name + '.'):
sys.modules.pop(modname)
nbsp += 1
warning = '(removed %s from sys.modules' % name
if nbsp:
warning += ' and %d subpackages' % nbsp
warning += ')'
print(warning.rjust(80), file=sys.stderr)
return message
def prepend_modules_to_path(module_base_path):
"""Prepend to sys.path all modules located in *module_base_path*"""
if not osp.isdir(module_base_path):
# Assuming py2exe distribution
return
fnames = [osp.join(module_base_path, name)
for name in os.listdir(module_base_path)]
messages = [prepend_module_to_path(dirname)
for dirname in fnames if osp.isdir(dirname)]
return os.linesep.join(messages)
#==============================================================================
# Distribution helpers
#==============================================================================
def _remove_later(fname):
"""Try to remove file later (at exit)"""
def try_to_remove(fname):
if osp.exists(fname):
os.remove(fname)
atexit.register(try_to_remove, osp.abspath(fname))
def get_msvc_version(python_version):
"""Return Microsoft Visual C++ version used to build this Python version"""
if python_version is None:
python_version = '2.7'
warnings.warn("assuming Python 2.7 target")
if python_version in ('2.6', '2.7', '3.0', '3.1', '3.2'):
# Python 2.6-2.7, 3.0-3.2 were built with Visual Studio 9.0.21022.8
# (i.e. Visual C++ 2008, not Visual C++ 2008 SP1!)
return "9.0.21022.8"
elif python_version in ('3.3', '3.4'):
# Python 3.3+ were built with Visual Studio 10.0.30319.1
# (i.e. Visual C++ 2010)
return '10.0'
elif python_version in ('3.5', '3.6'):
return '15.0'
else:
raise RuntimeError("Unsupported Python version %s" % python_version)
def get_msvc_dlls(msvc_version, architecture=None):
"""Get the list of Microsoft Visual C++ DLLs associated to
architecture and Python version, create the manifest file.
architecture: integer (32 or 64) -- if None, take the Python build arch
python_version: X.Y"""
current_architecture = 64 if sys.maxsize > 2**32 else 32
if architecture is None:
architecture = current_architecture
filelist = []
# simple vs2015 situation: nothing (system dll)
if msvc_version == '14.0':
return filelist
msvc_major = msvc_version.split('.')[0]
msvc_minor = msvc_version.split('.')[1]
if msvc_major == '9':
key = "1fc8b3b9a1e18e3b"
atype = "" if architecture == 64 else "win32"
arch = "amd64" if architecture == 64 else "x86"
groups = {
'CRT': ('msvcr90.dll', 'msvcp90.dll', 'msvcm90.dll'),
# 'OPENMP': ('vcomp90.dll',)
}
for group, dll_list in groups.items():
dlls = ''
for dll in dll_list:
dlls += ' <file name="%s" />%s' % (dll, os.linesep)
manifest =\
"""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<!-- Copyright (c) Microsoft Corporation. All rights reserved. -->
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<noInheritable/>
<assemblyIdentity
type="%(atype)s"
name="Microsoft.VC90.%(group)s"
version="%(version)s"
processorArchitecture="%(arch)s"
publicKeyToken="%(key)s"
/>
%(dlls)s</assembly>
""" % dict(version=msvc_version, key=key, atype=atype, arch=arch,
group=group, dlls=dlls)
vc90man = "Microsoft.VC90.%s.manifest" % group
open(vc90man, 'w').write(manifest)
_remove_later(vc90man)
filelist += [vc90man]
winsxs = osp.join(os.environ['windir'], 'WinSxS')
vcstr = '%s_Microsoft.VC90.%s_%s_%s' % (arch, group,
key, msvc_version)
for fname in os.listdir(winsxs):
path = osp.join(winsxs, fname)
if osp.isdir(path) and fname.lower().startswith(vcstr.lower()):
for dllname in os.listdir(path):
filelist.append(osp.join(path, dllname))
break
else:
raise RuntimeError("Microsoft Visual C++ %s DLLs version %s "\
"were not found" % (group, msvc_version))
elif msvc_major == '10' or msvc_major == '15': # 15 for vs 2015
namelist = [name % (msvc_major + msvc_minor) for name in
(
'msvcp%s.dll', 'msvcr%s.dll',
'vcomp%s.dll',
)]
if msvc_major == '15':
namelist = [name % ('14' + msvc_minor) for name in
(
'vcruntime%s.dll', 'msvcp%s.dll', 'vccorlib%s.dll',
'concrt%s.dll','vcomp%s.dll',
)]
windir = os.environ['windir']
is_64bit_windows = osp.isdir(osp.join(windir, "SysWOW64"))
# Reminder: WoW64 (*W*indows 32-bit *o*n *W*indows *64*-bit) is a
# subsystem of the Windows operating system capable of running 32-bit
# applications and is included on all 64-bit versions of Windows
# (source: http://en.wikipedia.org/wiki/WoW64)
#
# In other words, "SysWOW64" contains 64-bit DLL and applications,
# whereas "System32" contains 64-bit DLL and applications on a 64-bit
# system.
sysdir = "System32"
if not is_64bit_windows and architecture == 64:
raise RuntimeError("Can't find 64-bit MSVC DLLs on a 32-bit OS")
if is_64bit_windows and architecture == 32:
sysdir = "SysWOW64"
for dllname in namelist:
fname = osp.join(windir, sysdir, dllname)
print('searching', fname )
if osp.exists(fname):
filelist.append(fname)
else:
raise RuntimeError("Microsoft Visual C++ DLLs version %s "\
"were not found" % msvc_version)
else:
raise RuntimeError("Unsupported MSVC version %s" % msvc_version)
return filelist
def create_msvc_data_files(architecture=None, python_version=None,
verbose=False):
"""Including Microsoft Visual C++ DLLs"""
msvc_version = get_msvc_version(python_version)
filelist = get_msvc_dlls(msvc_version, architecture=architecture)
print(create_msvc_data_files.__doc__)
if verbose:
for name in filelist:
print(" ", name)
msvc_major = msvc_version.split('.')[0]
if msvc_major == '9':
return [("Microsoft.VC90.CRT", filelist),]
else:
return [("", filelist),]
def to_include_files(data_files):
"""Convert data_files list to include_files list
data_files:
* this is the ``py2exe`` data files format
* list of tuples (dest_dirname, (src_fname1, src_fname2, ...))
include_files:
* this is the ``cx_Freeze`` data files format
* list of tuples ((src_fname1, dst_fname1),
(src_fname2, dst_fname2), ...))
"""
include_files = []
for dest_dir, fnames in data_files:
for source_fname in fnames:
dest_fname = osp.join(dest_dir, osp.basename(source_fname))
include_files.append((source_fname, dest_fname))
return include_files
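# Illustrative conversion (hypothetical file names):
#   data_files    = [("images", ("logo.png", "icons/app.svg"))]
#   to_include_files(data_files)
#   -> [("logo.png", os.path.join("images", "logo.png")),
#       ("icons/app.svg", os.path.join("images", "app.svg"))]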
def strip_version(version):
"""Return version number with digits only
(Windows does not support strings in version numbers)"""
return version.split('beta')[0].split('alpha'
)[0].split('rc')[0].split('dev')[0]
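# For instance (illustrative): strip_version("0.9.2beta1") returns "0.9.2"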
def remove_dir(dirname):
"""Remove directory *dirname* and all its contents
Print details about the operation (progress, success/failure)"""
print("Removing directory '%s'..." % dirname, end=' ')
try:
shutil.rmtree(dirname, ignore_errors=True)
print("OK")
except Exception:
print("Failed!")
traceback.print_exc()
class Distribution(object):
"""Distribution object
Help creating an executable using ``py2exe`` or ``cx_Freeze``
"""
DEFAULT_EXCLUDES = ['Tkconstants', 'Tkinter', 'tcl', 'tk', 'wx',
'_imagingtk', 'curses', 'PIL._imagingtk', 'ImageTk',
'PIL.ImageTk', 'FixTk', 'bsddb', 'email',
'pywin.debugger', 'pywin.debugger.dbgcon',
'matplotlib']
DEFAULT_INCLUDES = []
DEFAULT_BIN_EXCLUDES = ['MSVCP100.dll', 'MSVCP90.dll', 'w9xpopen.exe',
'MSVCP80.dll', 'MSVCR80.dll']
DEFAULT_BIN_INCLUDES = []
DEFAULT_BIN_PATH_INCLUDES = []
DEFAULT_BIN_PATH_EXCLUDES = []
def __init__(self):
self.name = None
self.version = None
self.description = None
self.target_name = None
self._target_dir = None
self.icon = None
self.data_files = []
self.includes = self.DEFAULT_INCLUDES
self.excludes = self.DEFAULT_EXCLUDES
self.bin_includes = self.DEFAULT_BIN_INCLUDES
self.bin_excludes = self.DEFAULT_BIN_EXCLUDES
self.bin_path_includes = self.DEFAULT_BIN_PATH_INCLUDES
self.bin_path_excludes = self.DEFAULT_BIN_PATH_EXCLUDES
self.msvc = os.name == 'nt'
self._py2exe_is_loaded = False
self._pyqt4_added = False
self._pyside_added = False
# Attributes relative to cx_Freeze:
self.executables = []
@property
def target_dir(self):
"""Return target directory (default: 'dist')"""
dirname = self._target_dir
if dirname is None:
return 'dist'
else:
return dirname
@target_dir.setter # analysis:ignore
def target_dir(self, value):
self._target_dir = value
def setup(self, name, version, description, script,
target_name=None, target_dir=None, icon=None,
data_files=None, includes=None, excludes=None,
bin_includes=None, bin_excludes=None,
bin_path_includes=None, bin_path_excludes=None, msvc=None):
"""Setup distribution object
Notes:
* bin_path_excludes is specific to cx_Freeze (ignored if it's None)
* if msvc is None, it's set to True by default on Windows
platforms, False on non-Windows platforms
"""
self.name = name
self.version = strip_version(version) if os.name == 'nt' else version
self.description = description
assert osp.isfile(script)
self.script = script
self.target_name = target_name
self.target_dir = target_dir
self.icon = icon
if data_files is not None:
self.data_files += data_files
if includes is not None:
self.includes += includes
if excludes is not None:
self.excludes += excludes
if bin_includes is not None:
self.bin_includes += bin_includes
if bin_excludes is not None:
self.bin_excludes += bin_excludes
if bin_path_includes is not None:
self.bin_path_includes += bin_path_includes
if bin_path_excludes is not None:
self.bin_path_excludes += bin_path_excludes
if msvc is not None:
self.msvc = msvc
if self.msvc:
try:
self.data_files += create_msvc_data_files()
except IOError:
print("Setting the msvc option to False "\
"will avoid this error", file=sys.stderr)
raise
# cx_Freeze:
self.add_executable(self.script, self.target_name, icon=self.icon)
def add_text_data_file(self, filename, contents):
"""Create temporary data file *filename* with *contents*
and add it to *data_files*"""
open(filename, 'wb').write(contents)
self.data_files += [("", (filename, ))]
_remove_later(filename)
def add_data_file(self, filename, destdir=''):
self.data_files += [(destdir, (filename, ))]
#------ Adding packages
def add_pyqt4(self):
"""Include module PyQt4 to the distribution"""
if self._pyqt4_added:
return
self._pyqt4_added = True
self.includes += ['sip', 'PyQt4.Qt', 'PyQt4.QtSvg', 'PyQt4.QtNetwork']
import PyQt4
pyqt_path = osp.dirname(PyQt4.__file__)
# Configuring PyQt4
conf = os.linesep.join(["[Paths]", "Prefix = .", "Binaries = ."])
self.add_text_data_file('qt.conf', conf)
# Including plugins (.svg icons support, QtDesigner support, ...)
if self.msvc:
vc90man = "Microsoft.VC90.CRT.manifest"
pyqt_tmp = 'pyqt_tmp'
if osp.isdir(pyqt_tmp):
shutil.rmtree(pyqt_tmp)
os.mkdir(pyqt_tmp)
vc90man_pyqt = osp.join(pyqt_tmp, vc90man)
man = open(vc90man, "r").read().replace('<file name="',
'<file name="Microsoft.VC90.CRT\\')
open(vc90man_pyqt, 'w').write(man)
for dirpath, _, filenames in os.walk(osp.join(pyqt_path,
"plugins")):
filelist = [osp.join(dirpath, f) for f in filenames
if osp.splitext(f)[1] in ('.dll', '.py')]
if self.msvc and [f for f in filelist
if osp.splitext(f)[1] == '.dll']:
                # Where there is a DLL built with Microsoft Visual C++ 2008,
# there must be a manifest file as well...
# ...congrats to Microsoft for this great simplification!
filelist.append(vc90man_pyqt)
            self.data_files.append( (dirpath[len(pyqt_path)+len(os.sep):],
filelist) )
if self.msvc:
atexit.register(remove_dir, pyqt_tmp)
# Including french translation
fr_trans = osp.join(pyqt_path, "translations", "qt_fr.qm")
if osp.exists(fr_trans):
self.data_files.append(('translations', (fr_trans, )))
def add_pyside(self):
"""Include module PySide to the distribution"""
if self._pyside_added:
return
self._pyside_added = True
self.includes += ['PySide.QtDeclarative', 'PySide.QtHelp',
'PySide.QtMultimedia', 'PySide.QtNetwork',
'PySide.QtOpenGL', 'PySide.QtScript',
'PySide.QtScriptTools', 'PySide.QtSql',
'PySide.QtSvg', 'PySide.QtTest',
'PySide.QtUiTools', 'PySide.QtWebKit',
'PySide.QtXml', 'PySide.QtXmlPatterns']
import PySide
pyside_path = osp.dirname(PySide.__file__)
# Configuring PySide
conf = os.linesep.join(["[Paths]", "Prefix = .", "Binaries = ."])
self.add_text_data_file('qt.conf', conf)
# Including plugins (.svg icons support, QtDesigner support, ...)
if self.msvc:
vc90man = "Microsoft.VC90.CRT.manifest"
os.mkdir('pyside_tmp')
vc90man_pyside = osp.join('pyside_tmp', vc90man)
man = open(vc90man, "r").read().replace('<file name="',
'<file name="Microsoft.VC90.CRT\\')
open(vc90man_pyside, 'w').write(man)
for dirpath, _, filenames in os.walk(osp.join(pyside_path, "plugins")):
filelist = [osp.join(dirpath, f) for f in filenames
if osp.splitext(f)[1] in ('.dll', '.py')]
if self.msvc and [f for f in filelist
if osp.splitext(f)[1] == '.dll']:
                # Where there is a DLL built with Microsoft Visual C++ 2008,
# there must be a manifest file as well...
# ...congrats to Microsoft for this great simplification!
filelist.append(vc90man_pyside)
self.data_files.append(
                (dirpath[len(pyside_path)+len(os.sep):], filelist) )
# Replacing dlls found by cx_Freeze by the real PySide Qt dlls:
# (http://qt-project.org/wiki/Packaging_PySide_applications_on_Windows)
dlls = [osp.join(pyside_path, fname)
for fname in os.listdir(pyside_path)
if osp.splitext(fname)[1] == '.dll']
self.data_files.append( ('', dlls) )
if self.msvc:
atexit.register(remove_dir, 'pyside_tmp')
# Including french translation
fr_trans = osp.join(pyside_path, "translations", "qt_fr.qm")
if osp.exists(fr_trans):
self.data_files.append(('translations', (fr_trans, )))
def add_qt_bindings(self):
"""Include Qt bindings, i.e. PyQt4 or PySide"""
try:
imp.find_module('PyQt4')
self.add_modules('PyQt4')
except ImportError:
self.add_modules('PySide')
def add_matplotlib(self):
"""Include module Matplotlib to the distribution"""
if 'matplotlib' in self.excludes:
self.excludes.pop(self.excludes.index('matplotlib'))
try:
import matplotlib.numerix # analysis:ignore
self.includes += ['matplotlib.numerix.ma',
'matplotlib.numerix.fft',
'matplotlib.numerix.linear_algebra',
'matplotlib.numerix.mlab',
'matplotlib.numerix.random_array']
except ImportError:
pass
self.add_module_data_files('matplotlib', ('mpl-data', ),
('.conf', '.glade', '', '.png', '.svg',
'.xpm', '.ppm', '.npy', '.afm', '.ttf'))
def add_modules(self, *module_names):
"""Include module *module_name*"""
for module_name in module_names:
print("Configuring module '%s'" % module_name)
if module_name == 'PyQt4':
self.add_pyqt4()
elif module_name == 'PySide':
self.add_pyside()
elif module_name == 'scipy.io':
self.includes += ['scipy.io.matlab.streams']
elif module_name == 'matplotlib':
self.add_matplotlib()
elif module_name == 'h5py':
import h5py
for attr in ['_stub', '_sync', 'utils', '_conv', '_proxy',
'defs']:
if hasattr(h5py, attr):
self.includes.append('h5py.%s' % attr)
if self.bin_path_excludes is not None and os.name == 'nt':
# Specific to cx_Freeze on Windows: avoid including a zlib dll
# built with another version of Microsoft Visual Studio
self.bin_path_excludes += [r'C:\Program Files',
r'C:\Program Files (x86)']
self.data_files.append( # necessary for cx_Freeze only
('', (osp.join(get_module_path('h5py'), 'zlib1.dll'), ))
)
elif module_name in ('docutils', 'rst2pdf', 'sphinx'):
self.includes += ['docutils.writers.null',
'docutils.languages.en',
'docutils.languages.fr']
if module_name == 'rst2pdf':
self.add_module_data_files("rst2pdf", ("styles", ),
('.json', '.style'),
copy_to_root=True)
if module_name == 'sphinx':
import sphinx.ext
for fname in os.listdir(osp.dirname(sphinx.ext.__file__)):
if osp.splitext(fname)[1] == '.py':
modname = 'sphinx.ext.%s' % osp.splitext(fname)[0]
self.includes.append(modname)
elif module_name == 'pygments':
self.includes += ['pygments', 'pygments.formatters',
'pygments.lexers', 'pygments.lexers.agile']
elif module_name == 'zmq':
# FIXME: this is not working, yet... (missing DLL)
self.includes += ['zmq', 'zmq.core._poll', 'zmq.core._version', 'zmq.core.constants', 'zmq.core.context', 'zmq.core.device', 'zmq.core.error', 'zmq.core.message', 'zmq.core.socket', 'zmq.core.stopwatch']
if os.name == 'nt':
self.bin_includes += ['libzmq.dll']
elif module_name == 'guidata':
self.add_module_data_files('guidata', ("images", ),
('.png', '.svg'), copy_to_root=False)
try:
imp.find_module('PyQt4')
self.add_pyqt4()
except ImportError:
self.add_pyside()
elif module_name == 'guiqwt':
self.add_module_data_files('guiqwt', ("images", ),
('.png', '.svg'), copy_to_root=False)
if os.name == 'nt':
# Specific to cx_Freeze: including manually MinGW DLLs
self.bin_includes += ['libgcc_s_dw2-1.dll',
'libstdc++-6.dll']
else:
try:
# Modules based on the same scheme as guidata and guiqwt
self.add_module_data_files(module_name, ("images", ),
('.png', '.svg'), copy_to_root=False)
except IOError:
raise RuntimeError("Module not supported: %s" % module_name)
def add_module_data_dir(self, module_name, data_dir_name, extensions,
copy_to_root=True, verbose=False,
exclude_dirs=[]):
"""
Collect data files in *data_dir_name* for module *module_name*
and add them to *data_files*
*extensions*: list of file extensions, e.g. ('.png', '.svg')
"""
module_dir = get_module_path(module_name)
nstrip = len(module_dir) + len(osp.sep)
data_dir = osp.join(module_dir, data_dir_name)
if not osp.isdir(data_dir):
raise IOError("Directory not found: %s" % data_dir)
for dirpath, _dirnames, filenames in os.walk(data_dir):
dirname = dirpath[nstrip:]
if osp.basename(dirpath) in exclude_dirs:
continue
if not copy_to_root:
dirname = osp.join(module_name, dirname)
pathlist = [osp.join(dirpath, f) for f in filenames
if osp.splitext(f)[1].lower() in extensions]
self.data_files.append( (dirname, pathlist) )
if verbose:
for name in pathlist:
print(" ", name)
def add_module_data_files(self, module_name, data_dir_names, extensions,
copy_to_root=True, verbose=False,
exclude_dirs=[]):
"""
Collect data files for module *module_name* and add them to *data_files*
*data_dir_names*: list of dirnames, e.g. ('images', )
*extensions*: list of file extensions, e.g. ('.png', '.svg')
"""
print("Adding module '%s' data files in %s (%s)"\
% (module_name, ", ".join(data_dir_names), ", ".join(extensions)))
module_dir = get_module_path(module_name)
for data_dir_name in data_dir_names:
self.add_module_data_dir(module_name, data_dir_name, extensions,
copy_to_root, verbose, exclude_dirs)
translation_file = osp.join(module_dir, "locale", "fr", "LC_MESSAGES",
"%s.mo" % module_name)
if osp.isfile(translation_file):
self.data_files.append((osp.join(module_name, "locale", "fr",
"LC_MESSAGES"), (translation_file, )))
print("Adding module '%s' translation file: %s" % (module_name,
osp.basename(translation_file)))
def build(self, library, cleanup=True, create_archive=None):
"""Build executable with given library.
library:
* 'py2exe': deploy using the `py2exe` library
* 'cx_Freeze': deploy using the `cx_Freeze` library
cleanup: remove 'build/dist' directories before building distribution
create_archive (requires the executable `zip`):
* None or False: do nothing
* 'add': add target directory to a ZIP archive
* 'move': move target directory to a ZIP archive
"""
if library == 'py2exe':
self.build_py2exe(cleanup=cleanup,
create_archive=create_archive)
elif library == 'cx_Freeze':
self.build_cx_freeze(cleanup=cleanup,
create_archive=create_archive)
else:
raise RuntimeError("Unsupported library %s" % library)
def __cleanup(self):
"""Remove old build and dist directories"""
remove_dir("build")
if osp.isdir("dist"):
remove_dir("dist")
remove_dir(self.target_dir)
def __create_archive(self, option):
"""Create a ZIP archive
option:
* 'add': add target directory to a ZIP archive
* 'move': move target directory to a ZIP archive
"""
name = self.target_dir
os.system('zip "%s.zip" -r "%s"' % (name, name))
if option == 'move':
shutil.rmtree(name)
def build_py2exe(self, cleanup=True, compressed=2, optimize=2,
company_name=None, copyright=None, create_archive=None):
"""Build executable with py2exe
cleanup: remove 'build/dist' directories before building distribution
create_archive (requires the executable `zip`):
* None or False: do nothing
* 'add': add target directory to a ZIP archive
* 'move': move target directory to a ZIP archive
"""
from distutils.core import setup
import py2exe # Patching distutils -- analysis:ignore
self._py2exe_is_loaded = True
if cleanup:
self.__cleanup()
sys.argv += ["py2exe"]
options = dict(compressed=compressed, optimize=optimize,
includes=self.includes, excludes=self.excludes,
dll_excludes=self.bin_excludes,
dist_dir=self.target_dir)
windows = dict(name=self.name, description=self.description,
script=self.script, icon_resources=[(0, self.icon)],
bitmap_resources=[], other_resources=[],
dest_base=osp.splitext(self.target_name)[0],
version=self.version,
company_name=company_name, copyright=copyright)
setup(data_files=self.data_files, windows=[windows,],
options=dict(py2exe=options))
if create_archive:
self.__create_archive(create_archive)
def add_executable(self, script, target_name, icon=None):
"""Add executable to the cx_Freeze distribution
Not supported for py2exe"""
from cx_Freeze import Executable
base = None
if script.endswith('.pyw') and os.name == 'nt':
base = 'win32gui'
        self.executables += [Executable(script, base=base, icon=icon,
                                        targetName=target_name)]
def build_cx_freeze(self, cleanup=True, create_archive=None):
"""Build executable with cx_Freeze
cleanup: remove 'build/dist' directories before building distribution
create_archive (requires the executable `zip`):
* None or False: do nothing
* 'add': add target directory to a ZIP archive
* 'move': move target directory to a ZIP archive
"""
assert not self._py2exe_is_loaded, \
"cx_Freeze can't be executed after py2exe"
from cx_Freeze import setup
if cleanup:
self.__cleanup()
sys.argv += ["build"]
build_exe = dict(include_files=to_include_files(self.data_files),
includes=self.includes, excludes=self.excludes,
bin_excludes=self.bin_excludes,
bin_includes=self.bin_includes,
bin_path_includes=self.bin_path_includes,
bin_path_excludes=self.bin_path_excludes,
build_exe=self.target_dir)
setup(name=self.name, version=self.version,
description=self.description, executables=self.executables,
options=dict(build_exe=build_exe))
if create_archive:
self.__create_archive(create_archive)
| gpl-3.0 |
akirasosa/mobile-semantic-segmentation | src/mylib/torch/nn/modules/gauss_rank_transform.py | 1 | 1781 | import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
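# GaussRankTransform ("rank gauss"): stores the sorted reference data together
# with the erfinv of its scaled ranks, so that forward() maps values onto an
# approximately Gaussian scale and invert() maps them back, both through
# piecewise-linear interpolation between the stored reference points.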
class GaussRankTransform(nn.Module):
def __init__(self, data: torch.Tensor, eps=1e-6):
super(GaussRankTransform, self).__init__()
tformed = self._erfinv(data, eps)
data, sort_idx = data.sort()
self.register_buffer('src', data)
self.register_buffer('dst', tformed[sort_idx])
@staticmethod
def _erfinv(data: torch.Tensor, eps):
rank = data.argsort().argsort().float()
rank_scaled = (rank / rank.max() - 0.5) * 2
rank_scaled = rank_scaled.clamp(-1 + eps, 1 - eps)
tformed = rank_scaled.erfinv()
return tformed
def forward(self, x):
return self._transform(x, self.dst, self.src)
def invert(self, x):
return self._transform(x, self.src, self.dst)
def _transform(self, x, src, dst):
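        # Rank-match x against the stored reference values, then linearly
        # interpolate between the two neighbouring reference points.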
pos = src.argsort()[x.argsort().argsort()]
N = len(self.src)
pos[pos >= N] = N - 1
pos[pos - 1 <= 0] = 0
x1 = dst[pos]
x2 = dst[pos - 1]
y1 = src[pos]
y2 = src[pos - 1]
relative = (x - x2) / (x1 - x2)
return (1 - relative) * y2 + relative * y1
# %%
if __name__ == '__main__':
# %%
x = torch.from_numpy(np.random.uniform(low=0, high=1, size=2000))
grt = GaussRankTransform(x)
x_tformed = grt.forward(x)
x_inv = grt.invert(x_tformed)
# %%
print(x)
print(x_inv)
print(grt.dst)
print(torch.sort(x_tformed)[0])
bins = 100
plt.hist(x, bins=bins)
plt.show()
plt.hist(x_inv, bins=bins)
plt.show()
plt.hist(grt.src, bins=bins)
plt.show()
plt.hist(x_tformed, bins=bins)
plt.show()
plt.hist(grt.dst, bins=bins)
plt.show()
| mit |
kirangonella/BuildingMachineLearningSystemsWithPython | ch09/01_fft_based_classifier.py | 24 | 3740 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import numpy as np
from collections import defaultdict
from sklearn.metrics import precision_recall_curve, roc_curve
from sklearn.metrics import auc
from sklearn.cross_validation import ShuffleSplit
from sklearn.metrics import confusion_matrix
from utils import plot_pr, plot_roc, plot_confusion_matrix, GENRE_LIST
from fft import read_fft
genre_list = GENRE_LIST
def train_model(clf_factory, X, Y, name, plot=False):
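    # Train/evaluate the classifier returned by clf_factory on shuffle splits
    # of (X, Y): records train/test error, one-vs-rest precision/recall and
    # ROC curves per genre, and confusion matrices; with plot=True the
    # median-scoring PR/ROC curves are saved per genre.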
labels = np.unique(Y)
cv = ShuffleSplit(
n=len(X), n_iter=1, test_size=0.3, indices=True, random_state=0)
train_errors = []
test_errors = []
scores = []
pr_scores = defaultdict(list)
precisions, recalls, thresholds = defaultdict(
list), defaultdict(list), defaultdict(list)
roc_scores = defaultdict(list)
tprs = defaultdict(list)
fprs = defaultdict(list)
clfs = [] # just to later get the median
cms = []
for train, test in cv:
X_train, y_train = X[train], Y[train]
X_test, y_test = X[test], Y[test]
clf = clf_factory()
clf.fit(X_train, y_train)
clfs.append(clf)
train_score = clf.score(X_train, y_train)
test_score = clf.score(X_test, y_test)
scores.append(test_score)
train_errors.append(1 - train_score)
test_errors.append(1 - test_score)
y_pred = clf.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
cms.append(cm)
for label in labels:
y_label_test = np.asarray(y_test == label, dtype=int)
proba = clf.predict_proba(X_test)
proba_label = proba[:, label]
precision, recall, pr_thresholds = precision_recall_curve(
y_label_test, proba_label)
pr_scores[label].append(auc(recall, precision))
precisions[label].append(precision)
recalls[label].append(recall)
thresholds[label].append(pr_thresholds)
fpr, tpr, roc_thresholds = roc_curve(y_label_test, proba_label)
roc_scores[label].append(auc(fpr, tpr))
tprs[label].append(tpr)
fprs[label].append(fpr)
if plot:
for label in labels:
print("Plotting %s" % genre_list[label])
scores_to_sort = roc_scores[label]
            median = np.argsort(scores_to_sort)[len(scores_to_sort) // 2]  # integer index (py2/py3)
desc = "%s %s" % (name, genre_list[label])
plot_pr(pr_scores[label][median], desc, precisions[label][median],
recalls[label][median], label='%s vs rest' % genre_list[label])
plot_roc(roc_scores[label][median], desc, tprs[label][median],
fprs[label][median], label='%s vs rest' % genre_list[label])
    all_pr_scores = np.asarray(list(pr_scores.values())).flatten()
summary = (np.mean(scores), np.std(scores),
np.mean(all_pr_scores), np.std(all_pr_scores))
print("%.3f\t%.3f\t%.3f\t%.3f\t" % summary)
return np.mean(train_errors), np.mean(test_errors), np.asarray(cms)
def create_model():
from sklearn.linear_model.logistic import LogisticRegression
clf = LogisticRegression()
return clf
if __name__ == "__main__":
X, y = read_fft(genre_list)
train_avg, test_avg, cms = train_model(
create_model, X, y, "Log Reg FFT", plot=True)
cm_avg = np.mean(cms, axis=0)
cm_norm = cm_avg / np.sum(cm_avg, axis=0)
plot_confusion_matrix(cm_norm, genre_list, "fft",
"Confusion matrix of an FFT based classifier")
| mit |
pombredanne/dask | dask/dataframe/tests/test_io.py | 1 | 25136 | import gzip
import pandas as pd
import numpy as np
import pandas.util.testing as tm
import os
import dask
from operator import getitem
import pytest
from toolz import valmap
import tempfile
import shutil
from time import sleep
import threading
import dask.array as da
import dask.dataframe as dd
from dask.dataframe.io import (read_csv, file_size, dataframe_from_ctable,
from_array, from_bcolz, infer_header, from_dask_array)
from dask.compatibility import StringIO
from dask.utils import filetext, tmpfile, ignoring
from dask.async import get_sync
########
# CSVS #
########
text = """
name,amount
Alice,100
Bob,-200
Charlie,300
Dennis,400
Edith,-500
Frank,600
""".strip()
def test_read_csv():
with filetext(text) as fn:
f = dd.read_csv(fn, chunkbytes=30)
assert list(f.columns) == ['name', 'amount']
assert f.npartitions > 1
result = f.compute(get=dask.get).sort('name')
assert (result.values == pd.read_csv(fn).sort('name').values).all()
def test_read_gzip_csv():
with filetext(text.encode(), open=gzip.open) as fn:
f = dd.read_csv(fn, chunkbytes=30, compression='gzip')
assert list(f.columns) == ['name', 'amount']
assert f.npartitions > 1
result = f.compute(get=dask.get).sort('name')
assert (result.values == pd.read_csv(fn, compression='gzip').sort('name').values).all()
def test_file_size():
counts = (len(text), len(text) + text.count('\n'))
with filetext(text) as fn:
assert file_size(fn) in counts
with filetext(text.encode(), open=gzip.open) as fn:
assert file_size(fn, 'gzip') in counts
def test_read_multiple_csv():
try:
with open('_foo.1.csv', 'w') as f:
f.write(text)
with open('_foo.2.csv', 'w') as f:
f.write(text)
df = dd.read_csv('_foo.*.csv')
assert (len(read_csv('_foo.*.csv').compute()) ==
len(read_csv('_foo.1.csv').compute()) * 2)
finally:
os.remove('_foo.1.csv')
os.remove('_foo.2.csv')
def normalize_text(s):
return '\n'.join(map(str.strip, s.strip().split('\n')))
def test_consistent_dtypes():
text = normalize_text("""
name,amount
Alice,100.5
Bob,-200.5
Charlie,300
Dennis,400
Edith,-500
Frank,600
""")
with filetext(text) as fn:
df = dd.read_csv(fn, chunkbytes=30)
assert isinstance(df.amount.sum().compute(), float)
def test_infer_header():
with filetext('name,val\nAlice,100\nNA,200') as fn:
assert infer_header(fn) == True
with filetext('Alice,100\nNA,200') as fn:
assert infer_header(fn) == False
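# Test helper: compute dask collections with the synchronous scheduler and
# compare them (index-sorted) against their pandas counterparts.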
def eq(a, b):
if hasattr(a, 'dask'):
a = a.compute(get=dask.async.get_sync)
if hasattr(b, 'dask'):
b = b.compute(get=dask.async.get_sync)
if isinstance(a, pd.DataFrame):
a = a.sort_index()
b = b.sort_index()
tm.assert_frame_equal(a, b)
return True
if isinstance(a, pd.Series):
tm.assert_series_equal(a, b)
return True
assert np.allclose(a, b)
return True
datetime_csv_file = """
name,amount,when
Alice,100,2014-01-01
Bob,200,2014-01-01
Charlie,300,2014-01-01
Dan,400,2014-01-01
""".strip()
def test_read_csv_index():
with filetext(text) as fn:
f = dd.read_csv(fn, chunkbytes=20, index='amount')
result = f.compute(get=get_sync)
assert result.index.name == 'amount'
blocks = dd.DataFrame._get(f.dask, f._keys(), get=get_sync)
for i, block in enumerate(blocks):
if i < len(f.divisions) - 2:
assert (block.index < f.divisions[i + 1]).all()
if i > 0:
assert (block.index >= f.divisions[i]).all()
expected = pd.read_csv(fn).set_index('amount')
eq(result, expected)
def test_usecols():
with filetext(datetime_csv_file) as fn:
df = dd.read_csv(fn, chunkbytes=30, usecols=['when', 'amount'])
expected = pd.read_csv(fn, usecols=['when', 'amount'])
assert (df.compute().values == expected.values).all()
####################
# Arrays and BColz #
####################
def test_from_array():
x = np.arange(10 * 3).reshape(10, 3)
d = dd.from_array(x, chunksize=4)
assert list(d.columns) == ['0', '1', '2']
assert d.divisions == (0, 4, 8, 9)
assert (d.compute().values == x).all()
d = dd.from_array(x, chunksize=4, columns=list('abc'))
assert list(d.columns) == ['a', 'b', 'c']
assert d.divisions == (0, 4, 8, 9)
assert (d.compute().values == x).all()
pytest.raises(ValueError, dd.from_array, np.ones(shape=(10, 10, 10)))
def test_from_array_with_record_dtype():
x = np.array([(i, i*10) for i in range(10)],
dtype=[('a', 'i4'), ('b', 'i4')])
d = dd.from_array(x, chunksize=4)
assert list(d.columns) == ['a', 'b']
assert d.divisions == (0, 4, 8, 9)
assert (d.compute().to_records(index=False) == x).all()
def test_from_bcolz_multiple_threads():
bcolz = pytest.importorskip('bcolz')
def check():
t = bcolz.ctable([[1, 2, 3], [1., 2., 3.], ['a', 'b', 'a']],
names=['x', 'y', 'a'])
d = dd.from_bcolz(t, chunksize=2)
assert d.npartitions == 2
assert str(d.dtypes['a']) == 'category'
assert list(d.x.compute(get=get_sync)) == [1, 2, 3]
assert list(d.a.compute(get=get_sync)) == ['a', 'b', 'a']
d = dd.from_bcolz(t, chunksize=2, index='x')
L = list(d.index.compute(get=get_sync))
assert L == [1, 2, 3] or L == [1, 3, 2]
# Names
assert sorted(dd.from_bcolz(t, chunksize=2).dask) == \
sorted(dd.from_bcolz(t, chunksize=2).dask)
assert sorted(dd.from_bcolz(t, chunksize=2).dask) != \
sorted(dd.from_bcolz(t, chunksize=3).dask)
threads = []
for i in range(5):
thread = threading.Thread(target=check)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
def test_from_bcolz():
bcolz = pytest.importorskip('bcolz')
t = bcolz.ctable([[1, 2, 3], [1., 2., 3.], ['a', 'b', 'a']],
names=['x', 'y', 'a'])
d = dd.from_bcolz(t, chunksize=2)
assert d.npartitions == 2
assert str(d.dtypes['a']) == 'category'
assert list(d.x.compute(get=get_sync)) == [1, 2, 3]
assert list(d.a.compute(get=get_sync)) == ['a', 'b', 'a']
d = dd.from_bcolz(t, chunksize=2, index='x')
L = list(d.index.compute(get=get_sync))
assert L == [1, 2, 3] or L == [1, 3, 2]
# Names
assert sorted(dd.from_bcolz(t, chunksize=2).dask) == \
sorted(dd.from_bcolz(t, chunksize=2).dask)
assert sorted(dd.from_bcolz(t, chunksize=2).dask) != \
sorted(dd.from_bcolz(t, chunksize=3).dask)
dsk = dd.from_bcolz(t, chunksize=3).dask
t.append((4, 4., 'b'))
t.flush()
assert sorted(dd.from_bcolz(t, chunksize=2).dask) != \
sorted(dsk)
def test_from_bcolz_filename():
bcolz = pytest.importorskip('bcolz')
with tmpfile('.bcolz') as fn:
t = bcolz.ctable([[1, 2, 3], [1., 2., 3.], ['a', 'b', 'a']],
names=['x', 'y', 'a'],
rootdir=fn)
t.flush()
d = dd.from_bcolz(fn, chunksize=2)
assert list(d.x.compute()) == [1, 2, 3]
def test_from_bcolz_column_order():
bcolz = pytest.importorskip('bcolz')
t = bcolz.ctable([[1, 2, 3], [1., 2., 3.], ['a', 'b', 'a']],
names=['x', 'y', 'a'])
df = dd.from_bcolz(t, chunksize=2)
assert list(df.loc[0].compute().index) == ['x', 'y', 'a']
def test_skipinitialspace():
text = normalize_text("""
name, amount
Alice,100
Bob,-200
Charlie,300
Dennis,400
Edith,-500
Frank,600
""")
with filetext(text) as fn:
df = dd.read_csv(fn, skipinitialspace=True, chunkbytes=20)
assert 'amount' in df.columns
assert df.amount.max().compute() == 600
def test_consistent_dtypes_in_multiple_files():
text1 = normalize_text("""
name,amount
Alice,100
Bob,-200
Charlie,300
""")
text2 = normalize_text("""
name,amount
1,400
2,-500
Frank,600
""")
try:
with open('_foo.1.csv', 'w') as f:
f.write(text1)
with open('_foo.2.csv', 'w') as f:
f.write(text2)
df = dd.read_csv('_foo.*.csv', chunkbytes=25)
assert df.amount.max().compute() == 600
finally:
        os.remove('_foo.1.csv')
        os.remove('_foo.2.csv')
@pytest.mark.slow
def test_compression_multiple_files():
tdir = tempfile.mkdtemp()
try:
f = gzip.open(os.path.join(tdir, 'a.csv.gz'), 'wb')
f.write(text.encode())
f.close()
f = gzip.open(os.path.join(tdir, 'b.csv.gz'), 'wb')
f.write(text.encode())
f.close()
df = dd.read_csv(os.path.join(tdir, '*.csv.gz'), compression='gzip')
assert len(df.compute()) == (len(text.split('\n')) - 1) * 2
finally:
shutil.rmtree(tdir)
def test_empty_csv_file():
with filetext('a,b') as fn:
df = dd.read_csv(fn, header=0)
assert len(df.compute()) == 0
assert list(df.columns) == ['a', 'b']
def test_from_pandas_dataframe():
a = list('aaaaaaabbbbbbbbccccccc')
df = pd.DataFrame(dict(a=a, b=np.random.randn(len(a))),
index=pd.date_range(start='20120101', periods=len(a)))
ddf = dd.from_pandas(df, 3)
assert len(ddf.dask) == 3
assert len(ddf.divisions) == len(ddf.dask) + 1
assert type(ddf.divisions[0]) == type(df.index[0])
tm.assert_frame_equal(df, ddf.compute())
def test_from_pandas_small():
df = pd.DataFrame({'x': [1, 2, 3]})
for i in [1, 2, 30]:
a = dd.from_pandas(df, i)
assert len(a.compute()) == 3
assert a.divisions[0] == 0
assert a.divisions[-1] == 2
def test_from_pandas_series():
n = 20
s = pd.Series(np.random.randn(n),
index=pd.date_range(start='20120101', periods=n))
ds = dd.from_pandas(s, 3)
assert len(ds.dask) == 3
assert len(ds.divisions) == len(ds.dask) + 1
assert type(ds.divisions[0]) == type(s.index[0])
tm.assert_series_equal(s, ds.compute())
def test_from_pandas_non_sorted():
df = pd.DataFrame({'x': [1, 2, 3]}, index=[3, 1, 2])
ddf = dd.from_pandas(df, npartitions=2, sort=False)
assert not ddf.known_divisions
eq(df, ddf)
def test_DataFrame_from_dask_array():
x = da.ones((10, 3), chunks=(4, 2))
df = from_dask_array(x, ['a', 'b', 'c'])
assert list(df.columns) == ['a', 'b', 'c']
assert list(df.divisions) == [0, 4, 8, 9]
assert (df.compute(get=get_sync).values == x.compute(get=get_sync)).all()
# dd.from_array should re-route to from_dask_array
df2 = dd.from_array(x, columns=['a', 'b', 'c'])
assert df2.columns == df.columns
assert df2.divisions == df.divisions
def test_Series_from_dask_array():
x = da.ones(10, chunks=4)
ser = from_dask_array(x, 'a')
assert ser.name == 'a'
assert list(ser.divisions) == [0, 4, 8, 9]
assert (ser.compute(get=get_sync).values == x.compute(get=get_sync)).all()
ser = from_dask_array(x)
assert ser.name is None
# dd.from_array should re-route to from_dask_array
ser2 = dd.from_array(x)
assert eq(ser, ser2)
def test_from_dask_array_raises():
x = da.ones((3, 3, 3), chunks=2)
pytest.raises(ValueError, lambda: from_dask_array(x))
x = da.ones((10, 3), chunks=(3, 3))
pytest.raises(ValueError, lambda: from_dask_array(x)) # no columns
# Not enough columns
pytest.raises(ValueError, lambda: from_dask_array(x, columns=['a']))
try:
from_dask_array(x, columns=['hello'])
except Exception as e:
assert 'hello' in str(e)
assert '3' in str(e)
def test_from_dask_array_struct_dtype():
x = np.array([(1, 'a'), (2, 'b')], dtype=[('a', 'i4'), ('b', 'object')])
y = da.from_array(x, chunks=(1,))
df = dd.from_dask_array(y)
assert tuple(df.columns) == y.dtype.names
eq(df, pd.DataFrame(x))
eq(dd.from_dask_array(y, columns=['b', 'a']),
pd.DataFrame(x, columns=['b', 'a']))
def test_to_castra():
pytest.importorskip('castra')
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [2, 3, 4, 5]},
index=pd.Index([1., 2., 3., 4.], name='ind'))
a = dd.from_pandas(df, 2)
c = a.to_castra()
b = c.to_dask()
try:
tm.assert_frame_equal(df, c[:])
tm.assert_frame_equal(b.compute(), df)
finally:
c.drop()
c = a.to_castra(categories=['x'])
try:
assert c[:].dtypes['x'] == 'category'
finally:
c.drop()
c = a.to_castra(sorted_index_column='y')
try:
tm.assert_frame_equal(c[:], df.set_index('y'))
finally:
c.drop()
dsk, keys = a.to_castra(compute=False)
assert isinstance(dsk, dict)
assert isinstance(keys, list)
c, last = keys
assert last[1] == a.npartitions - 1
def test_from_castra():
pytest.importorskip('castra')
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [2, 3, 4, 5]},
index=pd.Index([1., 2., 3., 4.], name='ind'))
a = dd.from_pandas(df, 2)
c = a.to_castra()
with_castra = dd.from_castra(c)
with_fn = dd.from_castra(c.path)
with_columns = dd.from_castra(c, 'x')
try:
tm.assert_frame_equal(df, with_castra.compute())
tm.assert_frame_equal(df, with_fn.compute())
tm.assert_series_equal(df.x, with_columns.compute())
finally:
        # Calling c.drop() here races with the drop triggered by
        # `with_fn.__del__`. Manually `del`-ing both avoids this.
del with_fn, c
def test_from_castra_with_selection():
""" Optimizations fuse getitems with load_partitions
We used to use getitem for both column access and selections
"""
pytest.importorskip('castra')
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [2, 3, 4, 5]},
index=pd.Index([1., 2., 3., 4.], name='ind'))
a = dd.from_pandas(df, 2)
b = dd.from_castra(a.to_castra())
assert eq(b[b.y > 3].x, df[df.y > 3].x)
def test_to_hdf():
pytest.importorskip('tables')
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [1, 2, 3, 4]}, index=[1., 2., 3., 4.])
a = dd.from_pandas(df, 2)
with tmpfile('h5') as fn:
a.to_hdf(fn, '/data')
out = pd.read_hdf(fn, '/data')
tm.assert_frame_equal(df, out[:])
with tmpfile('h5') as fn:
a.x.to_hdf(fn, '/data')
out = pd.read_hdf(fn, '/data')
tm.assert_series_equal(df.x, out[:])
a = dd.from_pandas(df, 1)
with tmpfile('h5') as fn:
a.to_hdf(fn, '/data')
def test_read_hdf():
pytest.importorskip('tables')
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [1, 2, 3, 4]}, index=[1., 2., 3., 4.])
with tmpfile('h5') as fn:
df.to_hdf(fn, '/data')
try:
dd.read_hdf(fn, '/data', chunksize=2)
assert False
except TypeError as e:
assert "format='table'" in str(e)
with tmpfile('h5') as fn:
df.to_hdf(fn, '/data', format='table')
a = dd.read_hdf(fn, '/data', chunksize=2)
assert a.npartitions == 2
tm.assert_frame_equal(a.compute(), df)
tm.assert_frame_equal(
dd.read_hdf(fn, '/data', chunksize=2, start=1, stop=3).compute(),
pd.read_hdf(fn, '/data', start=1, stop=3))
assert sorted(dd.read_hdf(fn, '/data').dask) == \
sorted(dd.read_hdf(fn, '/data').dask)
def test_to_csv():
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [1, 2, 3, 4]}, index=[1., 2., 3., 4.])
for npartitions in [1, 2]:
a = dd.from_pandas(df, npartitions)
with tmpfile('csv') as fn:
a.to_csv(fn)
result = pd.read_csv(fn, index_col=0)
tm.assert_frame_equal(result, df)
@pytest.mark.xfail
def test_to_csv_gzip():
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [1, 2, 3, 4]}, index=[1., 2., 3., 4.])
for npartitions in [1, 2]:
a = dd.from_pandas(df, npartitions)
with tmpfile('csv') as fn:
a.to_csv(fn, compression='gzip')
result = pd.read_csv(fn, index_col=0, compression='gzip')
tm.assert_frame_equal(result, df)
def test_to_csv_series():
s = pd.Series([1, 2, 3], index=[10, 20, 30], name='foo')
a = dd.from_pandas(s, 2)
with tmpfile('csv') as fn:
with tmpfile('csv') as fn2:
a.to_csv(fn)
s.to_csv(fn2)
with open(fn) as f:
adata = f.read()
with open(fn2) as f:
sdata = f.read()
assert adata == sdata
def test_read_csv_with_nrows():
with filetext(text) as fn:
f = dd.read_csv(fn, nrows=3)
assert list(f.columns) == ['name', 'amount']
assert f.npartitions == 1
assert eq(dd.read_csv(fn, nrows=3), pd.read_csv(fn, nrows=3))
def test_read_csv_raises_on_no_files():
try:
dd.read_csv('21hflkhfisfshf.*.csv')
assert False
except Exception as e:
assert "21hflkhfisfshf.*.csv" in str(e)
def test_read_csv_has_deterministic_name():
with filetext(text) as fn:
a = dd.read_csv(fn)
b = dd.read_csv(fn)
assert a._name == b._name
assert sorted(a.dask.keys()) == sorted(b.dask.keys())
assert isinstance(a._name, str)
c = dd.read_csv(fn, skiprows=1, na_values=[0])
assert a._name != c._name
def test_multiple_read_csv_has_deterministic_name():
try:
with open('_foo.1.csv', 'w') as f:
f.write(text)
with open('_foo.2.csv', 'w') as f:
f.write(text)
a = dd.read_csv('_foo.*.csv')
b = dd.read_csv('_foo.*.csv')
assert sorted(a.dask.keys()) == sorted(b.dask.keys())
finally:
os.remove('_foo.1.csv')
os.remove('_foo.2.csv')
@pytest.mark.slow
def test_read_csv_of_modified_file_has_different_name():
with filetext(text) as fn:
mtime = os.path.getmtime(fn)
sleep(1)
a = dd.read_csv(fn)
sleep(1)
with open(fn, 'a') as f:
f.write('\nGeorge,700')
os.fsync(f)
b = dd.read_csv(fn)
assert sorted(a.dask) != sorted(b.dask)
def test_to_bag():
pytest.importorskip('dask.bag')
a = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [2, 3, 4, 5]},
index=pd.Index([1., 2., 3., 4.], name='ind'))
ddf = dd.from_pandas(a, 2)
assert ddf.to_bag().compute(get=get_sync) == list(a.itertuples(False))
assert ddf.to_bag(True).compute(get=get_sync) == list(a.itertuples(True))
assert ddf.x.to_bag(True).compute(get=get_sync) == list(a.x.iteritems())
assert ddf.x.to_bag().compute(get=get_sync) == list(a.x)
def test_csv_expands_dtypes():
with filetext(text) as fn:
a = dd.read_csv(fn, chunkbytes=30, dtype={})
a_kwargs = list(a.dask.values())[0][-2]
b = dd.read_csv(fn, chunkbytes=30)
b_kwargs = list(b.dask.values())[0][-2]
assert a_kwargs['dtype'] == b_kwargs['dtype']
a = dd.read_csv(fn, chunkbytes=30, dtype={'amount': float})
a_kwargs = list(a.dask.values())[0][-2]
assert a_kwargs['dtype']['amount'] == float
def test_report_dtype_correction_on_csvs():
text = 'numbers,names\n'
for i in range(1000):
text += '1,foo\n'
text += '1.5,bar\n'
with filetext(text) as fn:
try:
dd.read_csv(fn).compute(get=get_sync)
assert False
except ValueError as e:
assert "'numbers': 'float64'" in str(e)
def test_hdf_globbing():
pytest.importorskip('tables')
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [1, 2, 3, 4]}, index=[1., 2., 3., 4.])
tdir = tempfile.mkdtemp()
try:
df.to_hdf(os.path.join(tdir, 'one.h5'), '/foo/data', format='table')
df.to_hdf(os.path.join(tdir, 'two.h5'), '/bar/data', format='table')
df.to_hdf(os.path.join(tdir, 'two.h5'), '/foo/data', format='table')
with dask.set_options(get=dask.get):
res = dd.read_hdf(os.path.join(tdir, 'one.h5'), '/*/data',
chunksize=2)
assert res.npartitions == 2
tm.assert_frame_equal(res.compute(), df)
res = dd.read_hdf(os.path.join(tdir, 'one.h5'), '/*/data',
chunksize=2, start=1, stop=3)
expected = pd.read_hdf(os.path.join(tdir, 'one.h5'), '/foo/data',
start=1, stop=3)
tm.assert_frame_equal(res.compute(), expected)
res = dd.read_hdf(os.path.join(tdir, 'two.h5'), '/*/data', chunksize=2)
assert res.npartitions == 2 + 2
tm.assert_frame_equal(res.compute(), pd.concat([df] * 2))
res = dd.read_hdf(os.path.join(tdir, '*.h5'), '/foo/data', chunksize=2)
assert res.npartitions == 2 + 2
tm.assert_frame_equal(res.compute(), pd.concat([df] * 2))
res = dd.read_hdf(os.path.join(tdir, '*.h5'), '/*/data', chunksize=2)
assert res.npartitions == 2 + 2 + 2
tm.assert_frame_equal(res.compute(), pd.concat([df] * 3))
finally:
shutil.rmtree(tdir)
def test_index_col():
with filetext(text) as fn:
try:
f = read_csv(fn, chunkbytes=30, index_col='name')
assert False
except ValueError as e:
assert 'set_index' in str(e)
timeseries = """
Date,Open,High,Low,Close,Volume,Adj Close
2015-08-28,198.50,199.839996,197.919998,199.240005,143298900,199.240005
2015-08-27,197.020004,199.419998,195.210007,199.160004,266244700,199.160004
2015-08-26,192.080002,194.789993,188.369995,194.679993,328058100,194.679993
2015-08-25,195.429993,195.449997,186.919998,187.229996,353966700,187.229996
2015-08-24,197.630005,197.630005,182.399994,189.550003,478672400,189.550003
2015-08-21,201.729996,203.940002,197.520004,197.630005,328271500,197.630005
2015-08-20,206.509995,208.289993,203.899994,204.009995,185865600,204.009995
2015-08-19,209.089996,210.009995,207.350006,208.279999,167316300,208.279999
2015-08-18,210.259995,210.679993,209.699997,209.929993,70043800,209.929993
""".strip()
def test_read_csv_with_datetime_index_partitions_one():
with filetext(timeseries) as fn:
df = pd.read_csv(fn, index_col=0, header=0, usecols=[0, 4],
parse_dates=['Date'])
# chunkbytes set to explicitly set to single chunk
ddf = dd.read_csv(fn, index='Date', header=0, usecols=[0, 4],
parse_dates=['Date'], chunkbytes=10000000)
eq(df, ddf)
# because fn is so small, by default, this will only be one chunk
ddf = dd.read_csv(fn, index='Date', header=0, usecols=[0, 4],
parse_dates=['Date'])
eq(df, ddf)
def test_read_csv_with_datetime_index_partitions_n():
with filetext(timeseries) as fn:
df = pd.read_csv(fn, index_col=0, header=0, usecols=[0, 4],
parse_dates=['Date'])
# because fn is so small, by default, set chunksize small
ddf = dd.read_csv(fn, index='Date', header=0, usecols=[0, 4],
parse_dates=['Date'], chunkbytes=400)
eq(df, ddf)
def test_from_pandas_with_datetime_index():
with filetext(timeseries) as fn:
df = pd.read_csv(fn, index_col=0, header=0, usecols=[0, 4],
parse_dates=['Date'])
ddf = dd.from_pandas(df, 2)
eq(df, ddf)
@pytest.mark.parametrize('encoding', ['utf-16', 'utf-16-le', 'utf-16-be'])
def test_encoding_gh601(encoding):
ar = pd.Series(range(0, 100))
br = ar % 7
cr = br * 3.3
dr = br / 1.9836
test_df = pd.DataFrame({'a': ar, 'b': br, 'c': cr, 'd': dr})
with tmpfile('.csv') as fn:
test_df.to_csv(fn, encoding=encoding, index=False)
a = pd.read_csv(fn, encoding=encoding)
d = dd.read_csv(fn, encoding=encoding, chunkbytes=1000)
d = d.compute()
d.index = range(len(d.index))
assert eq(d, a)
def test_read_hdf_doesnt_segfault():
pytest.importorskip('tables')
with tmpfile('h5') as fn:
N = 40
df = pd.DataFrame(np.random.randn(N, 3))
with pd.HDFStore(fn, mode='w') as store:
store.append('/x', df)
ddf = dd.read_hdf(fn, '/x', chunksize=2)
assert len(ddf) == N
def test_read_csv_header_issue_823():
text = '''a b c\n1 2 3\n4 5 6'''.replace(' ', '\t')
with filetext(text) as fn:
df = dd.read_csv(fn, sep='\t')
eq(df, pd.read_csv(fn, sep='\t'))
df = dd.read_csv(fn, delimiter='\t')
eq(df, pd.read_csv(fn, delimiter='\t'))
| bsd-3-clause |
BigDataforYou/movie_recommendation_workshop_1 | big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/io/tests/test_clipboard.py | 2 | 4012 | # -*- coding: utf-8 -*-
import numpy as np
from numpy.random import randint
import nose
import pandas as pd
from pandas import DataFrame
from pandas import read_clipboard
from pandas import get_option
from pandas.util import testing as tm
from pandas.util.testing import makeCustomDataframe as mkdf, disabled
try:
import pandas.util.clipboard # noqa
except OSError:
raise nose.SkipTest("no clipboard found")
@disabled
class TestClipboard(tm.TestCase):
@classmethod
def setUpClass(cls):
super(TestClipboard, cls).setUpClass()
cls.data = {}
cls.data['string'] = mkdf(5, 3, c_idx_type='s', r_idx_type='i',
c_idx_names=[None], r_idx_names=[None])
cls.data['int'] = mkdf(5, 3, data_gen_f=lambda *args: randint(2),
c_idx_type='s', r_idx_type='i',
c_idx_names=[None], r_idx_names=[None])
cls.data['float'] = mkdf(5, 3,
data_gen_f=lambda r, c: float(r) + 0.01,
c_idx_type='s', r_idx_type='i',
c_idx_names=[None], r_idx_names=[None])
cls.data['mixed'] = DataFrame({'a': np.arange(1.0, 6.0) + 0.01,
'b': np.arange(1, 6),
'c': list('abcde')})
# Test columns exceeding "max_colwidth" (GH8305)
_cw = get_option('display.max_colwidth') + 1
cls.data['colwidth'] = mkdf(5, 3, data_gen_f=lambda *args: 'x' * _cw,
c_idx_type='s', r_idx_type='i',
c_idx_names=[None], r_idx_names=[None])
# Test GH-5346
max_rows = get_option('display.max_rows')
cls.data['longdf'] = mkdf(max_rows + 1, 3,
data_gen_f=lambda *args: randint(2),
c_idx_type='s', r_idx_type='i',
c_idx_names=[None], r_idx_names=[None])
# Test for non-ascii text: GH9263
cls.data['nonascii'] = pd.DataFrame({'en': 'in English'.split(),
'es': 'en español'.split()})
cls.data_types = list(cls.data.keys())
@classmethod
def tearDownClass(cls):
super(TestClipboard, cls).tearDownClass()
del cls.data_types, cls.data
def check_round_trip_frame(self, data_type, excel=None, sep=None):
data = self.data[data_type]
data.to_clipboard(excel=excel, sep=sep)
if sep is not None:
result = read_clipboard(sep=sep, index_col=0)
else:
result = read_clipboard()
tm.assert_frame_equal(data, result, check_dtype=False)
def test_round_trip_frame_sep(self):
for dt in self.data_types:
self.check_round_trip_frame(dt, sep=',')
def test_round_trip_frame_string(self):
for dt in self.data_types:
self.check_round_trip_frame(dt, excel=False)
def test_round_trip_frame(self):
for dt in self.data_types:
self.check_round_trip_frame(dt)
def test_read_clipboard_infer_excel(self):
from textwrap import dedent
from pandas.util.clipboard import clipboard_set
text = dedent("""
John James Charlie Mingus
1 2
4 Harry Carney
""".strip())
clipboard_set(text)
df = pd.read_clipboard()
# excel data is parsed correctly
self.assertEqual(df.iloc[1][1], 'Harry Carney')
# having diff tab counts doesn't trigger it
text = dedent("""
a\t b
1 2
3 4
""".strip())
clipboard_set(text)
res = pd.read_clipboard()
text = dedent("""
a b
1 2
3 4
""".strip())
clipboard_set(text)
exp = pd.read_clipboard()
tm.assert_frame_equal(res, exp)
| mit |
google-research/google-research | stacked_capsule_autoencoders/eval_mnist_model.py | 1 | 4762 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Evaluation script."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path as osp
import pdb
import sys
import traceback
from absl import flags
from absl import logging
from monty.collections import AttrDict
import sklearn.cluster
import tensorflow.compat.v1 as tf
from stacked_capsule_autoencoders.capsules.configs import data_config
from stacked_capsule_autoencoders.capsules.configs import model_config
from stacked_capsule_autoencoders.capsules.eval import cluster_classify
from stacked_capsule_autoencoders.capsules.eval import collect_results
from stacked_capsule_autoencoders.capsules.plot import make_tsne_plot
from stacked_capsule_autoencoders.capsules.train import tools
flags.DEFINE_string('snapshot', '', 'Checkpoint file.')
flags.DEFINE_string('tsne_figure_name', 'tsne.png', 'Filename for the TSNE '
'figure. It will be saved in the checkpoint folder.')
# These two flags are necessary for model loading. Don't change them!
flags.DEFINE_string('dataset', 'mnist', 'Don\'t change!')
flags.DEFINE_string('model', 'scae', 'Don\'t change!.')
def _collect_results(sess, tensors, dataset, n_batches):
"""Collects some tensors from many batches."""
to_collect = AttrDict(
prior_pres=tensors.caps_presence_prob,
posterior_pres=tensors.posterior_mixing_probs,
posterior_acc=tensors.posterior_cls_acc,
prior_acc=tensors.prior_cls_acc,
label=dataset['label']
)
vals = collect_results(sess, to_collect, n_batches)
vals.posterior_pres = vals.posterior_pres.sum(1)
return vals
def main(_=None):
FLAGS = flags.FLAGS # pylint: disable=invalid-name,redefined-outer-name
config = FLAGS
FLAGS.__dict__['config'] = config
# Build the graph
with tf.Graph().as_default():
model_dict = model_config.get(FLAGS)
data_dict = data_config.get(FLAGS)
model = model_dict.model
trainset = data_dict.trainset
validset = data_dict.validset
# Optimisation target
validset = tools.maybe_convert_dataset(validset)
trainset = tools.maybe_convert_dataset(trainset)
train_tensors = model(trainset)
valid_tensors = model(validset)
sess = tf.Session()
saver = tf.train.Saver()
saver.restore(sess, FLAGS.snapshot)
valid_results = _collect_results(sess, valid_tensors, validset,
10000 // FLAGS.batch_size)
train_results = _collect_results(sess, train_tensors, trainset,
60000 // FLAGS.batch_size)
results = AttrDict(train=train_results, valid=valid_results)
# Linear classification
print('Linear classification accuracy:')
for k, v in results.items():
print('\t{}: prior={:.04f}, posterior={:.04f}'.format(
k, v.prior_acc.mean(), v.posterior_acc.mean()))
# Unsupervised classification via clustering
print('Bipartite matching classification accuracy:')
for field in 'posterior_pres prior_pres'.split():
kmeans = sklearn.cluster.KMeans(
n_clusters=10,
precompute_distances=True,
n_jobs=-1,
max_iter=1000,
).fit(results.train[field])
train_acc = cluster_classify(results.train[field], results.train.label, 10,
kmeans)
valid_acc = cluster_classify(results.valid[field], results.valid.label, 10,
kmeans)
print('\t{}: train_acc={:.04f}, valid_acc={:.04f}'.format(field, train_acc,
valid_acc))
checkpoint_folder = osp.dirname(FLAGS.snapshot)
figure_filename = osp.join(checkpoint_folder, FLAGS.tsne_figure_name)
  print('Saving TSNE plot at "{}"'.format(figure_filename))
make_tsne_plot(valid_results.posterior_pres, valid_results.label,
figure_filename)
if __name__ == '__main__':
try:
logging.set_verbosity(logging.INFO)
tf.app.run()
except Exception as err: # pylint: disable=broad-except
FLAGS = flags.FLAGS
last_traceback = sys.exc_info()[2]
traceback.print_tb(last_traceback)
print(err)
pdb.post_mortem(last_traceback)
| apache-2.0 |
xuewei4d/scikit-learn | examples/linear_model/plot_lasso_dense_vs_sparse_data.py | 23 | 1844 | """
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that in the case of sparse data the speed is improved.
"""
print(__doc__)
from time import time
from scipy import sparse
from scipy import linalg
from sklearn.datasets import make_regression
from sklearn.linear_model import Lasso
# #############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)
alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
# #############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()
print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))
alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(Xs.toarray(), y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
| bsd-3-clause |
McIntyre-Lab/papers | fear_sem_sd_2015/scripts/ipython_startup.py | 1 | 1592 | from __future__ import division
# Run these command when starting an ipython notebook. This is simply for
# convience, so you don't have to do all of this in every python notebook.
# Standard Import
print("Importing commonly used libraries: os, sys, numpy as np, scipy as sp, pandas as pd, matplotlib as mp, matplotlib.pyplot as plt, datetime as dt")
import os
import sys
import numpy as np
import scipy as sp
import pandas as pd
import matplotlib as mp
import matplotlib.pyplot as plt
import datetime as dt
# Set current date
TODAY = dt.date.today().strftime("%Y%m%d")
# Set up paths
## Get mclab env
### I have an environmental variable MCLAB that points to a share directory that
### I use across all of my machines.
MCLAB = os.getenv('MCLAB')
## Get Current project directory
### This is the project folder on the shared drive.
PROJ = os.path.join(MCLAB, 'cegs_sem_sd_paper')
print("Creating project level variables: MCLAB = {}, PROJ = {}, TODAY = {}".format(MCLAB, PROJ, TODAY))
## Add the mclib_Python libraries to PYTHONPATH
### You can point to any copy of this library. I like to keep a copy for each
### project so I know exactly what has been run.
sys.path.append(os.path.join(PROJ, 'scripts/mclib_Python'))
## Add project level library
# Run IPython Magics
from IPython import get_ipython
ipython = get_ipython()
## Have matplotlib plot inline
ipython.magic("matplotlib inline")
## Turn on autoreload
### This allows you to modify functions and have them automatically reloaded
### when they are called.
ipython.magic("load_ext autoreload")
ipython.magic("autoreload 2")
| lgpl-3.0 |
wenchaodudu/SNLDA | snlda.py | 1 | 4314 | import numpy as np
import itertools
import pickle
from sne import update_theta
from lda import update_variables, dtm_update
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE, MDS
from sklearn.decomposition import LatentDirichletAllocation as LDA, PCA
import progressbar
import pdb
import logging
TOPIC_NUM = 20
DOC_NUM = 0
VOCAB_SIZE = 0
prior_std = np.sqrt(2) / 2
def admm(X, W, k, C, rho, iter_num, init=None):
# initialize variables
TOPIC_NUM = k
DOC_NUM, VOCAB_SIZE = X.shape
theta_1 = np.random.normal(scale=prior_std, size=(DOC_NUM, TOPIC_NUM))
theta_2 = np.copy(theta_1)
'''
theta_1 = np.zeros((DOC_NUM, TOPIC_NUM))
theta_2 = np.zeros((DOC_NUM, TOPIC_NUM))
'''
q_z = [[] for x in range(DOC_NUM)]
dirichlet_prior = np.full(TOPIC_NUM, 2)
beta = np.random.dirichlet(np.full(VOCAB_SIZE, 2), TOPIC_NUM)
u = np.zeros((DOC_NUM, TOPIC_NUM))
bar = progressbar.ProgressBar()
if init is None:
logging.info("Initializing.")
for y in bar(range(VOCAB_SIZE)):
for x in range(DOC_NUM):
if X[x, y] > 0:
q_z[x].append([y, X[x, y], np.random.dirichlet(dirichlet_prior, 1)[0]])
else:
q_z = init
labeled = C != -1
convergence = []
for it in range(iter_num):
print it
theta_2, obj, g = update_theta(theta_1, theta_2, W, C, 1 / (2 * prior_std**2), u, rho, it)
theta_2 -= np.mean(theta_2, axis=1)[:, np.newaxis]
theta_1, q_z, beta = update_variables(X, theta_1, theta_2, q_z, beta, C, prior_std**2, rho, u, it)
theta_1 -= np.mean(theta_1, axis=1)[:, np.newaxis]
u[labeled] += (theta_1[labeled] - theta_2[labeled])
conv = np.linalg.norm(theta_1[labeled] - theta_2[labeled])
convergence.append(conv)
logging.info(conv)
np.save('theta1', theta_1)
np.save('theta2', theta_2)
np.save('q_z', q_z)
'''
if it % 5 == 4:
solution = (theta_1 + theta_2) / 2
low_dim = TSNE().fit_transform(solution)
plt.scatter(low_dim[:, 0], low_dim[:, 1], c=colors)
plt.show()
'''
print convergence
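    # normalize_exp below is a softmax-style transform: it centers each row of
    # raw topic scores, exponentiates, and renormalizes so each row sums to 1.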
def normalize_exp(arr):
arr -= np.mean(arr, axis=1)[:, np.newaxis]
arr = np.exp(arr)
arr /= np.sum(arr, axis=1)[:, np.newaxis]
return arr
'''
solution_1 = normalize_exp(theta_1)
solution_2 = normalize_exp(theta_2)
solution_1[labeled] = (solution_1[labeled] + solution_2[labeled]) / 2
'''
theta_1[labeled] = (theta_1[labeled] + theta_2[labeled]) / 2
solution = theta_1 - np.mean(theta_1, axis=1)[:, np.newaxis]
return theta_1
def DTM(X, C, k, iter_num, init=None):
# initialize variables
DOC_NUM, VOCAB_SIZE = X.shape
theta = np.random.uniform(low=0, high=1, size=(DOC_NUM, TOPIC_NUM))
theta /= np.sum(theta, axis=1)[:, np.newaxis]
phi = np.random.uniform(low=0, high=1, size=(TOPIC_NUM, VOCAB_SIZE))
phi /= np.sum(phi, axis=1)[:, np.newaxis]
W = np.zeros((DOC_NUM, DOC_NUM))
for k in C:
for x in C[k]:
for y in C[k]:
if y != x:
W[x, y] = 1
W[y, x] = 1
dic = []
bar = progressbar.ProgressBar()
for x in bar(range(DOC_NUM)):
dic.append(np.nonzero(X[x])[1])
theta, phi = dtm_update(X, W, k, theta, phi, iter_num, dic)
return theta
if __name__ == "__main__":
synth = pickle.load(open('synthetic_data'))
data = synth['data']
clusters = synth['clusters']
C = dict()
CC = dict()
labels = np.asarray([int(clusters[x]) if np.random.uniform() < 0.2 else -1 for x in range(data.shape[0])])
categories = set(clusters)
for c in categories:
if c is not None:
C[c] = np.where(clusters==c)[0]
CC[c] = np.where(labels==c)[0]
color = {0: 'g', 1: 'b', 2: 'r', 3: 'y', 4: 'm', 5: 'k', 6:'c', 7:'peru', 8:'coral', 9:'gold'}
colors = [color[x] for x in clusters]
solution = admm(data, CC, 20, labels, 2, 20)
#solution = LDA(n_components=20, learning_method='batch', max_iter=50, n_jobs=2).fit_transform(data)
low_dim = PCA(n_components=2).fit_transform(solution)
plt.scatter(low_dim[:, 0], low_dim[:, 1], c=colors)
plt.show()
| mit |
mohamedhagag/community-addons | stock_card/tests/test_stock_card_negative_stock.py | 1 | 8700 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright 2015 Vauxoo
# Author : Osval Reyes <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, timedelta
import logging
from openerp.tests.common import TransactionCase
_logger = logging.getLogger(__name__)
try:
import pandas as pd
except ImportError:
_logger.debug('Cannot `import pandas`.')
try:
from tabulate import tabulate
except ImportError:
_logger.debug('Cannot `import tabulate`.')
class TestStockCardNegativeStock(TransactionCase):
def setUp(self):
super(TestStockCardNegativeStock, self).setUp()
self.stock_card = self.env['stock.card']
self.sc_product = self.env['stock.card.product']
self.sc_move = self.env['stock.card.move']
self.move = self.env['stock.move']
self.product_id = self.env.ref('stock_card.product01')
self.location_id = self.env.ref('stock.stock_location_stock')
self.partner_id = self.env.ref('base.res_partner_23')
self.purchase_order = self.env['purchase.order']
self.wizard = self.env['stock.transfer_details']
self.wizard_item = self.env['stock.transfer_details_items']
self.transfer_details = self.env['stock.transfer_details']
self.sale_order = self.env['sale.order']
self.delta = 0
self.next_hour = datetime.strptime('2016-01-01 01:00:00',
'%Y-%m-%d %H:%M:%S')
self.inv_ids = [
{ # 1
'do_purchase': True, 'cost': 20, 'qty': 2,
'avg': 20, 'move_value': 40, 'inv_value': 40,
},
{ # 2
'do_purchase': True, 'cost': 40, 'qty': 3,
'avg': 32, 'move_value': 120, 'inv_value': 160,
},
{ # 3
'do_purchase': False, 'cost': 32, 'qty': 1,
'avg': 32, 'move_value': -32, 'inv_value': 128,
},
{ # 4
'do_purchase': True, 'cost': 88, 'qty': 4,
'avg': 60, 'move_value': 352, 'inv_value': 480,
},
{ # 5
'do_purchase': False, 'cost': 60, 'qty': 7,
'avg': 60, 'move_value': -420, 'inv_value': 60,
},
{ # 6
'do_purchase': False, 'cost': 62, 'qty': 2,
'avg': 64, 'move_value': -124, 'inv_value': -64,
},
{ # 7
'do_purchase': True, 'cost': 64, 'qty': 4,
'avg': 64, 'move_value': 256, 'inv_value': 192,
},
{ # 8
'do_purchase': False, 'cost': 51, 'qty': 6,
'avg': 38, 'move_value': -306, 'inv_value': -114,
},
{ # 9
'do_purchase': False, 'cost': 38, 'qty': 2,
'avg': 38, 'move_value': -76, 'inv_value': -190,
},
{ # 10
'do_purchase': True, 'cost': 48, 'qty': 2,
'avg': 48, 'move_value': 96, 'inv_value': -94,
},
{ # 11
'do_purchase': True, 'cost': 56, 'qty': 2,
'avg': 52, 'move_value': 112, 'inv_value': 18,
},
{ # 12
'do_purchase': True, 'cost': 24, 'qty': 4,
'avg': 38, 'move_value': 96, 'inv_value': 114,
},
]
def do_picking(self, picking_ids=False):
for picking_id in picking_ids:
picking_id.action_assign()
picking_id.force_assign()
picking_id.action_confirm()
wizard_id = self.wizard.create({
'picking_id': picking_id.id,
})
for move_id in picking_id.move_lines:
self.wizard_item.create({
'product_id': move_id.product_id.id,
'transfer_id': wizard_id.id,
'sourceloc_id': move_id.location_id.id,
'quantity': move_id.product_qty,
'destinationloc_id': move_id.location_dest_id.id,
'product_uom_id': move_id.product_uom.id,
})
wizard_id.do_detailed_transfer()
self.assertEqual(picking_id.state, 'done')
for move_id in picking_id.move_lines:
self.delta += 1
self.next_hour = datetime.strptime(
'2016-01-01 01:00:00',
'%Y-%m-%d %H:%M:%S') + timedelta(hours=self.delta)
move_id.write({'date': self.next_hour})
def create_purchase_order(self, qty=False, cost=False):
purchase_order_id = self.purchase_order.create({
'partner_id': self.partner_id.id,
'location_id': self.ref('stock.stock_location_stock'),
'pricelist_id': self.ref('purchase.list0'),
'order_line': [(0, 0, {
'name': "%s (qty=%s, cost=%s)" % (
self.product_id.name, qty, cost),
'product_id': self.product_id.id,
'price_unit': cost,
'product_qty': qty,
'date_planned': datetime.now().strftime('%Y-%m-%d'),
})]
})
purchase_order_id.wkf_confirm_order()
purchase_order_id.action_invoice_create()
purchase_order_id.action_picking_create()
self.do_picking(purchase_order_id.picking_ids)
def create_sale_order(self, qty=False, price=False):
sale_order_id = self.sale_order.create({
'partner_id': self.partner_id.id,
'client_order_ref': "Sale Order (qty=%s, price=%s)" % (
str(qty), str(price)),
'order_policy': 'manual',
'order_line': [(0, 0, {
'product_id': self.product_id.id,
'product_uom_qty': qty,
'price_unit': price,
})]
})
sale_order_id.action_button_confirm()
self.do_picking(sale_order_id.picking_ids)
return sale_order_id
def get_stock_valuations(self):
return self.sc_product._stock_card_move_get(self.product_id.id)['res']
def test_01_do_inouts(self):
for expected in self.inv_ids:
qty = expected['qty']
costprice = expected['cost']
if expected['do_purchase']:
self.product_id.write({
'standard_price': expected['cost']
})
self.create_purchase_order(qty=qty, cost=costprice)
else:
self.create_sale_order(qty=qty, price=costprice)
card_lines = self.get_stock_valuations()
df = pd.DataFrame(card_lines)
tbl_sc = tabulate(df, headers='keys', tablefmt='psql')
_logger.info('Gotten Stock Card \n%s', tbl_sc)
df = pd.DataFrame(self.inv_ids)
tbl_sc = tabulate(df, headers='keys', tablefmt='psql')
_logger.info('Expected Stock Card \n%s', tbl_sc)
self.assertEqual(len(self.inv_ids), len(card_lines),
"Both lists should have the same length(=12)")
for expected, succeded in zip(self.inv_ids, card_lines):
self.assertEqual(expected['avg'],
succeded['average'],
"Average Cost %s is not the expected" % expected)
self.assertEqual(expected['cost'],
succeded['cost_unit'],
"Unit Cost %s is not the expected" % expected)
self.assertEqual(expected['inv_value'],
succeded['inventory_valuation'],
"Inventory Value %s does not match" % expected)
self.assertEqual(expected['move_value'],
succeded['move_valuation'],
"Movement Value %s does not match" % expected)
| agpl-3.0 |
valexandersaulys/prudential_insurance_kaggle | venv/lib/python2.7/site-packages/pandas/tests/test_msgpack/test_except.py | 15 | 1043 | #!/usr/bin/env python
# coding: utf-8
import unittest
import nose
import datetime
from pandas.msgpack import packb, unpackb
class DummyException(Exception):
pass
class TestExceptions(unittest.TestCase):
def test_raise_on_find_unsupported_value(self):
import datetime
self.assertRaises(TypeError, packb, datetime.datetime.now())
def test_raise_from_object_hook(self):
def hook(obj):
raise DummyException
self.assertRaises(DummyException, unpackb, packb({}), object_hook=hook)
self.assertRaises(DummyException, unpackb, packb({'fizz': 'buzz'}), object_hook=hook)
self.assertRaises(DummyException, unpackb, packb({'fizz': 'buzz'}), object_pairs_hook=hook)
self.assertRaises(DummyException, unpackb, packb({'fizz': {'buzz': 'spam'}}), object_hook=hook)
self.assertRaises(DummyException, unpackb, packb({'fizz': {'buzz': 'spam'}}), object_pairs_hook=hook)
def test_invalidvalue(self):
self.assertRaises(ValueError, unpackb, b'\xd9\x97#DL_')
| gpl-2.0 |
Akshay0724/scikit-learn | benchmarks/bench_plot_parallel_pairwise.py | 127 | 1270 | # Author: Mathieu Blondel <[email protected]>
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
from sklearn.utils import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels
def plot(func):
random_state = check_random_state(0)
one_core = []
multi_core = []
sample_sizes = range(1000, 6000, 1000)
for n_samples in sample_sizes:
X = random_state.rand(n_samples, 300)
start = time.time()
func(X, n_jobs=1)
one_core.append(time.time() - start)
start = time.time()
func(X, n_jobs=-1)
multi_core.append(time.time() - start)
plt.figure('scikit-learn parallel %s benchmark results' % func.__name__)
plt.plot(sample_sizes, one_core, label="one core")
plt.plot(sample_sizes, multi_core, label="multi core")
plt.xlabel('n_samples')
plt.ylabel('Time (s)')
plt.title('Parallel %s' % func.__name__)
plt.legend()
def euclidean_distances(X, n_jobs):
return pairwise_distances(X, metric="euclidean", n_jobs=n_jobs)
def rbf_kernels(X, n_jobs):
return pairwise_kernels(X, metric="rbf", n_jobs=n_jobs, gamma=0.1)
plot(euclidean_distances)
plot(rbf_kernels)
plt.show()
| bsd-3-clause |
ezequielbrrt/ModeloSIR-AutomataCelular | automata.py | 1 | 1373 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from pylab import *
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
#Population creation
datos = open("Poblacion.txt","w")
datos.close()
datos = open("Estados.txt","w")
datos.close()
#coordinate generation
contador = 0
for x in range(1,2000):
longitud = np.random.uniform(-108,-85,1)
latitud = np.random.uniform(14.5,25,1)
lon = longitud[0]
lat = latitud[0]
    #applying geographic limits
if lat < 16.3 and lon < -92.38:
pass
elif lat < 25 and lat > 18.119 and lon < -90.4 and lon > -97 :
pass
elif lon > -88 and lat > 16:
pass
elif lat > 24 and lon > -91:
pass
elif lat < 23.7 and lon < -105.5:
pass
elif lat < 18.27 and lon < -101:
pass
elif lat > 20.6 and lon > -98:
pass
elif lat < 24.39 and lon < -106.7:
pass
elif lat < 20.4 and lon < -105.3:
pass
elif lat < 18 and lon > -91:
pass
elif lat < 17.399 and lon < -98:
pass
elif lat < 19.7 and lon < -103.6:
pass
else:
contador = contador + 1
datos = open("Poblacion.txt","a")
datos.write(str(lat)+","
+str(lon)+"\n")
datos.close()
#state generation
sano = 0.3 #yellow 0
s = 0.2 #green 1
inf = 0.3 #red 2
r = 0.2 #blue 3
v = np.random.choice(4, contador, p=[sano, s, inf, r])
for i in v:
data = open("Estados.txt","a")
data.write(str(i)+"\n")
data.close() | mit |
redreamality/learning-to-rank | lerot/comparison/test/evaluateData.py | 2 | 7948 | '''
Created on 15 jan. 2015
@author: Jos
'''
from datetime import datetime
import os
import matplotlib.pyplot as plt
import numpy as np
params = {
#'text.latex.preamble': r"\usepackage{lmodern}",
#'text.usetex' : True,
#'font.size' : 11,
#'font.family' : 'lmodern',
#'text.latex.unicode': True,
}
plt.rcParams.update(params)
plt.rcParams['text.latex.preamble']=[r"\usepackage{lmodern}"]
#Options
params = {
'text.usetex' : True,
'font.size' : 11,
'font.family' : 'lmodern',
'text.latex.unicode': True,
}
plt.rcParams.update(params)
from pylab import arange,pi,sin,cos,sqrt
fig_width_pt = 480.0 # Get this from LaTeX using \showthe\columnwidth
inches_per_pt = 1.0/72.27 # Convert pt to inches
golden_mean = (sqrt(5)-1.0)/1.5 # Aesthetic ratio
fig_width = fig_width_pt*inches_per_pt # width in inches
fig_height =fig_width*golden_mean # height in inches
fig_height = 240*inches_per_pt
fig_size = [fig_width,fig_height]
params = {'backend': 'ps',
'axes.labelsize': 20,
'text.fontsize': 20,
'legend.fontsize': 20,
'xtick.labelsize': 12,
'ytick.labelsize': 12,
'text.usetex': True,
'figure.figsize': fig_size,
'axes.facecolor': "white",
}
plt.rcParams.update(params)
EXP='sensitivity'
#EXP='bias'
PATH_DATA = '/Users/aschuth/Documents/lerot/lerot-PM/sigir2015short/fixed/' + EXP
PATH_PLOTS = '/Users/aschuth/Documents/lerot/lerot-PM/sigir2015short/plots/' + EXP
METHODS = ['informational', 'navigational', 'perfect']
#METHODS = ['perfect']
#METHODS = ['random']
MEASURES = ['PM', 'TDM', 'PI', 'PM $n=10$', 'PM $n=100$', 'PM $n=1000$']
MEASURORDER = ['PM $n=10$', 'PM $n=100$', 'PM $n=1000$','TDM', 'PI',]
def evaluate():
output = readData()
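    # Average the per-iteration scores first over the 5 folds, then over the
    # 25 runs, for every iteration k and every measure column (1-6) per method.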
averages = [[[np.average([np.average([output[method][fold][run][k][measure]
for fold in range(5)])
for run in range(25)])
for k in range(1000)]
for measure in range(1, 7)]
for method in range(len(METHODS))]
for i, average in enumerate(averages):
method = METHODS[i]
std = [[np.std(np.array([output[METHODS.index(method)][fold][run][k][measure]
for fold in range(5)
for run in range(25)]))
for k in range(1000)]
for measure in range(1, 7)]
visualizeError(average, MEASURES, std, imageName=method, show=False,
x_range=1000)
def get_files(path):
files = []
for i in os.listdir(path):
if os.path.isfile(os.path.join(path, i)):
files.append(os.path.join(path, i))
else:
files += get_files(os.path.join(path, i))
return files
def readData(path=PATH_DATA, methods=METHODS):
'''
OUTPUT:
- list containing a list for each method
containing a list for each fold
containing a list for each run
      containing a list of iteration, probabilistic_multileave,
teamdraft_multi, probabilistic_non_bin_multi, probabilistic_inter
'''
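    # e.g. output[method][fold][run][k] is the k-th logged line of that run,
    # parsed into floats (the iteration number followed by the scores of the
    # individual multileaving methods).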
output = []
allfiles = get_files(path)
for m in methods:
files = [f for f in allfiles if m in f and "out.txt" in f]
output_method = []
print m, files
for f in files:
print(f)
with open(f, "r") as myfile:
output_file = []
output_run = []
for line in myfile.readlines():
if "RUN" in line:
output_run = []
elif line in ['\n', '\r\n']:
output_file.append(output_run)
elif 'probabilistic' in line:
pass
else:
output_run.append([float(l) for l in line.split()])
output_method.append(output_file)
output.append(output_method)
return output
def get_significance(mean_1, mean_2, std_1, std_2, n):
significance = "-"
ste_1 = std_1 / np.sqrt(n)
ste_2 = std_2 / np.sqrt(n)
t = (mean_1 - mean_2) / np.sqrt(ste_1 ** 2 + ste_2 ** 2)
if mean_1 > mean_2:
# treatment is worse than baseline
# values used are for 120 degrees of freedom
# (http://changingminds.org/explanations/research/analysis/
# t-test_table.htm)
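        # (for ~120 degrees of freedom, |t| >= 1.98 corresponds roughly to
        # two-tailed p < 0.05 and |t| >= 2.62 to p < 0.01)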
if abs(t) >= 2.62:
significance = "\dubbelneer"
elif abs(t) >= 1.98:
significance = "\enkelneer"
else:
if abs(t) >= 2.62:
significance = "\dubbelop"
elif abs(t) >= 1.98:
significance = "\enkelop"
return significance
def visualizeError(errors, labels, std, path_plots=PATH_PLOTS, imageName='',
show=True, x_range=None, y_range=None):
'''
Show and save a graph of the errors over time
ARGS:
    - errors: list of lists of errors: for each method, a list of the errors
over time
- labels: list of names for the methods
    - path_plots = where to save the data. If None, it won't be saved
- imageName = name of the image if it is saved in the path_plots
'''
fig = plt.figure(facecolor="white")
fig.patch.set_facecolor('white')
plt.hold(True)
colors = [('red', '-'), ('green', '-'), ('blue', '-'), ('orange', '--'), ('orange', '-.'), ('orange', ':')]
valdict = {}
for e, s, l, c in zip(errors, std, labels, colors):
if l == "PM":
continue
valdict[l] = (e, s, c)
vals = {}
for l in MEASURORDER:
e, s, c = valdict[l]
if x_range is not None:
e = e[:x_range]
s = s[:x_range]
x = np.arange(len(e))
e = np.array(e)
n = 50
#plt.errorbar(x[::n], e[x[::n]], yerr=np.array(s)[x[::n]] / 3, ecolor=c[0], fmt='none')
plt.plot(x, e, label=l, color=c[0], ls=c[1])
vals[l] = (e[500], s[500])
#plt.errorbar(x, e, yerr=np.array(s)[x[::n]] / 3, label=l, color=c[0], ls=c[1])
print imageName
for l in MEASURORDER:
if l == "PM":
continue
e, s = vals[l]
sig1 = ""
sig2 = ""
if "PM" in l:
sig1 = get_significance(e, vals["PI"][0], s, vals["PI"][1], 125*2)
sig2 = get_significance(e, vals["TDM"][0], s, vals["TDM"][1], 125*2)
print "%s %.3f \small{(%.2f)} %s %s\t &" % (l, e, s, sig1, sig2)
ax = plt.subplot(111)
ax.patch.set_facecolor('white')
#ax.spines["top"].set_visible(False)
#ax.spines["right"].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.xaxis.grid(False)
ax.yaxis.grid(False)
ax.spines['bottom'].set_color('k')
ax.spines['top'].set_color('k')
ax.spines['right'].set_color('k')
ax.spines['left'].set_color('k')
ax.text(30, .7, "\emph{%s}" % imageName, size=20)
#ax.set_yscale('log')
plt.xlim([0, x_range])
plt.ylim([0, .8])
ax.tick_params(axis='both', which='major', color="k")
plt.ylabel('$E_{bin}$', color="k")
if imageName.lower() in ["perfect", "random"] :
plt.legend(ncol=2, frameon=False, loc=7)
if imageName.lower() in ["informational", "random"]:
plt.xlabel('query impressions', color="k")
if show:
plt.show()
plt.hold(False)
if path_plots is not None:
now = datetime.now()
imageName = 'plot_' + imageName #+ '_'.join([str(now.hour),
# str(now.minute),
#str(now.second)])
fig.tight_layout()
fig.savefig(path_plots + imageName + '.pdf', format='pdf', #transparant=True,
facecolor="white")
if __name__ == '__main__':
evaluate()
| gpl-3.0 |
jorik041/scikit-learn | examples/cluster/plot_adjusted_for_chance_measures.py | 286 | 4353 | """
==========================================================
Adjustment for chance in clustering performance evaluation
==========================================================
The following plots demonstrate the impact of the number of clusters and
number of samples on various clustering performance evaluation metrics.
Non-adjusted measures such as the V-Measure show a dependency between
the number of clusters and the number of samples: the mean V-Measure
of random labeling increases significantly as the number of clusters is
closer to the total number of samples used to compute the measure.
Adjusted for chance measures such as ARI display some random variations
centered around a mean score of 0.0 for any number of samples and
clusters.
Only adjusted measures can hence safely be used as a consensus index
to evaluate the average stability of clustering algorithms for a given
value of k on various overlapping sub-samples of the dataset.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from time import time
from sklearn import metrics
def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=None, n_runs=5, seed=42):
"""Compute score for 2 random uniform cluster labelings.
    Both random labelings have the same number of clusters for each possible
    value in ``n_clusters_range``.
When fixed_n_classes is not None the first labeling is considered a ground
truth class assignment with fixed number of classes.
"""
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(n_clusters_range), n_runs))
if fixed_n_classes is not None:
labels_a = random_labels(low=0, high=fixed_n_classes - 1,
size=n_samples)
for i, k in enumerate(n_clusters_range):
for j in range(n_runs):
if fixed_n_classes is None:
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
score_funcs = [
metrics.adjusted_rand_score,
metrics.v_measure_score,
metrics.adjusted_mutual_info_score,
metrics.mutual_info_score,
]
# 2 independent random clusterings with equal cluster number
n_samples = 100
n_clusters_range = np.linspace(2, n_samples, 10).astype(np.int)
plt.figure(1)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, np.median(scores, axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for 2 random uniform labelings\n"
"with equal number of clusters")
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.legend(plots, names)
plt.ylim(ymin=-0.05, ymax=1.05)
# Random labeling with varying n_clusters against ground class labels
# with fixed number of clusters
n_samples = 1000
n_clusters_range = np.linspace(2, 100, 10).astype(np.int)
n_classes = 10
plt.figure(2)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=n_classes)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, scores.mean(axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for random uniform labeling\n"
"against reference assignment with %d classes" % n_classes)
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.ylim(ymin=-0.05, ymax=1.05)
plt.legend(plots, names)
plt.show()
| bsd-3-clause |
jereze/scikit-learn | examples/decomposition/plot_sparse_coding.py | 247 | 3846 | """
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It can
therefore be seen how much adding different widths of atoms matters and it
therefore motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size, heavier subsampling
is performed in order to stay on the same order of magnitude.
"""
print(__doc__)
import numpy as np
import matplotlib.pylab as pl
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (Mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
x = ((2 / ((np.sqrt(3 * width) * np.pi ** 1 / 4)))
* (1 - ((x - center) ** 2 / width ** 2))
* np.exp((-(x - center) ** 2) / (2 * width ** 2)))
return x
def ricker_matrix(width, resolution, n_components):
"""Dictionary of Ricker (Mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
return D
resolution = 1024
subsampling = 3 # subsampling factor
width = 100
n_components = resolution / subsampling
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
n_components=n_components)
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
n_components=np.floor(n_components / 5))
for w in (10, 50, 100, 500, 1000))]
# Generate a signal
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nozero_coefs)
estimators = [('OMP', 'omp', None, 15), ('Lasso', 'lasso_cd', 2, None), ]
pl.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
('fixed width', 'multiple widths'))):
pl.subplot(1, 2, subplot + 1)
pl.title('Sparse coding against %s dictionary' % title)
pl.plot(y, ls='dotted', label='Original signal')
# Do a wavelet approximation
for title, algo, alpha, n_nonzero in estimators:
coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
transform_alpha=alpha, transform_algorithm=algo)
x = coder.transform(y)
density = len(np.flatnonzero(x))
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x, label='%s: %s nonzero coefs,\n%.2f error'
% (title, density, squared_error))
# Soft thresholding debiasing
coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
transform_alpha=20)
x = coder.transform(y)
_, idx = np.where(x != 0)
x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x,
label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error' %
(len(idx), squared_error))
pl.axis('tight')
pl.legend()
pl.subplots_adjust(.04, .07, .97, .90, .09, .2)
pl.show()
| bsd-3-clause |
shenzebang/scikit-learn | sklearn/decomposition/tests/test_nmf.py | 130 | 6059 | import numpy as np
from scipy import linalg
from sklearn.decomposition import nmf
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
random_state = np.random.mtrand.RandomState(0)
@raises(ValueError)
def test_initialize_nn_input():
# Test NNDSVD behaviour on negative input
nmf._initialize_nmf(-np.ones((2, 2)), 2)
def test_initialize_nn_output():
# Test that NNDSVD does not return negative values
data = np.abs(random_state.randn(10, 10))
for var in (None, 'a', 'ar'):
W, H = nmf._initialize_nmf(data, 10, random_state=0)
assert_false((W < 0).any() or (H < 0).any())
def test_initialize_close():
# Test NNDSVD error
# Test that _initialize_nmf error is less than the standard deviation of
# the entries in the matrix.
A = np.abs(random_state.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10)
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert_true(error <= sdev)
def test_initialize_variants():
# Test NNDSVD variants correctness
# Test that the variants 'a' and 'ar' differ from basic NNDSVD only where
# the basic version has zeros.
data = np.abs(random_state.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, variant=None)
Wa, Ha = nmf._initialize_nmf(data, 10, variant='a')
War, Har = nmf._initialize_nmf(data, 10, variant='ar', random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_true(np.allclose(evl[ref != 0], ref[ref != 0]))
@raises(ValueError)
def test_projgrad_nmf_fit_nn_input():
# Test model fit behaviour on negative input
A = -np.ones((2, 2))
m = nmf.ProjectedGradientNMF(n_components=2, init=None, random_state=0)
m.fit(A)
def test_projgrad_nmf_fit_nn_output():
# Test that the decomposition does not contain negative values
A = np.c_[5 * np.ones(5) - np.arange(1, 6),
5 * np.ones(5) + np.arange(1, 6)]
for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar'):
model = nmf.ProjectedGradientNMF(n_components=2, init=init,
random_state=0)
transf = model.fit_transform(A)
assert_false((model.components_ < 0).any() or
(transf < 0).any())
def test_projgrad_nmf_fit_close():
# Test that the fit is not too far away
pnmf = nmf.ProjectedGradientNMF(5, init='nndsvda', random_state=0)
X = np.abs(random_state.randn(6, 5))
assert_less(pnmf.fit(X).reconstruction_err_, 0.05)
def test_nls_nn_output():
# Test that NLS solver doesn't return negative values
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, -A), A.T, A, 0.001, 100)
assert_false((Ap < 0).any())
def test_nls_close():
# Test that the NLS results should be close
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, A), A.T, np.zeros_like(A),
0.001, 100)
assert_true((np.abs(Ap - A) < 0.01).all())
def test_projgrad_nmf_transform():
# Test that NMF.transform returns close values
# (transform uses scipy.optimize.nnls for now)
A = np.abs(random_state.randn(6, 5))
m = nmf.ProjectedGradientNMF(n_components=5, init='nndsvd', random_state=0)
transf = m.fit_transform(A)
assert_true(np.allclose(transf, m.transform(A), atol=1e-2, rtol=0))
def test_n_components_greater_n_features():
# Smoke test for the case of more components than features.
A = np.abs(random_state.randn(30, 10))
nmf.ProjectedGradientNMF(n_components=15, sparseness='data',
random_state=0).fit(A)
def test_projgrad_nmf_sparseness():
# Test sparseness
# Test that sparsity constraints actually increase sparseness in the
# part where they are applied.
A = np.abs(random_state.randn(10, 10))
m = nmf.ProjectedGradientNMF(n_components=5, random_state=0).fit(A)
data_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='data',
random_state=0).fit(A).data_sparseness_
comp_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='components',
random_state=0).fit(A).comp_sparseness_
assert_greater(data_sp, m.data_sparseness_)
assert_greater(comp_sp, m.comp_sparseness_)
def test_sparse_input():
# Test that sparse matrices are accepted as input
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
T1 = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999).fit_transform(A)
A_sparse = csc_matrix(A)
pg_nmf = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999)
T2 = pg_nmf.fit_transform(A_sparse)
assert_array_almost_equal(pg_nmf.reconstruction_err_,
linalg.norm(A - np.dot(T2, pg_nmf.components_),
'fro'))
assert_array_almost_equal(T1, T2)
# same with sparseness
T2 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A_sparse)
T1 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A)
def test_sparse_transform():
# Test that transform works on sparse data. Issue #2124
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(5, 4))
A[A > 1.0] = 0
A = csc_matrix(A)
model = nmf.NMF(random_state=42)
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
# This solver seems pretty inconsistent
assert_array_almost_equal(A_fit_tr, A_tr, decimal=2)
| bsd-3-clause |
loli/sklearn-ensembletrees | examples/svm/plot_svm_scale_c.py | 26 | 5353 | """
==============================================
Scaling the regularization parameter for SVCs
==============================================
The following example illustrates the effect of scaling the
regularization parameter when using :ref:`svm` for
:ref:`classification <svm_classification>`.
For SVC classification, we are interested in a risk minimization for the
equation:
.. math::
    C \sum_{i=1}^{n} \mathcal{L} (f(x_i), y_i) + \Omega (w)
where
- :math:`C` is used to set the amount of regularization
- :math:`\mathcal{L}` is a `loss` function of our samples
and our model parameters.
- :math:`\Omega` is a `penalty` function of our model parameters
If we consider the loss function to be the individual error per
sample, then the data-fit term, or the sum of the error for each sample, will
increase as we add more samples. The penalization term, however, will not
increase.
When using, for example, :ref:`cross validation <cross_validation>`, to
set the amount of regularization with `C`, there will be a
different number of samples between the main problem and the smaller problems
within the folds of the cross validation.
Since our loss function is dependent on the amount of samples, the latter
will influence the selected value of `C`.
The question that arises is `How do we optimally adjust C to
account for the different number of training samples?`
The figures below are used to illustrate the effect of scaling our
`C` to compensate for the change in the number of samples, in the
case of using an `L1` penalty, as well as the `L2` penalty.
L1-penalty case
-----------------
In the `L1` case, theory says that prediction consistency
(i.e. that under given hypothesis, the estimator
learned predicts as well as a model knowing the true distribution)
is not possible because of the bias of the `L1`. It does say, however,
that model consistency, in terms of finding the right set of non-zero
parameters as well as their signs, can be achieved by scaling
`C`.
L2-penalty case
-----------------
The theory says that in order to achieve prediction consistency, the
penalty parameter should be kept constant
as the number of samples grow.
Simulations
------------
The two figures below plot the values of `C` on the `x-axis` and the
corresponding cross-validation scores on the `y-axis`, for several different
fractions of a generated data-set.
In the `L1` penalty case, the cross-validation-error correlates best with
the test-error, when scaling our `C` with the number of samples, `n`,
which can be seen in the first figure.
For the `L2` penalty case, the best result comes from the case where `C`
is not scaled.
.. topic:: Note:
Two separate datasets are used for the two different plots. The reason
behind this is the `L1` case works better on sparse data, while `L2`
is better suited to the non-sparse case.
"""
print(__doc__)
# Author: Andreas Mueller <[email protected]>
# Jaques Grobler <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC
from sklearn.cross_validation import ShuffleSplit
from sklearn.grid_search import GridSearchCV
from sklearn.utils import check_random_state
from sklearn import datasets
rnd = check_random_state(1)
# set up dataset
n_samples = 100
n_features = 300
# L1 data (only 5 informative features)
X_1, y_1 = datasets.make_classification(n_samples=n_samples,
n_features=n_features, n_informative=5,
random_state=1)
# L2 data: non sparse, but less features
y_2 = np.sign(.5 - rnd.rand(n_samples))
X_2 = rnd.randn(n_samples, n_features / 5) + y_2[:, np.newaxis]
X_2 += 5 * rnd.randn(n_samples, n_features / 5)
clf_sets = [(LinearSVC(penalty='L1', loss='L2', dual=False,
tol=1e-3),
np.logspace(-2.3, -1.3, 10), X_1, y_1),
(LinearSVC(penalty='L2', loss='L2', dual=True,
tol=1e-4),
np.logspace(-4.5, -2, 10), X_2, y_2)]
colors = ['b', 'g', 'r', 'c']
for fignum, (clf, cs, X, y) in enumerate(clf_sets):
# set up the plot for each regressor
plt.figure(fignum, figsize=(9, 10))
for k, train_size in enumerate(np.linspace(0.3, 0.7, 3)[::-1]):
param_grid = dict(C=cs)
# To get nice curve, we need a large number of iterations to
# reduce the variance
grid = GridSearchCV(clf, refit=False, param_grid=param_grid,
cv=ShuffleSplit(n=n_samples, train_size=train_size,
n_iter=250, random_state=1))
grid.fit(X, y)
scores = [x[1] for x in grid.grid_scores_]
scales = [(1, 'No scaling'),
((n_samples * train_size), '1/n_samples'),
]
for subplotnum, (scaler, name) in enumerate(scales):
plt.subplot(2, 1, subplotnum + 1)
plt.xlabel('C')
plt.ylabel('CV Score')
grid_cs = cs * float(scaler) # scale the C's
plt.semilogx(grid_cs, scores, label="fraction %.2f" %
train_size)
plt.title('scaling=%s, penalty=%s, loss=%s' %
(name, clf.penalty, clf.loss))
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
jchodera/MSMs | code/sandbox/tica_kde_svm.py | 3 | 2319 | from sklearn.covariance import EllipticEnvelope
import sklearn.neighbors
from sklearn.svm import OneClassSVM
import os
import numpy as np  # used below (np.concatenate) but missing from the original script
from matplotlib.pyplot import hexbin, plot  # the script otherwise assumes an interactive pylab session
from msmbuilder import example_datasets, cluster, msm, featurizer, lumping, utils, dataset, decomposition
sysname = os.path.split(os.getcwd())[-1]
dt = 0.25
tica_lagtime = 400
regularization_string = "_012"
X0 = dataset.dataset("./tica/tica%d%s.h5" % (tica_lagtime, regularization_string))
slicer = featurizer.FirstSlicer(2)
X = slicer.transform(X0)
Xf0 = np.concatenate(X)
Xf = Xf0[::50]
hexbin(Xf0[:, 0], Xf0[:, 1], bins='log')
svm = OneClassSVM(nu=0.15)
svm.fit(Xf)
y = svm.predict(Xf)
plot(Xf[y==1][:, 0], Xf[y==1][:, 1], 'kx')
plot(Xf[y==-1][:, 0], Xf[y==-1][:, 1], 'wx')
clusterer = cluster.GMM(n_components=3)
yi = map(lambda x: svm.predict(x), X)
from msmbuilder.cluster import MultiSequenceClusterMixin, BaseEstimator
from sklearn.svm import OneClassSVM
class OneClassSVMTrimmer(MultiSequenceClusterMixin, OneClassSVM, BaseEstimator):
def partial_transform(self, traj):
"""Featurize an MD trajectory into a vector space.
Parameters
----------
traj : mdtraj.Trajectory
A molecular dynamics trajectory to featurize.
Returns
-------
features : np.ndarray, dtype=float, shape=(n_samples, n_features)
A featurized trajectory is a 2D array of shape
`(length_of_trajectory x n_features)` where each `features[i]`
vector is computed by applying the featurization function
to the `i`th snapshot of the input trajectory.
See Also
--------
transform : simultaneously featurize a collection of MD trajectories
"""
pass
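        # Left unimplemented in this sandbox script. A hypothetical sketch (an
        # assumption, not the author's implementation) would keep only the
        # frames the fitted one-class SVM labels as inliers, e.g.:
        #     labels = OneClassSVM.predict(self, traj)
        #     return traj[labels == 1]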
def transform(self, traj_list, y=None):
"""Featurize a several trajectories.
Parameters
----------
traj_list : list(mdtraj.Trajectory)
Trajectories to be featurized.
Returns
-------
features : list(np.ndarray), length = len(traj_list)
The featurized trajectories. features[i] is the featurized
version of traj_list[i] and has shape
(n_samples_i, n_features)
"""
return [self.partial_transform(traj) for traj in traj_list]
trimmer = OneClassSVMTrimmer()
trimmer.fit(X[0:10])
| gpl-2.0 |
UltronAI/Deep-Learning | Pattern-Recognition/hw2-Feature-Selection/skfeature/example/test_alpha_investing.py | 1 | 1497 | import scipy.io
from sklearn import cross_validation
from sklearn.metrics import accuracy_score
from skfeature.function.streaming import alpha_investing
from sklearn import svm
def main():
# load data
mat = scipy.io.loadmat('../data/COIL20.mat')
X = mat['X'] # data
X = X.astype(float)
y = mat['Y'] # label
y = y[:, 0]
y = y.astype(float)
n_samples, n_features = X.shape # number of samples and number of features
# split data into 10 folds
ss = cross_validation.KFold(n_samples, n_folds=10, shuffle=True)
# perform evaluation on classification task
clf = svm.LinearSVC() # linear SVM
correct = 0
for train, test in ss:
# obtain the index of selected features
idx = alpha_investing.alpha_investing(X[train], y[train], 0.05, 0.05)
# obtain the dataset on the selected features
selected_features = X[:, idx]
# train a classification model with the selected features on the training dataset
clf.fit(selected_features[train], y[train])
# predict the class labels of test data
y_predict = clf.predict(selected_features[test])
# obtain the classification accuracy on the test data
acc = accuracy_score(y[test], y_predict)
correct = correct + acc
# output the average classification accuracy over all 10 folds
print 'Accuracy:', float(correct)/10
if __name__ == '__main__':
main() | mit |
michaelpacer/dynd-python | dynd/benchmarks/benchrun.py | 8 | 5322 | """
Benchrun is a Python script for defining and running performance benchmarks.
It allows you to run a benchmark for different versions of the code and for
different values of an input parameter, and automatically generates tables
that compare the results.
A benchmark is defined by creating a subclass of Benchmark.
The subclass should define a method run() that executes the code
to be timed and returns the elapsed time in seconds (as a float),
or None if the benchmark should be skipped.
This file was originally taken from https://code.google.com/p/benchrun/ under the MIT License,
but has been modified since.
"""
from __future__ import print_function
import math
import sys
if sys.platform=='win32':
from time import clock
else:
from time import time as clock
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/302478
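# combinations() lazily yields the Cartesian product of its input sequences as
# lists, e.g. combinations([1, 2], ['a', 'b']) -> [1, 'a'], [1, 'b'], [2, 'a'], [2, 'b'].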
def combinations(*seqin):
def rloop(seqin,comb):
if seqin:
for item in seqin[0]:
newcomb = comb + [item]
for item in rloop(seqin[1:],newcomb):
yield item
else:
yield comb
return rloop(seqin,[])
def _mean(n = 10):
def wrap(func):
def wrapper(*args, **kwds):
results = [func(*args, **kwds) for i in range(n)]
return math.fsum(results) / n
return wrapper
return wrap
def mean(callable_or_value):
if callable(callable_or_value):
return _mean()(callable_or_value)
return _mean(callable_or_value)
def _median(n = 10):
def wrap(func):
def wrapper(*args, **kwds):
results = sorted(func(*args, **kwds) for i in range(n))
i = n // 2
if n % 2 == 1:
return results[i]
return (results[i - 1] + results[i]) / 2.0
return wrapper
return wrap
def median(callable_or_value):
if callable(callable_or_value):
return _median()(callable_or_value)
return _median(callable_or_value)
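# `mean` and `median` can decorate a Benchmark subclass's run() method either
# bare (@median, 10 repetitions by default) or with an explicit repetition
# count (@median(5)); the reported time is then the aggregate over repetitions.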
class Benchmark:
sort_by = []
reference = None
def __init__(self):
self.pnames = []
self.pvalues = []
self.results = []
self.results_dict = {}
for pname in self.parameters:
value = getattr(self, pname)
self.pnames.append(pname)
self.pvalues.append(value)
self.pcombos = list(combinations(*self.pvalues))
if self.reference:
self.reference_param = self.reference[0]
self.reference_value = self.reference[1]
def time_all(self):
"""Run benchmark for all versions and parameters."""
for params in self.pcombos:
args = dict(zip(self.pnames, params))
t = self.run(**args)
self.results.append(tuple(params) + (t,))
self.results_dict[tuple(params)] = t
def sort_results(self):
sort_keys = []
for name in self.sort_by:
sort_keys += [self.pnames.index(name)]
for i, name in enumerate(self.pnames):
if i not in sort_keys:
sort_keys += [i]
def key(v):
return list(v[i] for i in sort_keys)
self.results.sort(key=key)
def get_factor(self, pvalues, time):
if not self.reference or not time:
return None
pvalues = list(pvalues)
i = self.pnames.index(self.reference_param)
if pvalues[i] == self.reference_value:
return None
else:
pvalues[i] = self.reference_value
ref = self.results_dict[tuple(pvalues)]
if ref == None:
return None
return ref / time
def print_result(self):
"""Run benchmark for all versions and parameters and print results
in tabular form to the standard output."""
self.time_all()
self.sort_results()
print("=" * 78)
print()
print(self.__class__.__name__)
print(self.__doc__, "\n")
colwidth = 15
reftimes = {}
ts = "seconds"
if self.reference:
ts += " (x faster than " + (str(self.reference_value)) + ")"
print(" ", " ".join([str(r).ljust(colwidth) for r in self.pnames + [ts]]))
print("-"*79)
rows = []
for vals in self.results:
pvalues = vals[:-1]
time = vals[-1]
if time == None:
stime = "(n/a)"
else:
stime = "%.8f" % time
factor = self.get_factor(pvalues, time)
if factor != None:
stime += (" (%.2f)" % factor)
vals = pvalues + (stime,)
row = [str(val).ljust(colwidth) for val in vals]
print(" ", " ".join(row))
print()
def plot_result(self, loglog = False):
import matplotlib
import matplotlib.pyplot
self.time_all()
self.sort_results()
if loglog:
from matplotlib.pyplot import loglog as plot
else:
from matplotlib.pyplot import plot
plot(*zip(*self.results), label = self.__class__.__name__, marker = "o", linestyle = '--', linewidth = 2)
matplotlib.pyplot.xlabel(self.pnames[0])
matplotlib.pyplot.ylabel("seconds")
matplotlib.pyplot.legend(loc = 2, markerscale = 0)
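# Illustrative usage sketch (not part of the original benchrun module): a
# minimal Benchmark subclass. The names below (SortBenchmark, the parameter
# `n` and its values) are made up for the example; `parameters` must list
# attribute names holding the candidate values, and run() returns the elapsed
# time in seconds, or None to skip that combination.
class SortBenchmark(Benchmark):
    """Time sorted() on random lists of different sizes."""
    parameters = ['n']
    n = [1000, 10000, 100000]
    def run(self, n):
        import random
        data = [random.random() for _ in range(n)]
        start = clock()
        sorted(data)
        return clock() - start
if __name__ == '__main__':
    SortBenchmark().print_result()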
| bsd-2-clause |
ky822/scikit-learn | examples/linear_model/plot_lasso_and_elasticnet.py | 249 | 1982 | """
========================================
Lasso and Elastic Net for Sparse Signals
========================================
Estimates Lasso and Elastic-Net regression models on a manually generated
sparse signal corrupted with an additive noise. Estimated coefficients are
compared with the ground-truth.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
###############################################################################
# generate some sparse data to play with
np.random.seed(42)
n_samples, n_features = 50, 200
X = np.random.randn(n_samples, n_features)
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[10:]] = 0 # sparsify coef
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal((n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples / 2], y[:n_samples / 2]
X_test, y_test = X[n_samples / 2:], y[n_samples / 2:]
###############################################################################
# Lasso
from sklearn.linear_model import Lasso
alpha = 0.1
lasso = Lasso(alpha=alpha)
y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)
###############################################################################
# ElasticNet
from sklearn.linear_model import ElasticNet
enet = ElasticNet(alpha=alpha, l1_ratio=0.7)
y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)
plt.plot(enet.coef_, label='Elastic net coefficients')
plt.plot(lasso.coef_, label='Lasso coefficients')
plt.plot(coef, '--', label='original coefficients')
plt.legend(loc='best')
plt.title("Lasso R^2: %f, Elastic Net R^2: %f"
% (r2_score_lasso, r2_score_enet))
plt.show()
| bsd-3-clause |
grlee77/scipy | scipy/spatial/_plotutils.py | 12 | 7057 | import numpy as np
from scipy._lib.decorator import decorator as _decorator
__all__ = ['delaunay_plot_2d', 'convex_hull_plot_2d', 'voronoi_plot_2d']
@_decorator
def _held_figure(func, obj, ax=None, **kw):
import matplotlib.pyplot as plt # type: ignore[import]
if ax is None:
fig = plt.figure()
ax = fig.gca()
return func(obj, ax=ax, **kw)
# As of matplotlib 2.0, the "hold" mechanism is deprecated.
# When matplotlib 1.x is no longer supported, this check can be removed.
was_held = getattr(ax, 'ishold', lambda: True)()
if was_held:
return func(obj, ax=ax, **kw)
try:
ax.hold(True)
return func(obj, ax=ax, **kw)
finally:
ax.hold(was_held)
def _adjust_bounds(ax, points):
margin = 0.1 * points.ptp(axis=0)
xy_min = points.min(axis=0) - margin
xy_max = points.max(axis=0) + margin
ax.set_xlim(xy_min[0], xy_max[0])
ax.set_ylim(xy_min[1], xy_max[1])
@_held_figure
def delaunay_plot_2d(tri, ax=None):
"""
Plot the given Delaunay triangulation in 2-D
Parameters
----------
tri : scipy.spatial.Delaunay instance
Triangulation to plot
ax : matplotlib.axes.Axes instance, optional
Axes to plot on
Returns
-------
fig : matplotlib.figure.Figure instance
Figure for the plot
See Also
--------
Delaunay
matplotlib.pyplot.triplot
Notes
-----
Requires Matplotlib.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.spatial import Delaunay, delaunay_plot_2d
The Delaunay triangulation of a set of random points:
>>> rng = np.random.default_rng()
>>> points = rng.random((30, 2))
>>> tri = Delaunay(points)
Plot it:
>>> _ = delaunay_plot_2d(tri)
>>> plt.show()
"""
if tri.points.shape[1] != 2:
raise ValueError("Delaunay triangulation is not 2-D")
x, y = tri.points.T
ax.plot(x, y, 'o')
ax.triplot(x, y, tri.simplices.copy())
_adjust_bounds(ax, tri.points)
return ax.figure
@_held_figure
def convex_hull_plot_2d(hull, ax=None):
"""
Plot the given convex hull diagram in 2-D
Parameters
----------
hull : scipy.spatial.ConvexHull instance
Convex hull to plot
ax : matplotlib.axes.Axes instance, optional
Axes to plot on
Returns
-------
fig : matplotlib.figure.Figure instance
Figure for the plot
See Also
--------
ConvexHull
Notes
-----
Requires Matplotlib.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.spatial import ConvexHull, convex_hull_plot_2d
The convex hull of a random set of points:
>>> rng = np.random.default_rng()
>>> points = rng.random((30, 2))
>>> hull = ConvexHull(points)
Plot it:
>>> _ = convex_hull_plot_2d(hull)
>>> plt.show()
"""
from matplotlib.collections import LineCollection # type: ignore[import]
if hull.points.shape[1] != 2:
raise ValueError("Convex hull is not 2-D")
ax.plot(hull.points[:,0], hull.points[:,1], 'o')
line_segments = [hull.points[simplex] for simplex in hull.simplices]
ax.add_collection(LineCollection(line_segments,
colors='k',
linestyle='solid'))
_adjust_bounds(ax, hull.points)
return ax.figure
@_held_figure
def voronoi_plot_2d(vor, ax=None, **kw):
"""
Plot the given Voronoi diagram in 2-D
Parameters
----------
vor : scipy.spatial.Voronoi instance
Diagram to plot
ax : matplotlib.axes.Axes instance, optional
Axes to plot on
show_points: bool, optional
Add the Voronoi points to the plot.
show_vertices : bool, optional
Add the Voronoi vertices to the plot.
line_colors : string, optional
Specifies the line color for polygon boundaries
line_width : float, optional
Specifies the line width for polygon boundaries
line_alpha: float, optional
Specifies the line alpha for polygon boundaries
point_size: float, optional
Specifies the size of points
Returns
-------
fig : matplotlib.figure.Figure instance
Figure for the plot
See Also
--------
Voronoi
Notes
-----
Requires Matplotlib.
Examples
--------
    Set of points:
>>> import matplotlib.pyplot as plt
>>> rng = np.random.default_rng()
>>> points = rng.random((10,2))
Voronoi diagram of the points:
>>> from scipy.spatial import Voronoi, voronoi_plot_2d
>>> vor = Voronoi(points)
using `voronoi_plot_2d` for visualisation:
>>> fig = voronoi_plot_2d(vor)
using `voronoi_plot_2d` for visualisation with enhancements:
>>> fig = voronoi_plot_2d(vor, show_vertices=False, line_colors='orange',
... line_width=2, line_alpha=0.6, point_size=2)
>>> plt.show()
"""
from matplotlib.collections import LineCollection
if vor.points.shape[1] != 2:
raise ValueError("Voronoi diagram is not 2-D")
if kw.get('show_points', True):
point_size = kw.get('point_size', None)
ax.plot(vor.points[:,0], vor.points[:,1], '.', markersize=point_size)
if kw.get('show_vertices', True):
ax.plot(vor.vertices[:,0], vor.vertices[:,1], 'o')
line_colors = kw.get('line_colors', 'k')
line_width = kw.get('line_width', 1.0)
line_alpha = kw.get('line_alpha', 1.0)
center = vor.points.mean(axis=0)
ptp_bound = vor.points.ptp(axis=0)
finite_segments = []
infinite_segments = []
for pointidx, simplex in zip(vor.ridge_points, vor.ridge_vertices):
simplex = np.asarray(simplex)
if np.all(simplex >= 0):
finite_segments.append(vor.vertices[simplex])
else:
i = simplex[simplex >= 0][0] # finite end Voronoi vertex
t = vor.points[pointidx[1]] - vor.points[pointidx[0]] # tangent
t /= np.linalg.norm(t)
n = np.array([-t[1], t[0]]) # normal
midpoint = vor.points[pointidx].mean(axis=0)
direction = np.sign(np.dot(midpoint - center, n)) * n
if (vor.furthest_site):
direction = -direction
far_point = vor.vertices[i] + direction * ptp_bound.max()
infinite_segments.append([vor.vertices[i], far_point])
ax.add_collection(LineCollection(finite_segments,
colors=line_colors,
lw=line_width,
alpha=line_alpha,
linestyle='solid'))
ax.add_collection(LineCollection(infinite_segments,
colors=line_colors,
lw=line_width,
alpha=line_alpha,
linestyle='dashed'))
_adjust_bounds(ax, vor.points)
return ax.figure
| bsd-3-clause |
FRBs/DM | frb/galaxies/utils.py | 1 | 4065 | """ Utilities related to FRB galaxies"""
import os
import glob
from IPython import embed
from pkg_resources import resource_filename
import numpy as np
try:
from specdb.specdb import SpecDB
except ImportError:
flg_specdb = False
else:
flg_specdb = True
from astropy.coordinates import SkyCoord
import pandas as pd
from frb import frb
def load_specdb(specdb_file=None):
"""
Automatically load the specDB file from $SPECDB/FRB_specDB.hdf5
Args:
specdb_file (str, optional):
Over-ride the default file
Returns:
specdb.specdb.SpecDB:
"""
if not flg_specdb:
        raise IOError("You must install the specdb package first!")
if specdb_file is None:
if os.getenv('SPECDB') is None:
raise IOError("You must set the SPECDB environmental variable")
specdb_files = glob.glob(os.path.join(os.getenv('SPECDB'), 'FRB_specDB_*.hdf5'))
if len(specdb_files) > 0:
specdb_file = specdb_files[0]
print("Loading spectra from {:s}".format(specdb_file))
else:
raise IOError("There are no FRB_specdb.hdf5 files in your SPECDB folder")
# Load it up
specDB = SpecDB(db_file=specdb_file)
# Return
return specDB
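# Illustrative usage sketch (not executed here): the explicit path below is
# hypothetical, and the default call assumes $SPECDB contains a file matching
# FRB_specDB_*.hdf5.
#
#   specDB = load_specdb()
#   # or point at a specific database file explicitly:
#   specDB = load_specdb(specdb_file='/data/specdb/FRB_specDB_CRAFT.hdf5')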
def list_of_hosts():
"""
Scan through the Repo and generate a list of FRB Host galaxies
Also returns a list of the FRBs
Returns:
list, list:
"""
# FRB files
frb_data = resource_filename('frb', 'data')
frb_files = glob.glob(os.path.join(frb_data, 'FRBs', 'FRB*.json'))
frb_files.sort()
hosts = []
frbs = []
for ifile in frb_files:
# Parse
name = ifile.split('.')[-2]
ifrb = frb.FRB.by_name(name)
host = ifrb.grab_host()
if host is not None:
hosts.append(host)
frbs.append(ifrb)
# Return
return frbs, hosts
def build_table_of_hosts():
"""
Generate a Pandas table of FRB Host galaxy data. These are slurped
from the 'derived', 'photom', and 'neb_lines' dicts of each host object
Warning: As standard, missing values are given NaN in the Pandas table
Be careful!
Note:
RA, DEC are given as RA_host, DEC_host to avoid conflict with the FRB table
Returns:
pd.DataFrame, dict: Table of data on FRB host galaxies, dict of their units
"""
frbs, hosts = list_of_hosts()
nhosts = len(hosts)
# Table
host_tbl = pd.DataFrame({'Host': [host.name for host in hosts]})
frb_names = [host.frb.frb_name for host in hosts]
host_tbl['FRBname'] = frb_names
tbl_units = {}
# Coordinates
coords = SkyCoord([host.coord for host in hosts])
    # Named to facilitate merging with an FRB table
host_tbl['RA_host'] = coords.ra.value
host_tbl['DEC_host'] = coords.dec.value
tbl_units['RA_host'] = 'deg'
tbl_units['DEC_host'] = 'deg'
# FRBs
host_tbl['FRBobj'] = frbs
# Loop on all the main dicts
for attr in ['derived', 'photom', 'neb_lines','offsets','morphology','redshift']:
# Load up the dicts
dicts = [getattr(host, attr) for host in hosts]
# Photometry
all_keys = []
for idict in dicts:
all_keys += list(idict.keys())
#all_keys += list(host.photom.keys())
#
all_keys = np.array(all_keys)
uni_keys = np.unique(all_keys)
# Slurp using Nan's for missing values
tbl_dict = {}
for key in uni_keys:
tbl_dict[key] = np.array([np.nan]*nhosts)
for ss in range(nhosts): #, host in enumerate(hosts):
for pkey in dicts[ss].keys(): #host.photom.keys():
tbl_dict[pkey][ss] = dicts[ss][pkey]
for key in tbl_dict.keys():
# Error check
if key in host_tbl.keys():
raise IOError("Duplicate items!!")
# Set
host_tbl[key] = tbl_dict[key]
tbl_units[key] = 'See galaxies.defs.py'
# Return
return host_tbl, tbl_units
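# Illustrative usage sketch (not executed here); the filter column is only an
# example, and which columns exist depends on the host entries in the repo:
#
#   host_tbl, tbl_units = build_table_of_hosts()
#   northern = host_tbl[host_tbl['DEC_host'] > 0.]
#   print(tbl_units['RA_host'])   # -> 'deg'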
| bsd-3-clause |
UNR-AERIAL/scikit-learn | sklearn/svm/tests/test_bounds.py | 280 | 2541 | import nose
from nose.tools import assert_equal, assert_true
from sklearn.utils.testing import clean_warning_registry
import warnings
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['squared_hinge', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = ('Test l1_min_c loss=%r %s %s %s' %
(loss, X_label, Y_label,
intercept_label))
yield check
def test_l2_deprecation():
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
assert_equal(l1_min_c(dense_X, Y1, "l2"),
l1_min_c(dense_X, Y1, "squared_hinge"))
assert_equal(w[0].category, DeprecationWarning)
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
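    # l1_min_c returns the smallest C for which an L1-penalised model can have
    # a non-zero coefficient; the assertions below check that the fit is fully
    # sparse exactly at min_c and becomes non-sparse just above it.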
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
'log': LogisticRegression(penalty='l1'),
'squared_hinge': LinearSVC(loss='squared_hinge',
penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) == 0).all())
assert_true((np.asarray(clf.intercept_) == 0).all())
clf.C = min_c * 1.01
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
| bsd-3-clause |
yousrabk/mne-python | examples/simulation/plot_simulate_raw_data.py | 7 | 2709 | """
===========================
Generate simulated raw data
===========================
This example generates raw data by repeating a desired source
activation multiple times.
"""
# Authors: Yousra Bekhti <[email protected]>
# Mark Wronkiewicz <[email protected]>
# Eric Larson <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from mne import read_source_spaces, find_events, Epochs, compute_covariance
from mne.io import Raw
from mne.datasets import sample
from mne.simulation import simulate_sparse_stc, simulate_raw
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
trans_fname = data_path + '/MEG/sample/sample_audvis_raw-trans.fif'
src_fname = data_path + '/subjects/sample/bem/sample-oct-6-src.fif'
bem_fname = (data_path +
'/subjects/sample/bem/sample-5120-5120-5120-bem-sol.fif')
# Load real data as the template
raw = Raw(raw_fname).crop(0., 30., copy=False) # 30 sec is enough
##############################################################################
# Generate dipole time series
n_dipoles = 4 # number of dipoles to create
epoch_duration = 2. # duration of each epoch/event
n = 0 # harmonic number
def data_fun(times):
"""Generate time-staggered sinusoids at harmonics of 10Hz"""
global n
n_samp = len(times)
window = np.zeros(n_samp)
start, stop = [int(ii * float(n_samp) / (2 * n_dipoles))
for ii in (2 * n, 2 * n + 1)]
window[start:stop] = 1.
n += 1
data = 25e-9 * np.sin(2. * np.pi * 10. * n * times)
data *= window
return data
times = raw.times[:int(raw.info['sfreq'] * epoch_duration)]
src = read_source_spaces(src_fname)
stc = simulate_sparse_stc(src, n_dipoles=n_dipoles, times=times,
data_fun=data_fun, random_state=0)
# look at our source data
fig, ax = plt.subplots(1)
ax.plot(times, 1e9 * stc.data.T)
ax.set(ylabel='Amplitude (nAm)', xlabel='Time (sec)')
fig.show()
##############################################################################
# Simulate raw data
raw_sim = simulate_raw(raw, stc, trans_fname, src, bem_fname, cov='simple',
iir_filter=[0.2, -0.2, 0.04], ecg=True, blink=True,
n_jobs=2, verbose=True)
raw_sim.plot()
##############################################################################
# Plot evoked data
events = find_events(raw_sim) # only 1 pos, so event number == 1
epochs = Epochs(raw_sim, events, 1, -0.2, epoch_duration)
cov = compute_covariance(epochs, tmax=0., method='empirical') # quick calc
evoked = epochs.average()
evoked.plot_white(cov)
| bsd-3-clause |
giorgiop/scikit-learn | sklearn/ensemble/tests/test_base.py | 2 | 4160 | """
Testing for the base module (sklearn.ensemble.base).
"""
# Authors: Gilles Louppe
# License: BSD 3 clause
import numpy as np
from numpy.testing import assert_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_true
from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble.base import _set_random_states
from sklearn.linear_model import Perceptron
from sklearn.externals.odict import OrderedDict
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.pipeline import Pipeline
from sklearn.feature_selection import SelectFromModel
def test_base():
# Check BaseEnsemble methods.
ensemble = BaggingClassifier(base_estimator=Perceptron(random_state=None),
n_estimators=3)
iris = load_iris()
ensemble.fit(iris.data, iris.target)
ensemble.estimators_ = [] # empty the list and create estimators manually
ensemble._make_estimator()
random_state = np.random.RandomState(3)
ensemble._make_estimator(random_state=random_state)
ensemble._make_estimator(random_state=random_state)
ensemble._make_estimator(append=False)
assert_equal(3, len(ensemble))
assert_equal(3, len(ensemble.estimators_))
assert_true(isinstance(ensemble[0], Perceptron))
assert_equal(ensemble[0].random_state, None)
assert_true(isinstance(ensemble[1].random_state, int))
assert_true(isinstance(ensemble[2].random_state, int))
assert_not_equal(ensemble[1].random_state, ensemble[2].random_state)
def test_base_zero_n_estimators():
# Check that instantiating a BaseEnsemble with n_estimators<=0 raises
# a ValueError.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=0)
iris = load_iris()
assert_raise_message(ValueError,
"n_estimators must be greater than zero, got 0.",
ensemble.fit, iris.data, iris.target)
def test_set_random_states():
# Linear Discriminant Analysis doesn't have random state: smoke test
_set_random_states(LinearDiscriminantAnalysis(), random_state=17)
clf1 = Perceptron(random_state=None)
assert_equal(clf1.random_state, None)
# check random_state is None still sets
_set_random_states(clf1, None)
assert_true(isinstance(clf1.random_state, int))
# check random_state fixes results in consistent initialisation
_set_random_states(clf1, 3)
assert_true(isinstance(clf1.random_state, int))
clf2 = Perceptron(random_state=None)
_set_random_states(clf2, 3)
assert_equal(clf1.random_state, clf2.random_state)
# nested random_state
def make_steps():
return [('sel', SelectFromModel(Perceptron(random_state=None))),
('clf', Perceptron(random_state=None))]
est1 = Pipeline(make_steps())
_set_random_states(est1, 3)
assert_true(isinstance(est1.steps[0][1].estimator.random_state, int))
assert_true(isinstance(est1.steps[1][1].random_state, int))
assert_not_equal(est1.get_params()['sel__estimator__random_state'],
est1.get_params()['clf__random_state'])
    # ensure multiple random_state parameters are invariant to get_params()
# iteration order
class AlphaParamPipeline(Pipeline):
def get_params(self, *args, **kwargs):
params = Pipeline.get_params(self, *args, **kwargs).items()
return OrderedDict(sorted(params))
class RevParamPipeline(Pipeline):
def get_params(self, *args, **kwargs):
params = Pipeline.get_params(self, *args, **kwargs).items()
return OrderedDict(sorted(params, reverse=True))
for cls in [AlphaParamPipeline, RevParamPipeline]:
est2 = cls(make_steps())
_set_random_states(est2, 3)
assert_equal(est1.get_params()['sel__estimator__random_state'],
est2.get_params()['sel__estimator__random_state'])
assert_equal(est1.get_params()['clf__random_state'],
est2.get_params()['clf__random_state'])
| bsd-3-clause |
DiamondLightSource/auto_tomo_calibration-experimental | old_code_scripts/lmfit-py/examples/fit_NIST_lmfit.py | 4 | 5071 | from __future__ import print_function
import sys
import math
from optparse import OptionParser
try:
import matplotlib
matplotlib.use('WXAgg')
import pylab
HASPYLAB = True
except ImportError:
HASPYLAB = False
from lmfit import Parameters, minimize
from NISTModels import Models, ReadNistData
def ndig(a, b):
"precision for NIST values"
return round(-math.log10((abs(abs(a)-abs(b)) +1.e-15)/ abs(b)))
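# Rough illustration: ndig(1.23456, 1.23461) evaluates to about 4, i.e. the
# two values agree to roughly four significant digits.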
def Compare_NIST_Results(DataSet, myfit, NISTdata):
print(' ======================================')
print(' %s: ' % DataSet)
print(' | Parameter Name | Value Found | Certified Value | # Matching Digits |')
print(' |----------------+----------------+------------------+-------------------|')
params = myfit.params
val_dig_min = 200
err_dig_min = 200
for i in range(NISTdata['nparams']):
parname = 'b%i' % (i+1)
par = params[parname]
thisval = par.value
certval = NISTdata['cert_values'][i]
vdig = ndig(thisval, certval)
pname = (parname + ' value ' + ' '*14)[:14]
print(' | %s | % -.7e | % -.7e | %2i |' % (pname, thisval, certval, vdig))
val_dig_min = min(val_dig_min, vdig)
thiserr = par.stderr
certerr = NISTdata['cert_stderr'][i]
if thiserr is not None and myfit.errorbars:
edig = ndig(thiserr, certerr)
ename = (parname + ' stderr' + ' '*14)[:14]
print(' | %s | % -.7e | % -.7e | %2i |' % (ename, thiserr, certerr, edig))
err_dig_min = min(err_dig_min, edig)
print(' |----------------+----------------+------------------+-------------------|')
sumsq = NISTdata['sum_squares']
try:
chi2 = myfit.chisqr
print(' | Sum of Squares | %.7e | %.7e | %2i |' % (chi2, sumsq,
ndig(chi2, sumsq)))
except:
pass
print(' |----------------+----------------+------------------+-------------------|')
if not myfit.errorbars:
print(' | * * * * COULD NOT ESTIMATE UNCERTAINTIES * * * * |')
err_dig_min = 0
if err_dig_min < 199:
print(' Worst agreement: %i digits for value, %i digits for error ' % (val_dig_min, err_dig_min))
else:
print(' Worst agreement: %i digits' % (val_dig_min))
return val_dig_min
def NIST_Test(DataSet, method='leastsq', start='start2', plot=True):
NISTdata = ReadNistData(DataSet)
resid, npar, dimx = Models[DataSet]
y = NISTdata['y']
x = NISTdata['x']
params = Parameters()
for i in range(npar):
pname = 'b%i' % (i+1)
cval = NISTdata['cert_values'][i]
cerr = NISTdata['cert_stderr'][i]
pval1 = NISTdata[start][i]
params.add(pname, value=pval1)
myfit = minimize(resid, params, method=method, args=(x,), kws={'y':y})
digs = Compare_NIST_Results(DataSet, myfit, NISTdata)
if plot and HASPYLAB:
fit = -resid(myfit.params, x)
pylab.plot(x, y, 'ro')
pylab.plot(x, fit, 'k+-')
pylab.show()
return digs > 2
modelnames = []
ms = ''
for d in sorted(Models.keys()):
ms = ms + ' %s ' % d
if len(ms) > 55:
modelnames.append(ms)
ms = ' '
modelnames.append(ms)
modelnames = '\n'.join(modelnames)
usage = """
=== Test Fit to NIST StRD Models ===
usage:
------
python fit_NIST.py [options] Model Start
where Start is one of 'start1','start2' or 'cert', for different
starting values, and Model is one of
%s
if Model = 'all', all models and starting values will be run.
options:
--------
-m name of fitting method. One of:
leastsq, nelder, powell, lbfgsb, bfgs,
          tnc, cobyla, slsqp, cg, newton-cg
leastsq (Levenberg-Marquardt) is the default
""" % modelnames
############################
parser = OptionParser(usage=usage, prog="fit-NIST.py")
parser.add_option("-m", "--method", dest="method", metavar='METH',
default='leastsq', help="set method name, default = 'leastsq'")
(opts, args) = parser.parse_args()
dset = ''
start = 'start2'
if len(args) > 0:
dset = args[0]
if len(args) > 1:
start = args[1]
if dset.lower() == 'all':
tpass = 0
tfail = 0
failures = []
dsets = sorted(Models.keys())
for dset in dsets:
for start in ('start1', 'start2', 'cert'):
if NIST_Test(dset, method=opts.method, start=start, plot=False):
tpass += 1
else:
tfail += 1
failures.append(" %s (starting at '%s')" % (dset, start))
print('--------------------------------------')
print(' Fit Method: %s ' % opts.method)
print(' Final Results: %i pass, %i fail.' % (tpass, tfail))
print(' Tests Failed for:\n %s' % '\n '.join(failures))
print('--------------------------------------')
elif dset not in Models:
print(usage)
else:
NIST_Test(dset, method=opts.method, start=start, plot=True)
| apache-2.0 |
joefutrelle/noaa_floats | etl.py | 1 | 4595 | from pandas import read_csv
from orm import Base, Float, Point
from utils import parse_date_time, xa
"""
Extract, transform, and load procedure for floats database
"""
# column definitions
DATA_COLS='ID,DATE,TIME,LAT,LON,PRESS,U,V,TEMP,Q_TIME,Q_POS,Q_PRESS,Q_VEL,Q_TEMP'.split(',')
METADATA_COLS='ID,PRINCIPAL_INVESTIGATOR,ORGANIZATION,EXPERIMENT,1st_DATE,1st_LAT,1st_LON,END_DATE,END_LAT,END_LON,TYPE,FILENAME'.split(',')
# separators
DATA_SEPARATOR=r'\s+'
METADATA_SEPARATOR=r'(?:\b|\))(?:\s*\t+\s*|\s\s)(?=[-0-9a-zA-Z])'
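# The metadata separator above splits on runs of tabs (optionally padded with
# whitespace) or on double spaces that follow a word boundary or ')' and are
# followed by an alphanumeric or '-' character.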
# number of points to process at once
CHUNK_SIZE=10000
# local database connection information
DATABASE_URL='postgresql://floats:floats@localhost/floats'
def etl_data():
"""
Load CSV file containing point data for all floats
"""
n = 0
for chunk in read_csv('./data/floats.dat',sep=DATA_SEPARATOR,iterator=True,chunksize=CHUNK_SIZE):
chunk.fillna(0,inplace=True) # FIXME handle NA's better, e.g., with NULLs
with xa(DATABASE_URL) as session:
for index, row in chunk.iterrows():
pt = Point(**{
'float_id': row.ID,
'date': parse_date_time(row.DATE, row.TIME),
'lat': row.LAT,
'lon': row.LON,
'pressure': row.PRESS,
'u': row.U,
'v': row.V,
'temperature': row.TEMP,
'q_time': row.Q_TIME,
'q_pos': row.Q_POS,
'q_press': row.Q_PRESS,
'q_vel': row.Q_VEL,
'q_temp': row.Q_TEMP
})
session.add(pt)
n += len(chunk.index)
print 'added %d point(s)...' % n
def etl_metadata():
"""
Load CSV file containing metadata for all floats
"""
df = read_csv('./data/floats_dirfl.dat',sep=METADATA_SEPARATOR,index_col=False)
print 'adding %s float(s)...' % len(df.index)
with xa(DATABASE_URL, Base.metadata) as session:
for index, row in df.iterrows():
flt = Float(**{
'id': row.ID,
'pi': row.PRINCIPAL_INVESTIGATOR,
'organization': row.ORGANIZATION,
'experiment': row.EXPERIMENT,
'start_date': row['1st_DATE'],
'start_lat': row['1st_LAT'],
'start_lon': row['1st_LON'],
'end_date': row.END_DATE,
'end_lat': row.END_LAT,
'end_lon': row.END_LON,
'type': row.TYPE,
'filename': row.FILENAME
})
session.add(flt)
def etl_tracks():
"""
Once float and point data is loaded, generate a track geometry
for each float from the point data. This has less information in it
than the point data and is used for geospatial queries
"""
with xa(DATABASE_URL) as session:
n = 0
# for each float, construct a WKT LINESTRING geometry
for f in session.query(Float).order_by(Float.id):
prev_lon = 0
ls = []
ps = []
np = 0
# for all the points in this float's track:
for p in f.points:
if p.lon != -999 and p.lat != -99: # exclude noninformative points
np += 1
lat, lon = float(p.lat), float(p.lon)
# what if float just crossed the int'l date line
if (prev_lon < -90 and lon > 90) or (prev_lon > 90 and lon < -90):
if len(ps)==1:
ps = ps + ps
ls += [ps]
ps = []
ps.append('%.6f %.6f' % (lon, lat))
prev_lon = lon
if len(ps) == 1: # need more than one point
ps = ps + ps
ls += [ps]
# format in WKT
ls = 'MULTILINESTRING(%s)' % ','.join(['(%s)' % ','.join(ps) for ps in ls])
print '%d: Float %ld %d points' % (n, f.id, np)
f.track = ls
n += 1
if n % 100 == 0: # commit occasionally
session.commit()
# final commit is handled by the context manager
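# The stored track is plain WKT, e.g. (illustrative coordinates only):
#   MULTILINESTRING((-70.123456 35.000001,-70.120000 35.010000),( ... ))
# with one inner linestring per segment that does not cross the date line.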
def etl():
"""
Extract, transform, and load all data.
"""
etl_metadata() # first, float metadata
etl_data() # then all data
etl_tracks() # then geospatial tracks
if __name__=='__main__':
"""
Extract, transform, and load all data
"""
etl()
| mit |
ReservoirWebs/GrowChinook | RunModelSens2_new.py | 1 | 6409 | #!/usr/bin/python
import os
import matplotlib
matplotlib.use('Agg')
import sys
import csv
import Bioenergetics_advsens_new as bio
import cgi, cgitb
import pylab
import io
from PIL import Image, ImageDraw
import base64
from matplotlib.ticker import FormatStrFormatter
from matplotlib.font_manager import FontProperties
from scipy.interpolate import griddata
import pandas
import PrintPages as pt
PROCESS_ID = os.getpid()
ADDRESS = cgi.escape(os.environ["REMOTE_ADDR"])
SCRIPT = "Adv Sensitivity Run Page"
pt.write_log_entry(SCRIPT, ADDRESS)
CWD = os.getcwd()
bio.scruffy(CWD, CWD, 'output*')
bio.scruffy('uploads/daph/', CWD, '*')
bio.scruffy('uploads/temp/', CWD, '*')
cgitb.enable()
form = cgi.FieldStorage()
vals = bio.Adv_Sens_Form_Data_Packager(form)
pid = os.getpid()
fname=("output_%s.csv" % pid)
SHORT_OUT_FILENAME = ("output_short_%s.csv" % pid)
pt.print_header(vals.title, 'Sens')
Months2014 = ['June', 'July', 'August']
Months2015 = ['March', 'April', 'May', 'June', 'July', 'August']
Months2016 = ['April', 'May', 'June', 'July', 'August','September']
year = form.getvalue('Year')
if year == '2014':
months = Months2014
elif year == '2015':
months = Months2015
else:
months = Months2016
SensParam = form.getvalue('Sens_Param')
fig = bio.pyplot.figure(facecolor='#c8e9b1')
fontP = FontProperties()
fontP.set_size('small')
fig.suptitle('Spring Chinook', fontsize=20)
ax2 = fig.add_subplot(211)
ax2.set_ylabel('Day 1 Growth Rate')
#ax.set_aspect('equal', adjustable='box')
if SensParam == 'Total Daphnia':
ax2.set_xlabel('Total Daphnia (Thousands)')
else:
ax2.set_xlabel('%s' % SensParam)
ax2.xaxis.set_major_formatter(FormatStrFormatter('%.1f'))
ax2.grid()
ax3 = fig.add_subplot(212)
ax3.set_ylabel('Final Growth Rate')
#ax.set_aspect('equal', adjustable='box')
if SensParam == 'Total Daphnia':
ax3.set_xlabel('Total Daphnia (Thousands)')
else:
ax3.set_xlabel('%s' % SensParam)
ax3.xaxis.set_major_formatter(FormatStrFormatter('%.1f'))
ax3.grid()
base_set_flag = 0
base_val = 0
for month in months:
vals.site_data.month = month
vals.daph_data.d_month = month
all_results = []
all_growths = []
all_growths1 = []
all_csv_headers = []
all_sens_inputs = []
try:
vals.site_data.light, vals.daph_data.total_daph, vals.daph_data.daph_size = bio.get_vals(vals.site_data.light, vals.daph_data.total_daph, vals.daph_data.daph_size, vals.site_data.site, vals.site_data.month, vals.site_data.year)
FRESH_BATCH = bio.Batch(vals.site_data, vals.starting_mass, vals.daph_data, vals.max_temp, vals.min_temp, "None_T_None_None.csv", None, None)
BASE_RESULTS, DAPHNIA_CONSUMED, CONDITION, CONDITION1, DAY_TEMP, NIGHT_TEMP, \
POPULATION_ESTIMATE = FRESH_BATCH.Run_Batch()
sens_factors = bio.sensitivity_expand(form)
results, growths, growths1, csv_headers, sens_inputs, SHORT_RESULTS, base_val, base_set_flag = bio.run_sensitivity(sens_factors, form.getvalue('Sens_Param'), vals.site_data, vals.starting_mass, vals.daph_data, vals.max_temp, vals.min_temp, "None_T_None_None.csv", None, None, base_val, base_set_flag)
#all_results.append(results), all_csv_headers.append(csv_headers)
for i in range(len(growths)):
all_growths.append(growths[i])
all_growths1.append(growths1[i])
all_sens_inputs.append(sens_inputs[i])
ax2.plot(all_sens_inputs, all_growths1, label=("%s" % month))
ax3.plot(all_sens_inputs,all_growths, label=("%s" % month))
with open(fname, 'a') as outfile:
writer = csv.writer(outfile)
for i in range(len(all_results)):
for j in range(len(all_results[i])):
writer.writerow(all_csv_headers[i][j])
writer.writerow(all_results[i][j].keys())
writer.writerows(zip(*all_results[i][j].values()))
outfile.close()
with open(SHORT_OUT_FILENAME, 'a') as outfile:
writer = csv.writer(outfile)
for results in all_results:
for set in results:
writer.writerow(set.keys())
writer.writerows(zip(*set.values()))
outfile.close()
except:
# print('Content-Type: text/html')
# print('Location: http://growchinook.fw.oregonstate.edu/error.html')
# print('<html>')
# print('<head>')
# print('<meta http-equiv="refresh" '
# 'content="0;url=http://growchinook.fw.oregonstate.edu/error.html" />')
# print('<title>You are going to be redirected</title>')
# print('</head>')
# print('<body>')
# print('Wait <a href="http://growchinook.fw.oregonstate.edu/error.html">'
# 'Click here if you are not redirected</a>')
# print('</body>')
# print('</html>')
cgitb.handler()
art = []
lgd = pylab.legend(prop = fontP,loc=9, bbox_to_anchor=(0.5, -0.1), ncol=3)
bio.pyplot.gcf().subplots_adjust(bottom=0.35)
art.append(lgd)
fig.tight_layout(pad=2, h_pad=None, w_pad=None, rect=None)
pylab.savefig( "new_{}.png".format(pid),facecolor=fig.get_facecolor(), edgecolor='lightblue')
data_uri = base64.b64encode(open('new_{}.png'.format(pid), 'rb').read()).decode('utf-8').replace('\n', '')
img_tag = '<img class="results" src="data:image/png;base64,{0}">'.format(data_uri)
print(img_tag)
pt.print_in_data(FRESH_BATCH.site, FRESH_BATCH.year, vals.starting_mass, FRESH_BATCH.total_daphnia,
FRESH_BATCH.daphnia_size, FRESH_BATCH.light)
print('''
</div>
</div>
<br>
''')
if vals.site_data.max_depth < 40 or vals.site_data.min_depth != -1:
print('''<div style="width:600px;display:inline-block;font: normal normal 18px
'Times New Roman', Times, FreeSerif, sans-serif;"><div style="float:left;">
Depth restricted to between %.2fm and %.2fm.</div><br>''' % (vals.site_data.min_dep, vals.site_data.max_depth))
if vals.max_temp != 10000 or vals.min_temp != -1:
print('''<div style="float:left;">
Temperature restricted to between %.2f degrees and %.2f degrees.</div><br>
''' % (vals.max_temp, vals.min_temp))
print('</div>')
pt.print_adv_sens_form(fname, SHORT_OUT_FILENAME, 'out', 'RunModelSens2_test.py')
print('''
</body>
</html>
''')
os.remove('new_{}.png'.format(pid))
quit()
| gpl-3.0 |
GroovyGregor/master_thesis | python/ModuleSetup/MasterModule/grid_plot.py | 1 | 3860 | '''Class for general plots on cartesian grids'''
# Python modules
import numpy as np
from matplotlib.colors import LinearSegmentedColormap as lsc
# MasterModule
from .cartesian_grid import CartesianGrid
class GridPlot(CartesianGrid):
'''Class for plotting on a cartesian grid
This class is a subclass of the :any:`CartesianGrid` class and the
super class of :any:`ReflPlot`, :any:`HeightsPlot` and
:any:`ReflDiffPlot`. This class only saves all general attributes,
    which are the same for all kinds of plots on a cartesian grid.
Attributes:
log_iso (:any:`bool`): If True --> isolines around rain areas will be
plotted.
        rain_th (:any:`int`): dBZ threshold at which rain is assumed.
lon_plot (:any:`numpy.ndarray`): All longitude ticks.
lat_plot (:any:`numpy.ndarray`): All latitude ticks.
lon_ticks (:any:`numpy.ndarray`): Longitude ticks, which will be
labeled.
lat_ticks (:any:`numpy.ndarray`): Latitute ticks, which will be
labeled.
lon_label (:any:`numpy.ndarray`): Labels of Longitude ticks.
lat_label (:any:`numpy.ndarray`): Labels of Latitude ticks.
mask (:any:`numpy.ndarray`): Mask array.
cm_mask (:any:`matplotlib.colors.LinearSegmentedColormap`):
Colormap for the mask.
'''
def __init__(self, grid_par, plot_par):
'''Initialization of GridPlot
Calls the :any:`CartesianGrid.__init__`-method and saves all
general attributes needed for creating a plot on a cartesian
grid.
Args:
grid_par (dict): Grid parameters, e.g. location, resolution
and shape.
plot_par (dict): Plot parameters, e.g. number of grid lines,
                logical variable indicating whether to plot rain area contours,
dbz threshold, height isolines, mask range
'''
# Call initialization method of super class
super().__init__(grid_par)
# Get number of labeled ticks in plot
tick_nr = plot_par['tick_nr']
# If log_iso == True --> draw isolines around rain areas
self.log_iso = plot_par['log_iso']
# Get threshold, at which rain is assumed
self.rain_th = plot_par['rain_th']
# Get x,y array to plot contour plots
self.lon_plot = np.arange(self.lon_shape)
self.lat_plot = np.arange(self.lat_shape)
# Get lon, lat ticks to be labeled
self.lon_ticks = np.linspace(0, self.lon_shape - 1, num=tick_nr)
self.lat_ticks = np.linspace(0, self.lat_shape - 1, num=tick_nr)
# Getting rot. lon-coords of grid lines to be labeled
self.lon_label = np.around(
np.linspace(self.corners.lon_start, self.corners.lon_end,
num=tick_nr), decimals=2
)
# Getting rot. lat-coords of grid lines to be labeled
self.lat_label = np.around(
np.linspace(self.corners.lat_start, self.corners.lat_end,
num=tick_nr), decimals=2
)
# Get mask for grid boxes outside of pattern range
self.mask = self.get_mask(plot_par['max_range'])
# Create colormap for the mask
colors = ['#00000000', 'grey']
self.cm_mask = lsc.from_list('cm_mask', colors)
def make_plot(self):
'''Create a plot on a cartesian grid
This method belongs to specific subclasses :any:`ReflPlot`,
:any:`HeightsPlot` or :any:`ReflDiffPlot`. When the method is
called from this superclass, raise an Error.
Raises:
NotImplementedError: If this method is called.
'''
# Raise error
raise NotImplementedError
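# A concrete subclass only needs to provide make_plot(); a minimal, purely
# illustrative sketch (hypothetical signature and matplotlib usage):
#
#   class ExamplePlot(GridPlot):
#       def make_plot(self, field):
#           import matplotlib.pyplot as plt
#           fig, ax = plt.subplots()
#           ax.contourf(self.lon_plot, self.lat_plot, field)
#           ax.contourf(self.lon_plot, self.lat_plot, self.mask,
#                       cmap=self.cm_mask)
#           return fig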
| gpl-3.0 |
berkeley-stat159/project-lambda | code/stat159lambda/classification/random_forest/rf_cross_validate.py | 1 | 2085 | # Script that runs cross validation
from __future__ import print_function, division
import numpy as np
from sklearn.cross_validation import KFold
from sklearn.metrics import accuracy_score
from stat159lambda.classification import design_matrix as dm
from stat159lambda.classification.random_forest import rf
from stat159lambda.classification import partition_volumes as pv
from stat159lambda.config import REPO_HOME_PATH, NUM_VOXELS
from stat159lambda.linear_modeling import linear_modeling as lm
from stat159lambda.utils import data_path as dp
NUM_FEATURES = [500, 1000, 2500, 5000, 10000, 20000, 30000, 40000, 42000,
44000, 46000, 48000, 50000]
def main():
subj_num, fwhm_mm = 1, 4
voxels_sorted_by_t_statistic = lm.VoxelExtractor(subj_num,
'int-ext').t_stat()
design_matrix = dm.DesignMatrix(dp.get_smoothed_2d_path(subj_num, fwhm_mm))
train_volume_indices = pv.get_train_indices()
cv_values = []
for num_features in NUM_FEATURES:
voxel_feature_indices = voxels_sorted_by_t_statistic[:num_features]
X_train = design_matrix.get_design_matrix(train_volume_indices,
voxel_feature_indices)
y_train = np.array(design_matrix.get_labels(train_volume_indices))
cv_accuracies = []
for train, test in KFold(len(X_train), 5):
X_cv_train = X_train[train, :]
y_cv_train = y_train[train]
X_cv_test = X_train[test, :]
y_cv_test = y_train[test]
model = rf.Classifier(X_cv_train, y_cv_train)
model.train()
y_predicted = model.predict(X_cv_test)
cv_accuracies.append(accuracy_score(y_predicted, y_cv_test))
print(np.mean(cv_accuracies))
cv_values.append(np.mean(cv_accuracies))
output_path = '{0}/figures/rf_cross_validated_accuracies.txt'.format(
REPO_HOME_PATH)
np.savetxt(output_path, cv_values)
print('Saved {0}'.format(output_path))
if __name__ == '__main__':
main()
| bsd-3-clause |
ligz07/merlin | src/work_in_progress/oliver/dnn_synth.py | 3 | 25741 |
import cPickle
import gzip
import os, sys, errno
import time
import math
import glob
import struct
file_location = os.path.split(os.path.realpath(os.path.abspath(os.path.dirname(__file__))))[0]+'/'
sys.path.append(file_location + '/../')
# numpy & theano imports need to be done in this order (only for some numpy installations, not sure why)
import numpy
# we need to explicitly import this in some cases, not sure why this doesn't get imported with numpy itself
import numpy.distutils.__config__
# and only after that can we import theano
import theano
from utils.providers import ListDataProvider
from frontend.label_normalisation import HTSLabelNormalisation, XMLLabelNormalisation
from frontend.silence_remover import SilenceRemover
from frontend.silence_remover import trim_silence
from frontend.min_max_norm import MinMaxNormalisation
#from frontend.acoustic_normalisation import CMPNormalisation
from frontend.acoustic_composition import AcousticComposition
from frontend.parameter_generation import ParameterGeneration
#from frontend.feature_normalisation_base import FeatureNormBase
from frontend.mean_variance_norm import MeanVarianceNorm
from io_funcs.binary_io import BinaryIOCollection
# the new class for label composition and normalisation
from frontend.label_composer import LabelComposer
import configuration
from models.dnn import DNN
#from models.ms_dnn import MultiStreamDNN
#from models.ms_dnn_gv import MultiStreamDNNGv
#from models.sdae import StackedDenoiseAutoEncoder
from utils.compute_distortion import DistortionComputation, IndividualDistortionComp
from utils.generate import generate_wav
from utils.learn_rates import ExpDecreaseLearningRate
#import matplotlib.pyplot as plt
# our custom logging class that can also plot
#from logplot.logging_plotting import LoggerPlotter, MultipleTimeSeriesPlot, SingleWeightMatrixPlot
from logplot.logging_plotting import LoggerPlotter, MultipleSeriesPlot, SingleWeightMatrixPlot
import logging # as logging
import logging.config
import StringIO
def extract_file_id_list(file_list):
file_id_list = []
for file_name in file_list:
file_id = os.path.basename(os.path.splitext(file_name)[0])
file_id_list.append(file_id)
return file_id_list
def read_file_list(file_name):
logger = logging.getLogger("read_file_list")
file_lists = []
fid = open(file_name)
for line in fid.readlines():
line = line.strip()
if len(line) < 1:
continue
file_lists.append(line)
fid.close()
logger.debug('Read file list from %s' % file_name)
return file_lists
def make_output_file_list(out_dir, in_file_lists):
out_file_lists = []
for in_file_name in in_file_lists:
file_id = os.path.basename(in_file_name)
out_file_name = out_dir + '/' + file_id
out_file_lists.append(out_file_name)
return out_file_lists
def prepare_file_path_list(file_id_list, file_dir, file_extension, new_dir_switch=True):
if not os.path.exists(file_dir) and new_dir_switch:
os.makedirs(file_dir)
file_name_list = []
for file_id in file_id_list:
file_name = file_dir + '/' + file_id + file_extension
file_name_list.append(file_name)
return file_name_list
def visualize_dnn(dnn):
    logger = logging.getLogger("visualize_dnn")
    plotlogger = logging.getLogger("plotting")
    layer_num = len(dnn.params) / 2 ## including input and output
for i in xrange(layer_num):
fig_name = 'Activation weights W' + str(i)
fig_title = 'Activation weights of W' + str(i)
xlabel = 'Neuron index of hidden layer ' + str(i)
ylabel = 'Neuron index of hidden layer ' + str(i+1)
if i == 0:
xlabel = 'Input feature index'
if i == layer_num-1:
ylabel = 'Output feature index'
logger.create_plot(fig_name, SingleWeightMatrixPlot)
plotlogger.add_plot_point(fig_name, fig_name, dnn.params[i*2].get_value(borrow=True).T)
plotlogger.save_plot(fig_name, title=fig_name, xlabel=xlabel, ylabel=ylabel)
def dnn_generation(valid_file_list, nnets_file_name, n_ins, n_outs, out_file_list):
logger = logging.getLogger("dnn_generation")
logger.debug('Starting dnn_generation')
plotlogger = logging.getLogger("plotting")
dnn_model = cPickle.load(open(nnets_file_name, 'rb'))
# visualize_dnn(dbn)
file_number = len(valid_file_list)
for i in xrange(file_number):
logger.info('generating %4d of %4d: %s' % (i+1,file_number,valid_file_list[i]) )
fid_lab = open(valid_file_list[i], 'rb')
features = numpy.fromfile(fid_lab, dtype=numpy.float32)
fid_lab.close()
features = features[:(n_ins * (features.size / n_ins))]
features = features.reshape((-1, n_ins))
temp_set_x = features.tolist()
test_set_x = theano.shared(numpy.asarray(temp_set_x, dtype=theano.config.floatX))
predicted_parameter = dnn_model.parameter_prediction(test_set_x=test_set_x)
# predicted_parameter = test_out()
### write to cmp file
predicted_parameter = numpy.array(predicted_parameter, 'float32')
temp_parameter = predicted_parameter
fid = open(out_file_list[i], 'wb')
predicted_parameter.tofile(fid)
logger.debug('saved to %s' % out_file_list[i])
fid.close()
##generate bottleneck layer as festures
def dnn_hidden_generation(valid_file_list, nnets_file_name, n_ins, n_outs, out_file_list):
logger = logging.getLogger("dnn_generation")
logger.debug('Starting dnn_generation')
plotlogger = logging.getLogger("plotting")
dnn_model = cPickle.load(open(nnets_file_name, 'rb'))
file_number = len(valid_file_list)
for i in xrange(file_number):
logger.info('generating %4d of %4d: %s' % (i+1,file_number,valid_file_list[i]) )
fid_lab = open(valid_file_list[i], 'rb')
features = numpy.fromfile(fid_lab, dtype=numpy.float32)
fid_lab.close()
features = features[:(n_ins * (features.size / n_ins))]
features = features.reshape((-1, n_ins))
temp_set_x = features.tolist()
test_set_x = theano.shared(numpy.asarray(temp_set_x, dtype=theano.config.floatX))
predicted_parameter = dnn_model.generate_top_hidden_layer(test_set_x=test_set_x)
### write to cmp file
predicted_parameter = numpy.array(predicted_parameter, 'float32')
temp_parameter = predicted_parameter
fid = open(out_file_list[i], 'wb')
predicted_parameter.tofile(fid)
logger.debug('saved to %s' % out_file_list[i])
fid.close()
def main_function(cfg, in_dir, out_dir):
# get a logger for this main function
logger = logging.getLogger("main")
# get another logger to handle plotting duties
plotlogger = logging.getLogger("plotting")
#### parameter setting########
hidden_layers_sizes = cfg.hyper_params['hidden_layer_size']
file_id_list = []
if cfg.label_style == 'HTS':
ext = '.lab'
else:
ext = '.utt'
synth_utts = glob.glob(in_dir + '/*' + ext)
for fname in synth_utts:
junk,name = os.path.split(fname)
file_id_list.append(name.replace(ext,''))
if not os.path.isdir(out_dir):
os.mkdir(out_dir)
###total file number including training, development, and testing
#total_file_number = len(file_id_list)
data_dir = cfg.data_dir
#nn_cmp_dir = os.path.join(data_dir, 'nn' + cfg.combined_feature_name + '_' + str(cfg.cmp_dim))
#nn_cmp_norm_dir = os.path.join(data_dir, 'nn_norm' + cfg.combined_feature_name + '_' + str(cfg.cmp_dim))
model_dir = os.path.join(cfg.work_dir, 'nnets_model')
gen_dir = os.path.join(out_dir, 'gen')
#in_file_list_dict = {}
#for feature_name in cfg.in_dir_dict.keys():
# in_file_list_dict[feature_name] = prepare_file_path_list(file_id_list, cfg.in_dir_dict[feature_name], cfg.file_extension_dict[feature_name], False)
#nn_cmp_file_list = prepare_file_path_list(file_id_list, nn_cmp_dir, cfg.cmp_ext)
#nn_cmp_norm_file_list = prepare_file_path_list(file_id_list, nn_cmp_norm_dir, cfg.cmp_ext)
###normalisation information
norm_info_file = os.path.join(data_dir, 'norm_info' + cfg.combined_feature_name + '_' + str(cfg.cmp_dim) + '_' + cfg.output_feature_normalisation + '.dat')
### normalise input full context label
# currently supporting two different forms of lingustic features
# later, we should generalise this
if cfg.label_style == 'HTS':
label_normaliser = HTSLabelNormalisation(question_file_name=cfg.question_file_name)
lab_dim = label_normaliser.dimension
logger.info('Input label dimension is %d' % lab_dim)
suffix=str(lab_dim)
# no longer supported - use new "composed" style labels instead
elif cfg.label_style == 'composed':
# label_normaliser = XMLLabelNormalisation(xpath_file_name=cfg.xpath_file_name)
suffix='composed'
# the number can be removed
binary_label_dir = os.path.join(out_dir, 'lab_bin')
nn_label_norm_dir = os.path.join(out_dir, 'lab_bin_norm')
in_label_align_file_list = prepare_file_path_list(file_id_list, in_dir, cfg.lab_ext)
binary_label_file_list = prepare_file_path_list(file_id_list, binary_label_dir, cfg.lab_ext)
nn_label_norm_file_list = prepare_file_path_list(file_id_list, nn_label_norm_dir, cfg.lab_ext)
## need this to find normalisation info:
if cfg.process_labels_in_work_dir:
label_data_dir = cfg.work_dir
else:
label_data_dir = data_dir
min_max_normaliser = None
label_norm_file = 'label_norm_%s.dat' %(cfg.label_style)
label_norm_file = os.path.join(label_data_dir, label_norm_file)
if cfg.label_style == 'HTS':
# simple HTS labels
logger.info('preparing label data (input) using standard HTS style labels')
label_normaliser.perform_normalisation(in_label_align_file_list, binary_label_file_list)
else:
logger.info('preparing label data (input) using "composed" style labels')
label_composer = LabelComposer()
label_composer.load_label_configuration(cfg.label_config_file)
logger.info('Loaded label configuration')
# logger.info('%s' % label_composer.configuration.labels )
lab_dim=label_composer.compute_label_dimension()
logger.info('label dimension will be %d' % lab_dim)
if cfg.precompile_xpaths:
label_composer.precompile_xpaths()
# there are now a set of parallel input label files (e.g, one set of HTS and another set of Ossian trees)
# create all the lists of these, ready to pass to the label composer
in_label_align_file_list = {}
for label_style, label_style_required in label_composer.label_styles.iteritems():
if label_style_required:
logger.info('labels of style %s are required - constructing file paths for them' % label_style)
if label_style == 'xpath':
in_label_align_file_list['xpath'] = prepare_file_path_list(file_id_list, in_dir, cfg.utt_ext, False)
elif label_style == 'hts':
logger.critical('script not tested with HTS labels')
else:
logger.critical('unsupported label style %s specified in label configuration' % label_style)
raise Exception
# now iterate through the files, one at a time, constructing the labels for them
num_files=len(file_id_list)
logger.info('the label styles required are %s' % label_composer.label_styles)
for i in xrange(num_files):
logger.info('making input label features for %4d of %4d' % (i+1,num_files))
# iterate through the required label styles and open each corresponding label file
# a dictionary of file descriptors, pointing at the required files
required_labels={}
for label_style, label_style_required in label_composer.label_styles.iteritems():
# the files will be a parallel set of files for a single utterance
# e.g., the XML tree and an HTS label file
if label_style_required:
required_labels[label_style] = open(in_label_align_file_list[label_style][i] , 'r')
logger.debug(' opening label file %s' % in_label_align_file_list[label_style][i])
logger.debug('label styles with open files: %s' % required_labels)
label_composer.make_labels(required_labels,out_file_name=binary_label_file_list[i],fill_missing_values=cfg.fill_missing_values,iterate_over_frames=cfg.iterate_over_frames)
# now close all opened files
for fd in required_labels.itervalues():
fd.close()
# no silence removal for synthesis ...
## minmax norm:
min_max_normaliser = MinMaxNormalisation(feature_dimension = lab_dim, min_value = 0.01, max_value = 0.99)
# reload stored minmax values: (TODO -- move reading and writing into MinMaxNormalisation class)
fid = open(label_norm_file, 'rb')
## This doesn't work -- precision is lost -- reads in as float64
#label_norm_info = numpy.fromfile(fid) ## label_norm_info = numpy.array(label_norm_info, 'float32')
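    # (An explicit dtype, e.g. numpy.fromfile(fid, dtype=numpy.float32), would
    #  also enforce 32-bit floats; the struct-based read below is kept as-is.)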
## use struct to enforce float32:
nbytes = os.stat(label_norm_file)[6] # length in bytes
data = fid.read(nbytes) # = read until bytes run out
fid.close()
m = nbytes / 4 ## number 32 bit floats
format = str(m)+"f"
label_norm_info = struct.unpack(format, data)
label_norm_info = numpy.array(label_norm_info)
min_max_normaliser.min_vector = label_norm_info[:m/2]
min_max_normaliser.max_vector = label_norm_info[m/2:]
### apply precompuated min-max to the whole dataset
min_max_normaliser.normalise_data(binary_label_file_list, nn_label_norm_file_list)
### make output acoustic data
# if cfg.MAKECMP:
### retrieve acoustic normalisation information for normalising the features back
var_dir = os.path.join(data_dir, 'var')
var_file_dict = {}
for feature_name in cfg.out_dimension_dict.keys():
var_file_dict[feature_name] = os.path.join(var_dir, feature_name + '_' + str(cfg.out_dimension_dict[feature_name]))
### normalise output acoustic data
# if cfg.NORMCMP:
combined_model_arch = str(len(hidden_layers_sizes))
for hid_size in hidden_layers_sizes:
combined_model_arch += '_' + str(hid_size)
nnets_file_name = '%s/%s_%s_%d_%s_%d.%d.train.%d.model' \
%(model_dir, cfg.model_type, cfg.combined_feature_name, int(cfg.multistream_switch),
combined_model_arch, lab_dim, cfg.cmp_dim, cfg.train_file_number)
### DNN model training
# if cfg.TRAINDNN:
##if cfg.DNNGEN:
logger.info('generating from DNN')
try:
os.makedirs(gen_dir)
except OSError as e:
if e.errno == errno.EEXIST:
# not an error - just means directory already exists
pass
else:
logger.critical('Failed to create generation directory %s' % gen_dir)
logger.critical(' OS error was: %s' % e.strerror)
raise
gen_file_list = prepare_file_path_list(file_id_list, gen_dir, cfg.cmp_ext)
dnn_generation(nn_label_norm_file_list, nnets_file_name, lab_dim, cfg.cmp_dim, gen_file_list)
logger.debug('denormalising generated output using method %s' % cfg.output_feature_normalisation)
fid = open(norm_info_file, 'rb')
cmp_min_max = numpy.fromfile(fid, dtype=numpy.float32)
fid.close()
cmp_min_max = cmp_min_max.reshape((2, -1))
cmp_min_vector = cmp_min_max[0, ]
cmp_max_vector = cmp_min_max[1, ]
if cfg.output_feature_normalisation == 'MVN':
denormaliser = MeanVarianceNorm(feature_dimension = cfg.cmp_dim)
denormaliser.feature_denormalisation(gen_file_list, gen_file_list, cmp_min_vector, cmp_max_vector)
elif cfg.output_feature_normalisation == 'MINMAX':
denormaliser = MinMaxNormalisation(cfg.cmp_dim, min_value = 0.01, max_value = 0.99, min_vector = cmp_min_vector, max_vector = cmp_max_vector)
denormaliser.denormalise_data(gen_file_list, gen_file_list)
else:
logger.critical('denormalising method %s is not supported!\n' %(cfg.output_feature_normalisation))
raise
##perform MLPG to smooth parameter trajectory
    ## lf0 is included, the output features must have vuv.
generator = ParameterGeneration(gen_wav_features = cfg.gen_wav_features)
generator.acoustic_decomposition(gen_file_list, cfg.cmp_dim, cfg.out_dimension_dict, cfg.file_extension_dict, var_file_dict)
logger.info('Simple variance expansion')
test_var_scaling=False
scaled_dir = gen_dir + '_scaled'
if test_var_scaling:
file_id_list = simple_scale_variance_CONTINUUM(gen_dir, scaled_dir, var_file_dict, cfg.out_dimension_dict, file_id_list)
else:
simple_scale_variance(gen_dir, scaled_dir, var_file_dict, cfg.out_dimension_dict, file_id_list, gv_weight=1.0) ## gv_weight hard coded here!
### generate wav ----
#if cfg.GENWAV:
logger.info('reconstructing waveform(s)')
#generate_wav_glottHMM(scaled_dir, file_id_list)
generate_wav(scaled_dir, file_id_list, cfg)
def simple_scale_variance(indir, outdir, var_file_dict, out_dimension_dict, file_id_list, gv_weight=1.0):
## simple variance scaling (silen et al. 2012, paragraph 3.1)
## TODO: Lots of things like stream names hardcoded here; 3 for delta + delta-delta; ...
# all_streams = ['cmp','HNR','F0','LSF','Gain','LSFsource']
# streams_to_scale = ['LSF']
all_streams = ['cmp','mgc','lf0','bap']
streams_to_scale = ['mgc']
static_variances = {}
static_dimension_dict = {}
for (feature_name,size) in out_dimension_dict.items():
static_dimension_dict[feature_name] = size/3
io_funcs = BinaryIOCollection()
for feature_name in var_file_dict.keys():
var_values, dimension = io_funcs.load_binary_file_frame(var_file_dict[feature_name], 1)
static_var_values = var_values[:static_dimension_dict[feature_name], :]
static_variances[feature_name] = static_var_values
if not os.path.isdir(outdir):
os.makedirs(outdir)
assert gv_weight <= 1.0 and gv_weight >= 0.0
local_weight = 1.0 - gv_weight
for uttname in file_id_list:
for stream in all_streams:
infile = os.path.join(indir, uttname + '.' + stream)
outfile = os.path.join(outdir, uttname + '.' + stream)
if not os.path.isfile(infile):
sys.exit(infile + ' does not exist')
if stream in streams_to_scale:
speech, dimension = io_funcs.load_binary_file_frame(infile, static_dimension_dict[stream])
utt_mean = numpy.mean(speech, axis=0)
utt_std = numpy.std(speech, axis=0)
global_std = numpy.transpose((static_variances[stream]))
weighted_global_std = (gv_weight * global_std) + (local_weight * utt_std)
std_ratio = weighted_global_std / utt_std
nframes, ndim = numpy.shape(speech)
utt_mean_matrix = numpy.tile(utt_mean, (nframes,1))
std_ratio_matrix = numpy.tile(std_ratio, (nframes,1))
scaled_speech = ((speech - utt_mean_matrix) * std_ratio_matrix) + utt_mean_matrix
io_funcs.array_to_binary_file(scaled_speech, outfile)
else:
os.system('cp %s %s'%(infile, outfile))
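# For each scaled stream the transformation above is, per static coefficient,
#   scaled = (x - mean_utt) * (std_target / std_utt) + mean_utt
# where std_target interpolates between the corpus-level statistic loaded from
# the var files and the utterance-level standard deviation, weighted by gv_weight.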
def simple_scale_variance_CONTINUUM(indir, outdir, var_file_dict, out_dimension_dict, file_id_list):
## Try range of interpolation weights for combining global & local variance
all_streams = ['cmp','HNR','F0','LSF','Gain','LSFsource']
streams_to_scale = ['LSF']
static_variances = {}
static_dimension_dict = {}
for (feature_name,size) in out_dimension_dict.items():
static_dimension_dict[feature_name] = size/3
io_funcs = BinaryIOCollection()
for feature_name in var_file_dict.keys():
var_values, dimension = io_funcs.load_binary_file_frame(var_file_dict[feature_name], 1)
static_var_values = var_values[:static_dimension_dict[feature_name], :]
static_variances[feature_name] = static_var_values
if not os.path.isdir(outdir):
os.makedirs(outdir)
file_id_list_out = []
for uttname in file_id_list:
for gv_weight in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
local_weight = 1.0 - gv_weight
for stream in all_streams:
infile = os.path.join(indir, uttname + '.' + stream)
extended_uttname = uttname + '_gv' + str(gv_weight)
print extended_uttname
outfile = os.path.join(outdir, extended_uttname + '.' + stream)
if not os.path.isfile(infile):
sys.exit(infile + ' does not exist')
if stream in streams_to_scale:
speech, dimension = io_funcs.load_binary_file_frame(infile, static_dimension_dict[stream])
utt_mean = numpy.mean(speech, axis=0)
utt_std = numpy.std(speech, axis=0)
global_std = numpy.transpose((static_variances[stream]))
weighted_global_std = (gv_weight * global_std) + (local_weight * utt_std)
std_ratio = weighted_global_std / utt_std
nframes, ndim = numpy.shape(speech)
utt_mean_matrix = numpy.tile(utt_mean, (nframes,1))
std_ratio_matrix = numpy.tile(std_ratio, (nframes,1))
scaled_speech = ((speech - utt_mean_matrix) * std_ratio_matrix) + utt_mean_matrix
io_funcs.array_to_binary_file(scaled_speech, outfile)
else:
os.system('cp %s %s'%(infile, outfile))
file_id_list_out.append(extended_uttname)
return file_id_list_out
def log_to_hertz(infile, outfile):
f = open(infile, 'r')
log_values = [float(val) for val in f.readlines()]
f.close()
def m2h(l):
h = math.exp(l)
return h
hertz = [m2h(l) for l in log_values]
f = open(outfile, 'w')
for val in hertz:
if val > 0:
f.write(str(val) + '\n')
else:
f.write('0.0\n')
f.close()
def generate_wav_glottHMM(gen_dir, gen_file_id_list):
x2x='~/repos/simple4all/CSTRVoiceClone/trunk/bin/x2x'
synthesis='~/sim2/oliver/nst_repos/OSSIAN/ossian-v.1.3/tools/GlottHMM/Synthesis'
general_glott_conf = '~/sim2/oliver/nst_repos/OSSIAN/ossian-v.1.3/voices/en/ky_02_toy/english_gold_basic_glott_KY/processors/speech_feature_extractor/main_config.cfg'
user_glott_conf = '~/sim2/oliver/nst_repos/OSSIAN/ossian-v.1.3/voices/en/ky_02_toy/english_gold_basic_glott_KY/processors/speech_feature_extractor/user_config.cfg'
exports = 'export LIBCONFIG_INSTALL_DIR=/afs/inf.ed.ac.uk/user/o/owatts/sim2/oliver/nst_repos/OSSIAN/ossian-v.1.3/tools/GlottHMM//libconfig-1.4.9 ; export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$LIBCONFIG_INSTALL_DIR/lib/.libs ; export LIBRARY_PATH=$LIBRARY_PATH:$LIBCONFIG_INSTALL_DIR/lib/.libs ; export CPATH=$CPATH:$LIBCONFIG_INSTALL_DIR/lib ;'
streams = ['cmp','HNR','F0','LSF','Gain','LSFsource']
for uttname in gen_file_id_list:
all_present = True
for stream in streams:
if not os.path.isfile(os.path.join(gen_dir, uttname + '.' + stream)):
all_present = False
if all_present:
for stream in streams:
extra = ''
if stream == 'F0':
extra = '.NEGVALS'
fname = os.path.join(gen_dir, uttname + '.' + stream)
fname_txt = os.path.join(gen_dir, uttname + '.txt.' + stream + extra)
comm = '%s +fa %s > %s'%(x2x, fname, fname_txt)
os.system(comm)
log_to_hertz(os.path.join(gen_dir, uttname + '.txt.F0.NEGVALS'), \
os.path.join(gen_dir, uttname + '.txt.F0'))
stem_name = os.path.join(gen_dir, uttname + '.txt')
comm = '%s %s %s %s %s'%(exports, synthesis, stem_name, general_glott_conf, user_glott_conf)
print comm
os.system(comm)
else:
print 'missing stream(s) for utterance ' + uttname
if __name__ == '__main__':
# these things should be done even before trying to parse the command line
# create a configuration instance
# and get a short name for this instance
cfg=configuration.cfg
#
# # set up logging to use our custom class
# logging.setLoggerClass(LoggerPlotter)
#
# # get a logger for this main function
# logger = logging.getLogger("main")
if len(sys.argv) != 4:
print 'usage: run_dnn.sh config_file_name in_dir out_dir'
#logger.critical('usage: run_dnn.sh config_file_name utt_dir')
sys.exit(1)
config_file = sys.argv[1]
in_dir = sys.argv[2]
out_dir = sys.argv[3]
config_file = os.path.abspath(config_file)
cfg.configure(config_file)
main_function(cfg, in_dir, out_dir)
sys.exit(0)
| apache-2.0 |
platinhom/ManualHom | Coding/Python/scipy-html-0.16.1/generated/scipy-stats-invgamma-1.py | 1 | 1091 | from scipy.stats import invgamma
import numpy as np
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 1)
# Calculate a few first moments:
a = 4.07
mean, var, skew, kurt = invgamma.stats(a, moments='mvsk')
# Display the probability density function (``pdf``):
x = np.linspace(invgamma.ppf(0.01, a),
invgamma.ppf(0.99, a), 100)
ax.plot(x, invgamma.pdf(x, a),
'r-', lw=5, alpha=0.6, label='invgamma pdf')
# Alternatively, the distribution object can be called (as a function)
# to fix the shape, location and scale parameters. This returns a "frozen"
# RV object holding the given parameters fixed.
# Freeze the distribution and display the frozen ``pdf``:
rv = invgamma(a)
ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
# Check accuracy of ``cdf`` and ``ppf``:
vals = invgamma.ppf([0.001, 0.5, 0.999], a)
np.allclose([0.001, 0.5, 0.999], invgamma.cdf(vals, a))
# True
# Generate random numbers:
r = invgamma.rvs(a, size=1000)
# And compare the histogram:
ax.hist(r, normed=True, histtype='stepfilled', alpha=0.2)
ax.legend(loc='best', frameon=False)
plt.show()
| gpl-2.0 |
PSRCode/lttng-ci-1 | lava/rootfs/vmdeboostrap/generate-root.py | 2 | 3606 | #!/usr/bin/python3
# Copyright (C) 2018 - Jonathan Rajotte-Julien <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import gzip
import os
import shutil
import subprocess
from datetime import datetime
def compress(filename):
with open(filename, 'rb') as f_in:
with gzip.open('{}.gz'.format(filename), 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(filename)
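# Usage sketch (illustrative, not part of the original script): compress()
# gzips a file in place and removes the original, e.g.
#
#   compress('rootfs_amd64_xenial_2018-01-01.tar')
#   # -> leaves 'rootfs_amd64_xenial_2018-01-01.tar.gz' on disk
#
# The filename above is only an example of the names produced by main() below.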
packages = [
'autoconf',
'automake',
'bash-completion',
'bison',
'bsdtar',
'build-essential',
'chrpath',
'clang',
'cloc',
'cppcheck',
'curl',
'elfutils',
'flex',
'gettext',
'git',
'htop',
'jq',
'libdw-dev',
'libelf-dev',
'libffi-dev',
'libglib2.0-dev',
'libmount-dev',
'libnuma-dev',
'libpfm4-dev',
'libpopt-dev',
'libtap-harness-archive-perl',
'libtool',
'libxml2',
'libxml2-dev',
'netcat-traditional',
'openssh-server',
'psmisc',
'python-virtualenv',
'python3',
'python3-dev',
'python3-numpy',
'python3-pandas',
'python3-pip',
'python3-setuptools',
'python3-sphinx',
'stress',
'swig',
'texinfo',
'tree',
'uuid-dev',
'vim',
'wget',
]
def main():
parser = argparse.ArgumentParser(description='Generate lava lttng rootfs')
parser.add_argument("--arch", default='amd64')
# We are using xenial instead of bionic and later since some syscall tests
# depend on cat and the libc using the open syscall. In recent libc versions
# openat is used instead. See these commits in lttng-tools that help with the problem:
# c8e51d1559c48a12f18053997bbcff0c162691c4
# 192bd8fb712659b9204549f29d9a54dc2c57a9e
# These are only part of 2.11 and were not backported since they do not
# represent a *problem* per se.
parser.add_argument("--distribution", default='xenial')
parser.add_argument("--mirror", default='http://archive.ubuntu.com/ubuntu')
parser.add_argument(
"--component", default='universe,multiverse,main,restricted')
args = parser.parse_args()
name = "rootfs_{}_{}_{}.tar".format(args.arch, args.distribution,
datetime.now().strftime("%Y-%m-%d"))
hostname = "linaro-server"
user = "linaro/linaro"
root_password = "root"
print(name)
command = [
"sudo",
"vmdebootstrap",
"--arch={}".format(args.arch),
"--distribution={}".format(args.distribution),
"--mirror={}".format(args.mirror),
"--debootstrapopts=components={}".format(args.component),
"--tarball={}".format(name),
"--package={}".format(",".join(packages)),
"--hostname={}".format(hostname),
"--user={}".format(user),
"--root-password={}".format(root_password),
"--no-kernel",
"--verbose",
]
completed_command = subprocess.run(command, check=True)
compress(name)
if __name__ == "__main__":
main()
| gpl-2.0 |
DEAP/deap | examples/coev/coop_adapt.py | 12 | 5084 | # This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
"""This example contains the adaptation test from *Potter, M. and De Jong, K.,
2001, Cooperative Coevolution: An Architecture for Evolving Co-adapted
Subcomponents.* section 4.2.3. A species is added each 100 generations.
"""
import random
try:
import matplotlib.pyplot as plt
except ImportError:
plt = False
import numpy
from deap import algorithms
from deap import tools
import coop_base
IND_SIZE = coop_base.IND_SIZE
SPECIES_SIZE = coop_base.SPECIES_SIZE
TARGET_SIZE = 30
NUM_SPECIES = 1
noise = "*##*###*###*****##*##****#*##*###*#****##******##*#**#*#**######"
schematas = ("1##1###1###11111##1##1111#1##1###1#1111##111111##1#11#1#11######",
"1##1###1###11111##1##1000#0##0###0#0000##000000##0#00#0#00######",
"0##0###0###00000##0##0000#0##0###0#0000##001111##1#11#1#11######")
toolbox = coop_base.toolbox
if plt:
toolbox.register("evaluate_nonoise", coop_base.matchSetStrengthNoNoise)
def main(extended=True, verbose=True):
target_set = []
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("avg", numpy.mean)
stats.register("std", numpy.std)
stats.register("min", numpy.min)
stats.register("max", numpy.max)
logbook = tools.Logbook()
logbook.header = "gen", "species", "evals", "std", "min", "avg", "max"
ngen = 300
adapt_length = 100
g = 0
add_next = [adapt_length]
for i in range(len(schematas)):
target_set.extend(toolbox.target_set(schematas[i], int(TARGET_SIZE/len(schematas))))
species = [toolbox.species() for _ in range(NUM_SPECIES)]
# Init with a random representative for each species
representatives = [random.choice(s) for s in species]
if plt and extended:
# We must save the match strength to plot them
t1, t2, t3 = list(), list(), list()
while g < ngen:
# Initialize a container for the next generation representatives
next_repr = [None] * len(species)
for i, s in enumerate(species):
# Vary the species individuals
s = algorithms.varAnd(s, toolbox, 0.6, 1.0)
r = representatives[:i] + representatives[i+1:]
for ind in s:
ind.fitness.values = toolbox.evaluate([ind] + r, target_set)
record = stats.compile(s)
logbook.record(gen=g, species=i, evals=len(s), **record)
if verbose:
print(logbook.stream)
# Select the individuals
species[i] = toolbox.select(s, len(s)) # Tournament selection
next_repr[i] = toolbox.get_best(s)[0] # Best selection
g += 1
if plt and extended:
# Compute the match strength without noise for the
# representatives on the three schematas
t1.append(toolbox.evaluate_nonoise(representatives,
toolbox.target_set(schematas[0], 1), noise)[0])
t2.append(toolbox.evaluate_nonoise(representatives,
toolbox.target_set(schematas[1], 1), noise)[0])
t3.append(toolbox.evaluate_nonoise(representatives,
toolbox.target_set(schematas[2], 1), noise)[0])
representatives = next_repr
# Add a species at every *adapt_length* generation
if add_next[-1] <= g < ngen:
species.append(toolbox.species())
representatives.append(random.choice(species[-1]))
add_next.append(add_next[-1] + adapt_length)
if extended:
for r in representatives:
# print individuals without noise
print("".join(str(x) for x, y in zip(r, noise) if y == "*"))
if plt and extended:
# Do the final plotting
plt.plot(t1, '-', color="k", label="Target 1")
plt.plot(t2, '--', color="k", label="Target 2")
plt.plot(t3, ':', color="k", label="Target 3")
max_t = max(max(t1), max(t2), max(t3))
for n in add_next:
plt.plot([n, n], [0, max_t + 1], "--", color="k")
plt.legend(loc="lower right")
plt.axis([0, ngen, 0, max_t + 1])
plt.xlabel("Generations")
plt.ylabel("Number of matched bits")
plt.show()
if __name__ == "__main__":
main()
| lgpl-3.0 |
ElDeveloper/scikit-learn | benchmarks/bench_rcv1_logreg_convergence.py | 149 | 7173 | # Authors: Tom Dupre la Tour <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
import gc
import time
from sklearn.externals.joblib import Memory
from sklearn.linear_model import (LogisticRegression, SGDClassifier)
from sklearn.datasets import fetch_rcv1
from sklearn.linear_model.sag import get_auto_step_size
from sklearn.linear_model.sag_fast import get_max_squared_sum
try:
import lightning.classification as lightning_clf
except ImportError:
lightning_clf = None
m = Memory(cachedir='.', verbose=0)
# compute logistic loss
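# (Annotation, not in the original benchmark.) get_loss() below evaluates the
# L2-regularized logistic loss averaged over samples,
#     loss(w, b) = mean_i log(1 + exp(-y_i * (x_i . w + b))) + ||w||^2 / (2 * C * n_samples)
# which is, up to a constant factor, the objective minimized by
# LogisticRegression(C=C).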
def get_loss(w, intercept, myX, myy, C):
n_samples = myX.shape[0]
w = w.ravel()
p = np.mean(np.log(1. + np.exp(-myy * (myX.dot(w) + intercept))))
print("%f + %f" % (p, w.dot(w) / 2. / C / n_samples))
p += w.dot(w) / 2. / C / n_samples
return p
# We use joblib to cache individual fits. Note that we do not pass the dataset
# as argument as the hashing would be too slow, so we assume that the dataset
# never changes.
@m.cache()
def bench_one(name, clf_type, clf_params, n_iter):
clf = clf_type(**clf_params)
try:
clf.set_params(max_iter=n_iter, random_state=42)
except:
clf.set_params(n_iter=n_iter, random_state=42)
st = time.time()
clf.fit(X, y)
end = time.time()
try:
C = 1.0 / clf.alpha / n_samples
except:
C = clf.C
try:
intercept = clf.intercept_
except:
intercept = 0.
train_loss = get_loss(clf.coef_, intercept, X, y, C)
train_score = clf.score(X, y)
test_score = clf.score(X_test, y_test)
duration = end - st
return train_loss, train_score, test_score, duration
def bench(clfs):
for (name, clf, iter_range, train_losses, train_scores,
test_scores, durations) in clfs:
print("training %s" % name)
clf_type = type(clf)
clf_params = clf.get_params()
for n_iter in iter_range:
gc.collect()
train_loss, train_score, test_score, duration = bench_one(
name, clf_type, clf_params, n_iter)
train_losses.append(train_loss)
train_scores.append(train_score)
test_scores.append(test_score)
durations.append(duration)
print("classifier: %s" % name)
print("train_loss: %.8f" % train_loss)
print("train_score: %.8f" % train_score)
print("test_score: %.8f" % test_score)
print("time for fit: %.8f seconds" % duration)
print("")
print("")
return clfs
def plot_train_losses(clfs):
plt.figure()
for (name, _, _, train_losses, _, _, durations) in clfs:
plt.plot(durations, train_losses, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("train loss")
def plot_train_scores(clfs):
plt.figure()
for (name, _, _, _, train_scores, _, durations) in clfs:
plt.plot(durations, train_scores, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("train score")
plt.ylim((0.92, 0.96))
def plot_test_scores(clfs):
plt.figure()
for (name, _, _, _, _, test_scores, durations) in clfs:
plt.plot(durations, test_scores, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("test score")
plt.ylim((0.92, 0.96))
def plot_dloss(clfs):
plt.figure()
pobj_final = []
for (name, _, _, train_losses, _, _, durations) in clfs:
pobj_final.append(train_losses[-1])
indices = np.argsort(pobj_final)
pobj_best = pobj_final[indices[0]]
for (name, _, _, train_losses, _, _, durations) in clfs:
log_pobj = np.log(abs(np.array(train_losses) - pobj_best)) / np.log(10)
plt.plot(durations, log_pobj, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("log(best - train_loss)")
rcv1 = fetch_rcv1()
X = rcv1.data
n_samples, n_features = X.shape
# consider the binary classification problem 'CCAT' vs the rest
ccat_idx = rcv1.target_names.tolist().index('CCAT')
y = rcv1.target.tocsc()[:, ccat_idx].toarray().ravel().astype(np.float64)
y[y == 0] = -1
# parameters
C = 1.
fit_intercept = True
tol = 1.0e-14
# max_iter range
sgd_iter_range = list(range(1, 121, 10))
newton_iter_range = list(range(1, 25, 3))
lbfgs_iter_range = list(range(1, 242, 12))
liblinear_iter_range = list(range(1, 37, 3))
liblinear_dual_iter_range = list(range(1, 85, 6))
sag_iter_range = list(range(1, 37, 3))
clfs = [
("LR-liblinear",
LogisticRegression(C=C, tol=tol,
solver="liblinear", fit_intercept=fit_intercept,
intercept_scaling=1),
liblinear_iter_range, [], [], [], []),
("LR-liblinear-dual",
LogisticRegression(C=C, tol=tol, dual=True,
solver="liblinear", fit_intercept=fit_intercept,
intercept_scaling=1),
liblinear_dual_iter_range, [], [], [], []),
("LR-SAG",
LogisticRegression(C=C, tol=tol,
solver="sag", fit_intercept=fit_intercept),
sag_iter_range, [], [], [], []),
("LR-newton-cg",
LogisticRegression(C=C, tol=tol, solver="newton-cg",
fit_intercept=fit_intercept),
newton_iter_range, [], [], [], []),
("LR-lbfgs",
LogisticRegression(C=C, tol=tol,
solver="lbfgs", fit_intercept=fit_intercept),
lbfgs_iter_range, [], [], [], []),
("SGD",
SGDClassifier(alpha=1.0 / C / n_samples, penalty='l2', loss='log',
fit_intercept=fit_intercept, verbose=0),
sgd_iter_range, [], [], [], [])]
if lightning_clf is not None and not fit_intercept:
alpha = 1. / C / n_samples
# compute the same step_size than in LR-sag
max_squared_sum = get_max_squared_sum(X)
step_size = get_auto_step_size(max_squared_sum, alpha, "log",
fit_intercept)
clfs.append(
("Lightning-SVRG",
lightning_clf.SVRGClassifier(alpha=alpha, eta=step_size,
tol=tol, loss="log"),
sag_iter_range, [], [], [], []))
clfs.append(
("Lightning-SAG",
lightning_clf.SAGClassifier(alpha=alpha, eta=step_size,
tol=tol, loss="log"),
sag_iter_range, [], [], [], []))
# We keep only 200 features, to have a dense dataset,
# and compare to lightning SAG, which seems incorrect in the sparse case.
X_csc = X.tocsc()
nnz_in_each_features = X_csc.indptr[1:] - X_csc.indptr[:-1]
X = X_csc[:, np.argsort(nnz_in_each_features)[-200:]]
X = X.toarray()
print("dataset: %.3f MB" % (X.nbytes / 1e6))
# Split training and testing. Switch train and test subset compared to
# LYRL2004 split, to have a larger training dataset.
n = 23149
X_test = X[:n, :]
y_test = y[:n]
X = X[n:, :]
y = y[n:]
clfs = bench(clfs)
plot_train_scores(clfs)
plot_test_scores(clfs)
plot_train_losses(clfs)
plot_dloss(clfs)
plt.show()
| bsd-3-clause |
waterponey/scikit-learn | sklearn/decomposition/tests/test_truncated_svd.py | 66 | 8261 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr, decimal=5)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=2)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features + 1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
tsvd = TruncatedSVD(n_components=52, random_state=42, algorithm=algo)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
# Assert the 1st component is equal
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that 20 components has higher explained variance than 10
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
def test_singular_values():
# Check that the TruncatedSVD output has the correct singular values
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
apca = TruncatedSVD(n_components=2, algorithm='arpack',
random_state=rng).fit(X)
rpca = TruncatedSVD(n_components=2, algorithm='randomized',
random_state=rng).fit(X)
assert_array_almost_equal(apca.singular_values_, rpca.singular_values_, 12)
# Compare to the Frobenius norm
X_apca = apca.transform(X)
X_rpca = rpca.transform(X)
assert_array_almost_equal(np.sum(apca.singular_values_**2.0),
np.linalg.norm(X_apca, "fro")**2.0, 12)
assert_array_almost_equal(np.sum(rpca.singular_values_**2.0),
np.linalg.norm(X_rpca, "fro")**2.0, 12)
# Compare to the 2-norms of the score vectors
assert_array_almost_equal(apca.singular_values_,
np.sqrt(np.sum(X_apca**2.0, axis=0)), 12)
assert_array_almost_equal(rpca.singular_values_,
np.sqrt(np.sum(X_rpca**2.0, axis=0)), 12)
# Set the singular values and see what we get back
rng = np.random.RandomState(0)
n_samples = 100
n_features = 110
X = rng.randn(n_samples, n_features)
apca = TruncatedSVD(n_components=3, algorithm='arpack',
random_state=rng)
rpca = TruncatedSVD(n_components=3, algorithm='randomized',
random_state=rng)
X_apca = apca.fit_transform(X)
X_rpca = rpca.fit_transform(X)
X_apca /= np.sqrt(np.sum(X_apca**2.0, axis=0))
X_rpca /= np.sqrt(np.sum(X_rpca**2.0, axis=0))
X_apca[:, 0] *= 3.142
X_apca[:, 1] *= 2.718
X_rpca[:, 0] *= 3.142
X_rpca[:, 1] *= 2.718
X_hat_apca = np.dot(X_apca, apca.components_)
X_hat_rpca = np.dot(X_rpca, rpca.components_)
apca.fit(X_hat_apca)
rpca.fit(X_hat_rpca)
assert_array_almost_equal(apca.singular_values_, [3.142, 2.718, 1.0], 14)
assert_array_almost_equal(rpca.singular_values_, [3.142, 2.718, 1.0], 14)
| bsd-3-clause |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/pandas/core/indexes/numeric.py | 3 | 13025 | import numpy as np
from pandas._libs import (index as libindex,
algos as libalgos, join as libjoin)
from pandas.core.dtypes.common import (
is_dtype_equal, pandas_dtype,
is_float_dtype, is_object_dtype,
is_integer_dtype, is_scalar)
from pandas.core.common import _asarray_tuplesafe, _values_from_object
from pandas import compat
from pandas.core import algorithms
from pandas.core.indexes.base import (
Index, InvalidIndexError, _index_shared_docs)
from pandas.util._decorators import Appender, cache_readonly
import pandas.core.indexes.base as ibase
_num_index_shared_docs = dict()
class NumericIndex(Index):
"""
Provide numeric type operations
This is an abstract class
"""
_is_numeric_dtype = True
def __new__(cls, data=None, dtype=None, copy=False, name=None,
fastpath=False):
if fastpath:
return cls._simple_new(data, name=name)
# isscalar, generators handled in coerce_to_ndarray
data = cls._coerce_to_ndarray(data)
if issubclass(data.dtype.type, compat.string_types):
cls._string_data_error(data)
if copy or not is_dtype_equal(data.dtype, cls._default_dtype):
subarr = np.array(data, dtype=cls._default_dtype, copy=copy)
cls._assert_safe_casting(data, subarr)
else:
subarr = data
if name is None and hasattr(data, 'name'):
name = data.name
return cls._simple_new(subarr, name=name)
@Appender(_index_shared_docs['_maybe_cast_slice_bound'])
def _maybe_cast_slice_bound(self, label, side, kind):
assert kind in ['ix', 'loc', 'getitem', None]
# we will try to coerce to integers
return self._maybe_cast_indexer(label)
def _convert_tolerance(self, tolerance):
try:
return float(tolerance)
except ValueError:
raise ValueError('tolerance argument for %s must be numeric: %r' %
(type(self).__name__, tolerance))
@classmethod
def _assert_safe_casting(cls, data, subarr):
"""
Subclasses need to override this only if the process of casting data
from some accepted dtype to the internal dtype(s) bears the risk of
truncation (e.g. float to int).
"""
pass
@property
def is_all_dates(self):
"""
Checks that all the labels are datetime objects
"""
return False
_num_index_shared_docs['class_descr'] = """
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for all pandas objects. %(klass)s is a special case
of `Index` with purely %(ltype)s labels. %(extra)s
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: %(dtype)s)
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
Notes
-----
An Index instance can **only** contain hashable objects.
"""
_int64_descr_args = dict(
klass='Int64Index',
ltype='integer',
dtype='int64',
extra="""This is the default index type used
by the DataFrame and Series ctors when no explicit
index is provided by the user.
"""
)
class Int64Index(NumericIndex):
__doc__ = _num_index_shared_docs['class_descr'] % _int64_descr_args
_typ = 'int64index'
_arrmap = libalgos.arrmap_int64
_left_indexer_unique = libjoin.left_join_indexer_unique_int64
_left_indexer = libjoin.left_join_indexer_int64
_inner_indexer = libjoin.inner_join_indexer_int64
_outer_indexer = libjoin.outer_join_indexer_int64
_can_hold_na = False
_engine_type = libindex.Int64Engine
_default_dtype = np.int64
@property
def inferred_type(self):
return 'integer'
@property
def asi8(self):
# do not cache or you'll create a memory leak
return self.values.view('i8')
@Appender(_index_shared_docs['_convert_scalar_indexer'])
def _convert_scalar_indexer(self, key, kind=None):
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
# don't coerce ilocs to integers
if kind != 'iloc':
key = self._maybe_cast_indexer(key)
return (super(Int64Index, self)
._convert_scalar_indexer(key, kind=kind))
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
return Int64Index(joined, name=name)
@classmethod
def _assert_safe_casting(cls, data, subarr):
"""
Ensure incoming data can be represented as ints.
"""
if not issubclass(data.dtype.type, np.signedinteger):
if not np.array_equal(data, subarr):
raise TypeError('Unsafe NumPy casting, you must '
'explicitly cast')
Int64Index._add_numeric_methods()
Int64Index._add_logical_methods()
_uint64_descr_args = dict(
klass='UInt64Index',
ltype='unsigned integer',
dtype='uint64',
extra=''
)
class UInt64Index(NumericIndex):
__doc__ = _num_index_shared_docs['class_descr'] % _uint64_descr_args
_typ = 'uint64index'
_arrmap = libalgos.arrmap_uint64
_left_indexer_unique = libjoin.left_join_indexer_unique_uint64
_left_indexer = libjoin.left_join_indexer_uint64
_inner_indexer = libjoin.inner_join_indexer_uint64
_outer_indexer = libjoin.outer_join_indexer_uint64
_can_hold_na = False
_na_value = 0
_engine_type = libindex.UInt64Engine
_default_dtype = np.uint64
@property
def inferred_type(self):
return 'integer'
@property
def asi8(self):
# do not cache or you'll create a memory leak
return self.values.view('u8')
@Appender(_index_shared_docs['_convert_scalar_indexer'])
def _convert_scalar_indexer(self, key, kind=None):
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
# don't coerce ilocs to integers
if kind != 'iloc':
key = self._maybe_cast_indexer(key)
return (super(UInt64Index, self)
._convert_scalar_indexer(key, kind=kind))
@Appender(_index_shared_docs['_convert_arr_indexer'])
def _convert_arr_indexer(self, keyarr):
# Cast the indexer to uint64 if possible so
# that the values returned from indexing are
# also uint64.
keyarr = _asarray_tuplesafe(keyarr)
if is_integer_dtype(keyarr):
return _asarray_tuplesafe(keyarr, dtype=np.uint64)
return keyarr
@Appender(_index_shared_docs['_convert_index_indexer'])
def _convert_index_indexer(self, keyarr):
# Cast the indexer to uint64 if possible so
# that the values returned from indexing are
# also uint64.
if keyarr.is_integer():
return keyarr.astype(np.uint64)
return keyarr
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
return UInt64Index(joined, name=name)
@classmethod
def _assert_safe_casting(cls, data, subarr):
"""
Ensure incoming data can be represented as uints.
"""
if not issubclass(data.dtype.type, np.unsignedinteger):
if not np.array_equal(data, subarr):
raise TypeError('Unsafe NumPy casting, you must '
'explicitly cast')
UInt64Index._add_numeric_methods()
UInt64Index._add_logical_methods()
_float64_descr_args = dict(
klass='Float64Index',
dtype='float64',
ltype='float',
extra=''
)
class Float64Index(NumericIndex):
__doc__ = _num_index_shared_docs['class_descr'] % _float64_descr_args
_typ = 'float64index'
_engine_type = libindex.Float64Engine
_arrmap = libalgos.arrmap_float64
_left_indexer_unique = libjoin.left_join_indexer_unique_float64
_left_indexer = libjoin.left_join_indexer_float64
_inner_indexer = libjoin.inner_join_indexer_float64
_outer_indexer = libjoin.outer_join_indexer_float64
_default_dtype = np.float64
@property
def inferred_type(self):
return 'floating'
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
dtype = pandas_dtype(dtype)
if is_float_dtype(dtype):
values = self._values.astype(dtype, copy=copy)
elif is_integer_dtype(dtype):
if self.hasnans:
raise ValueError('cannot convert float NaN to integer')
values = self._values.astype(dtype, copy=copy)
elif is_object_dtype(dtype):
values = self._values.astype('object', copy=copy)
else:
raise TypeError('Setting %s dtype to anything other than '
'float64 or object is not supported' %
self.__class__)
return Index(values, name=self.name, dtype=dtype)
@Appender(_index_shared_docs['_convert_scalar_indexer'])
def _convert_scalar_indexer(self, key, kind=None):
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
if kind == 'iloc':
return self._validate_indexer('positional', key, kind)
return key
@Appender(_index_shared_docs['_convert_slice_indexer'])
def _convert_slice_indexer(self, key, kind=None):
# if we are not a slice, then we are done
if not isinstance(key, slice):
return key
if kind == 'iloc':
return super(Float64Index, self)._convert_slice_indexer(key,
kind=kind)
# translate to locations
return self.slice_indexer(key.start, key.stop, key.step, kind=kind)
def _format_native_types(self, na_rep='', float_format=None, decimal='.',
quoting=None, **kwargs):
from pandas.io.formats.format import FloatArrayFormatter
formatter = FloatArrayFormatter(self.values, na_rep=na_rep,
float_format=float_format,
decimal=decimal, quoting=quoting,
fixed_width=False)
return formatter.get_result_as_array()
def get_value(self, series, key):
""" we always want to get an index value, never a value """
if not is_scalar(key):
raise InvalidIndexError
k = _values_from_object(key)
loc = self.get_loc(k)
new_values = _values_from_object(series)[loc]
return new_values
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self is other:
return True
if not isinstance(other, Index):
return False
# need to compare nans locations and make sure that they are the same
# since nans don't compare equal this is a bit tricky
try:
if not isinstance(other, Float64Index):
other = self._constructor(other)
if (not is_dtype_equal(self.dtype, other.dtype) or
self.shape != other.shape):
return False
left, right = self._values, other._values
return ((left == right) | (self._isnan & other._isnan)).all()
except (TypeError, ValueError):
return False
def __contains__(self, other):
if super(Float64Index, self).__contains__(other):
return True
try:
# if other is a sequence this throws a ValueError
return np.isnan(other) and self.hasnans
except ValueError:
try:
return len(other) <= 1 and ibase._try_get_item(other) in self
except TypeError:
return False
except:
return False
@Appender(_index_shared_docs['get_loc'])
def get_loc(self, key, method=None, tolerance=None):
try:
if np.all(np.isnan(key)):
nan_idxs = self._nan_idxs
try:
return nan_idxs.item()
except (ValueError, IndexError):
# should only need to catch ValueError here but on numpy
# 1.7 .item() can raise IndexError when NaNs are present
return nan_idxs
except (TypeError, NotImplementedError):
pass
return super(Float64Index, self).get_loc(key, method=method,
tolerance=tolerance)
@cache_readonly
def is_unique(self):
return super(Float64Index, self).is_unique and self._nan_idxs.size < 2
@Appender(Index.isin.__doc__)
def isin(self, values, level=None):
if level is not None:
self._validate_index_level(level)
return algorithms.isin(np.array(self), values)
Float64Index._add_numeric_methods()
Float64Index._add_logical_methods_disabled()
| mit |
sonalranjit/SECS | EICS_krig.py | 2 | 8378 | __author__ ="Sonal Ranjit"
'''
This script parses through GOCE satellite data that includes time information and geodetic coordinates, and, based on
the EICS grid for that time, the horizontal components of the ionospheric current are krigged for the satellite position.
The package used for kriging is geostatsmodels, developed by Connor Johnson.
A simple example can be found at: http://connor-johnson.com/2014/03/20/simple-kriging-in-python/
The github for the project can be accessed at: https://github.com/cjohnson318/geostatsmodels
'''
from geostatsmodels import model, kriging
import numpy as np
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import os
from math import *
def lla2ecef(lla):
''' This function converts an input matrix of geodetic coordinates of size nx3 to ECEF coordinates on
the ellipsoid (ellipsoid height = 0), then outputs it.
--------------------------------------------------------------
INPUT: A row vector or a matrix of geodetic coordinates: Latitude, Longitude, and height. The height in this case
can be 0, or for the GOCE data type it can just be the instrument value, since the coordinates are only
converted to the surface of the ellipsoid.
---------------------------------------------------------------
OUTPUT: A row vector or a matrix of ECEF coordinates: X, Y, and height or the instrument value in this case.
'''
# WGS84 constants
a = 6378137.
b = 6356752.3142
e2 = 1-(b**2)/(a**2)
# check for 1D case:
dim = len(lla.shape)
if dim == 1:
lla = np.reshape(lla,(1,3))
# convert lat and lon to radians
lat = lla[:,0]/180.*pi
lon = (lla[:,1]+360)/180.*pi
# preallocate the output vector for the ECEF coordinates
xyz = np.array(np.zeros(lla.shape))
# Radius of the prime vertical
N = a/np.sqrt(1-e2*np.sin(lat)*np.sin(lat))
# Calculate the X-coordinate
xyz[:,0] = (N)*np.cos(lat)*np.cos(lon)
# Calculate the Y-coordinate
xyz[:,1] = N*np.sin(lon)*np.cos(lat)
# Keep the SECS data as it is
xyz[:,2] = lla[:,2]
#return the ECEF coordinates
return np.array(xyz)
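# Usage sketch (illustrative, not part of the original script):
#
#   sample_lla = np.array([65.0, -105.0, 0.0])  # latitude (deg), longitude (deg), payload value
#   sample_xyz = lla2ecef(sample_lla)           # -> 1x3 array: ECEF X (m), ECEF Y (m), payload value
#
# The third column is passed through unchanged, so it can carry an instrument or
# current value rather than an ellipsoidal height.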
def plot_grid(EIC_grid,sat_latlon,ptz_u,ptz_v,title):
'''
This function plots a scatter map of the EICS grid and its horizontal components, and the krigged value for the
satellite.
:param EIC_grid: The EICS grid
:param sat_latlon: The position of the satellite
:param ptz_u: The krigged u component of the satellite
:param ptz_v: The krigged v component of the satellite
:param title: Timestamp of the satellite
:return: None (the figure is saved to disk)
'''
# Define the size of the figure in inches
plt.figure(figsize=(18,18))
# The horizontal components of the Ionospheric current from the EICS grid
u = EIC_grid[:,2]
v = EIC_grid[:,3]
'''
The m variable defines the basemap of the area that is going to be displayed.
1) width and height give the extent of the displayed area in projection coordinates (metres)
2) resolution is the resolution of the boundary dataset being used, 'c' for crude and 'l' for low
3) projection is the type of projection of the basemap, in this case a Lambert Azimuthal Equal Area projection
4) lat_ts is the latitude of true scale
5) lat_0 and lon_0 are the latitude and longitude of the central point of the basemap
'''
m = Basemap(width=8000000, height=8000000, resolution='l', projection='laea',\
lat_ts=min(EIC_grid[:,0]), lat_0=np.median(EIC_grid[:,0]),lon_0=-100.)
m.drawcoastlines() #draw the coastlines on the basemap
# draw parallels and meridians and label them
m.drawparallels(np.arange(-80.,81.,20.),labels=[1,0,0,0],fontsize=10)
m.drawmeridians(np.arange(-180.,181.,20.),labels=[0,0,0,1],fontsize=10)
# Project the inputted grid into x,y values defined of the projected basemap parameters
x,y =m(EIC_grid[:,1],EIC_grid[:,0])
satx,saty = m(sat_latlon[0,1],sat_latlon[0,0])
'''
Plot the inputted grid as a quiver plot on the basemap,
1) x,y are the projected latitude and longitude coordinates of the grid
2) u and v are the horizontal components of the current
3) the EICS grid values are plotted in blue, whereas the satellite krigged values are in red
'''
m.quiver(x,y,u,v,width = 0.004, scale=10000,color='#0000FF')
m.quiver(satx,saty,ptz_u[0],ptz_v[0],color='#FF0000',width=0.004, scale = 10000)
m.scatter(satx,saty,s=400,facecolors='none',edgecolors='#66FF66',linewidth='5')
plt.title(title)
plt.savefig('/home/sonal/EICS_201104/figs/'+title+'.png',bbox_inches='tight',pad_inches=0.1)
#plt.show()
plt.clf()
'''
Main part of the script. Here the GOCE satellite timestamps and positions are loaded. Then, for each timestamp and position,
the corresponding EICS grid is searched for. If there is an EICS grid for the current time of the satellite then, using
that grid, the horizontal component values are krigged for the current satellite position, and a figure is plotted and saved.
'''
#Load the GOCE satellite data
sat_data = np.loadtxt('/home/sonal/SECS/sat_track_201104.txt')
# add two extra columns to the satellite data to hold the krigged u and v values
zero_col = np.zeros((len(sat_data),2))
sat_data = np.column_stack((sat_data,zero_col))
prev_min = []
# Iterate through the whole matrix of satellite data
for i in range(len(sat_data)):
'''
This section of the loop parses the time information from the satellite data to form a string which is used to check
if an EICS grid exists for that time.
'''
# Define the path to where all the EICS grids lie
eics_path = '/home/sonal/EICS_201104/'
# Extract the Year, Month, Day, Hour, Minutes, Seconds from the satellite data.
sat_y = str(int(sat_data[i,0]))
sat_m = str(int(sat_data[i,1])).zfill(2)
sat_d = str(int(sat_data[i,2])).zfill(2)
sat_ymd = sat_y+sat_m+sat_d
sat_h = str(int(sat_data[i,3])).zfill(2)
sat_mins = str(int(sat_data[i,4])).zfill(2)
sat_secs = str(int(floor(sat_data[i,5]))).zfill(2)
sat_hms = sat_h+sat_mins+sat_secs
# Concatenate all the time information to a single string to see if the SECS grid exists
EICS_file = eics_path+'EICS'+sat_ymd+'/'+sat_d+'/'+'EICS'+sat_ymd+'_'+sat_hms+'.dat'
'''
This section checks whether there is an EICS grid available for the current time instance of the satellite data; if it
does exist then the EICS grid is loaded and, using the grid, a value is krigged for the current satellite position
on the grid.
'''
#if os.path.exists(EICS_file):
if os.path.exists(EICS_file) and (sat_mins !=prev_min):
print "Processing file "+str(i)+" of "+str(len(sat_data))
# Load the EICS grid and convert to ECEF
EIC_grid = np.loadtxt(EICS_file)
eic_u = EIC_grid[:,:3]
eic_v = np.column_stack((EIC_grid[:,:2],EIC_grid[:,3]))
# The EICS grid contains 2 values for the horizontal components; they have to be separated and krigged separately
eic_xyu = lla2ecef(eic_u)
eic_xyv = lla2ecef(eic_v)
# Load the Satellite position and convert it to ECEF
sat_latlon = np.zeros((1,3))
sat_latlon[:,(0,1)] = sat_data[i,(6,7)]
sat_latlon[:,1] = sat_latlon[:,1]-360
sat_xyz = lla2ecef(sat_latlon)
# Determine the sill for the semivariance model of the grid
sill_u = np.var(eic_xyu[:,2])
sill_v = np.var(eic_xyv[:,2])
# Define the type of semivariance model to be used for kriging
covfct_u = model.covariance(model.exponential,(900000, sill_u))
covfct_v = model.covariance(model.exponential,(900000, sill_v))
# Krig the value for the satellite position using simple kriging and the defined semivariance model and 10
# neighbouring points
ptz_u = kriging.simple(eic_xyu,covfct_u,sat_xyz[:,:2],N=10)
ptz_v = kriging.simple(eic_xyv,covfct_v,sat_xyz[:,:2],N=10)
# Add the krigged value to the different variables
sat_data[i,8] = ptz_u[0]
sat_data[i,9] = ptz_v[0]
timestamp = sat_ymd+sat_hms
#Call the plotting function to plot the grid and krigged values
plot_grid(EIC_grid,sat_latlon,ptz_u,ptz_v,timestamp)
prev_min = sat_mins
#np.savetxt('sat_EICS_april_krigged.txt',sat_data,delimiter='\t')
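# Minimal kriging sketch (illustrative, not part of the original script), using the
# same geostatsmodels calls as the main loop above. Here `data` stands for an n x 3
# array of (ECEF X, ECEF Y, component value) and `query_xy` for a 1 x 2 array with
# the ECEF X, Y of the point to estimate:
#
#   sill = np.var(data[:, 2])
#   covfct = model.covariance(model.exponential, (900000, sill))
#   estimate = kriging.simple(data, covfct, query_xy, N=10)
#
# As in the loop above, the first element of the result (estimate[0]) is what this
# script stores and plots as the krigged value.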
| gpl-2.0 |
sildar/potara | potara/takahe.py | 1 | 58851 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
:Name:
takahe
:Authors:
Florian Boudin ([email protected])
:Version:
0.4
:Date:
Mar. 2013
:Description:
takahe is a multi-sentence compression module. Given a set of redundant
sentences, a word-graph is constructed by iteratively adding sentences to
it. The best compression is obtained by finding the shortest path in the
word graph. The original algorithm was published and described in
[filippova:2010:COLING]_. A keyphrase-based reranking method, described in
[boudin-morin:2013:NAACL]_ can be applied to generate more informative
compressions.
.. [filippova:2010:COLING] Katja Filippova, Multi-Sentence Compression:
Finding Shortest Paths in Word Graphs, *Proceedings of the 23rd
International Conference on Computational Linguistics (Coling 2010)*,
pages 322-330, 2010.
.. [boudin-morin:2013:NAACL] Florian Boudin and Emmanuel Morin, Keyphrase
Extraction for N-best Reranking in Multi-Sentence Compression,
*Proceedings of the 2013 Conference of the North American Chapter of the
Association for Computational Linguistics: Human Language Technologies
(NAACL-HLT 2013)*, 2013.
:History:
Development history of the takahe module:
- 0.4 (Mar. 2013) adding the keyphrase-based nbest reranking algorithm
- 0.33 (Feb. 2013), bug fixes and better code documentation
- 0.32 (Jun. 2012), Punctuation marks are now considered within the
graph, compressions are then punctuated
- 0.31 (Nov. 2011), modified context function (uses the left and right
contexts), improved docstring documentation, bug fixes
- 0.3 (Oct. 2011), improved K-shortest paths algorithm including
verb/size constraints and ordered lists for performance
- 0.2 (Dec. 2010), removed dependencies from nltk (i.e. POS-tagging,
tokenization and stopwords removal)
- 0.1 (Nov. 2010), first version
:Dependencies:
The following Python modules are required:
- `networkx <http://networkx.github.com/>`_ for the graph construction
(v1.2+)
:Usage:
A typical usage of this module is::
import takahe
# A list of tokenized and POS-tagged sentences
sentences = ['Hillary/NNP Clinton/NNP wanted/VBD to/TO visit/VB ...']
# Create a word graph from the set of sentences with parameters :
# - minimal number of words in the compression : 6
# - language of the input sentences : en (english)
# - POS tag for punctuation marks : PUNCT
compresser = takahe.word_graph( sentences,
nb_words = 6,
lang = 'en',
punct_tag = "PUNCT" )
# Get the 50 best paths
candidates = compresser.get_compression(50)
# 1. Rerank compressions by path length (Filippova's method)
for cummulative_score, path in candidates:
# Normalize path score by path length
normalized_score = cummulative_score / len(path)
# Print normalized score and compression
print round(normalized_score, 3), ' '.join([u[0] for u in path])
# Write the word graph in the dot format
compresser.write_dot('test.dot')
# 2. Rerank compressions by keyphrases (Boudin and Morin's method)
reranker = takahe.keyphrase_reranker( sentences,
candidates,
lang = 'en' )
reranked_candidates = reranker.rerank_nbest_compressions()
# Loop over the best reranked candidates
for score, path in reranked_candidates:
# Print the best reranked candidates
print round(score, 3), ' '.join([u[0] for u in path])
:Misc:
The Takahe is a flightless bird indigenous to New Zealand. It was thought to
be extinct after the last four known specimens were taken in 1898. However,
after a carefully planned search effort the bird was rediscovered on
November 20, 1948. (Wikipedia, http://en.wikipedia.org/wiki/takahe)
"""
import math
import codecs
import os
import re
import sys
import bisect
import networkx as nx
#import matplotlib.pyplot as plt
#~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
# [ Class word_graph
#~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
class word_graph:
"""
The word_graph class constructs a word graph from the set of sentences given
as input. The set of sentences is a list of strings, sentences are tokenized
and words are POS-tagged (e.g. ``"Saturn/NNP is/VBZ the/DT sixth/JJ
planet/NN from/IN the/DT Sun/NNP in/IN the/DT Solar/NNP System/NNP"``).
Four optional parameters can be specified:
- nb_words is the minimal number of words for the best compression
(default value is 8).
- lang is the language parameter and is used for selecting the correct
stopwords list (default is "en" for english, stopword lists are localized
in /resources/ directory).
- punct_tag is the punctuation mark tag used during graph construction
(default is PUNCT).
"""
#-T-----------------------------------------------------------------------T-
def __init__(self, sentence_list, nb_words=8, lang="en", punct_tag="PUNCT", pos_sep="/"):
self.sentence = list(sentence_list)
""" A list of sentences provided by the user. """
self.length = len(sentence_list)
""" The number of sentences given for fusion. """
self.nb_words = nb_words
""" The minimal number of words in the compression. """
self.resources = os.path.dirname(__file__) + '/resources/'
""" The path of the resources folder. """
self.stopword_path = self.resources+'stopwords.'+lang+'.dat'
""" The path of the stopword list, e.g. stopwords.[lang].dat. """
self.stopwords = self.load_stopwords(self.stopword_path)
""" The set of stopwords loaded from stopwords.[lang].dat. """
self.punct_tag = punct_tag
""" The stopword tag used in the graph. """
self.graph = nx.DiGraph()
""" The directed graph used for fusion. """
self.start = '-start-'
""" The start token in the graph. """
self.stop = '-end-'
""" The end token in the graph. """
self.sep = '/-/'
""" The separator used between a word and its POS in the graph. """
self.pos_sep = pos_sep
self.term_freq = {}
""" The frequency of a given term. """
self.verbs = set(['VB', 'VBD', 'VBP', 'VBZ', 'VH', 'VHD', 'VHP', 'VHZ',
'VV', 'VVD', 'VVP', 'VVZ'])
"""
The list of verb POS tags required in the compression. At least *one*
verb must occur in the candidate compressions.
"""
# Replacing default values for French
if lang == "fr":
self.verbs = set(['V', 'VPP', 'VINF'])
# 1. Pre-process the sentences
self.pre_process_sentences()
# 2. Compute term statistics
self.compute_statistics()
# 3. Build the word graph
self.build_graph()
#-B-----------------------------------------------------------------------B-
#-T-----------------------------------------------------------------------T-
def pre_process_sentences(self):
"""
Pre-process the list of sentences given as input. Split sentences using
whitespaces and convert each sentence to a list of (word, POS) tuples.
"""
for i in range(self.length):
# Normalise extra white spaces
self.sentence[i] = re.sub(' +', ' ', self.sentence[i])
self.sentence[i] = self.sentence[i].strip()
# Tokenize the current sentence in word/POS
sentence = self.sentence[i].split(' ')
# Creating an empty container for the cleaned up sentence
container = [(self.start, self.start)]
# Looping over the words
for w in sentence:
# Splitting word, POS
m = re.match("^(.+)/(.+)$", w)
# Extract the word information
token, POS = m.group(1), m.group(2)
# Add the token/POS to the sentence container
container.append((token.lower(), POS))
# Add the stop token at the end of the container
container.append((self.stop, self.stop))
# Recopy the container into the current sentence
self.sentence[i] = container
#-B-----------------------------------------------------------------------B-
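# Illustrative example (not part of the original module): after
# pre_process_sentences(), a sentence given as
#   "Saturn/NNP is/VBZ the/DT sixth/JJ planet/NN"
# is stored as a list of (word, POS) tuples wrapped in start/stop markers:
#   [('-start-', '-start-'), ('saturn', 'NNP'), ('is', 'VBZ'),
#    ('the', 'DT'), ('sixth', 'JJ'), ('planet', 'NN'), ('-end-', '-end-')]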
#-T-----------------------------------------------------------------------T-
def build_graph(self):
"""
Constructs a directed word graph from the list of input sentences. Each
sentence is iteratively added to the directed graph according to the
following algorithm:
- Word mapping/creation is done in four steps:
1. non-stopwords for which no candidate exists in the graph or for
which an unambiguous mapping is possible or which occur more than
once in the sentence
2. non-stopwords for which there are either several possible
candidates in the graph
3. stopwords
4. punctuation marks
For the last three groups of words where mapping is ambiguous we check
the immediate context (the preceding and following words in the sentence
and the neighboring nodes in the graph) and select the candidate which
has larger overlap in the context, or the one with a greater frequency
(i.e. the one which has more words mapped onto it). Stopwords are mapped
only if there is some overlap in non-stopwords neighbors, otherwise a
new node is created. Punctuation marks are mapped only if the preceding
and following words in the sentence and the neighboring nodes are the
same.
- Edges are then computed and added between mapped words.
Each node in the graph is represented as a tuple ('word/POS', id) and
possesses an info list containing (sentence_id, position_in_sentence)
tuples.
"""
# Iteratively add each sentence in the graph ---------------------------
for i in range(self.length):
# Compute the sentence length
sentence_len = len(self.sentence[i])
# Create the mapping container
mapping = [0] * sentence_len
#-------------------------------------------------------------------
# 1. non-stopwords for which no candidate exists in the graph or for
# which an unambiguous mapping is possible or which occur more
# than once in the sentence.
#-------------------------------------------------------------------
for j in range(sentence_len):
# Get the word and tag
token, POS = self.sentence[i][j]
# If stopword or punctuation mark, continues
if token in self.stopwords or re.search('(?u)^\W$', token):
continue
# Create the node identifier
node = token.lower() + self.sep + POS
# Find the number of ambiguous nodes in the graph
k = self.ambiguous_nodes(node)
# If there is no node in the graph, create one with id = 0
if k == 0:
# Add the node in the graph
self.graph.add_node( (node, 0), info=[(i, j)],
label=token.lower() )
# Mark the word as mapped to k
mapping[j] = (node, 0)
# If there is only one matching node in the graph (id is 0)
elif k == 1:
# Get the sentences id of this node
ids = []
for sid, pos_s in self.graph.node[(node, 0)]['info']:
ids.append(sid)
# Update the node in the graph if not same sentence
if not i in ids:
self.graph.node[(node, 0)]['info'].append((i, j))
mapping[j] = (node, 0)
# Else Create new node for redundant word
else:
self.graph.add_node( (node, 1), info=[(i, j)],
label=token.lower() )
mapping[j] = (node, 1)
#-------------------------------------------------------------------
# 2. non-stopwords for which there are either several possible
# candidates in the graph.
#-------------------------------------------------------------------
for j in range(sentence_len):
# Get the word and tag
token, POS = self.sentence[i][j]
# If stopword or punctuation mark, continues
if token in self.stopwords or re.search('(?u)^\W$', token):
continue
# If word is not already mapped to a node
if mapping[j] == 0:
# Create the node identifier
node = token.lower() + self.sep + POS
# Create the neighboring nodes identifiers
prev_token, prev_POS = self.sentence[i][j-1]
next_token, next_POS = self.sentence[i][j+1]
prev_node = prev_token.lower() + self.sep + prev_POS
next_node = next_token.lower() + self.sep + next_POS
# Find the number of ambiguous nodes in the graph
k = self.ambiguous_nodes(node)
# Search for the ambiguous node with the larger overlap in
# context or the greater frequency.
ambinode_overlap = []
ambinode_frequency = []
# For each ambiguous node
for l in range(k):
# Get the immediate context words of the nodes
l_context = self.get_directed_context(node, l, 'left')
r_context = self.get_directed_context(node, l, 'right')
# Compute the (directed) context sum
val = l_context.count(prev_node)
val += r_context.count(next_node)
# Add the count of the overlapping words
ambinode_overlap.append(val)
# Add the frequency of the ambiguous node
ambinode_frequency.append(
len( self.graph.node[(node, l)]['info'] )
)
# Search for the best candidate while avoiding a loop
found = False
selected = 0
while not found:
# Select the ambiguous node
selected = self.max_index(ambinode_overlap)
if ambinode_overlap[selected] == 0:
selected = self.max_index(ambinode_frequency)
# Get the sentences id of this node
ids = []
for sid, p in self.graph.node[(node, selected)]['info']:
ids.append(sid)
# Test if there is no loop
if i not in ids:
found = True
break
# Remove the candidate from the lists
else:
del ambinode_overlap[selected]
del ambinode_frequency[selected]
# Avoid endless loops
if len(ambinode_overlap) == 0:
break
# Update the node in the graph if not same sentence
if found:
self.graph.node[(node, selected)]['info'].append((i, j))
mapping[j] = (node, selected)
# Else create new node for redundant word
else:
self.graph.add_node( (node, k), info=[(i, j)],
label=token.lower() )
mapping[j] = (node, k)
#-------------------------------------------------------------------
# 3. map the stopwords to the nodes
#-------------------------------------------------------------------
for j in range(sentence_len):
# Get the word and tag
token, POS = self.sentence[i][j]
# If *NOT* stopword, continues
if not token in self.stopwords :
continue
# Create the node identifier
node = token.lower() + self.sep + POS
# Find the number of ambiguous nodes in the graph
k = self.ambiguous_nodes(node)
# If there is no node in the graph, create one with id = 0
if k == 0:
# Add the node in the graph
self.graph.add_node( (node, 0), info=[(i, j)],
label=token.lower() )
# Mark the word as mapped to k
mapping[j] = (node, 0)
# Else find the node with overlap in context or create one
else:
# Create the neighboring nodes identifiers
prev_token, prev_POS = self.sentence[i][j-1]
next_token, next_POS = self.sentence[i][j+1]
prev_node = prev_token.lower() + self.sep + prev_POS
next_node = next_token.lower() + self.sep + next_POS
ambinode_overlap = []
# For each ambiguous node
for l in range(k):
# Get the immediate context words of the nodes, the
# boolean indicates to consider only non stopwords
l_context = self.get_directed_context(node, l, 'left',\
True)
r_context = self.get_directed_context(node, l, 'right',\
True)
# Compute the (directed) context sum
val = l_context.count(prev_node)
val += r_context.count(next_node)
# Add the count of the overlapping words
ambinode_overlap.append(val)
# Get best overlap candidate
selected = self.max_index(ambinode_overlap)
# Get the sentences id of the best candidate node
ids = []
for sid, pos_s in self.graph.node[(node, selected)]['info']:
ids.append(sid)
# Update the node in the graph if not same sentence and
# there is at least one overlap in context
if i not in ids and ambinode_overlap[selected] > 0:
# if i not in ids and \
# (ambinode_overlap[selected] > 1 and POS==self.punct_tag) or\
# (ambinode_overlap[selected] > 0 and POS!=self.punct_tag) :
# Update the node in the graph
self.graph.node[(node, selected)]['info'].append((i, j))
# Mark the word as mapped to k
mapping[j] = (node, selected)
# Else create a new node
else:
# Add the node in the graph
self.graph.add_node( (node, k) , info=[(i, j)],
label=token.lower() )
# Mark the word as mapped to k
mapping[j] = (node, k)
#-------------------------------------------------------------------
# 4. lastly map the punctuation marks to the nodes
#-------------------------------------------------------------------
for j in range(sentence_len):
# Get the word and tag
token, POS = self.sentence[i][j]
# If *NOT* punctuation mark, continues
if not re.search('(?u)^\W$', token):
continue
# Create the node identifier
node = token.lower() + self.sep + POS
# Find the number of ambiguous nodes in the graph
k = self.ambiguous_nodes(node)
# If there is no node in the graph, create one with id = 0
if k == 0:
# Add the node in the graph
self.graph.add_node( (node, 0), info=[(i, j)],
label=token.lower() )
# Mark the word as mapped to k
mapping[j] = (node, 0)
# Else find the node with overlap in context or create one
else:
# Create the neighboring nodes identifiers
prev_token, prev_POS = self.sentence[i][j-1]
next_token, next_POS = self.sentence[i][j+1]
prev_node = prev_token.lower() + self.sep + prev_POS
next_node = next_token.lower() + self.sep + next_POS
ambinode_overlap = []
# For each ambiguous node
for l in range(k):
# Get the immediate context words of the nodes
l_context = self.get_directed_context(node, l, 'left')
r_context = self.get_directed_context(node, l, 'right')
# Compute the (directed) context sum
val = l_context.count(prev_node)
val += r_context.count(next_node)
# Add the count of the overlapping words
ambinode_overlap.append(val)
# Get best overlap candidate
selected = self.max_index(ambinode_overlap)
# Get the sentences id of the best candidate node
ids = []
for sid, pos_s in self.graph.node[(node, selected)]['info']:
ids.append(sid)
# Update the node in the graph if not same sentence and
# there is at least one overlap in context
if i not in ids and ambinode_overlap[selected] > 1:
# Update the node in the graph
self.graph.node[(node, selected)]['info'].append((i, j))
# Mark the word as mapped to k
mapping[j] = (node, selected)
# Else create a new node
else:
# Add the node in the graph
self.graph.add_node( (node, k), info=[(i, j)],
label=token.lower() )
# Mark the word as mapped to k
mapping[j] = (node, k)
#-------------------------------------------------------------------
            # 5. Connects the mapped words with directed edges
#-------------------------------------------------------------------
for j in range(1, len(mapping)):
self.graph.add_edge(mapping[j-1], mapping[j])
        # Assigns a weight to each edge in the graph ---------------------------
for node1, node2 in self.graph.edges_iter():
edge_weight = self.get_edge_weight(node1, node2)
self.graph.add_edge(node1, node2, weight=edge_weight)
#-B-----------------------------------------------------------------------B-
#-T-----------------------------------------------------------------------T-
def ambiguous_nodes(self, node):
"""
Takes a node in parameter and returns the number of possible candidate
(ambiguous) nodes in the graph.
"""
k = 0
while(self.graph.has_node((node, k))):
k += 1
return k
#-B-----------------------------------------------------------------------B-
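    # Illustrative example for ambiguous_nodes() above: if the graph already
    # contains the nodes ('is/VBZ', 0) and ('is/VBZ', 1), then
    # ambiguous_nodes('is/VBZ') returns 2, and a further copy of the word
    # would be added as ('is/VBZ', 2).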
#-T-----------------------------------------------------------------------T-
def get_directed_context(self, node, k, dir='all', non_pos=False):
"""
Returns the directed context of a given node, i.e. a list of word/POS of
the left or right neighboring nodes in the graph. The function takes
four parameters :
- node is the word/POS tuple
- k is the node identifier used when multiple nodes refer to the same
word/POS (e.g. k=0 for (the/DET, 0), k=1 for (the/DET, 1), etc.)
- dir is the parameter that controls the directed context calculation,
it can be set to left, right or all (default)
        - non_pos is a boolean indicating whether stopwords should be removed
          from the context (default is false)
"""
# Define the context containers
l_context = []
r_context = []
# For all the sentence/position tuples
for sid, off in self.graph.node[(node, k)]['info']:
prev = self.sentence[sid][off-1][0].lower() + self.sep +\
self.sentence[sid][off-1][1]
next = self.sentence[sid][off+1][0].lower() + self.sep +\
self.sentence[sid][off+1][1]
if non_pos:
if self.sentence[sid][off-1][0] not in self.stopwords:
l_context.append(prev)
if self.sentence[sid][off+1][0] not in self.stopwords:
r_context.append(next)
else:
l_context.append(prev)
r_context.append(next)
# Returns the left (previous) context
if dir == 'left':
return l_context
# Returns the right (next) context
elif dir == 'right':
return r_context
# Returns the whole context
else:
l_context.extend(r_context)
return l_context
#-B-----------------------------------------------------------------------B-
#-T-----------------------------------------------------------------------T-
def get_edge_weight(self, node1, node2):
"""
Compute the weight of an edge *e* between nodes *node1* and *node2*. It
is computed as e_ij = (A / B) / C with:
- A = freq(i) + freq(j),
- B = Sum (s in S) 1 / diff(s, i, j)
- C = freq(i) * freq(j)
A node is a tuple of ('word/POS', unique_id).
"""
# Get the list of (sentence_id, pos_in_sentence) for node1
info1 = self.graph.node[node1]['info']
# Get the list of (sentence_id, pos_in_sentence) for node2
info2 = self.graph.node[node2]['info']
# Get the frequency of node1 in the graph
# freq1 = self.graph.degree(node1)
freq1 = len(info1)
# Get the frequency of node2 in cluster
# freq2 = self.graph.degree(node2)
freq2 = len(info2)
# Initializing the diff function list container
diff = []
# For each sentence of the cluster (for s in S)
for s in range(self.length):
# Compute diff(s, i, j) which is calculated as
# pos(s, i) - pos(s, j) if pos(s, i) < pos(s, j)
            # 0 otherwise
# Get the positions of i and j in s, named pos(s, i) and pos(s, j)
# As a word can appear at multiple positions in a sentence, a list
# of positions is used
pos_i_in_s = []
pos_j_in_s = []
# For each (sentence_id, pos_in_sentence) of node1
for sentence_id, pos_in_sentence in info1:
# If the sentence_id is s
if sentence_id == s:
# Add the position in s
pos_i_in_s.append(pos_in_sentence)
# For each (sentence_id, pos_in_sentence) of node2
for sentence_id, pos_in_sentence in info2:
# If the sentence_id is s
if sentence_id == s:
# Add the position in s
pos_j_in_s.append(pos_in_sentence)
# Container for all the diff(s, i, j) for i and j
all_diff_pos_i_j = []
# Loop over all the i, j couples
for x in range(len(pos_i_in_s)):
for y in range(len(pos_j_in_s)):
diff_i_j = pos_i_in_s[x] - pos_j_in_s[y]
# Test if word i appears *BEFORE* word j in s
if diff_i_j < 0:
all_diff_pos_i_j.append(-1.0*diff_i_j)
            # Add the minimum distance to diff (i.e. in case of multiple
            # occurrences of i and/or j in sentence s), 0 otherwise.
if len(all_diff_pos_i_j) > 0:
diff.append(1.0/min(all_diff_pos_i_j))
else:
diff.append(0.0)
weight1 = freq1
weight2 = freq2
        # A = freq(i) + freq(j), as stated in the docstring above
        return ( (freq1 + freq2) / sum(diff) ) / (weight1 * weight2)
#-B-----------------------------------------------------------------------B-
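    # Worked example for the edge weight above (illustrative values): if node i
    # occurs in sentences {0, 1} and node j in sentences {0, 2}, with i at
    # position 2 and j at position 5 of sentence 0, then
    #   freq(i) = 2, freq(j) = 2, diff(0, i, j) = 5 - 2 = 3
    #   B = 1/3, so e_ij = ((2 + 2) / (1/3)) / (2 * 2) = 3.0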
#-T-----------------------------------------------------------------------T-
def k_shortest_paths(self, start, end, k=10):
"""
Simple implementation of a k-shortest paths algorithms. Takes three
parameters: the starting node, the ending node and the number of
shortest paths desired. Returns a list of k tuples (path, weight).
"""
# Initialize the list of shortest paths
kshortestpaths = []
# Initializing the label container
orderedX = []
orderedX.append((0, start, 0))
# Initializing the path container
paths = {}
paths[(0, start, 0)] = [start]
# Initialize the visited container
visited = {}
visited[start] = 0
# Initialize the sentence container that will be used to remove
        # duplicate sentences passing through different nodes
sentence_container = {}
        # While the number of shortest paths isn't reached and there are still
        # candidate paths to explore
while len(kshortestpaths) < k and len(orderedX) > 0:
# Searching for the shortest distance in orderedX
shortest = orderedX.pop(0)
shortestpath = paths[shortest]
# Removing the shortest node from X and paths
del paths[shortest]
# Iterating over the accessible nodes
for node in self.graph.neighbors(shortest[1]):
# To avoid loops
if node in shortestpath:
continue
# Compute the weight to node
w = shortest[0] + self.graph[shortest[1]][node]['weight']
# If found the end, adds to k-shortest paths
if node == end:
#-T-------------------------------------------------------T-
# --- Constraints on the shortest paths
                    # 1. Check if the path contains at least one verb
# 2. Check the length of the shortest path, without
# considering punctuation marks and starting node (-1 in
# the range loop, because nodes are reversed)
# 3. Check the paired parentheses and quotation marks
# 4. Check if sentence is not redundant
nb_verbs = 0
length = 0
paired_parentheses = 0
quotation_mark_number = 0
raw_sentence = ''
for i in range(len(shortestpath) - 1):
word, tag = shortestpath[i][0].split(self.sep)
# 1.
if tag in self.verbs:
nb_verbs += 1
# 2.
if not re.search('(?u)^\W$', word):
length += 1
# 3.
else:
if word == '(':
paired_parentheses -= 1
elif word == ')':
paired_parentheses += 1
elif word == '"':
quotation_mark_number += 1
# 4.
raw_sentence += word + ' '
# Remove extra space from sentence
raw_sentence = raw_sentence.strip()
if nb_verbs >0 and \
length >= self.nb_words and \
paired_parentheses == 0 and \
(quotation_mark_number%2) == 0 \
and raw_sentence not in sentence_container:
path = [node]
path.extend(shortestpath)
path.reverse()
weight = float(w) #/ float(length)
kshortestpaths.append((path, weight))
sentence_container[raw_sentence] = 1
#-B-------------------------------------------------------B-
else:
# test if node has already been visited
if node in visited:
visited[node] += 1
else:
visited[node] = 0
id = visited[node]
# Add the node to orderedX
bisect.insort(orderedX, (w, node, id))
# Add the node to paths
paths[(w, node, id)] = [node]
paths[(w, node, id)].extend(shortestpath)
# Returns the list of shortest paths
return kshortestpaths
#-B-----------------------------------------------------------------------B-
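    # Usage sketch for k_shortest_paths() above (mirrors the call made in
    # get_compression() below):
    #   paths = self.k_shortest_paths((self.start + self.sep + self.start, 0),
    #                                 (self.stop + self.sep + self.stop, 0),
    #                                 nb_candidates)
    # Each returned item is a (path, cumulative_weight) tuple, where path is a
    # list of (word/POS, id) nodes running from the start to the stop node.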
#-T-----------------------------------------------------------------------T-
def get_compression(self, nb_candidates=50):
"""
Searches all possible paths from **start** to **end** in the word graph,
removes paths containing no verb or shorter than *n* words. Returns an
        ordered list (smallest score first) of at most nb (default value is 50)
        (cumulative score, path) tuples. The score is not normalized by the
        sentence length.
"""
for sentence in self.sentence:
hasVerb = False
for word, tag in sentence:
if tag in self.verbs:
hasVerb = True
if not hasVerb:
raise Exception("No verb in the provided sentences")
# Search for the k-shortest paths in the graph
self.paths = self.k_shortest_paths((self.start+self.sep+self.start, 0),
(self.stop+self.sep+self.stop, 0),
nb_candidates)
# Initialize the fusion container
fusions = []
# Test if there are some paths
if len(self.paths) > 0:
# For nb candidates
for i in range(min(nb_candidates, len(self.paths))):
nodes = self.paths[i][0]
sentence = []
for j in range(1, len(nodes)-1):
word, tag = nodes[j][0].split(self.sep)
sentence.append((word, tag))
bisect.insort(fusions, (self.paths[i][1], sentence))
return fusions
#-B-----------------------------------------------------------------------B-
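    # Usage sketch for get_compression() above (hedged; `compresser` stands for
    # an already built word_graph instance):
    #   candidates = compresser.get_compression(50)
    #   for score, sentence in candidates[:5]:
    #       text = ' '.join(w for w, t in sentence)
    # Candidates come back sorted by increasing cumulative score.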
#-T-----------------------------------------------------------------------T-
def max_index(self, l):
""" Returns the index of the maximum value of a given list. """
ll = len(l)
        # An empty list has no maximum
        if ll == 0:
return None
elif ll == 1:
return 0
max_val = l[0]
max_ind = 0
for z in range(1, ll):
if l[z] > max_val:
max_val = l[z]
max_ind = z
return max_ind
#-B-----------------------------------------------------------------------B-
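    # Illustrative behaviour of max_index() above: max_index([0.2, 0.7, 0.5])
    # returns 1, max_index([3.0]) returns 0, and an empty list yields None.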
#-T-----------------------------------------------------------------------T-
def compute_statistics(self):
"""
This function iterates over the cluster's sentences and computes the
following statistics about each word:
- term frequency (self.term_freq)
"""
# Structure for containing the list of sentences in which a term occurs
terms = {}
# Loop over the sentences
for i in range(self.length):
# For each tuple (token, POS) of sentence i
for token, POS in self.sentence[i]:
# generate the word/POS token
node = token.lower() + self.sep + POS
# Add the token to the terms list
if node not in terms:
terms[node] = [i]
else:
terms[node].append(i)
# Loop over the terms
for w in terms:
# Compute the term frequency
self.term_freq[w] = len(terms[w])
#-B-----------------------------------------------------------------------B-
#-T-----------------------------------------------------------------------T-
def load_stopwords(self, path):
"""
This function loads a stopword list from the *path* file and returns a
        set of words. Lines beginning with '#' are ignored.
"""
# Set of stopwords
stopwords = set([])
# For each line in the file
with codecs.open(path, 'r', 'utf-8') as f:
for line in f:
if not re.search('^#', line) and len(line.strip()) > 0:
stopwords.add(line.strip().lower())
# Return the set of stopwords
return stopwords
#-B-----------------------------------------------------------------------B-
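    # Example stopword file for load_stopwords() above (content and file name
    # are illustrative):
    #   # English function words
    #   the
    #   of
    #   and
    # load_stopwords('stopwords.en.dat') would then return
    # set(['the', 'of', 'and']).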
#-T-----------------------------------------------------------------------T-
def write_dot(self, dotfile):
""" Outputs the word graph in dot format in the specified file. """
nx.write_dot(self.graph, dotfile)
#-B-----------------------------------------------------------------------B-
#~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
# ] Ending word_graph class
#~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
#~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
# [ Class keyphrase_reranker
#~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
class keyphrase_reranker:
"""
The *keyphrase_reranker* reranks a list of compression candidates according
to the keyphrases they contain. Keyphrases are extracted from the set of
related sentences using a modified version of the TextRank method
[mihalcea-tarau:2004:EMNLP]_. First, an undirected weighted graph is
constructed from the set of sentences in which *nodes* are (lowercased word,
POS) tuples and *edges* represent co-occurrences. The TextRank algorithm is
then applied on the graph to assign a score to each word. Second, keyphrase
candidates are extracted from the set of sentences using POS syntactic
filtering. Keyphrases are then ranked according to the words they contain.
This class requires a set of related sentences (as a list of POS annotated
sentences) and the N-best compression candidates (as a list of (score, list
of (word, POS) tuples) tuples). The following optional parameters can be
specified:
- lang is the language parameter and is used for selecting the correct
POS tags used for filtering keyphrase candidates.
- patterns is a list of extra POS patterns (regexes) used for filtering
keyphrase candidates, default is ``^(JJ)*(NNP|NNS|NN)+$`` for English and
``^(ADJ)*(NC|NPP)+(ADJ)*$`` for French.
.. [mihalcea-tarau:2004:EMNLP] Rada Mihalcea and Paul Tarau, TextRank:
Bringing Order into Texts, Empirical Methods in Natural Language
Processing (EMNLP), 2004.
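
    Example (illustrative variable names)::

        reranker = keyphrase_reranker(tagged_sentences, nbest_compressions,
                                      lang="en")
        reranked_candidates = reranker.rerank_nbest_compressions()

    where *tagged_sentences* is the list of POS-annotated sentences and
    *nbest_compressions* is the output of word_graph.get_compression().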
"""
#-T-----------------------------------------------------------------------T-
def __init__(self, sentence_list, nbest_compressions, lang="en",
patterns=[], stopwords=[]):
self.sentences = list(sentence_list)
""" The list of related sentences provided by the user. """
self.nbest_compressions = nbest_compressions
""" The nbest compression candidates provided by the user. """
self.graph = nx.Graph()
""" The graph used for keyphrase extraction. """
self.lang = lang
""" The language of the input sentences, default is English (en)."""
self.stopwords = set(stopwords)
""" The set of words to be excluded from keyphrase extraction. """
self.syntactic_filter = ['JJ', 'NNP', 'NNS', 'NN', 'NNPS']
""" The POS tags used for generating keyphrase candidates. """
self.keyphrase_candidates = {}
""" Keyphrase candidates generated from the set of sentences. """
self.word_scores = {}
""" Scores for each word computed with TextRank. """
self.keyphrase_scores = {}
""" Scores for each keyphrase candidate. """
self.syntactic_patterns = ['^(JJ)*(NNP|NNS|NN)+$']
""" Syntactic patterns for filtering keyphrase candidates. """
# Specific rules for French
if self.lang == "fr":
self.syntactic_filter = ['NPP', 'NC', 'ADJ']
self.syntactic_patterns = ['^(ADJ)*(NC|NPP)+(ADJ)*$']
# Add extra patterns
self.syntactic_patterns.extend(patterns)
# 1. Build the word graph from the sentences
self.build_graph()
# 2. Generate the keyphrase candidates
self.generate_candidates()
# 3. Compute the TextRank scores for each word in the graph
self.undirected_TextRank()
# 4. Compute the score of each keyphrase candidate
self.score_keyphrase_candidates()
# 5. Cluster keyphrases to remove redundancy
self.cluster_keyphrase_candidates()
#-B-----------------------------------------------------------------------B-
#-T-----------------------------------------------------------------------T-
def build_graph(self, window=0):
"""
Build a word graph from the list of sentences. Each node in the graph
represents a word. An edge is created between two nodes if they co-occur
in a given window (default is 0, indicating the whole sentence).
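
        The input sentences are expected to be plain strings of word/POS
        tokens separated by single spaces, e.g. (illustrative sentence)::

            "the/DT quick/JJ brown/JJ fox/NN jumps/VBZ ./."

        and each is converted in place into a list of (word, POS) tuples.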
"""
# For each sentence
for i in range(len(self.sentences)):
# Normalise extra white spaces
self.sentences[i] = re.sub(' +', ' ', self.sentences[i])
# Tokenize the current sentence in word/POS
sentence = self.sentences[i].split(' ')
# 1. Looping over the words and creating the nodes. Sentences are
# also converted to a list of tuples
for j in range(len(sentence)):
# Convert word/POS to (word, POS) tuple
word, pos = self.wordpos_to_tuple(sentence[j])
# Replace word/POS by (word, POS) tuple in the sentence
sentence[j] = (word.lower(), pos)
# Modify the POS tags of stopwords to exclude them
if sentence[j][0] in self.stopwords:
sentence[j] = (sentence[j][0], "STOPWORD")
# Add the word only if it belongs to one of the syntactic
# categories
if sentence[j][1] in self.syntactic_filter:
# Add node to the graph if not exists
if not self.graph.has_node(sentence[j]):
self.graph.add_node(sentence[j])
            # 2. Create the edges between the nodes using co-occurrences
for j in range(len(sentence)):
# Get the first node
first_node = sentence[j]
# Switch to set the window for the whole sentence
max_window = window
if window < 1:
max_window = len(sentence)
# For the other words in the window
for k in range(j+1, min(len(sentence), j+max_window)):
# Get the second node
second_node = sentence[k]
# Check if nodes exists
if self.graph.has_node(first_node) and \
self.graph.has_node(second_node):
# Add edge if not exists
if not self.graph.has_edge(first_node, second_node):
self.graph.add_edge(first_node,second_node,weight=1)
# Else modify weight
else:
self.graph[first_node][second_node]['weight'] += 1
# Replace sentence by the list of tuples
self.sentences[i] = sentence
#-B-----------------------------------------------------------------------B-
#-T-----------------------------------------------------------------------T-
def generate_candidates(self):
"""
Function to generate the keyphrase candidates from the set of related
sentences. Keyphrases candidates are the largest n-grams containing only
words from the defined syntactic categories.
"""
# For each sentence
for i in range(len(self.sentences)):
sentence = self.sentences[i]
# List for iteratively constructing a keyphrase candidate
candidate = []
# For each (word, pos) tuple in the sentence
for j in range(len(sentence)):
word, pos = sentence[j]
# If word is to be included in a candidate
if pos in self.syntactic_filter:
# Adds word to candidate
candidate.append(sentence[j])
# If a candidate keyphrase is in the buffer
elif len(candidate) > 0 and self.is_a_candidate(candidate):
# Add candidate
keyphrase = ' '.join(u[0] for u in candidate)
self.keyphrase_candidates[keyphrase] = candidate
# Flush the buffer
candidate = []
else:
# Flush the buffer
candidate = []
# Handle the last possible candidate
if len(candidate) > 0 and self.is_a_candidate(candidate):
# Add candidate
keyphrase = ' '.join(u[0] for u in candidate)
self.keyphrase_candidates[keyphrase] = candidate
#-B-----------------------------------------------------------------------B-
#-T-----------------------------------------------------------------------T-
def is_a_candidate(self, keyphrase_candidate):
"""
Function to check if a keyphrase candidate is a valid one according to
the syntactic patterns.
"""
candidate_pattern = ''.join(u[1] for u in keyphrase_candidate)
for pattern in self.syntactic_patterns:
if not re.search(pattern, candidate_pattern):
return False
return True
#-B-----------------------------------------------------------------------B-
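    # Illustrative check for is_a_candidate() above, with the default English
    # pattern '^(JJ)*(NNP|NNS|NN)+$': [('giant', 'JJ'), ('tortoise', 'NN')]
    # yields the tag string 'JJNN' and is accepted, while [('giant', 'JJ')]
    # alone yields 'JJ' and is rejected.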
#-T-----------------------------------------------------------------------T-
def undirected_TextRank(self, d=0.85, f_conv=0.0001):
"""
Implementation of the TextRank algorithm as described in
[mihalcea-tarau:2004:EMNLP]_. Node scores are computed iteratively until
        convergence (a threshold is used, default is 0.0001). The damping
        factor is set to 0.85 by default, as recommended in the article.
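
        The score update applied below is::

            WS(Vi) = (1 - d) + d * sum_{Vj in adj(Vi)}
                     ( w_ji / sum_{Vk in adj(Vj)} w_jk ) * WS(Vj)

        which is what the nested loops over node_j and node_k compute.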
"""
# Initialise the maximum node difference for checking stability
max_node_difference = f_conv
# Initialise node scores to 1
self.word_scores = {}
for node in self.graph.nodes():
self.word_scores[node] = 1.0
# While the node scores are not stabilized
        while (max_node_difference >= f_conv):
            # Reset the maximum difference observed during this iteration
            max_node_difference = 0.0
# Create a copy of the current node scores
current_node_scores = self.word_scores.copy()
# For each node I in the graph
for node_i in self.graph.nodes():
sum_Vj = 0
# For each node J connected to I
for node_j in self.graph.neighbors_iter(node_i):
wji = self.graph[node_j][node_i]['weight']
WSVj = current_node_scores[node_j]
sum_wjk = 0.0
# For each node K connected to J
for node_k in self.graph.neighbors_iter(node_j):
sum_wjk += self.graph[node_j][node_k]['weight']
sum_Vj += ( (wji * WSVj) / sum_wjk )
# Modify node score
self.word_scores[node_i] = (1 - d) + (d * sum_Vj)
# Compute the difference between old and new score
score_difference = math.fabs(self.word_scores[node_i] \
- current_node_scores[node_i])
                max_node_difference = max(score_difference,
                                          max_node_difference)
#-B-----------------------------------------------------------------------B-
#-T-----------------------------------------------------------------------T-
def score_keyphrase_candidates(self):
"""
Function to compute the score of each keyphrase candidate according to
the words it contains. The score of each keyphrase is calculated as the
sum of its word scores normalized by its length + 1.
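
        Worked example (illustrative word scores): a two-word candidate whose
        words scored 1.2 and 0.8 under TextRank receives
        (1.2 + 0.8) / (2 + 1) = 0.67.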
"""
# Compute the score of each candidate according to its words
for keyphrase in self.keyphrase_candidates:
# Compute the sum of word scores for each candidate
keyphrase_score = 0.0
for word_pos_tuple in self.keyphrase_candidates[keyphrase]:
keyphrase_score += self.word_scores[word_pos_tuple]
# Normalise score by length
keyphrase_score /= (len(self.keyphrase_candidates[keyphrase]) + 1.0)
# Add score to the keyphrase candidates
self.keyphrase_scores[keyphrase] = keyphrase_score
#-B-----------------------------------------------------------------------B-
#-T-----------------------------------------------------------------------T-
def cluster_keyphrase_candidates(self):
"""
Function to cluster keyphrase candidates and remove redundancy. A large
number of the generated keyphrase candidates are redundant. Some
keyphrases may be contained within larger ones, e.g. *giant tortoise*
and *Pinta Island giant tortoise*. To solve this problem, generated
keyphrases are clustered using word overlap. For each cluster, the
keyphrase with the highest score is selected.
"""
# Sort keyphrase candidates by length
descending = sorted(self.keyphrase_candidates,
key = lambda x: len(self.keyphrase_candidates[x]),
reverse=True)
# Initialize the cluster container
clusters = {}
# Loop over keyphrases by decreasing length
for keyphrase in descending:
found_cluster = False
# Create a set of words from the keyphrase
keyphrase_words = set(keyphrase.split(' '))
# Loop over existing clusters
for cluster in clusters:
# Create a set of words from the cluster representative
cluster_words = set(cluster.split(' '))
# Check if keyphrase words are all contained in the cluster
# representative words
if len(keyphrase_words.difference(cluster_words)) == 0 :
# Add keyphrase to cluster
clusters[cluster].append(keyphrase)
# Mark cluster as found
found_cluster = True
# If keyphrase does not fit into any existing cluster
if not found_cluster:
clusters[keyphrase] = [keyphrase]
# Initialize the best candidate cluster container
best_candidate_keyphrases = []
# Loop over the clusters to find the best keyphrases
for cluster in clusters:
# Find the best scored keyphrase candidate in the cluster
sorted_cluster = sorted(clusters[cluster],
key=lambda cluster: self.keyphrase_scores[cluster],
reverse=True)
best_candidate_keyphrases.append(sorted_cluster[0])
# Initialize the non redundant clustered keyphrases
non_redundant_keyphrases = []
# Sort best candidate by score
sorted_keyphrases = sorted(best_candidate_keyphrases,
key=lambda keyphrase: self.keyphrase_scores[keyphrase],
reverse=True)
# Last loop to remove redundancy in cluster best candidates
for keyphrase in sorted_keyphrases:
is_redundant = False
for prev_keyphrase in non_redundant_keyphrases:
if keyphrase in prev_keyphrase:
is_redundant = True
break
if not is_redundant:
non_redundant_keyphrases.append(keyphrase)
        # Modify the keyphrase candidate dictionaries according to the clusters
        # (iterate over a copy of the keys since entries are deleted below)
        for keyphrase in list(self.keyphrase_candidates.keys()):
# Remove candidate if not in cluster
if not keyphrase in non_redundant_keyphrases:
del self.keyphrase_candidates[keyphrase]
del self.keyphrase_scores[keyphrase]
#-B-----------------------------------------------------------------------B-
#-T-----------------------------------------------------------------------T-
def rerank_nbest_compressions(self):
"""
Function that reranks the nbest compressions according to the keyphrases
        they contain. The cumulative score (original score) is normalized by
        (compression length * (1 + sum of the keyphrase scores it contains)).
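
        Worked example (illustrative numbers): a 10-node path with cumulative
        score 2.0 that contains keyphrases scored 0.5 and 0.3 is reranked as
        2.0 / (10 * (1.0 + 0.5 + 0.3)) = 0.11; smaller reranked scores are
        better, as in the original ranking.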
"""
reranked_compressions = []
# Loop over the compression candidates
for cummulative_score, path in self.nbest_compressions:
            # Generate the sentence from the path
compression = ' '.join([u[0] for u in path])
# Initialize total keyphrase score
total_keyphrase_score = 1.0
# Loop over the keyphrases and sum the scores
for keyphrase in self.keyphrase_candidates:
if keyphrase in compression:
total_keyphrase_score += self.keyphrase_scores[keyphrase]
score = ( cummulative_score / (len(path) * total_keyphrase_score) )
bisect.insort( reranked_compressions,
(score, path) )
return reranked_compressions
#-B-----------------------------------------------------------------------B-
#-T-----------------------------------------------------------------------T-
def wordpos_to_tuple(self, word, delim='/'):
"""
This function converts a word/POS to a (word, POS) tuple. The character
used for separating word and POS can be specified (default is /).
"""
# Splitting word, POS using regex
m = re.match("^(.+)"+delim+"(.+)$", word)
# Extract the word information
token, POS = m.group(1), m.group(2)
# Return the tuple
return (token.lower(), POS)
#-B-----------------------------------------------------------------------B-
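    # Illustrative behaviour of wordpos_to_tuple() above with the default
    # delimiter: wordpos_to_tuple('Saturn/NNP') returns ('saturn', 'NNP').
    # Note that `delim` is interpolated into a regular expression, so regex
    # metacharacters would need to be escaped by the caller.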
#-T-----------------------------------------------------------------------T-
def tuple_to_wordpos(self, wordpos_tuple, delim='/'):
"""
This function converts a (word, POS) tuple to word/POS. The character
used for separating word and POS can be specified (default is /).
"""
# Return the word +delim+ POS
return wordpos_tuple[0]+delim+wordpos_tuple[1]
#-B-----------------------------------------------------------------------B-
#~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
# ] Ending keyphrase_reranker class
#~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
| mit |
wanggang3333/scikit-learn | examples/linear_model/plot_sgd_iris.py | 286 | 2202 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
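# The helper above rearranges the one-vs-all decision boundary
#     coef[c, 0] * x0 + coef[c, 1] * x1 + intercept[c] = 0
# into x1 as a function of x0 (assuming coef[c, 1] != 0), so each dashed line
# traces one classifier's hyperplane across the current axis limits.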
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause |
decvalts/landlab | landlab/components/vegetation_ca/examples/Analysis_long_flat.py | 1 | 1778 | # Jai Sri Sainath!
# GRASS = 0; SHRUB = 1; TREE = 2; BARE = 3;
# SHRUBSEEDLING = 4; TREESEEDLING = 5
GRASS = 0
SHRUB = 1
TREE = 2
BARE = 3
SHRUBSEEDLING = 4
TREESEEDLING = 5
import numpy as np
import matplotlib.pyplot as plt
sim = 'WaterStress_15May15_'
CumWaterStress = np.load(sim+'CumWaterStress.npy')
P = np.load(sim+'P.npy')
Tb = np.load(sim+'Tb.npy')
Tr = np.load(sim+'Tr.npy')
yrs = np.load(sim+'Years.npy')
VegType = np.load(sim+'VegType.npy')
#MAP = np.load(sim+'MAP.npy')
n = P.shape[0] # Number of iterations
Time = np.empty(n)
Time[0] = 0
for x in range(1,n):
Time[x] = Time[x-1]+(Tb[x]+Tr[x])/(24.*365)
grass_cov = np.empty(yrs)
shrub_cov = np.empty(yrs)
tree_cov = np.empty(yrs)
grid_size = float(VegType.shape[1])
for x in range(0,yrs):
grass_cov[x] = (VegType[x][VegType[x] == GRASS].size/grid_size) * 100
shrub_cov[x] = (VegType[x][VegType[x] == SHRUB].size/grid_size) * 100 + \
(VegType[x][VegType[x] == SHRUBSEEDLING].size/grid_size) * 100
tree_cov[x] = (VegType[x][VegType[x] == TREE].size/grid_size) * 100 + \
(VegType[x][VegType[x] == TREESEEDLING].size/grid_size) * 100
years = range(0,yrs)
pic = 0
#plt.figure(pic)
#plt.plot(years, MAP[0:yrs])
#plt.xlim([0, yrs])
#plt.xlabel('Time in years')
#plt.ylabel('Mean Annual Precipitation in mm')
#plt.title('MAP')
pic += 1
plt.figure(pic)
plt.plot(years, CumWaterStress[0:yrs,10])
plt.xlim([0, yrs])
plt.xlabel('Years')
plt.ylabel('Cumulative Water Stress')
pic += 1
plt.figure(pic)
plt.plot(years, grass_cov, '-g', label = 'Grass')
plt.hold(True)
plt.plot(years, shrub_cov, '-r', label = 'Shrub')
plt.hold(True)
plt.plot(years, tree_cov, '-k', label = 'Tree')
plt.ylabel(' % Coverage ')
plt.xlabel('Time in years' )
plt.legend(loc = 0)
plt.show()
| mit |
Winand/pandas | pandas/io/packers.py | 3 | 27461 | """
Msgpack serializer support for reading and writing pandas data structures
to disk
portions of msgpack_numpy package, by Lev Givon were incorporated
into this module (and tests_packers.py)
License
=======
Copyright (c) 2013, Lev Givon.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Lev Givon nor the names of any
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from datetime import datetime, date, timedelta
from dateutil.parser import parse
import os
from textwrap import dedent
import warnings
import numpy as np
from pandas import compat
from pandas.compat import u, u_safe
from pandas.core.dtypes.common import (
is_categorical_dtype, is_object_dtype,
needs_i8_conversion, pandas_dtype)
from pandas import (Timestamp, Period, Series, DataFrame, # noqa
Index, MultiIndex, Float64Index, Int64Index,
Panel, RangeIndex, PeriodIndex, DatetimeIndex, NaT,
Categorical, CategoricalIndex)
from pandas.core.sparse.api import SparseSeries, SparseDataFrame
from pandas.core.sparse.array import BlockIndex, IntIndex
from pandas.core.generic import NDFrame
from pandas.errors import PerformanceWarning
from pandas.io.common import get_filepath_or_buffer, _stringify_path
from pandas.core.internals import BlockManager, make_block, _safe_reshape
import pandas.core.internals as internals
from pandas.io.msgpack import Unpacker as _Unpacker, Packer as _Packer, ExtType
from pandas.util._move import (
BadMove as _BadMove,
move_into_mutable_buffer as _move_into_mutable_buffer,
)
# check which compression libs we have installed
try:
import zlib
def _check_zlib():
pass
except ImportError:
def _check_zlib():
raise ImportError('zlib is not installed')
_check_zlib.__doc__ = dedent(
"""\
Check if zlib is installed.
Raises
------
ImportError
Raised when zlib is not installed.
""",
)
try:
import blosc
def _check_blosc():
pass
except ImportError:
def _check_blosc():
raise ImportError('blosc is not installed')
_check_blosc.__doc__ = dedent(
"""\
Check if blosc is installed.
Raises
------
ImportError
Raised when blosc is not installed.
""",
)
# until we can pass this into our conversion functions,
# this is pretty hacky
compressor = None
def to_msgpack(path_or_buf, *args, **kwargs):
"""
msgpack (serialize) object to input file path
THIS IS AN EXPERIMENTAL LIBRARY and the storage format
may not be stable until a future release.
Parameters
----------
path_or_buf : string File path, buffer-like, or None
if None, return generated string
args : an object or objects to serialize
encoding: encoding for unicode objects
append : boolean whether to append to an existing msgpack
(default is False)
compress : type of compressor (zlib or blosc), default to None (no
compression)
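
    Example (illustrative; 'frame.msg' is a placeholder path)::

        df = DataFrame({'a': [1, 2, 3]})
        to_msgpack('frame.msg', df, compress='zlib')   # write to disk
        packed = to_msgpack(None, df)                  # return the packed bytes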
"""
global compressor
compressor = kwargs.pop('compress', None)
if compressor:
compressor = u(compressor)
append = kwargs.pop('append', None)
if append:
mode = 'a+b'
else:
mode = 'wb'
def writer(fh):
for a in args:
fh.write(pack(a, **kwargs))
path_or_buf = _stringify_path(path_or_buf)
if isinstance(path_or_buf, compat.string_types):
with open(path_or_buf, mode) as fh:
writer(fh)
elif path_or_buf is None:
buf = compat.BytesIO()
writer(buf)
return buf.getvalue()
else:
writer(path_or_buf)
def read_msgpack(path_or_buf, encoding='utf-8', iterator=False, **kwargs):
"""
Load msgpack pandas object from the specified
file path
THIS IS AN EXPERIMENTAL LIBRARY and the storage format
may not be stable until a future release.
Parameters
----------
path_or_buf : string File path, BytesIO like or string
encoding: Encoding for decoding msgpack str type
iterator : boolean, if True, return an iterator to the unpacker
(default is False)
Returns
-------
obj : type of object stored in file
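
    Example (illustrative; 'frame.msg' is a placeholder path)::

        obj = read_msgpack('frame.msg')
        for item in read_msgpack('frame.msg', iterator=True):
            pass  # each stored object is yielded lazily via the Iterator class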
"""
path_or_buf, _, _ = get_filepath_or_buffer(path_or_buf)
if iterator:
return Iterator(path_or_buf)
def read(fh):
l = list(unpack(fh, encoding=encoding, **kwargs))
if len(l) == 1:
return l[0]
return l
# see if we have an actual file
if isinstance(path_or_buf, compat.string_types):
try:
exists = os.path.exists(path_or_buf)
except (TypeError, ValueError):
exists = False
if exists:
with open(path_or_buf, 'rb') as fh:
return read(fh)
# treat as a binary-like
if isinstance(path_or_buf, compat.binary_type):
fh = None
try:
fh = compat.BytesIO(path_or_buf)
return read(fh)
finally:
if fh is not None:
fh.close()
# a buffer like
if hasattr(path_or_buf, 'read') and compat.callable(path_or_buf.read):
return read(path_or_buf)
raise ValueError('path_or_buf needs to be a string file path or file-like')
dtype_dict = {21: np.dtype('M8[ns]'),
u('datetime64[ns]'): np.dtype('M8[ns]'),
u('datetime64[us]'): np.dtype('M8[us]'),
22: np.dtype('m8[ns]'),
u('timedelta64[ns]'): np.dtype('m8[ns]'),
u('timedelta64[us]'): np.dtype('m8[us]'),
# this is platform int, which we need to remap to np.int64
# for compat on windows platforms
7: np.dtype('int64'),
'category': 'category'
}
def dtype_for(t):
""" return my dtype mapping, whether number or name """
if t in dtype_dict:
return dtype_dict[t]
return np.typeDict.get(t, t)
c2f_dict = {'complex': np.float64,
'complex128': np.float64,
'complex64': np.float32}
# numpy 1.6.1 compat
if hasattr(np, 'float128'):
c2f_dict['complex256'] = np.float128
def c2f(r, i, ctype_name):
"""
Convert strings to complex number instance with specified numpy type.
"""
ftype = c2f_dict[ctype_name]
return np.typeDict[ctype_name](ftype(r) + 1j * ftype(i))
def convert(values):
""" convert the numpy values to a list """
dtype = values.dtype
if is_categorical_dtype(values):
return values
elif is_object_dtype(dtype):
return values.ravel().tolist()
if needs_i8_conversion(dtype):
values = values.view('i8')
v = values.ravel()
if compressor == 'zlib':
_check_zlib()
# return string arrays like they are
if dtype == np.object_:
return v.tolist()
# convert to a bytes array
v = v.tostring()
return ExtType(0, zlib.compress(v))
elif compressor == 'blosc':
_check_blosc()
# return string arrays like they are
if dtype == np.object_:
return v.tolist()
# convert to a bytes array
v = v.tostring()
return ExtType(0, blosc.compress(v, typesize=dtype.itemsize))
# ndarray (on original dtype)
return ExtType(0, v.tostring())
def unconvert(values, dtype, compress=None):
as_is_ext = isinstance(values, ExtType) and values.code == 0
if as_is_ext:
values = values.data
if is_categorical_dtype(dtype):
return values
elif is_object_dtype(dtype):
return np.array(values, dtype=object)
dtype = pandas_dtype(dtype).base
if not as_is_ext:
values = values.encode('latin1')
if compress:
if compress == u'zlib':
_check_zlib()
decompress = zlib.decompress
elif compress == u'blosc':
_check_blosc()
decompress = blosc.decompress
else:
raise ValueError("compress must be one of 'zlib' or 'blosc'")
try:
return np.frombuffer(
_move_into_mutable_buffer(decompress(values)),
dtype=dtype,
)
except _BadMove as e:
# Pull the decompressed data off of the `_BadMove` exception.
# We don't just store this in the locals because we want to
# minimize the risk of giving users access to a `bytes` object
# whose data is also given to a mutable buffer.
values = e.args[0]
if len(values) > 1:
# The empty string and single characters are memoized in many
# string creating functions in the capi. This case should not
# warn even though we need to make a copy because we are only
# copying at most 1 byte.
warnings.warn(
'copying data after decompressing; this may mean that'
' decompress is caching its result',
PerformanceWarning,
)
# fall through to copying `np.fromstring`
# Copy the string into a numpy array.
return np.fromstring(values, dtype=dtype)
def encode(obj):
"""
Data encoder
"""
tobj = type(obj)
if isinstance(obj, Index):
if isinstance(obj, RangeIndex):
return {u'typ': u'range_index',
u'klass': u(obj.__class__.__name__),
u'name': getattr(obj, 'name', None),
u'start': getattr(obj, '_start', None),
u'stop': getattr(obj, '_stop', None),
u'step': getattr(obj, '_step', None)}
elif isinstance(obj, PeriodIndex):
return {u'typ': u'period_index',
u'klass': u(obj.__class__.__name__),
u'name': getattr(obj, 'name', None),
u'freq': u_safe(getattr(obj, 'freqstr', None)),
u'dtype': u(obj.dtype.name),
u'data': convert(obj.asi8),
u'compress': compressor}
elif isinstance(obj, DatetimeIndex):
tz = getattr(obj, 'tz', None)
# store tz info and data as UTC
if tz is not None:
tz = u(tz.zone)
obj = obj.tz_convert('UTC')
return {u'typ': u'datetime_index',
u'klass': u(obj.__class__.__name__),
u'name': getattr(obj, 'name', None),
u'dtype': u(obj.dtype.name),
u'data': convert(obj.asi8),
u'freq': u_safe(getattr(obj, 'freqstr', None)),
u'tz': tz,
u'compress': compressor}
elif isinstance(obj, MultiIndex):
return {u'typ': u'multi_index',
u'klass': u(obj.__class__.__name__),
u'names': getattr(obj, 'names', None),
u'dtype': u(obj.dtype.name),
u'data': convert(obj.values),
u'compress': compressor}
else:
return {u'typ': u'index',
u'klass': u(obj.__class__.__name__),
u'name': getattr(obj, 'name', None),
u'dtype': u(obj.dtype.name),
u'data': convert(obj.values),
u'compress': compressor}
elif isinstance(obj, Categorical):
return {u'typ': u'category',
u'klass': u(obj.__class__.__name__),
u'name': getattr(obj, 'name', None),
u'codes': obj.codes,
u'categories': obj.categories,
u'ordered': obj.ordered,
u'compress': compressor}
elif isinstance(obj, Series):
if isinstance(obj, SparseSeries):
raise NotImplementedError(
'msgpack sparse series is not implemented'
)
# d = {'typ': 'sparse_series',
# 'klass': obj.__class__.__name__,
# 'dtype': obj.dtype.name,
# 'index': obj.index,
# 'sp_index': obj.sp_index,
# 'sp_values': convert(obj.sp_values),
# 'compress': compressor}
# for f in ['name', 'fill_value', 'kind']:
# d[f] = getattr(obj, f, None)
# return d
else:
return {u'typ': u'series',
u'klass': u(obj.__class__.__name__),
u'name': getattr(obj, 'name', None),
u'index': obj.index,
u'dtype': u(obj.dtype.name),
u'data': convert(obj.values),
u'compress': compressor}
elif issubclass(tobj, NDFrame):
if isinstance(obj, SparseDataFrame):
raise NotImplementedError(
'msgpack sparse frame is not implemented'
)
# d = {'typ': 'sparse_dataframe',
# 'klass': obj.__class__.__name__,
# 'columns': obj.columns}
# for f in ['default_fill_value', 'default_kind']:
# d[f] = getattr(obj, f, None)
# d['data'] = dict([(name, ss)
# for name, ss in compat.iteritems(obj)])
# return d
else:
data = obj._data
if not data.is_consolidated():
data = data.consolidate()
# the block manager
return {u'typ': u'block_manager',
u'klass': u(obj.__class__.__name__),
u'axes': data.axes,
u'blocks': [{u'locs': b.mgr_locs.as_array,
u'values': convert(b.values),
u'shape': b.values.shape,
u'dtype': u(b.dtype.name),
u'klass': u(b.__class__.__name__),
u'compress': compressor} for b in data.blocks]
}
elif isinstance(obj, (datetime, date, np.datetime64, timedelta,
np.timedelta64)) or obj is NaT:
if isinstance(obj, Timestamp):
tz = obj.tzinfo
if tz is not None:
tz = u(tz.zone)
freq = obj.freq
if freq is not None:
freq = u(freq.freqstr)
return {u'typ': u'timestamp',
u'value': obj.value,
u'freq': freq,
u'tz': tz}
if obj is NaT:
return {u'typ': u'nat'}
elif isinstance(obj, np.timedelta64):
return {u'typ': u'timedelta64',
u'data': obj.view('i8')}
elif isinstance(obj, timedelta):
return {u'typ': u'timedelta',
u'data': (obj.days, obj.seconds, obj.microseconds)}
elif isinstance(obj, np.datetime64):
return {u'typ': u'datetime64',
u'data': u(str(obj))}
elif isinstance(obj, datetime):
return {u'typ': u'datetime',
u'data': u(obj.isoformat())}
elif isinstance(obj, date):
return {u'typ': u'date',
u'data': u(obj.isoformat())}
raise Exception("cannot encode this datetimelike object: %s" % obj)
elif isinstance(obj, Period):
return {u'typ': u'period',
u'ordinal': obj.ordinal,
u'freq': u(obj.freq)}
elif isinstance(obj, BlockIndex):
return {u'typ': u'block_index',
u'klass': u(obj.__class__.__name__),
u'blocs': obj.blocs,
u'blengths': obj.blengths,
u'length': obj.length}
elif isinstance(obj, IntIndex):
return {u'typ': u'int_index',
u'klass': u(obj.__class__.__name__),
u'indices': obj.indices,
u'length': obj.length}
elif isinstance(obj, np.ndarray):
return {u'typ': u'ndarray',
u'shape': obj.shape,
u'ndim': obj.ndim,
u'dtype': u(obj.dtype.name),
u'data': convert(obj),
u'compress': compressor}
elif isinstance(obj, np.number):
if np.iscomplexobj(obj):
return {u'typ': u'np_scalar',
u'sub_typ': u'np_complex',
u'dtype': u(obj.dtype.name),
u'real': u(obj.real.__repr__()),
u'imag': u(obj.imag.__repr__())}
else:
return {u'typ': u'np_scalar',
u'dtype': u(obj.dtype.name),
u'data': u(obj.__repr__())}
elif isinstance(obj, complex):
return {u'typ': u'np_complex',
u'real': u(obj.real.__repr__()),
u'imag': u(obj.imag.__repr__())}
return obj
def decode(obj):
"""
Decoder for deserializing numpy data types.
"""
typ = obj.get(u'typ')
if typ is None:
return obj
elif typ == u'timestamp':
freq = obj[u'freq'] if 'freq' in obj else obj[u'offset']
return Timestamp(obj[u'value'], tz=obj[u'tz'], freq=freq)
elif typ == u'nat':
return NaT
elif typ == u'period':
return Period(ordinal=obj[u'ordinal'], freq=obj[u'freq'])
elif typ == u'index':
dtype = dtype_for(obj[u'dtype'])
data = unconvert(obj[u'data'], dtype,
obj.get(u'compress'))
return globals()[obj[u'klass']](data, dtype=dtype, name=obj[u'name'])
elif typ == u'range_index':
return globals()[obj[u'klass']](obj[u'start'],
obj[u'stop'],
obj[u'step'],
name=obj[u'name'])
elif typ == u'multi_index':
dtype = dtype_for(obj[u'dtype'])
data = unconvert(obj[u'data'], dtype,
obj.get(u'compress'))
data = [tuple(x) for x in data]
return globals()[obj[u'klass']].from_tuples(data, names=obj[u'names'])
elif typ == u'period_index':
data = unconvert(obj[u'data'], np.int64, obj.get(u'compress'))
d = dict(name=obj[u'name'], freq=obj[u'freq'])
return globals()[obj[u'klass']]._from_ordinals(data, **d)
elif typ == u'datetime_index':
data = unconvert(obj[u'data'], np.int64, obj.get(u'compress'))
d = dict(name=obj[u'name'], freq=obj[u'freq'], verify_integrity=False)
result = globals()[obj[u'klass']](data, **d)
tz = obj[u'tz']
# reverse tz conversion
if tz is not None:
result = result.tz_localize('UTC').tz_convert(tz)
return result
elif typ == u'category':
from_codes = globals()[obj[u'klass']].from_codes
return from_codes(codes=obj[u'codes'],
categories=obj[u'categories'],
ordered=obj[u'ordered'])
elif typ == u'series':
dtype = dtype_for(obj[u'dtype'])
pd_dtype = pandas_dtype(dtype)
index = obj[u'index']
result = globals()[obj[u'klass']](unconvert(obj[u'data'], dtype,
obj[u'compress']),
index=index,
dtype=pd_dtype,
name=obj[u'name'])
return result
elif typ == u'block_manager':
axes = obj[u'axes']
def create_block(b):
values = _safe_reshape(unconvert(
b[u'values'], dtype_for(b[u'dtype']),
b[u'compress']), b[u'shape'])
# locs handles duplicate column names, and should be used instead
# of items; see GH 9618
if u'locs' in b:
placement = b[u'locs']
else:
placement = axes[0].get_indexer(b[u'items'])
return make_block(values=values,
klass=getattr(internals, b[u'klass']),
placement=placement,
dtype=b[u'dtype'])
blocks = [create_block(b) for b in obj[u'blocks']]
return globals()[obj[u'klass']](BlockManager(blocks, axes))
elif typ == u'datetime':
return parse(obj[u'data'])
elif typ == u'datetime64':
return np.datetime64(parse(obj[u'data']))
elif typ == u'date':
return parse(obj[u'data']).date()
elif typ == u'timedelta':
return timedelta(*obj[u'data'])
elif typ == u'timedelta64':
return np.timedelta64(int(obj[u'data']))
# elif typ == 'sparse_series':
# dtype = dtype_for(obj['dtype'])
# return globals()[obj['klass']](
# unconvert(obj['sp_values'], dtype, obj['compress']),
# sparse_index=obj['sp_index'], index=obj['index'],
# fill_value=obj['fill_value'], kind=obj['kind'], name=obj['name'])
# elif typ == 'sparse_dataframe':
# return globals()[obj['klass']](
# obj['data'], columns=obj['columns'],
# default_fill_value=obj['default_fill_value'],
# default_kind=obj['default_kind']
# )
# elif typ == 'sparse_panel':
# return globals()[obj['klass']](
# obj['data'], items=obj['items'],
# default_fill_value=obj['default_fill_value'],
# default_kind=obj['default_kind'])
elif typ == u'block_index':
return globals()[obj[u'klass']](obj[u'length'], obj[u'blocs'],
obj[u'blengths'])
elif typ == u'int_index':
return globals()[obj[u'klass']](obj[u'length'], obj[u'indices'])
elif typ == u'ndarray':
return unconvert(obj[u'data'], np.typeDict[obj[u'dtype']],
obj.get(u'compress')).reshape(obj[u'shape'])
elif typ == u'np_scalar':
if obj.get(u'sub_typ') == u'np_complex':
return c2f(obj[u'real'], obj[u'imag'], obj[u'dtype'])
else:
dtype = dtype_for(obj[u'dtype'])
try:
return dtype(obj[u'data'])
except:
return dtype.type(obj[u'data'])
elif typ == u'np_complex':
return complex(obj[u'real'] + u'+' + obj[u'imag'] + u'j')
elif isinstance(obj, (dict, list, set)):
return obj
else:
return obj
def pack(o, default=encode,
encoding='utf-8', unicode_errors='strict', use_single_float=False,
autoreset=1, use_bin_type=1):
"""
Pack an object and return the packed bytes.
"""
return Packer(default=default, encoding=encoding,
unicode_errors=unicode_errors,
use_single_float=use_single_float,
autoreset=autoreset,
use_bin_type=use_bin_type).pack(o)
def unpack(packed, object_hook=decode,
list_hook=None, use_list=False, encoding='utf-8',
unicode_errors='strict', object_pairs_hook=None,
max_buffer_size=0, ext_hook=ExtType):
"""
Unpack a packed object, return an iterator
Note: packed lists will be returned as tuples
"""
return Unpacker(packed, object_hook=object_hook,
list_hook=list_hook,
use_list=use_list, encoding=encoding,
unicode_errors=unicode_errors,
object_pairs_hook=object_pairs_hook,
max_buffer_size=max_buffer_size,
ext_hook=ext_hook)
class Packer(_Packer):
def __init__(self, default=encode,
encoding='utf-8',
unicode_errors='strict',
use_single_float=False,
autoreset=1,
use_bin_type=1):
super(Packer, self).__init__(default=default,
encoding=encoding,
unicode_errors=unicode_errors,
use_single_float=use_single_float,
autoreset=autoreset,
use_bin_type=use_bin_type)
class Unpacker(_Unpacker):
def __init__(self, file_like=None, read_size=0, use_list=False,
object_hook=decode,
object_pairs_hook=None, list_hook=None, encoding='utf-8',
unicode_errors='strict', max_buffer_size=0, ext_hook=ExtType):
super(Unpacker, self).__init__(file_like=file_like,
read_size=read_size,
use_list=use_list,
object_hook=object_hook,
object_pairs_hook=object_pairs_hook,
list_hook=list_hook,
encoding=encoding,
unicode_errors=unicode_errors,
max_buffer_size=max_buffer_size,
ext_hook=ext_hook)
class Iterator(object):
""" manage the unpacking iteration,
close the file on completion """
def __init__(self, path, **kwargs):
self.path = path
self.kwargs = kwargs
def __iter__(self):
needs_closing = True
try:
# see if we have an actual file
if isinstance(self.path, compat.string_types):
try:
path_exists = os.path.exists(self.path)
except TypeError:
path_exists = False
if path_exists:
fh = open(self.path, 'rb')
else:
fh = compat.BytesIO(self.path)
else:
if not hasattr(self.path, 'read'):
fh = compat.BytesIO(self.path)
else:
# a file-like
needs_closing = False
fh = self.path
unpacker = unpack(fh)
for o in unpacker:
yield o
finally:
if needs_closing:
fh.close()
| bsd-3-clause |
RTHMaK/RPGOne | scipy-2017-sklearn-master/notebooks/figures/plot_rbf_svm_parameters.py | 19 | 2018 | import matplotlib.pyplot as plt
import numpy as np
from sklearn.svm import SVC
from sklearn.datasets import make_blobs
from .plot_2d_separator import plot_2d_separator
def make_handcrafted_dataset():
# a carefully hand-designed dataset lol
X, y = make_blobs(centers=2, random_state=4, n_samples=30)
y[np.array([7, 27])] = 0
mask = np.ones(len(X), dtype=np.bool)
mask[np.array([0, 1, 5, 26])] = 0
X, y = X[mask], y[mask]
return X, y
def plot_rbf_svm_parameters():
X, y = make_handcrafted_dataset()
fig, axes = plt.subplots(1, 3, figsize=(12, 4))
for ax, C in zip(axes, [1e0, 5, 10, 100]):
ax.scatter(X[:, 0], X[:, 1], s=150, c=np.array(['red', 'blue'])[y])
svm = SVC(kernel='rbf', C=C).fit(X, y)
plot_2d_separator(svm, X, ax=ax, eps=.5)
ax.set_title("C = %f" % C)
fig, axes = plt.subplots(1, 4, figsize=(15, 3))
for ax, gamma in zip(axes, [0.1, .5, 1, 10]):
ax.scatter(X[:, 0], X[:, 1], s=150, c=np.array(['red', 'blue'])[y])
svm = SVC(gamma=gamma, kernel='rbf', C=1).fit(X, y)
plot_2d_separator(svm, X, ax=ax, eps=.5)
ax.set_title("gamma = %f" % gamma)
def plot_svm(log_C, log_gamma):
X, y = make_handcrafted_dataset()
C = 10. ** log_C
gamma = 10. ** log_gamma
svm = SVC(kernel='rbf', C=C, gamma=gamma).fit(X, y)
ax = plt.gca()
plot_2d_separator(svm, X, ax=ax, eps=.5)
# plot data
ax.scatter(X[:, 0], X[:, 1], s=150, c=np.array(['red', 'blue'])[y])
# plot support vectors
sv = svm.support_vectors_
ax.scatter(sv[:, 0], sv[:, 1], s=230, facecolors='none', zorder=10, linewidth=3)
ax.set_title("C = %.4f gamma = %.4f" % (C, gamma))
def plot_svm_interactive():
from IPython.html.widgets import interactive, FloatSlider
C_slider = FloatSlider(min=-3, max=3, step=.1, value=0, readout=False)
gamma_slider = FloatSlider(min=-2, max=2, step=.1, value=0, readout=False)
return interactive(plot_svm, log_C=C_slider, log_gamma=gamma_slider)
| apache-2.0 |
trungnt13/scikit-learn | examples/ensemble/plot_gradient_boosting_quantile.py | 392 | 2114 | """
=====================================================
Prediction Intervals for Gradient Boosting Regression
=====================================================
This example shows how quantile regression can be used
to create prediction intervals.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import GradientBoostingRegressor
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d(np.random.uniform(0, 10.0, size=100)).T
X = X.astype(np.float32)
# Observations
y = f(X).ravel()
dy = 1.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
y = y.astype(np.float32)
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
xx = xx.astype(np.float32)
alpha = 0.95
clf = GradientBoostingRegressor(loss='quantile', alpha=alpha,
n_estimators=250, max_depth=3,
learning_rate=.1, min_samples_leaf=9,
min_samples_split=9)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_upper = clf.predict(xx)
clf.set_params(alpha=1.0 - alpha)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_lower = clf.predict(xx)
clf.set_params(loss='ls')
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_pred = clf.predict(xx)
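# With alpha = 0.95 the two quantile models above fit the 0.95 and 0.05
# conditional quantiles, so together they bound a 0.95 - 0.05 = 0.90 central
# prediction interval; the model refit with loss='ls' provides the point
# prediction drawn as the red curve below.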
# Plot the function, the prediction and the 90% confidence interval based on
# the MSE
fig = plt.figure()
plt.plot(xx, f(xx), 'g:', label=u'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'b.', markersize=10, label=u'Observations')
plt.plot(xx, y_pred, 'r-', label=u'Prediction')
plt.plot(xx, y_upper, 'k-')
plt.plot(xx, y_lower, 'k-')
plt.fill(np.concatenate([xx, xx[::-1]]),
np.concatenate([y_upper, y_lower[::-1]]),
alpha=.5, fc='b', ec='None', label='90% prediction interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause |
walterreade/scikit-learn | examples/svm/plot_svm_nonlinear.py | 268 | 1091 | """
==============
Non-linear SVM
==============
Perform binary classification using non-linear SVC
with RBF kernel. The target to predict is a XOR of the
inputs.
The color map illustrates the decision function learned by the SVC.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-3, 3, 500),
np.linspace(-3, 3, 500))
np.random.seed(0)
X = np.random.randn(300, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
clf = svm.NuSVC()
clf.fit(X, Y)
# plot the decision function for each datapoint on the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect='auto',
origin='lower', cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
                       linestyles='--')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired)
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.show()
| bsd-3-clause |
wdurhamh/statsmodels | statsmodels/graphics/boxplots.py | 30 | 16437 | """Variations on boxplots."""
# Author: Ralf Gommers
# Based on code by Flavio Coelho and Teemu Ikonen.
from statsmodels.compat.python import zip
import numpy as np
from scipy.stats import gaussian_kde
from . import utils
__all__ = ['violinplot', 'beanplot']
def violinplot(data, ax=None, labels=None, positions=None, side='both',
show_boxplot=True, plot_opts={}):
"""Make a violin plot of each dataset in the `data` sequence.
A violin plot is a boxplot combined with a kernel density estimate of the
probability density function per point.
Parameters
----------
data : sequence of ndarrays
Data arrays, one array per value in `positions`.
ax : Matplotlib AxesSubplot instance, optional
If given, this subplot is used to plot in instead of a new figure being
created.
labels : list of str, optional
Tick labels for the horizontal axis. If not given, integers
``1..len(data)`` are used.
positions : array_like, optional
Position array, used as the horizontal axis of the plot. If not given,
spacing of the violins will be equidistant.
side : {'both', 'left', 'right'}, optional
How to plot the violin. Default is 'both'. The 'left', 'right'
options can be used to create asymmetric violin plots.
show_boxplot : bool, optional
Whether or not to show normal box plots on top of the violins.
Default is True.
plot_opts : dict, optional
A dictionary with plotting options. Any of the following can be
provided, if not present in `plot_opts` the defaults will be used::
          - 'violin_fc', MPL color. Fill color for violins. Default is '#66c2a5'.
- 'violin_ec', MPL color. Edge color for violins. Default is 'k'.
- 'violin_lw', scalar. Edge linewidth for violins. Default is 1.
          - 'violin_alpha', float. Transparency of violins. Default is 0.5.
- 'cutoff', bool. If True, limit violin range to data range.
Default is False.
- 'cutoff_val', scalar. Where to cut off violins if `cutoff` is
True. Default is 1.5 standard deviations.
- 'cutoff_type', {'std', 'abs'}. Whether cutoff value is absolute,
or in standard deviations. Default is 'std'.
- 'violin_width' : float. Relative width of violins. Max available
space is 1, default is 0.8.
- 'label_fontsize', MPL fontsize. Adjusts fontsize only if given.
- 'label_rotation', scalar. Adjusts label rotation only if given.
Specify in degrees.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
See Also
--------
beanplot : Bean plot, builds on `violinplot`.
matplotlib.pyplot.boxplot : Standard boxplot.
Notes
-----
The appearance of violins can be customized with `plot_opts`. If
customization of boxplot elements is required, set `show_boxplot` to False
and plot it on top of the violins by calling the Matplotlib `boxplot`
function directly. For example::
violinplot(data, ax=ax, show_boxplot=False)
ax.boxplot(data, sym='cv', whis=2.5)
It can happen that the axis labels or tick labels fall outside the plot
area, especially with rotated labels on the horizontal axis. With
Matplotlib 1.1 or higher, this can easily be fixed by calling
``ax.tight_layout()``. With older Matplotlib one has to use ``plt.rc`` or
``plt.rcParams`` to fix this, for example::
plt.rc('figure.subplot', bottom=0.25)
violinplot(data, ax=ax)
References
----------
J.L. Hintze and R.D. Nelson, "Violin Plots: A Box Plot-Density Trace
Synergism", The American Statistician, Vol. 52, pp.181-84, 1998.
Examples
--------
We use the American National Election Survey 1996 dataset, which has Party
Identification of respondents as independent variable and (among other
data) age as dependent variable.
>>> data = sm.datasets.anes96.load_pandas()
>>> party_ID = np.arange(7)
>>> labels = ["Strong Democrat", "Weak Democrat", "Independent-Democrat",
    ...           "Independent-Independent", "Independent-Republican",
... "Weak Republican", "Strong Republican"]
Group age by party ID, and create a violin plot with it:
>>> plt.rcParams['figure.subplot.bottom'] = 0.23 # keep labels visible
>>> age = [data.exog['age'][data.endog == id] for id in party_ID]
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> sm.graphics.violinplot(age, ax=ax, labels=labels,
... plot_opts={'cutoff_val':5, 'cutoff_type':'abs',
... 'label_fontsize':'small',
... 'label_rotation':30})
>>> ax.set_xlabel("Party identification of respondent.")
>>> ax.set_ylabel("Age")
>>> plt.show()
.. plot:: plots/graphics_boxplot_violinplot.py
"""
fig, ax = utils.create_mpl_ax(ax)
if positions is None:
positions = np.arange(len(data)) + 1
# Determine available horizontal space for each individual violin.
pos_span = np.max(positions) - np.min(positions)
width = np.min([0.15 * np.max([pos_span, 1.]),
plot_opts.get('violin_width', 0.8) / 2.])
# Plot violins.
for pos_data, pos in zip(data, positions):
xvals, violin = _single_violin(ax, pos, pos_data, width, side,
plot_opts)
if show_boxplot:
ax.boxplot(data, notch=1, positions=positions, vert=1)
# Set ticks and tick labels of horizontal axis.
_set_ticks_labels(ax, data, labels, positions, plot_opts)
return fig
def _single_violin(ax, pos, pos_data, width, side, plot_opts):
""""""
def _violin_range(pos_data, plot_opts):
"""Return array with correct range, with which violins can be plotted."""
cutoff = plot_opts.get('cutoff', False)
cutoff_type = plot_opts.get('cutoff_type', 'std')
cutoff_val = plot_opts.get('cutoff_val', 1.5)
s = 0.0
if not cutoff:
if cutoff_type == 'std':
s = cutoff_val * np.std(pos_data)
else:
s = cutoff_val
x_lower = kde.dataset.min() - s
x_upper = kde.dataset.max() + s
return np.linspace(x_lower, x_upper, 100)
pos_data = np.asarray(pos_data)
# Kernel density estimate for data at this position.
kde = gaussian_kde(pos_data)
# Create violin for pos, scaled to the available space.
xvals = _violin_range(pos_data, plot_opts)
violin = kde.evaluate(xvals)
violin = width * violin / violin.max()
if side == 'both':
envelope_l, envelope_r = (-violin + pos, violin + pos)
elif side == 'right':
envelope_l, envelope_r = (pos, violin + pos)
elif side == 'left':
envelope_l, envelope_r = (-violin + pos, pos)
else:
msg = "`side` parameter should be one of {'left', 'right', 'both'}."
raise ValueError(msg)
# Draw the violin.
ax.fill_betweenx(xvals, envelope_l, envelope_r,
facecolor=plot_opts.get('violin_fc', '#66c2a5'),
edgecolor=plot_opts.get('violin_ec', 'k'),
lw=plot_opts.get('violin_lw', 1),
alpha=plot_opts.get('violin_alpha', 0.5))
return xvals, violin
def _set_ticks_labels(ax, data, labels, positions, plot_opts):
"""Set ticks and labels on horizontal axis."""
# Set xticks and limits.
ax.set_xlim([np.min(positions) - 0.5, np.max(positions) + 0.5])
ax.set_xticks(positions)
label_fontsize = plot_opts.get('label_fontsize')
label_rotation = plot_opts.get('label_rotation')
if label_fontsize or label_rotation:
from matplotlib.artist import setp
if labels is not None:
if not len(labels) == len(data):
msg = "Length of `labels` should equal length of `data`."
raise ValueError(msg)
xticknames = ax.set_xticklabels(labels)
if label_fontsize:
setp(xticknames, fontsize=label_fontsize)
if label_rotation:
setp(xticknames, rotation=label_rotation)
return
def beanplot(data, ax=None, labels=None, positions=None, side='both',
jitter=False, plot_opts={}):
"""Make a bean plot of each dataset in the `data` sequence.
A bean plot is a combination of a `violinplot` (kernel density estimate of
the probability density function per point) with a line-scatter plot of all
individual data points.
Parameters
----------
data : sequence of ndarrays
Data arrays, one array per value in `positions`.
ax : Matplotlib AxesSubplot instance, optional
If given, this subplot is used to plot in instead of a new figure being
created.
labels : list of str, optional
Tick labels for the horizontal axis. If not given, integers
``1..len(data)`` are used.
positions : array_like, optional
Position array, used as the horizontal axis of the plot. If not given,
spacing of the violins will be equidistant.
side : {'both', 'left', 'right'}, optional
How to plot the violin. Default is 'both'. The 'left', 'right'
options can be used to create asymmetric violin plots.
jitter : bool, optional
If True, jitter markers within violin instead of plotting regular lines
around the center. This can be useful if the data is very dense.
plot_opts : dict, optional
A dictionary with plotting options. All the options for `violinplot`
can be specified, they will simply be passed to `violinplot`. Options
specific to `beanplot` are:
- 'violin_width' : float. Relative width of violins. Max available
space is 1, default is 0.8.
- 'bean_color', MPL color. Color of bean plot lines. Default is 'k'.
Also used for jitter marker edge color if `jitter` is True.
- 'bean_size', scalar. Line length as a fraction of maximum length.
Default is 0.5.
- 'bean_lw', scalar. Linewidth, default is 0.5.
- 'bean_show_mean', bool. If True (default), show mean as a line.
- 'bean_show_median', bool. If True (default), show median as a
marker.
- 'bean_mean_color', MPL color. Color of mean line. Default is 'b'.
- 'bean_mean_lw', scalar. Linewidth of mean line, default is 2.
- 'bean_mean_size', scalar. Line length as a fraction of maximum length.
Default is 0.5.
- 'bean_median_color', MPL color. Color of median marker. Default
is 'r'.
- 'bean_median_marker', MPL marker. Marker type, default is '+'.
- 'jitter_marker', MPL marker. Marker type for ``jitter=True``.
Default is 'o'.
- 'jitter_marker_size', int. Marker size. Default is 4.
- 'jitter_fc', MPL color. Jitter marker face color. Default is None.
- 'bean_legend_text', str. If given, add a legend with given text.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
See Also
--------
violinplot : Violin plot, also used internally in `beanplot`.
matplotlib.pyplot.boxplot : Standard boxplot.
References
----------
P. Kampstra, "Beanplot: A Boxplot Alternative for Visual Comparison of
Distributions", J. Stat. Soft., Vol. 28, pp. 1-9, 2008.
Examples
--------
We use the American National Election Survey 1996 dataset, which has Party
Identification of respondents as independent variable and (among other
data) age as dependent variable.
>>> data = sm.datasets.anes96.load_pandas()
>>> party_ID = np.arange(7)
>>> labels = ["Strong Democrat", "Weak Democrat", "Independent-Democrat",
    ...           "Independent-Independent", "Independent-Republican",
... "Weak Republican", "Strong Republican"]
Group age by party ID, and create a violin plot with it:
>>> plt.rcParams['figure.subplot.bottom'] = 0.23 # keep labels visible
>>> age = [data.exog['age'][data.endog == id] for id in party_ID]
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> sm.graphics.beanplot(age, ax=ax, labels=labels,
... plot_opts={'cutoff_val':5, 'cutoff_type':'abs',
... 'label_fontsize':'small',
... 'label_rotation':30})
>>> ax.set_xlabel("Party identification of respondent.")
>>> ax.set_ylabel("Age")
>>> plt.show()
.. plot:: plots/graphics_boxplot_beanplot.py
"""
fig, ax = utils.create_mpl_ax(ax)
if positions is None:
positions = np.arange(len(data)) + 1
# Determine available horizontal space for each individual violin.
pos_span = np.max(positions) - np.min(positions)
violin_width = np.min([0.15 * np.max([pos_span, 1.]),
plot_opts.get('violin_width', 0.8) / 2.])
bean_width = np.min([0.15 * np.max([pos_span, 1.]),
plot_opts.get('bean_size', 0.5) / 2.])
bean_mean_width = np.min([0.15 * np.max([pos_span, 1.]),
plot_opts.get('bean_mean_size', 0.5) / 2.])
legend_txt = plot_opts.get('bean_legend_text', None)
for pos_data, pos in zip(data, positions):
# Draw violins.
xvals, violin = _single_violin(ax, pos, pos_data, violin_width, side, plot_opts)
if jitter:
# Draw data points at random coordinates within violin envelope.
jitter_coord = pos + _jitter_envelope(pos_data, xvals, violin, side)
ax.plot(jitter_coord, pos_data, ls='',
marker=plot_opts.get('jitter_marker', 'o'),
ms=plot_opts.get('jitter_marker_size', 4),
mec=plot_opts.get('bean_color', 'k'),
mew=1, mfc=plot_opts.get('jitter_fc', 'none'),
label=legend_txt)
else:
# Draw bean lines.
ax.hlines(pos_data, pos - bean_width, pos + bean_width,
lw=plot_opts.get('bean_lw', 0.5),
color=plot_opts.get('bean_color', 'k'),
label=legend_txt)
# Show legend if required.
if legend_txt is not None:
_show_legend(ax)
legend_txt = None # ensure we get one entry per call to beanplot
# Draw mean line.
if plot_opts.get('bean_show_mean', True):
ax.hlines(np.mean(pos_data), pos - bean_mean_width, pos + bean_mean_width,
lw=plot_opts.get('bean_mean_lw', 2.),
color=plot_opts.get('bean_mean_color', 'b'))
# Draw median marker.
if plot_opts.get('bean_show_median', True):
ax.plot(pos, np.median(pos_data),
marker=plot_opts.get('bean_median_marker', '+'),
color=plot_opts.get('bean_median_color', 'r'))
# Set ticks and tick labels of horizontal axis.
_set_ticks_labels(ax, data, labels, positions, plot_opts)
return fig
def _jitter_envelope(pos_data, xvals, violin, side):
"""Determine envelope for jitter markers."""
if side == 'both':
low, high = (-1., 1.)
elif side == 'right':
low, high = (0, 1.)
elif side == 'left':
low, high = (-1., 0)
else:
raise ValueError("`side` input incorrect: %s" % side)
jitter_envelope = np.interp(pos_data, xvals, violin)
jitter_coord = jitter_envelope * np.random.uniform(low=low, high=high,
size=pos_data.size)
return jitter_coord
def _show_legend(ax):
"""Utility function to show legend."""
leg = ax.legend(loc=1, shadow=True, fancybox=True, labelspacing=0.2,
borderpad=0.15)
ltext = leg.get_texts()
llines = leg.get_lines()
frame = leg.get_frame()
from matplotlib.artist import setp
setp(ltext, fontsize='small')
setp(llines, linewidth=1)
| bsd-3-clause |
evan-magnusson/dynamic | Data/Calibration/Firm Calibration/parameters/national_income/processing/read_income_data.py | 6 | 5260 | '''
-------------------------------------------------------------------------------
Date created: 5/12/2015
Last updated 5/12/2015
-------------------------------------------------------------------------------
-------------------------------------------------------------------------------
Packages:
-------------------------------------------------------------------------------
'''
import os.path
import sys
sys.path.append(os.path.abspath(r"N:\Lott, Sherwin\Other Calibration\Program"))
import numpy as np
import pandas as pd
import xlrd
#
import naics_processing as naics
'''
-------------------------------------------------------------------------------
-------------------------------------------------------------------------------
'''
# Defining constant names:
BUS_INC = "Business_Income"
FIN_INC = "Financial_Income"
INT_INC = "Interest_Income"
#
def calc_bus_inc(tree):
for i in tree.enum_inds:
i.append_dfs((BUS_INC, pd.DataFrame(np.zeros((1,1)), columns = [BUS_INC])))
#
for i in tree.enum_inds:
fin_inc = i.data.dfs[FIN_INC][FIN_INC][0]
int_inc = i.data.dfs[INT_INC][INT_INC][0]
i.data.dfs[BUS_INC][BUS_INC][0] = fin_inc - int_inc
#tree = naics.load_naics("N:\\Lott, Sherwin\\Other Calibration\\Program\\depreciation\\data\\2012_NAICS_Codes.csv")
def load_nipa_inc_ind(data_folder, tree = None):
inc_ind_file = os.path.abspath(data_folder + "\\National_Income--Industry.xls")
inc_ind_cross_file = os.path.abspath(data_folder + "\\National_Income--Industry_Crosswalk.csv")
#
data = load_nipa_ind(inc_ind_file, inc_ind_cross_file)
data.columns = ["NAICS_Code", FIN_INC]
#
conversion_factor = 10 ** 9
for i in xrange(0, data.shape[0]):
data[FIN_INC][i] *= conversion_factor
#
if tree == None:
return data
naics_data_to_tree(tree, data, FIN_INC)
def load_nipa_int_ind(data_folder, tree = None):
int_ind_file = os.path.abspath(data_folder + "\\Interest--Industry.xls")
int_ind_cross_file = os.path.abspath(data_folder + "\\Interest--Industry_Crosswalk.csv")
#
data = load_nipa_ind(int_ind_file, int_ind_cross_file)
data.columns = ["NAICS_Code", INT_INC]
#
conversion_factor = 10.0 ** 6
for i in xrange(0, data.shape[0]):
data[INT_INC][i] *= conversion_factor
#
if tree == None:
return data
naics_data_to_tree(tree, data, INT_INC)
def load_nipa_ind(data_file, cross_file):
#data_folder = "N:\\Lott, Sherwin\\Other Calibration\\Program\\national_income\\data"
data_book = xlrd.open_workbook(data_file)
data_sht = data_book.sheet_by_index(0)
#
data_cross = pd.read_csv(cross_file)
#data_cross = data_cross.fillna(-1)
#data_cross = pd.DataFrame(data_cross[data_cross["NAICS Code:"] != -1])
output = np.zeros(data_cross.shape[0])
start_pos = naics.search_ws(data_sht, "Line", 25, True, [0,0], True)
for i in xrange(start_pos[0]+1, data_sht.nrows):
if(str(data_sht.cell_value(i,start_pos[1])) == "1"):
start_pos[0] = i
break
cur_row = start_pos[0]
ind_col = start_pos[1] + 1
data_col = data_sht.ncols - 1
for i in xrange(0, data_sht.ncols):
try:
float(data_sht.cell_value(cur_row, data_col))
break
except ValueError:
data_col -= 1
for i in xrange(0, data_cross.shape[0]):
for j in xrange(start_pos[0], data_sht.nrows):
try:
if(data_cross["Industry"][i] in data_sht.cell_value(cur_row, ind_col)):
output[i] = data_sht.cell_value(cur_row, data_col)
cur_row = start_pos[0] + ((cur_row+1-start_pos[0]) % (data_sht.nrows-start_pos[0]))
break
cur_row = start_pos[0] + ((cur_row+1-start_pos[0]) % (data_sht.nrows-start_pos[0]))
except ValueError:
cur_row = start_pos[0] + ((cur_row+1-start_pos[0]) % (data_sht.nrows-start_pos[0]))
return pd.DataFrame(np.column_stack((data_cross["NAICS_Code"], output)), columns = ["NAICS Codes:", ""])
def naics_data_to_tree(tree, df, df_name = "", bp_tree = None, bp_df = None):
#
for i in tree.enum_inds:
i.append_dfs((df_name, pd.DataFrame(np.zeros((1,len(df.columns[1:]))),
columns = df.columns[1:])))
#
enum_index = 0
#
for i in xrange(0, len(tree.enum_inds)):
cur_ind = tree.enum_inds[i]
cur_dfs = cur_ind.data.dfs
tot_share = 0
for j in xrange(0, df.shape[0]):
if df["NAICS_Code"][j] != df["NAICS_Code"][j]:
continue
df_code = df["NAICS_Code"][j]
df_code = df_code.split(".")
cur_share = naics.compare_codes(df_code, cur_dfs["Codes:"].iloc[:,0])
if cur_share == 0:
continue
tot_share += cur_share
#
for k in xrange(1, df.shape[1]):
cur_dfs[df_name].iloc[0,k-1] = cur_share * df.iloc[j,k]
#
if tot_share == 1:
break
enum_index = (enum_index+1) % len(tree.enum_inds)
| mit |
vybstat/scikit-learn | examples/bicluster/plot_spectral_coclustering.py | 276 | 1736 | """
==============================================
A demo of the Spectral Co-Clustering algorithm
==============================================
This example demonstrates how to generate a dataset and bicluster it
using the Spectral Co-Clustering algorithm.
The dataset is generated using the ``make_biclusters`` function, which
creates a matrix of small values and implants biclusters with large
values. The rows and columns are then shuffled and passed to the
Spectral Co-Clustering algorithm. Rearranging the shuffled matrix to
make biclusters contiguous shows how accurately the algorithm found
the biclusters.
"""
print(__doc__)
# Author: Kemal Eren <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_biclusters
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.metrics import consensus_score
data, rows, columns = make_biclusters(
shape=(300, 300), n_clusters=5, noise=5,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralCoclustering(n_clusters=5, random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.3f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.show()
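# Hedged addition, not part of the original example: with cleanly planted
# biclusters the consensus score printed above should be close to 1.0, and
# the model assigns one bicluster label per row and per column.
print(model.row_labels_.shape, model.column_labels_.shape)  # (300,) (300,)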
| bsd-3-clause |
MechCoder/scikit-learn | examples/ensemble/plot_voting_probas.py | 316 | 2824 | """
===========================================================
Plot class probabilities calculated by the VotingClassifier
===========================================================
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`LogisticRegression`,
`GaussianNB`, and `RandomForestClassifier`) and used to initialize a
soft-voting `VotingClassifier` with weights `[1, 1, 5]`, which means that
the predicted probabilities of the `RandomForestClassifier` count 5 times
as much as the weights of the other classifiers when the averaged probability
is calculated.
To visualize the probability weighting, we fit each classifier on the training
set and plot the predicted class probabilities for the first sample in this
example dataset.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.0, -1.0], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 5])
# predict class probabilities for all classifiers
probas = [c.fit(X, y).predict_proba(X) for c in (clf1, clf2, clf3, eclf)]
# get class probabilities for the first sample in the dataset
class1_1 = [pr[0, 0] for pr in probas]
class2_1 = [pr[0, 1] for pr in probas]
# plotting
N = 4 # number of groups
ind = np.arange(N) # group positions
width = 0.35 # bar width
fig, ax = plt.subplots()
# bars for classifier 1-3
p1 = ax.bar(ind, np.hstack(([class1_1[:-1], [0]])), width, color='green')
p2 = ax.bar(ind + width, np.hstack(([class2_1[:-1], [0]])), width, color='lightgreen')
# bars for VotingClassifier
p3 = ax.bar(ind, [0, 0, 0, class1_1[-1]], width, color='blue')
p4 = ax.bar(ind + width, [0, 0, 0, class2_1[-1]], width, color='steelblue')
# plot annotations
plt.axvline(2.8, color='k', linestyle='dashed')
ax.set_xticks(ind + width)
ax.set_xticklabels(['LogisticRegression\nweight 1',
'GaussianNB\nweight 1',
'RandomForestClassifier\nweight 5',
'VotingClassifier\n(average probabilities)'],
rotation=40,
ha='right')
plt.ylim([0, 1])
plt.title('Class probabilities for sample 1 by different classifiers')
plt.legend([p1[0], p2[0]], ['class 1', 'class 2'], loc='upper left')
plt.show()
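# Hedged addition, not part of the original example: print the raw
# probabilities that the bars above are drawn from (first sample only).
# The order follows `probas`, i.e. clf1, clf2, clf3, eclf.
for name, pr in zip(['LogisticRegression', 'RandomForestClassifier',
                     'GaussianNB', 'VotingClassifier'], probas):
    print("%s: class 1 = %.3f, class 2 = %.3f" % (name, pr[0, 0], pr[0, 1]))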
| bsd-3-clause |
chrisburr/scikit-learn | sklearn/datasets/base.py | 22 | 22973 | """
Base IO code for all datasets
"""
# Copyright (c) 2007 David Cournapeau <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
# 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
import os
import csv
import sys
import shutil
from os import environ
from os.path import dirname
from os.path import join
from os.path import exists
from os.path import expanduser
from os.path import isdir
from os.path import splitext
from os import listdir
from os import makedirs
import numpy as np
from ..utils import check_random_state
class Bunch(dict):
"""Container object for datasets
Dictionary-like object that exposes its keys as attributes.
>>> b = Bunch(a=1, b=2)
>>> b['b']
2
>>> b.b
2
>>> b.a = 3
>>> b['a']
3
>>> b.c = 6
>>> b['c']
6
"""
def __init__(self, **kwargs):
super(Bunch, self).__init__(kwargs)
def __setattr__(self, key, value):
self[key] = value
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(key)
def __getstate__(self):
return self.__dict__
def get_data_home(data_home=None):
"""Return the path of the scikit-learn data dir.
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'scikit_learn_data'
in the user home folder.
Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment
variable or programmatically by giving an explicit folder path. The
'~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
"""
if data_home is None:
data_home = environ.get('SCIKIT_LEARN_DATA',
join('~', 'scikit_learn_data'))
data_home = expanduser(data_home)
if not exists(data_home):
makedirs(data_home)
return data_home
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache."""
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
def load_files(container_path, description=None, categories=None,
load_content=True, shuffle=True, encoding=None,
decode_error='strict', random_state=0):
"""Load text files with categories as subfolder names.
    Individual samples are assumed to be files stored in a two-level folder
structure such as the following:
container_folder/
category_1_folder/
file_1.txt
file_2.txt
...
file_42.txt
category_2_folder/
file_43.txt
file_44.txt
...
The folder names are used as supervised signal label names. The
individual file names are not important.
This function does not try to extract features into a numpy array or
scipy sparse matrix. In addition, if load_content is false it
does not try to load the files in memory.
To use text files in a scikit-learn classification or clustering
algorithm, you will need to use the `sklearn.feature_extraction.text`
module to build a feature extraction transformer that suits your
problem.
If you set load_content=True, you should also specify the encoding of
the text using the 'encoding' parameter. For many modern text files,
'utf-8' will be the correct encoding. If you leave encoding equal to None,
then the content will be made of bytes instead of Unicode, and you will
not be able to use most functions in `sklearn.feature_extraction.text`.
    Similar feature extractors should be built for other kinds of unstructured
data input such as images, audio, video, ...
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
container_path : string or unicode
Path to the main folder holding one subfolder per category
description: string or unicode, optional (default=None)
A paragraph describing the characteristic of the dataset: its source,
reference, etc.
categories : A collection of strings or None, optional (default=None)
If None (default), load all the categories.
If not None, list of category names to load (other categories ignored).
load_content : boolean, optional (default=True)
Whether to load or not the content of the different files. If
true a 'data' attribute containing the text information is present
in the data structure returned. If not, a filenames attribute
gives the path to the files.
encoding : string or None (default is None)
If None, do not try to decode the content of the files (e.g. for
images or other non-text content).
If not None, encoding to use to decode text files to Unicode if
load_content is True.
decode_error: {'strict', 'ignore', 'replace'}, optional
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. Passed as keyword
argument 'errors' to bytes.decode.
shuffle : bool, optional (default=True)
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: either
data, the raw text data to learn, or 'filenames', the files
holding it, 'target', the classification labels (integer index),
'target_names', the meaning of the labels, and 'DESCR', the full
description of the dataset.
"""
target = []
target_names = []
filenames = []
folders = [f for f in sorted(listdir(container_path))
if isdir(join(container_path, f))]
if categories is not None:
folders = [f for f in folders if f in categories]
for label, folder in enumerate(folders):
target_names.append(folder)
folder_path = join(container_path, folder)
documents = [join(folder_path, d)
for d in sorted(listdir(folder_path))]
target.extend(len(documents) * [label])
filenames.extend(documents)
# convert to array for fancy indexing
filenames = np.array(filenames)
target = np.array(target)
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(filenames.shape[0])
random_state.shuffle(indices)
filenames = filenames[indices]
target = target[indices]
if load_content:
data = []
for filename in filenames:
with open(filename, 'rb') as f:
data.append(f.read())
if encoding is not None:
data = [d.decode(encoding, decode_error) for d in data]
return Bunch(data=data,
filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
return Bunch(filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
def load_iris():
"""Load and return the iris dataset (classification).
The iris dataset is a classic and very easy multi-class classification
dataset.
================= ==============
Classes 3
Samples per class 50
Samples total 150
Dimensionality 4
Features real, positive
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the
full description of the dataset.
Examples
--------
Let's say you are interested in the samples 10, 25, and 50, and want to
know their class name.
>>> from sklearn.datasets import load_iris
>>> data = load_iris()
>>> data.target[[10, 25, 50]]
array([0, 0, 1])
>>> list(data.target_names)
['setosa', 'versicolor', 'virginica']
"""
module_path = dirname(__file__)
with open(join(module_path, 'data', 'iris.csv')) as csv_file:
data_file = csv.reader(csv_file)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
target_names = np.array(temp[2:])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=np.int)
for i, ir in enumerate(data_file):
data[i] = np.asarray(ir[:-1], dtype=np.float64)
target[i] = np.asarray(ir[-1], dtype=np.int)
with open(join(module_path, 'descr', 'iris.rst')) as rst_file:
fdescr = rst_file.read()
return Bunch(data=data, target=target,
target_names=target_names,
DESCR=fdescr,
feature_names=['sepal length (cm)', 'sepal width (cm)',
'petal length (cm)', 'petal width (cm)'])
def load_breast_cancer():
"""Load and return the breast cancer wisconsin dataset (classification).
The breast cancer dataset is a classic and very easy binary classification
dataset.
================= ==============
Classes 2
Samples per class 212(M),357(B)
Samples total 569
Dimensionality 30
Features real, positive
================= ==============
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the
full description of the dataset.
The copy of UCI ML Breast Cancer Wisconsin (Diagnostic) dataset is
downloaded from:
https://goo.gl/U2Uwz2
Examples
--------
Let's say you are interested in the samples 10, 50, and 85, and want to
know their class name.
>>> from sklearn.datasets import load_breast_cancer
>>> data = load_breast_cancer()
>>> data.target[[10, 50, 85]]
array([0, 1, 0])
>>> list(data.target_names)
['malignant', 'benign']
"""
module_path = dirname(__file__)
with open(join(module_path, 'data', 'breast_cancer.csv')) as csv_file:
data_file = csv.reader(csv_file)
first_line = next(data_file)
n_samples = int(first_line[0])
n_features = int(first_line[1])
target_names = np.array(first_line[2:4])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=np.int)
for count, value in enumerate(data_file):
data[count] = np.asarray(value[:-1], dtype=np.float64)
target[count] = np.asarray(value[-1], dtype=np.int)
with open(join(module_path, 'descr', 'breast_cancer.rst')) as rst_file:
fdescr = rst_file.read()
feature_names = np.array(['mean radius', 'mean texture',
'mean perimeter', 'mean area',
'mean smoothness', 'mean compactness',
'mean concavity', 'mean concave points',
'mean symmetry', 'mean fractal dimension',
'radius error', 'texture error',
'perimeter error', 'area error',
'smoothness error', 'compactness error',
'concavity error', 'concave points error',
'symmetry error', 'fractal dimension error',
'worst radius', 'worst texture',
'worst perimeter', 'worst area',
'worst smoothness', 'worst compactness',
'worst concavity', 'worst concave points',
'worst symmetry', 'worst fractal dimension'])
return Bunch(data=data, target=target,
target_names=target_names,
DESCR=fdescr,
feature_names=feature_names)
def load_digits(n_class=10):
"""Load and return the digits dataset (classification).
Each datapoint is a 8x8 image of a digit.
================= ==============
Classes 10
Samples per class ~180
Samples total 1797
Dimensionality 64
Features integers 0-16
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
n_class : integer, between 0 and 10, optional (default=10)
The number of classes to return.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'images', the images corresponding
to each sample, 'target', the classification labels for each
sample, 'target_names', the meaning of the labels, and 'DESCR',
the full description of the dataset.
Examples
--------
To load the data and visualize the images::
>>> from sklearn.datasets import load_digits
>>> digits = load_digits()
>>> print(digits.data.shape)
(1797, 64)
>>> import pylab as pl #doctest: +SKIP
>>> pl.gray() #doctest: +SKIP
>>> pl.matshow(digits.images[0]) #doctest: +SKIP
>>> pl.show() #doctest: +SKIP
"""
module_path = dirname(__file__)
data = np.loadtxt(join(module_path, 'data', 'digits.csv.gz'),
delimiter=',')
with open(join(module_path, 'descr', 'digits.rst')) as f:
descr = f.read()
target = data[:, -1]
flat_data = data[:, :-1]
images = flat_data.view()
images.shape = (-1, 8, 8)
if n_class < 10:
idx = target < n_class
flat_data, target = flat_data[idx], target[idx]
images = images[idx]
return Bunch(data=flat_data,
target=target.astype(np.int),
target_names=np.arange(10),
images=images,
DESCR=descr)
def load_diabetes():
"""Load and return the diabetes dataset (regression).
============== ==================
Samples total 442
Dimensionality 10
Features real, -.2 < x < .2
Targets integer 25 - 346
============== ==================
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn and 'target', the regression target for each
sample.
"""
base_dir = join(dirname(__file__), 'data')
data = np.loadtxt(join(base_dir, 'diabetes_data.csv.gz'))
target = np.loadtxt(join(base_dir, 'diabetes_target.csv.gz'))
return Bunch(data=data, target=target)
def load_linnerud():
"""Load and return the linnerud dataset (multivariate regression).
Samples total: 20
Dimensionality: 3 for both data and targets
Features: integer
Targets: integer
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: 'data' and
'targets', the two multivariate datasets, with 'data' corresponding to
the exercise and 'targets' corresponding to the physiological
measurements, as well as 'feature_names' and 'target_names'.
"""
base_dir = join(dirname(__file__), 'data/')
# Read data
data_exercise = np.loadtxt(base_dir + 'linnerud_exercise.csv', skiprows=1)
data_physiological = np.loadtxt(base_dir + 'linnerud_physiological.csv',
skiprows=1)
# Read header
with open(base_dir + 'linnerud_exercise.csv') as f:
header_exercise = f.readline().split()
with open(base_dir + 'linnerud_physiological.csv') as f:
header_physiological = f.readline().split()
with open(dirname(__file__) + '/descr/linnerud.rst') as f:
descr = f.read()
return Bunch(data=data_exercise, feature_names=header_exercise,
target=data_physiological,
target_names=header_physiological,
DESCR=descr)
def load_boston():
"""Load and return the boston house-prices dataset (regression).
============== ==============
Samples total 506
Dimensionality 13
Features real, positive
Targets real 5. - 50.
============== ==============
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the regression targets,
and 'DESCR', the full description of the dataset.
Examples
--------
>>> from sklearn.datasets import load_boston
>>> boston = load_boston()
>>> print(boston.data.shape)
(506, 13)
"""
module_path = dirname(__file__)
fdescr_name = join(module_path, 'descr', 'boston_house_prices.rst')
with open(fdescr_name) as f:
descr_text = f.read()
data_file_name = join(module_path, 'data', 'boston_house_prices.csv')
with open(data_file_name) as f:
data_file = csv.reader(f)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,))
temp = next(data_file) # names of features
feature_names = np.array(temp)
for i, d in enumerate(data_file):
data[i] = np.asarray(d[:-1], dtype=np.float64)
target[i] = np.asarray(d[-1], dtype=np.float64)
return Bunch(data=data,
target=target,
# last column is target value
feature_names=feature_names[:-1],
DESCR=descr_text)
def load_sample_images():
"""Load sample images for image manipulation.
Loads both, ``china`` and ``flower``.
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'images', the two sample images, 'filenames', the file
names for the images, and 'DESCR'
the full description of the dataset.
Examples
--------
To load the data and visualize the images:
>>> from sklearn.datasets import load_sample_images
>>> dataset = load_sample_images() #doctest: +SKIP
>>> len(dataset.images) #doctest: +SKIP
2
>>> first_img_data = dataset.images[0] #doctest: +SKIP
>>> first_img_data.shape #doctest: +SKIP
(427, 640, 3)
>>> first_img_data.dtype #doctest: +SKIP
dtype('uint8')
"""
# Try to import imread from scipy. We do this lazily here to prevent
# this module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
raise ImportError("The Python Imaging Library (PIL) "
"is required to load data from jpeg files")
module_path = join(dirname(__file__), "images")
with open(join(module_path, 'README.txt')) as f:
descr = f.read()
filenames = [join(module_path, filename)
for filename in os.listdir(module_path)
if filename.endswith(".jpg")]
# Load image data for each image in the source folder.
images = [imread(filename) for filename in filenames]
return Bunch(images=images,
filenames=filenames,
DESCR=descr)
def load_sample_image(image_name):
"""Load the numpy array of a single sample image
Parameters
-----------
image_name: {`china.jpg`, `flower.jpg`}
The name of the sample image loaded
Returns
-------
img: 3D array
The image as a numpy array: height x width x color
Examples
---------
>>> from sklearn.datasets import load_sample_image
>>> china = load_sample_image('china.jpg') # doctest: +SKIP
>>> china.dtype # doctest: +SKIP
dtype('uint8')
>>> china.shape # doctest: +SKIP
(427, 640, 3)
>>> flower = load_sample_image('flower.jpg') # doctest: +SKIP
>>> flower.dtype # doctest: +SKIP
dtype('uint8')
>>> flower.shape # doctest: +SKIP
(427, 640, 3)
"""
images = load_sample_images()
index = None
for i, filename in enumerate(images.filenames):
if filename.endswith(image_name):
index = i
break
if index is None:
raise AttributeError("Cannot find sample image: %s" % image_name)
return images.images[index]
def _pkl_filepath(*args, **kwargs):
"""Ensure different filenames for Python 2 and Python 3 pickles
An object pickled under Python 3 cannot be loaded under Python 2.
    An object pickled under Python 2 can sometimes not be loaded
correctly under Python 3 because some Python 2 strings are decoded as
Python 3 strings which can be problematic for objects that use Python 2
strings as byte buffers for numerical data instead of "real" strings.
Therefore, dataset loaders in scikit-learn use different files for pickles
    managed by Python 2 and Python 3 in the same SCIKIT_LEARN_DATA folder so
    as to avoid conflicts.
    args[-1] is expected to be the ".pkl" filename. Under Python 3, a
    suffix is inserted before the extension so that the two versions do not
    overwrite each other:
_pkl_filepath('/path/to/folder', 'filename.pkl') returns:
- /path/to/folder/filename.pkl under Python 2
- /path/to/folder/filename_py3.pkl under Python 3+
"""
py3_suffix = kwargs.get("py3_suffix", "_py3")
basename, ext = splitext(args[-1])
if sys.version_info[0] >= 3:
basename += py3_suffix
new_args = args[:-1] + (basename + ext,)
return join(*new_args)
| bsd-3-clause |
ericnam808/sjsu-cs185c-yearofmusic-ml | nb_classifier.py | 1 | 3995 | """
M.Layman
E Nam
CS 185 HW 5 - FA 2015
Naive Bayes classifier using sklearn.
"""
from numpy import *
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn import metrics
import csv
import logging as log
log.basicConfig(format='[%(levelname)s] %(message)s', level=log.DEBUG)
def splice_delimited_columns(data_filepath, columns):
features = []
with open(data_filepath, 'rb') as csvfile:
reader = csv.reader(csvfile)
for line in reader:
features.append(
map(lambda i: line[i], columns))
return features
def splice_columns(arr, indexes):
if len(indexes) == 1:
return map(lambda ar: ar[indexes[0]].lower(), arr)
return map(
lambda ar: reduce(
lambda x,y: x + ' - ' + y,
map(lambda i: ar[i].lower(), indexes)), arr)
def build_unique_list(iter_):
return list(set(iter_))
def index_list(iter_, codes):
return map(lambda v: codes.index(v), iter_)
def train_classifer(classifer, training_file, test_file, columns, class_col, data_cols):
log.info("Reading training file '%s'..." % training_file)
raw = splice_delimited_columns(
training_file, columns)
log.info("Success! %d raw lines." % len(raw))
log.info('Extracting classes and features...')
raw_classes = splice_columns(raw,[class_col])
training_features = splice_columns(raw, data_cols)
print "Raw class samples:"
print raw_classes[:3]
print "Raw training feature samples:"
print training_features[:3]
# account for weird, non-unicode chars
count_vect = CountVectorizer(
decode_error='replace',
strip_accents='unicode',
binary=True)
X_train_counts = count_vect.fit_transform(training_features)
log.info('Vocabulary list: %d unique words.' % len(count_vect.get_feature_names()))
# Vectorize by tf-idf
tfidf_transformer = TfidfTransformer()
X_train_tfidf = tfidf_transformer.fit_transform(X_train_counts)
log.info('Fitting classifer. This may take some time...')
classifer.fit(X_train_tfidf.toarray(), raw_classes)
log.info('Fit successful!')
### Validation Section ###
log.info('### Start Test Validation ###')
log.info("Reading test file '%s'..." % test_file)
test_raw = splice_delimited_columns(
test_file, columns)
test_classes = splice_columns(test_raw,[class_col])
test_features = splice_columns(test_raw, data_cols)
print "Test class samples:"
print test_classes[:3]
print "Test feature samples:"
print test_features[:3]
log.info('Vectorizing test features...')
X_new_counts = count_vect.transform(test_features)
X_new_tfidf = tfidf_transformer.transform(X_new_counts)
log.info('Running predictions. This may also take up your time...')
results = classifer.predict(X_new_tfidf.toarray())
log.info('Success! Generating report.')
report = metrics.classification_report(
test_classes, results, labels=None)
return report
def main():
columns = [0,2,3]
class_col = 0
feature_cols = [1,2]
def execute_test(title, clf, output_filepath):
print '## Executing Classifer "%s" ##' % title
report = train_classifer(
clf,
r'data/debug_combo2.txt',
r'data/debug_combo2_test.txt',
columns,
class_col,
feature_cols)
print 'Results...'
print report
with open(output_filepath, 'w') as outfile:
outfile.write(report)
classifers = [
('MultinomialNB - Artist_Title Year Classifier',
MultinomialNB(alpha=.01),
r'results/nb_result_2.txt') ]
# execute test
map(lambda x: execute_test(x[0],x[1],x[2]), classifers)
log.info('Normal program exit.')
main()
| mit |
MadsJensen/malthe_alpha_project | ica_manual.py | 1 | 4556 | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 8 14:45:02 2014.
@author: mje
"""
import mne
import socket
import numpy as np
import os
from mne.io import Raw
from mne.preprocessing import ICA, create_ecg_epochs, create_eog_epochs
import matplotlib.pyplot as plt
# Setup paths and prepare raw data
hostname = socket.gethostname()
if hostname == "Wintermute":
data_path = "/home/mje/mnt/caa/scratch/"
else:
data_path = "/projects/MINDLAB2015_MEG-CorticalAlphaAttention/scratch/"
save_folder = data_path + "filter_ica_data/"
maxfiltered_folder = data_path + "maxfiltered_data/"
# SETTINGS
n_jobs = 1
reject = dict(grad=5000e-13, # T / m (gradiometers)
mag=5e-12, # T (magnetometers)
eeg=300e-6) # uVolts (EEG)
l_freq, h_freq = 1, 98 # High and low frequency setting for the band pass
n_freq = 50 # notch filter frequency
decim = 7 # decim value
subject = "0022"
os.chdir(save_folder)
# Functions #
"""Function will compute ICA on raw and apply the ICA.
params:
subject : str
the subject id to be loaded
"""
raw = Raw(save_folder + "%s_filtered_data_mc_raw_tsss.fif" % subject,
preload=True)
# ICA Part
ica = ICA(n_components=0.95, method='fastica', max_iter=256)
picks = mne.pick_types(raw.info, meg=True, eeg=True,
stim=False, exclude='bads')
ica.fit(raw, picks=picks, decim=decim, reject=reject)
# maximum number of components to reject
n_max_ecg, n_max_eog = 3, 1
##########################################################################
# 2) identify bad components by analyzing latent sources.
title = 'Sources related to %s artifacts (red) for sub: %s'
# generate ECG epochs use detection via phase statistics
ecg_epochs = create_ecg_epochs(raw, ch_name="ECG002",
tmin=-.5, tmax=.5, picks=picks)
n_ecg_epochs_found = len(ecg_epochs.events)
sel_ecg_epochs = np.arange(0, n_ecg_epochs_found, 10)
ecg_epochs = ecg_epochs[sel_ecg_epochs]
ecg_inds, scores = ica.find_bads_ecg(ecg_epochs, method='ctps')
fig = ica.plot_scores(scores, exclude=ecg_inds,
title=title % ('ecg', subject))
fig.savefig(save_folder + "pics/%s_ecg_scores.png" % subject)
if ecg_inds:
show_picks = np.abs(scores).argsort()[::-1][:5]
fig = ica.plot_sources(raw, show_picks, exclude=ecg_inds,
title=title % ('ecg', subject), show=False)
fig.savefig(save_folder + "pics/%s_ecg_sources.png" % subject)
fig = ica.plot_components(ecg_inds, title=title % ('ecg', subject),
colorbar=True)
fig.savefig(save_folder + "pics/%s_ecg_component.png" % subject)
ecg_inds = ecg_inds[:n_max_ecg]
ica.exclude += ecg_inds
# estimate average artifact
ecg_evoked = ecg_epochs.average()
del ecg_epochs
# plot ECG sources + selection
fig = ica.plot_sources(ecg_evoked, exclude=ecg_inds)
fig.savefig(save_folder + "pics/%s_ecg_sources_ave.png" % subject)
# plot ECG cleaning
fig = ica.plot_overlay(ecg_evoked, exclude=ecg_inds)
fig.savefig(save_folder + "pics/%s_ecg_sources_clean_ave.png" % subject)
# DETECT EOG BY CORRELATION
# VERTICAL EOG
eog_epochs = create_eog_epochs(raw, ch_name="EOG001")
eog_inds, scores = ica.find_bads_eog(raw)
fig = ica.plot_scores(scores, exclude=eog_inds,
title=title % ('eog', subject))
fig.savefig(save_folder + "pics/%s_eog_scores.png" % subject)
fig = ica.plot_components(eog_inds, title=title % ('eog', subject),
colorbar=True)
fig.savefig(save_folder + "pics/%s_eog_component.png" % subject)
eog_inds = eog_inds[:n_max_eog]
ica.exclude += eog_inds
del eog_epochs
# HORIZONTAL EOG
eog_epochs = create_eog_epochs(raw, ch_name="EOG003")
eog_inds, scores = ica.find_bads_eog(raw)
fig = ica.plot_scores(scores, exclude=eog_inds,
title=title % ('eog', subject))
fig.savefig(save_folder + "pics/%s_heog_scores.png" % subject)
fig = ica.plot_components(eog_inds, title=title % ('heog', subject),
colorbar=True)
fig.savefig(save_folder + "pics/%s_eog_component.png" % subject)
eog_inds = eog_inds[:n_max_eog]
ica.exclude += eog_inds
del eog_epochs
##########################################################################
# Apply the solution to Raw, Epochs or Evoked like this:
raw_ica = ica.apply(raw, copy=False)
ica.save(save_folder + "%s-ica.fif" % subject) # save ICA componenets
# Save raw with ICA removed
raw_ica.save(save_folder + "%s_filtered_ica_mc_raw_tsss.fif" % subject,
overwrite=True)
plt.close("all")
| mit |
quheng/scikit-learn | sklearn/feature_extraction/tests/test_feature_hasher.py | 258 | 2861 | from __future__ import unicode_literals
import numpy as np
from sklearn.feature_extraction import FeatureHasher
from nose.tools import assert_raises, assert_true
from numpy.testing import assert_array_equal, assert_equal
def test_feature_hasher_dicts():
h = FeatureHasher(n_features=16)
assert_equal("dict", h.input_type)
raw_X = [{"dada": 42, "tzara": 37}, {"gaga": 17}]
X1 = FeatureHasher(n_features=16).transform(raw_X)
gen = (iter(d.items()) for d in raw_X)
X2 = FeatureHasher(n_features=16, input_type="pair").transform(gen)
assert_array_equal(X1.toarray(), X2.toarray())
def test_feature_hasher_strings():
# mix byte and Unicode strings; note that "foo" is a duplicate in row 0
raw_X = [["foo", "bar", "baz", "foo".encode("ascii")],
["bar".encode("ascii"), "baz", "quux"]]
for lg_n_features in (7, 9, 11, 16, 22):
n_features = 2 ** lg_n_features
it = (x for x in raw_X) # iterable
h = FeatureHasher(n_features, non_negative=True, input_type="string")
X = h.transform(it)
assert_equal(X.shape[0], len(raw_X))
assert_equal(X.shape[1], n_features)
assert_true(np.all(X.data > 0))
assert_equal(X[0].sum(), 4)
assert_equal(X[1].sum(), 3)
assert_equal(X.nnz, 6)
def test_feature_hasher_pairs():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": 2},
{"baz": 3, "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert_equal([1, 2], x1_nz)
assert_equal([1, 3, 4], x2_nz)
def test_hash_empty_input():
n_features = 16
raw_X = [[], (), iter(range(0))]
h = FeatureHasher(n_features=n_features, input_type="string")
X = h.transform(raw_X)
assert_array_equal(X.A, np.zeros((len(raw_X), n_features)))
def test_hasher_invalid_input():
assert_raises(ValueError, FeatureHasher, input_type="gobbledygook")
assert_raises(ValueError, FeatureHasher, n_features=-1)
assert_raises(ValueError, FeatureHasher, n_features=0)
assert_raises(TypeError, FeatureHasher, n_features='ham')
h = FeatureHasher(n_features=np.uint16(2 ** 6))
assert_raises(ValueError, h.transform, [])
assert_raises(Exception, h.transform, [[5.5]])
assert_raises(Exception, h.transform, [[None]])
def test_hasher_set_params():
# Test delayed input validation in fit (useful for grid search).
hasher = FeatureHasher()
hasher.set_params(n_features=np.inf)
assert_raises(TypeError, hasher.fit)
def test_hasher_zeros():
# Assert that no zeros are materialized in the output.
X = FeatureHasher().transform([{'foo': 0}])
assert_equal(X.data.shape, (0,))
| bsd-3-clause |
cuemacro/finmarketpy | finmarketpy/network_analysis/learn_network_structure.py | 1 | 4838 | # Project: finmarketpy project
# Filename: learn_network_structure
# Objective: compute a network graph for a group of asset return time series
# Created: 2019-11-02 12:05
# Version: 0.0
# Author: FS
__author__ = 'fs'
#
# Copyright 2016-2020 Cuemacro - https://www.cuemacro.com / @cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
import numpy as np
from sklearn import cluster, covariance, manifold
def learn_network_structure(ts_returns_data, names, alphas=4, cv=5, mode='cd',
assume_centered = False,
n_components=2, n_neighbors=5,
eigen_solver="dense", method='standard',
neighbors_algorithm="auto",
random_state = None, n_jobs=None,
standardise=False):
"""
Parameters
----------
ts_returns_data : array-like of shape [n_samples, n_instruments]
time series matrix of returns
names : array-like of shape [n_samples, 1]
Individual names of the financial instrument
alphas : int or positive float, optional
Number of points on the grids to be used
cv : int, optional
Number of folds for cross-validation splitting strategy
mode : str, optional
Solver to use to compute the graph
assume_centered : bool, optional
Centre the data if False.
n_components : int
Number of components for the manifold
n_neighbors: int
Number of neighbours to consider for each point
eigen_solver : str
Algorithm to compute eigenvalues
method : str
Algorithm to use for local linear embedding
neighbors_algorithm : str
Algorithm to use for nearest neighbours search
random_state : int, RandomState instance or None, optional
If int, random_state is the seed used by the random number generator.
If RandomState instance, random_state is the random number generator.
If None, the random number generator is the RandomState instance used by np.random.
Used when eigen_solver == ‘arpack’
n_jobs : int or None, optional
number of parallel jobs to run
standardise : bool
standardise data if True
    Returns
    -------
    edge_model : sklearn.covariance.graph_lasso_.GraphicalLassoCV
    node_position_model : sklearn.manifold.locally_linear.LocallyLinearEmbedding
    embedding : array-like of shape [n_components, n_instruments]
        Transformed embedding vectors
    labels : array-like of shape [n_instruments, 1]
        numeric identifier of each cluster
"""
if not isinstance(ts_returns_data, (np.ndarray, np.generic)):
raise TypeError("ts_returns_data must be of class ndarray")
# learn graphical structure
edge_model = covariance.GraphicalLassoCV(alphas=alphas, cv=cv, mode=mode,
assume_centered=assume_centered)
edge_model.fit(ts_returns_data)
# cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
for i in range(n_labels + 1):
print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
# find low-dimension embedding - useful for 2D plane visualisation
node_position_model = manifold.LocallyLinearEmbedding(
n_components=n_components, eigen_solver=eigen_solver,
n_neighbors=n_neighbors, method=method,
neighbors_algorithm=neighbors_algorithm,
random_state=random_state, n_jobs=n_jobs)
embedding = node_position_model.fit_transform(ts_returns_data.T).T
if standardise:
# standardise returns
standard_ret = ts_returns_data.copy()
standard_ret /= ts_returns_data.std(axis=0)
# learn graph model
edge_model.fit(standard_ret)
# cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
for i in range(n_labels + 1):
print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
# find low-dimension embedding - useful for 2D plane visualisation
node_position_model = manifold.LocallyLinearEmbedding(
n_components=n_components, eigen_solver=eigen_solver,
n_neighbors=n_neighbors, method=method,
neighbors_algorithm=neighbors_algorithm,
random_state=random_state, n_jobs=n_jobs)
embedding = node_position_model.fit_transform(ts_returns_data.T).T
return edge_model, node_position_model, embedding, labels
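if __name__ == "__main__":
    # Minimal usage sketch, not part of the library: synthetic Gaussian
    # "returns" for a handful of made-up tickers, only to illustrate the
    # expected input/output shapes. Real use would pass an
    # [n_samples, n_instruments] matrix of actual asset returns, and the
    # clustering may warn about non-convergence on pure noise.
    rng = np.random.RandomState(0)
    fake_names = np.array(["AAA", "BBB", "CCC", "DDD", "EEE", "FFF"])
    fake_returns = rng.normal(scale=0.01, size=(500, len(fake_names)))
    edge_model, node_model, embedding, labels = learn_network_structure(
        fake_returns, fake_names, alphas=4, cv=5)
    print(embedding.shape)  # (n_components, n_instruments)
    print(labels)           # one cluster id per instrument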
| apache-2.0 |