| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
| friend0/tower | doc/source/conf.py | 1 | 10638 |
	# -*- coding: utf-8 -*-
#
# world engine documentation build configuration file, created by
# sphinx-quickstart on Mon Nov  2 21:58:16 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
import shlex
from mock import Mock as MagicMock
# print os.path.abspath('../../')
# print os.path.abspath('../world_engine/docs/img/')
# print os.path.abspath('../world_engine/world_engine/mapping/')
# print os.path.abspath('../world_engine/world_engine/server/')
class Mock(MagicMock):
    @classmethod
    def __getattr__(cls, name):
        return Mock()
MOCK_MODULES = ['rasterio', 'numpy', 'pynmea', 'osgeo', 'matplotlib', 'matplotlib.pyplot', 'geographiclib', 'geographiclib.geodesic']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
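# Illustrative note (not part of the original config): after the update above,
# "import rasterio" (or any other name listed in MOCK_MODULES) executed while
# autodoc imports the package resolves to a Mock whose attribute lookups also
# return Mock(), so the docs build without the heavy scientific dependencies.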
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
print(os.path.abspath('../img'))
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('../img'))
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.mathjax',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'world engine'
copyright = u'2015, Ryan A. Rodriguez'
author = u'Ryan A. Rodriguez'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.2'
# The full version, including alpha/beta/rc tags.
release = '0.0.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_sidebars = {
   '**': ['globaltoc.html', 'sourcelink.html', 'searchbox.html']
}
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'worldenginedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
  (master_doc, 'worldengine.tex', u'world engine Documentation',
   u'Ryan A. Rodriguez', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'worldengine', u'world engine Documentation',
     [author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  (master_doc, 'worldengine', u'world engine Documentation',
   author, 'worldengine', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
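# Illustrative note: with this mapping, cross-references such as
# :py:func:`os.path.abspath` or :py:class:`dict` in the project's docstrings
# resolve to the online Python standard library documentation.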
| isc |
| sergeykolychev/mxnet | example/autoencoder/data.py | 27 | 1272 |
	# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import numpy as np
from sklearn.datasets import fetch_mldata
def get_mnist():
    np.random.seed(1234) # set seed for deterministic ordering
    data_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
    data_path = os.path.join(data_path, '../../data')
    mnist = fetch_mldata('MNIST original', data_home=data_path)
    p = np.random.permutation(mnist.data.shape[0])
    X = mnist.data[p].astype(np.float32)*0.02
    Y = mnist.target[p]
    return X, Y
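# Hedged usage sketch (not part of the original example): assumes the MNIST
# download via fetch_mldata succeeds, or a cached copy already exists under
# ../../data relative to this file.
if __name__ == '__main__':
    X, Y = get_mnist()
    # The full "MNIST original" set is 70000 samples of 784 pixels each,
    # scaled here by 0.02; labels are the digits 0-9.
    print(X.shape, Y.shape, X.dtype)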
| apache-2.0 |
| madjelan/scikit-learn | sklearn/feature_extraction/tests/test_dict_vectorizer.py | 276 | 3790 |
	# Authors: Lars Buitinck <[email protected]>
#          Dan Blanchard <[email protected]>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from sklearn.utils.testing import (assert_equal, assert_in,
                                   assert_false, assert_true)
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
def test_dictvectorizer():
    D = [{"foo": 1, "bar": 3},
         {"bar": 4, "baz": 2},
         {"bar": 1, "quux": 1, "quuux": 2}]
    for sparse in (True, False):
        for dtype in (int, np.float32, np.int16):
            for sort in (True, False):
                for iterable in (True, False):
                    v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
                    X = v.fit_transform(iter(D) if iterable else D)
                    assert_equal(sp.issparse(X), sparse)
                    assert_equal(X.shape, (3, 5))
                    assert_equal(X.sum(), 14)
                    assert_equal(v.inverse_transform(X), D)
                    if sparse:
                        # CSR matrices can't be compared for equality
                        assert_array_equal(X.A, v.transform(iter(D) if iterable
                                                            else D).A)
                    else:
                        assert_array_equal(X, v.transform(iter(D) if iterable
                                                          else D))
                    if sort:
                        assert_equal(v.feature_names_,
                                     sorted(v.feature_names_))
def test_feature_selection():
    # make two feature dicts with two useful features and a bunch of useless
    # ones, in terms of chi2
    d1 = dict([("useless%d" % i, 10) for i in range(20)],
              useful1=1, useful2=20)
    d2 = dict([("useless%d" % i, 10) for i in range(20)],
              useful1=20, useful2=1)
    for indices in (True, False):
        v = DictVectorizer().fit([d1, d2])
        X = v.transform([d1, d2])
        sel = SelectKBest(chi2, k=2).fit(X, [0, 1])
        v.restrict(sel.get_support(indices=indices), indices=indices)
        assert_equal(v.get_feature_names(), ["useful1", "useful2"])
def test_one_of_k():
    D_in = [{"version": "1", "ham": 2},
            {"version": "2", "spam": .3},
            {"version=3": True, "spam": -1}]
    v = DictVectorizer()
    X = v.fit_transform(D_in)
    assert_equal(X.shape, (3, 5))
    D_out = v.inverse_transform(X)
    assert_equal(D_out[0], {"version=1": 1, "ham": 2})
    names = v.get_feature_names()
    assert_true("version=2" in names)
    assert_false("version" in names)
def test_unseen_or_no_features():
    D = [{"camelot": 0, "spamalot": 1}]
    for sparse in [True, False]:
        v = DictVectorizer(sparse=sparse).fit(D)
        X = v.transform({"push the pram a lot": 2})
        if sparse:
            X = X.toarray()
        assert_array_equal(X, np.zeros((1, 2)))
        X = v.transform({})
        if sparse:
            X = X.toarray()
        assert_array_equal(X, np.zeros((1, 2)))
        try:
            v.transform([])
        except ValueError as e:
            assert_in("empty", str(e))
def test_deterministic_vocabulary():
    # Generate equal dictionaries with different memory layouts
    items = [("%03d" % i, i) for i in range(1000)]
    rng = Random(42)
    d_sorted = dict(items)
    rng.shuffle(items)
    d_shuffled = dict(items)
    # check that the memory layout does not impact the resulting vocabulary
    v_1 = DictVectorizer().fit([d_sorted])
    v_2 = DictVectorizer().fit([d_shuffled])
    assert_equal(v_1.vocabulary_, v_2.vocabulary_)
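# Hedged usage sketch (not part of the original test suite): illustrates the
# one-of-K expansion that test_one_of_k asserts on -- string values become
# "feature=value" indicator columns, numeric values are kept as-is.
if __name__ == '__main__':
    demo = DictVectorizer(sparse=False)
    demo_X = demo.fit_transform([{"version": "1", "ham": 2},
                                 {"version": "2", "spam": .3}])
    print(demo.get_feature_names())  # ['ham', 'spam', 'version=1', 'version=2']
    print(demo_X)                    # rows: [2, 0, 1, 0] and [0, 0.3, 0, 1]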
| bsd-3-clause |
| Trust-Code/addons-yelizariev | sugarcrm_migration/wizard/upload.py | 16 | 3753 |
	from openerp.osv import osv, fields
from openerp.tools.translate import _
from openerp import tools
import logging
_logger = logging.getLogger(__name__)
import base64
import tempfile 
try:
    import MySQLdb
    import MySQLdb.cursors
    from pandas import DataFrame
except ImportError:
    pass
from ..import_sugarcrm import import_sugarcrm
from ..import_kashflow import import_kashflow
import tarfile
import shutil
try:
    from cStringIO import StringIO
except ImportError:
    from StringIO import StringIO
import os
import glob
class sugarcrm_migration_upload(osv.TransientModel):
    _name = "sugarcrm_migration.upload"
    _description = "Upload dumps"
    _columns = {
        'sugarcrm_file': fields.char('Sugarcrm file (*.tar.gz)', help='Path on server'),
        'kashflow_file': fields.char('Kashflow file (*.tar.gz)', help='Path on server'),
        'db_host': fields.char('MySQL Host'),
        'db_port': fields.char('MySQL Port'),
        'db_name': fields.char('MySQL Database'),
        'db_user': fields.char('MySQL User'),
        'db_passwd': fields.char('MySQL Password'),
        }
    _defaults = {
        'db_host': 'localhost',
        'db_port': '3306',
        'db_name': 'test',
        'db_user': 'test',
        'db_passwd': 'test',
        }
    def upload_button(self, cr, uid, ids, context=None):
        record = self.browse(cr, uid, ids[0])
        self.kashflow(record, cr, uid)
        #self.sugarcrm(record, cr, uid)
        return True
    def sugarcrm(self, record, cr, uid):
        #if not record.sugarcrm_file:
        #    return
        #unzip files
        files = []
        tmp_dir = None
        if record.sugarcrm_file:
            tmp_dir,files = self.unzip_file(record.sugarcrm_file.strip())
        instance = import_sugarcrm(self.pool, cr, uid,
                                   'sugarcrm', #instance_name
                                   'sugarcrm_migration', # module_name
                                   context={'db_host': record.db_host,
                                            'db_port': record.db_port,
                                            'db_user': record.db_user,
                                            'db_passwd': record.db_passwd,
                                            'db_name': record.db_name,
                                            'db_dump_fies': files
                                            }
                                   )
        try:
            shutil.rmtree(tmp_dir)
        except:
            pass
        
        instance.run()
        return instance
    def kashflow(self, record, cr, uid):
        if not record.kashflow_file:
            return
        # unzip files
        tmp,files = self.unzip_file(record.kashflow_file.strip(), pattern='*.csv')
        _logger.info('kashflow files: %s'%files)
        # map data and save to base_import.import
        instance = import_kashflow(self.pool, cr, uid,
                                   'kashflow', #instance_name
                                   'sugarcrm_migration', #module_name
                                   context = {'csv_files': files,
                                              'sugarcrm_instance_name':'sugarcrm'
                                              }
                                   )
        instance.run()
        return instance
    def unzip_file(self, filename, pattern='*'):
        '''
        Extract a *.tar.gz archive into a temporary directory.
        Returns (tmp_dir, file_names), where file_names are the extracted
        files matching `pattern` at the archive root or one directory down.
        '''
        tar = tarfile.open(name=filename)
        dir = tempfile.mkdtemp(prefix='tmp_sugarcrm_migration')
        tar.extractall(path=dir)
        return dir, glob.glob('%s/%s' % (dir, pattern))+glob.glob('%s/*/%s' % (dir, pattern))
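    # Illustrative call (hypothetical path, not part of the original module):
    #   tmp_dir, csv_files = self.unzip_file('/srv/dumps/kashflow.tar.gz',
    #                                        pattern='*.csv')
    # csv_files then lists every extracted .csv at the archive root or one
    # directory below it, and tmp_dir can be removed once the import is done.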
| lgpl-3.0 |
| kdebrab/pandas | pandas/tests/io/parser/index_col.py | 20 | 5352 |
	# -*- coding: utf-8 -*-
"""
Tests that the specified index column (a.k.a 'index_col')
is properly handled or inferred during parsing for all of
the parsers defined in parsers.py
"""
import pytest
import pandas.util.testing as tm
from pandas import DataFrame, Index, MultiIndex
from pandas.compat import StringIO
class IndexColTests(object):
    def test_index_col_named(self):
        no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""  # noqa
        h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"  # noqa
        data = h + no_header
        rs = self.read_csv(StringIO(data), index_col='ID')
        xp = self.read_csv(StringIO(data), header=0).set_index('ID')
        tm.assert_frame_equal(rs, xp)
        pytest.raises(ValueError, self.read_csv, StringIO(no_header),
                      index_col='ID')
        data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
        names = ['a', 'b', 'c', 'd', 'message']
        xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
                        'd': [4, 8, 12]},
                       index=Index(['hello', 'world', 'foo'], name='message'))
        rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
        tm.assert_frame_equal(xp, rs)
        assert xp.index.name == rs.index.name
        rs = self.read_csv(StringIO(data), names=names, index_col='message')
        tm.assert_frame_equal(xp, rs)
        assert xp.index.name == rs.index.name
    def test_index_col_is_true(self):
        # see gh-9798
        pytest.raises(ValueError, self.read_csv,
                      StringIO(self.ts_data), index_col=True)
    def test_infer_index_col(self):
        data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
        data = self.read_csv(StringIO(data))
        assert data.index.equals(Index(['foo', 'bar', 'baz']))
    def test_empty_index_col_scenarios(self):
        data = 'x,y,z'
        # None, no index
        index_col, expected = None, DataFrame([], columns=list('xyz')),
        tm.assert_frame_equal(self.read_csv(
            StringIO(data), index_col=index_col), expected)
        # False, no index
        index_col, expected = False, DataFrame([], columns=list('xyz')),
        tm.assert_frame_equal(self.read_csv(
            StringIO(data), index_col=index_col), expected)
        # int, first column
        index_col, expected = 0, DataFrame(
            [], columns=['y', 'z'], index=Index([], name='x'))
        tm.assert_frame_equal(self.read_csv(
            StringIO(data), index_col=index_col), expected)
        # int, not first column
        index_col, expected = 1, DataFrame(
            [], columns=['x', 'z'], index=Index([], name='y'))
        tm.assert_frame_equal(self.read_csv(
            StringIO(data), index_col=index_col), expected)
        # str, first column
        index_col, expected = 'x', DataFrame(
            [], columns=['y', 'z'], index=Index([], name='x'))
        tm.assert_frame_equal(self.read_csv(
            StringIO(data), index_col=index_col), expected)
        # str, not the first column
        index_col, expected = 'y', DataFrame(
            [], columns=['x', 'z'], index=Index([], name='y'))
        tm.assert_frame_equal(self.read_csv(
            StringIO(data), index_col=index_col), expected)
        # list of int
        index_col, expected = [0, 1], DataFrame(
            [], columns=['z'], index=MultiIndex.from_arrays(
                [[]] * 2, names=['x', 'y']))
        tm.assert_frame_equal(self.read_csv(
            StringIO(data), index_col=index_col),
            expected, check_index_type=False)
        # list of str
        index_col = ['x', 'y']
        expected = DataFrame([], columns=['z'],
                             index=MultiIndex.from_arrays(
                                 [[]] * 2, names=['x', 'y']))
        tm.assert_frame_equal(self.read_csv(StringIO(
            data), index_col=index_col),
            expected, check_index_type=False)
        # list of int, reversed sequence
        index_col = [1, 0]
        expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
            [[]] * 2, names=['y', 'x']))
        tm.assert_frame_equal(self.read_csv(
            StringIO(data), index_col=index_col),
            expected, check_index_type=False)
        # list of str, reversed sequence
        index_col = ['y', 'x']
        expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
            [[]] * 2, names=['y', 'x']))
        tm.assert_frame_equal(self.read_csv(StringIO(
            data), index_col=index_col),
            expected, check_index_type=False)
    def test_empty_with_index_col_false(self):
        # see gh-10413
        data = 'x,y'
        result = self.read_csv(StringIO(data), index_col=False)
        expected = DataFrame([], columns=['x', 'y'])
        tm.assert_frame_equal(result, expected)
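# Standalone illustration (not part of the parser test mixin; uses the public
# pandas API directly rather than self.read_csv):
if __name__ == '__main__':
    import pandas as pd
    demo = pd.read_csv(StringIO("ID,val\na,1\nb,2\n"), index_col='ID')
    print(demo.index.tolist())  # ['a', 'b']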
| bsd-3-clause |
| nileracecrew/seaborn | seaborn/distributions.py | 21 | 28328 |
	"""Plotting functions for visualizing distributions."""
from __future__ import division
import numpy as np
from scipy import stats
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import warnings
from six import string_types
try:
    import statsmodels.nonparametric.api as smnp
    _has_statsmodels = True
except ImportError:
    _has_statsmodels = False
from .utils import set_hls_values, iqr, _kde_support
from .palettes import color_palette, blend_palette
from .axisgrid import JointGrid
def _freedman_diaconis_bins(a):
    """Calculate number of hist bins using Freedman-Diaconis rule."""
    # From http://stats.stackexchange.com/questions/798/
    a = np.asarray(a)
    h = 2 * iqr(a) / (len(a) ** (1 / 3))
    # fall back to sqrt(n) bins if the IQR is 0
    if h == 0:
        return np.sqrt(a.size)
    else:
        return np.ceil((a.max() - a.min()) / h)
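# Worked example (illustrative, not from the original source): for a roughly
# standard-normal sample with n = 1000, iqr(a) is about 1.35, so
# h ~= 2 * 1.35 / 1000 ** (1 / 3) ~= 0.27; with a sample range near 6.5 the
# rule yields ceil(6.5 / 0.27) ~= 25 bins, which distplot later caps at 50.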
def distplot(a, bins=None, hist=True, kde=True, rug=False, fit=None,
             hist_kws=None, kde_kws=None, rug_kws=None, fit_kws=None,
             color=None, vertical=False, norm_hist=False, axlabel=None,
             label=None, ax=None):
    """Flexibly plot a univariate distribution of observations.
    This function combines the matplotlib ``hist`` function (with automatic
    calculation of a good default bin size) with the seaborn :func:`kdeplot`
    and :func:`rugplot` functions. It can also fit ``scipy.stats``
    distributions and plot the estimated PDF over the data.
    Parameters
    ----------
    a : Series, 1d-array, or list.
        Observed data. If this is a Series object with a ``name`` attribute,
        the name will be used to label the data axis.
    bins : argument for matplotlib hist(), or None, optional
        Specification of hist bins, or None to use Freedman-Diaconis rule.
    hist : bool, optional
        Whether to plot a (normed) histogram.
    kde : bool, optional
        Whether to plot a gaussian kernel density estimate.
    rug : bool, optional
        Whether to draw a rugplot on the support axis.
    fit : random variable object, optional
        An object with a `fit` method, returning a tuple that can be passed
        to a `pdf` method as positional arguments following a grid of values
        to evaluate the pdf on.
    {hist, kde, rug, fit}_kws : dictionaries, optional
        Keyword arguments for underlying plotting functions.
    color : matplotlib color, optional
        Color to plot everything but the fitted curve in.
    vertical : bool, optional
        If True, observed values are on y-axis.
    norm_hist : bool, optional
        If True, the histogram height shows a density rather than a count.
        This is implied if a KDE or fitted density is plotted.
    axlabel : string, False, or None, optional
        Name for the support axis label. If None, will try to get it
        from a.name; if False, do not set a label.
    label : string, optional
        Legend label for the relevant component of the plot
    ax : matplotlib axis, optional
        if provided, plot on this axis
    Returns
    -------
    ax : matplotlib Axes
        Returns the Axes object with the plot for further tweaking.
    See Also
    --------
    kdeplot : Show a univariate or bivariate distribution with a kernel
              density estimate.
    rugplot : Draw small vertical lines to show each observation in a
              distribution.
    Examples
    --------
    Show a default plot with a kernel density estimate and histogram with bin
    size determined automatically with a reference rule:
    .. plot::
        :context: close-figs
        >>> import seaborn as sns, numpy as np
        >>> sns.set(rc={"figure.figsize": (8, 4)}); np.random.seed(0)
        >>> x = np.random.randn(100)
        >>> ax = sns.distplot(x)
    Use Pandas objects to get an informative axis label:
    .. plot::
        :context: close-figs
        >>> import pandas as pd
        >>> x = pd.Series(x, name="x variable")
        >>> ax = sns.distplot(x)
    Plot the distribution with a kernel density estimate and rug plot:
    .. plot::
        :context: close-figs
        >>> ax = sns.distplot(x, rug=True, hist=False)
    Plot the distribution with a histogram and maximum likelihood gaussian
    distribution fit:
    .. plot::
        :context: close-figs
        >>> from scipy.stats import norm
        >>> ax = sns.distplot(x, fit=norm, kde=False)
    Plot the distribution on the vertical axis:
    .. plot::
        :context: close-figs
        >>> ax = sns.distplot(x, vertical=True)
    Change the color of all the plot elements:
    .. plot::
        :context: close-figs
        >>> sns.set_color_codes()
        >>> ax = sns.distplot(x, color="y")
    Pass specific parameters to the underlying plot functions:
    .. plot::
        :context: close-figs
        >>> ax = sns.distplot(x, rug=True, rug_kws={"color": "g"},
        ...                   kde_kws={"color": "k", "lw": 3, "label": "KDE"},
        ...                   hist_kws={"histtype": "step", "linewidth": 3,
        ...                             "alpha": 1, "color": "g"})
    """
    if ax is None:
        ax = plt.gca()
    # Intelligently label the support axis
    label_ax = bool(axlabel)
    if axlabel is None and hasattr(a, "name"):
        axlabel = a.name
        if axlabel is not None:
            label_ax = True
    # Make a a 1-d array
    a = np.asarray(a).squeeze()
    # Decide if the hist is normed
    norm_hist = norm_hist or kde or (fit is not None)
    # Handle dictionary defaults
    if hist_kws is None:
        hist_kws = dict()
    if kde_kws is None:
        kde_kws = dict()
    if rug_kws is None:
        rug_kws = dict()
    if fit_kws is None:
        fit_kws = dict()
    # Get the color from the current color cycle
    if color is None:
        if vertical:
            line, = ax.plot(0, a.mean())
        else:
            line, = ax.plot(a.mean(), 0)
        color = line.get_color()
        line.remove()
    # Plug the label into the right kwarg dictionary
    if label is not None:
        if hist:
            hist_kws["label"] = label
        elif kde:
            kde_kws["label"] = label
        elif rug:
            rug_kws["label"] = label
        elif fit:
            fit_kws["label"] = label
    if hist:
        if bins is None:
            bins = min(_freedman_diaconis_bins(a), 50)
        hist_kws.setdefault("alpha", 0.4)
        hist_kws.setdefault("normed", norm_hist)
        orientation = "horizontal" if vertical else "vertical"
        hist_color = hist_kws.pop("color", color)
        ax.hist(a, bins, orientation=orientation,
                color=hist_color, **hist_kws)
        if hist_color != color:
            hist_kws["color"] = hist_color
    if kde:
        kde_color = kde_kws.pop("color", color)
        kdeplot(a, vertical=vertical, ax=ax, color=kde_color, **kde_kws)
        if kde_color != color:
            kde_kws["color"] = kde_color
    if rug:
        rug_color = rug_kws.pop("color", color)
        axis = "y" if vertical else "x"
        rugplot(a, axis=axis, ax=ax, color=rug_color, **rug_kws)
        if rug_color != color:
            rug_kws["color"] = rug_color
    if fit is not None:
        fit_color = fit_kws.pop("color", "#282828")
        gridsize = fit_kws.pop("gridsize", 200)
        cut = fit_kws.pop("cut", 3)
        clip = fit_kws.pop("clip", (-np.inf, np.inf))
        bw = stats.gaussian_kde(a).scotts_factor() * a.std(ddof=1)
        x = _kde_support(a, bw, gridsize, cut, clip)
        params = fit.fit(a)
        pdf = lambda x: fit.pdf(x, *params)
        y = pdf(x)
        if vertical:
            x, y = y, x
        ax.plot(x, y, color=fit_color, **fit_kws)
        if fit_color != "#282828":
            fit_kws["color"] = fit_color
    if label_ax:
        if vertical:
            ax.set_ylabel(axlabel)
        else:
            ax.set_xlabel(axlabel)
    return ax
def _univariate_kdeplot(data, shade, vertical, kernel, bw, gridsize, cut,
                        clip, legend, ax, cumulative=False, **kwargs):
    """Plot a univariate kernel density estimate on one of the axes."""
    # Sort out the clipping
    if clip is None:
        clip = (-np.inf, np.inf)
    # Calculate the KDE
    if _has_statsmodels:
        # Prefer using statsmodels for kernel flexibility
        x, y = _statsmodels_univariate_kde(data, kernel, bw,
                                           gridsize, cut, clip,
                                           cumulative=cumulative)
    else:
        # Fall back to scipy if missing statsmodels
        if kernel != "gau":
            kernel = "gau"
            msg = "Kernel other than `gau` requires statsmodels."
            warnings.warn(msg, UserWarning)
        if cumulative:
            raise ImportError("Cumulative distributions are currently"
                              "only implemented in statsmodels."
                              "Please install statsmodels.")
        x, y = _scipy_univariate_kde(data, bw, gridsize, cut, clip)
    # Make sure the density is nonnegative
    y = np.amax(np.c_[np.zeros_like(y), y], axis=1)
    # Flip the data if the plot should be on the y axis
    if vertical:
        x, y = y, x
    # Check if a label was specified in the call
    label = kwargs.pop("label", None)
    # Otherwise check if the data object has a name
    if label is None and hasattr(data, "name"):
        label = data.name
    # Decide if we're going to add a legend
    legend = label is not None and legend
    label = "_nolegend_" if label is None else label
    # Use the active color cycle to find the plot color
    line, = ax.plot(x, y, **kwargs)
    color = line.get_color()
    line.remove()
    kwargs.pop("color", None)
    # Draw the KDE plot and, optionally, shade
    ax.plot(x, y, color=color, label=label, **kwargs)
    alpha = kwargs.get("alpha", 0.25)
    if shade:
        if vertical:
            ax.fill_betweenx(y, 1e-12, x, color=color, alpha=alpha)
        else:
            ax.fill_between(x, 1e-12, y, color=color, alpha=alpha)
    # Draw the legend here
    if legend:
        ax.legend(loc="best")
    return ax
def _statsmodels_univariate_kde(data, kernel, bw, gridsize, cut, clip,
                                cumulative=False):
    """Compute a univariate kernel density estimate using statsmodels."""
    fft = kernel == "gau"
    kde = smnp.KDEUnivariate(data)
    kde.fit(kernel, bw, fft, gridsize=gridsize, cut=cut, clip=clip)
    if cumulative:
        grid, y = kde.support, kde.cdf
    else:
        grid, y = kde.support, kde.density
    return grid, y
def _scipy_univariate_kde(data, bw, gridsize, cut, clip):
    """Compute a univariate kernel density estimate using scipy."""
    try:
        kde = stats.gaussian_kde(data, bw_method=bw)
    except TypeError:
        kde = stats.gaussian_kde(data)
        if bw != "scott":  # scipy default
            msg = ("Ignoring bandwidth choice, "
                   "please upgrade scipy to use a different bandwidth.")
            warnings.warn(msg, UserWarning)
    if isinstance(bw, string_types):
        bw = "scotts" if bw == "scott" else bw
        bw = getattr(kde, "%s_factor" % bw)()
    grid = _kde_support(data, bw, gridsize, cut, clip)
    y = kde(grid)
    return grid, y
def _bivariate_kdeplot(x, y, filled, fill_lowest,
                       kernel, bw, gridsize, cut, clip,
                       axlabel, ax, **kwargs):
    """Plot a joint KDE estimate as a bivariate contour plot."""
    # Determine the clipping
    if clip is None:
        clip = [(-np.inf, np.inf), (-np.inf, np.inf)]
    elif np.ndim(clip) == 1:
        clip = [clip, clip]
    # Calculate the KDE
    if _has_statsmodels:
        xx, yy, z = _statsmodels_bivariate_kde(x, y, bw, gridsize, cut, clip)
    else:
        xx, yy, z = _scipy_bivariate_kde(x, y, bw, gridsize, cut, clip)
    # Plot the contours
    n_levels = kwargs.pop("n_levels", 10)
    cmap = kwargs.get("cmap", "BuGn" if filled else "BuGn_d")
    if isinstance(cmap, string_types):
        if cmap.endswith("_d"):
            pal = ["#333333"]
            pal.extend(color_palette(cmap.replace("_d", "_r"), 2))
            cmap = blend_palette(pal, as_cmap=True)
        else:
            cmap = mpl.cm.get_cmap(cmap)
    kwargs["cmap"] = cmap
    contour_func = ax.contourf if filled else ax.contour
    cset = contour_func(xx, yy, z, n_levels, **kwargs)
    if filled and not fill_lowest:
        cset.collections[0].set_alpha(0)
    kwargs["n_levels"] = n_levels
    # Label the axes
    if hasattr(x, "name") and axlabel:
        ax.set_xlabel(x.name)
    if hasattr(y, "name") and axlabel:
        ax.set_ylabel(y.name)
    return ax
def _statsmodels_bivariate_kde(x, y, bw, gridsize, cut, clip):
    """Compute a bivariate kde using statsmodels."""
    if isinstance(bw, string_types):
        bw_func = getattr(smnp.bandwidths, "bw_" + bw)
        x_bw = bw_func(x)
        y_bw = bw_func(y)
        bw = [x_bw, y_bw]
    elif np.isscalar(bw):
        bw = [bw, bw]
    if isinstance(x, pd.Series):
        x = x.values
    if isinstance(y, pd.Series):
        y = y.values
    kde = smnp.KDEMultivariate([x, y], "cc", bw)
    x_support = _kde_support(x, kde.bw[0], gridsize, cut, clip[0])
    y_support = _kde_support(y, kde.bw[1], gridsize, cut, clip[1])
    xx, yy = np.meshgrid(x_support, y_support)
    z = kde.pdf([xx.ravel(), yy.ravel()]).reshape(xx.shape)
    return xx, yy, z
def _scipy_bivariate_kde(x, y, bw, gridsize, cut, clip):
    """Compute a bivariate kde using scipy."""
    data = np.c_[x, y]
    kde = stats.gaussian_kde(data.T)
    data_std = data.std(axis=0, ddof=1)
    if isinstance(bw, string_types):
        bw = "scotts" if bw == "scott" else bw
        bw_x = getattr(kde, "%s_factor" % bw)() * data_std[0]
        bw_y = getattr(kde, "%s_factor" % bw)() * data_std[1]
    elif np.isscalar(bw):
        bw_x, bw_y = bw, bw
    else:
        msg = ("Cannot specify a different bandwidth for each dimension "
               "with the scipy backend. You should install statsmodels.")
        raise ValueError(msg)
    x_support = _kde_support(data[:, 0], bw_x, gridsize, cut, clip[0])
    y_support = _kde_support(data[:, 1], bw_y, gridsize, cut, clip[1])
    xx, yy = np.meshgrid(x_support, y_support)
    z = kde([xx.ravel(), yy.ravel()]).reshape(xx.shape)
    return xx, yy, z
def kdeplot(data, data2=None, shade=False, vertical=False, kernel="gau",
            bw="scott", gridsize=100, cut=3, clip=None, legend=True,
            cumulative=False, shade_lowest=True, ax=None, **kwargs):
    """Fit and plot a univariate or bivariate kernel density estimate.
    Parameters
    ----------
    data : 1d array-like
        Input data.
    data2: 1d array-like
        Second input data. If present, a bivariate KDE will be estimated.
    shade : bool, optional
        If True, shade in the area under the KDE curve (or draw with filled
        contours when data is bivariate).
    vertical : bool
        If True, density is on x-axis.
    kernel : {'gau' | 'cos' | 'biw' | 'epa' | 'tri' | 'triw' }, optional
        Code for shape of kernel to fit with. Bivariate KDE can only use
        gaussian kernel.
    bw : {'scott' | 'silverman' | scalar | pair of scalars }, optional
        Name of reference method to determine kernel size, scalar factor,
        or scalar for each dimension of the bivariate plot.
    gridsize : int, optional
        Number of discrete points in the evaluation grid.
    cut : scalar, optional
        Draw the estimate to cut * bw from the extreme data points.
    clip : pair of scalars, or pair of pair of scalars, optional
        Lower and upper bounds for datapoints used to fit KDE. Can provide
        a pair of (low, high) bounds for bivariate plots.
    legend : bool, optional
        If True, add a legend or label the axes when possible.
    cumulative : bool
        If True, draw the cumulative distribution estimated by the kde.
    shade_lowest : bool
        If True, shade the lowest contour of a bivariate KDE plot. Not
        relevant when drawing a univariate plot or when ``shade=False``.
        Setting this to ``False`` can be useful when you want multiple
        densities on the same Axes.
    ax : matplotlib axis, optional
        Axis to plot on, otherwise uses current axis.
    kwargs : key, value pairings
        Other keyword arguments are passed to ``plt.plot()`` or
        ``plt.contour{f}`` depending on whether a univariate or bivariate
        plot is being drawn.
    Returns
    -------
    ax : matplotlib Axes
        Axes with plot.
    See Also
    --------
    distplot: Flexibly plot a univariate distribution of observations.
    jointplot: Plot a joint dataset with bivariate and marginal distributions.
    Examples
    --------
    Plot a basic univariate density:
    .. plot::
        :context: close-figs
        >>> import numpy as np; np.random.seed(10)
        >>> import seaborn as sns; sns.set(color_codes=True)
        >>> mean, cov = [0, 2], [(1, .5), (.5, 1)]
        >>> x, y = np.random.multivariate_normal(mean, cov, size=50).T
        >>> ax = sns.kdeplot(x)
    Shade under the density curve and use a different color:
    .. plot::
        :context: close-figs
        >>> ax = sns.kdeplot(x, shade=True, color="r")
    Plot a bivariate density:
    .. plot::
        :context: close-figs
        >>> ax = sns.kdeplot(x, y)
    Use filled contours:
    .. plot::
        :context: close-figs
        >>> ax = sns.kdeplot(x, y, shade=True)
    Use more contour levels and a different color palette:
    .. plot::
        :context: close-figs
        >>> ax = sns.kdeplot(x, y, n_levels=30, cmap="Purples_d")
    Use a narrower bandwidth:
    .. plot::
        :context: close-figs
        >>> ax = sns.kdeplot(x, bw=.15)
    Plot the density on the vertical axis:
    .. plot::
        :context: close-figs
        >>> ax = sns.kdeplot(y, vertical=True)
    Limit the density curve within the range of the data:
    .. plot::
        :context: close-figs
        >>> ax = sns.kdeplot(x, cut=0)
    Plot two shaded bivariate densities:
    .. plot::
        :context: close-figs
        >>> iris = sns.load_dataset("iris")
        >>> setosa = iris.loc[iris.species == "setosa"]
        >>> virginica = iris.loc[iris.species == "virginica"]
        >>> ax = sns.kdeplot(setosa.sepal_width, setosa.sepal_length,
        ...                  cmap="Reds", shade=True, shade_lowest=False)
        >>> ax = sns.kdeplot(virginica.sepal_width, virginica.sepal_length,
        ...                  cmap="Blues", shade=True, shade_lowest=False)
    """
    if ax is None:
        ax = plt.gca()
    data = data.astype(np.float64)
    if data2 is not None:
        data2 = data2.astype(np.float64)
    bivariate = False
    if isinstance(data, np.ndarray) and np.ndim(data) > 1:
        bivariate = True
        x, y = data.T
    elif isinstance(data, pd.DataFrame) and np.ndim(data) > 1:
        bivariate = True
        x = data.iloc[:, 0].values
        y = data.iloc[:, 1].values
    elif data2 is not None:
        bivariate = True
        x = data
        y = data2
    if bivariate and cumulative:
        raise TypeError("Cumulative distribution plots are not"
                        "supported for bivariate distributions.")
    if bivariate:
        ax = _bivariate_kdeplot(x, y, shade, shade_lowest,
                                kernel, bw, gridsize, cut, clip, legend,
                                ax, **kwargs)
    else:
        ax = _univariate_kdeplot(data, shade, vertical, kernel, bw,
                                 gridsize, cut, clip, legend, ax,
                                 cumulative=cumulative, **kwargs)
    return ax
def rugplot(a, height=.05, axis="x", ax=None, **kwargs):
    """Plot datapoints in an array as sticks on an axis.
    Parameters
    ----------
    a : vector
        1D array of observations.
    height : scalar, optional
        Height of ticks as proportion of the axis.
    axis : {'x' | 'y'}, optional
        Axis to draw rugplot on.
    ax : matplotlib axes
        Axes to draw plot into; otherwise grabs current axes.
    kwargs : key, value mappings
        Other keyword arguments are passed to ``axvline`` or ``axhline``.
    Returns
    -------
    ax : matplotlib axes
        The Axes object with the plot on it.
    """
    if ax is None:
        ax = plt.gca()
    a = np.asarray(a)
    vertical = kwargs.pop("vertical", axis == "y")
    func = ax.axhline if vertical else ax.axvline
    kwargs.setdefault("linewidth", 1)
    for pt in a:
        func(pt, 0, height, **kwargs)
    return ax
def jointplot(x, y, data=None, kind="scatter", stat_func=stats.pearsonr,
              color=None, size=6, ratio=5, space=.2,
              dropna=True, xlim=None, ylim=None,
              joint_kws=None, marginal_kws=None, annot_kws=None, **kwargs):
    """Draw a plot of two variables with bivariate and univariate graphs.
    This function provides a convenient interface to the :class:`JointGrid`
    class, with several canned plot kinds. This is intended to be a fairly
    lightweight wrapper; if you need more flexibility, you should use
    :class:`JointGrid` directly.
    Parameters
    ----------
    x, y : strings or vectors
        Data or names of variables in ``data``.
    data : DataFrame, optional
        DataFrame when ``x`` and ``y`` are variable names.
    kind : { "scatter" | "reg" | "resid" | "kde" | "hex" }, optional
        Kind of plot to draw.
    stat_func : callable or None
        Function used to calculate a statistic about the relationship and
        annotate the plot. Should map `x` and `y` either to a single value
        or to a (value, p) tuple. Set to ``None`` if you don't want to
        annotate the plot.
    color : matplotlib color, optional
        Color used for the plot elements.
    size : numeric, optional
        Size of the figure (it will be square).
    ratio : numeric, optional
        Ratio of joint axes size to marginal axes height.
    space : numeric, optional
        Space between the joint and marginal axes
    dropna : bool, optional
        If True, remove observations that are missing from ``x`` and ``y``.
    {x, y}lim : two-tuples, optional
        Axis limits to set before plotting.
    {joint, marginal, annot}_kws : dicts
        Additional keyword arguments for the plot components.
    kwargs : key, value pairs
        Additional keyword arguments are passed to the function used to
        draw the plot on the joint Axes, superseding items in the
        ``joint_kws`` dictionary.
    Returns
    -------
    grid : :class:`JointGrid`
        :class:`JointGrid` object with the plot on it.
    See Also
    --------
    JointGrid : The Grid class used for drawing this plot. Use it directly if
                you need more flexibility.
    Examples
    --------
    Draw a scatterplot with marginal histograms:
    .. plot::
        :context: close-figs
        >>> import numpy as np, pandas as pd; np.random.seed(0)
        >>> import seaborn as sns; sns.set(style="white", color_codes=True)
        >>> tips = sns.load_dataset("tips")
        >>> g = sns.jointplot(x="total_bill", y="tip", data=tips)
    Add regression and kernel density fits:
    .. plot::
        :context: close-figs
        >>> g = sns.jointplot("total_bill", "tip", data=tips, kind="reg")
    Replace the scatterplot with a joint histogram using hexagonal bins:
    .. plot::
        :context: close-figs
        >>> g = sns.jointplot("total_bill", "tip", data=tips, kind="hex")
    Replace the scatterplots and histograms with density estimates and align
    the marginal Axes tightly with the joint Axes:
    .. plot::
        :context: close-figs
        >>> iris = sns.load_dataset("iris")
        >>> g = sns.jointplot("sepal_width", "petal_length", data=iris,
        ...                   kind="kde", space=0, color="g")
    Use a different statistic for the annotation:
    .. plot::
        :context: close-figs
        >>> from scipy.stats import spearmanr
        >>> g = sns.jointplot("size", "total_bill", data=tips,
        ...                   stat_func=spearmanr, color="m")
    Draw a scatterplot, then add a joint density estimate:
    .. plot::
        :context: close-figs
        >>> g = (sns.jointplot("sepal_length", "sepal_width",
        ...                    data=iris, color="k")
        ...         .plot_joint(sns.kdeplot, zorder=0, n_levels=6))
    Pass vectors in directly without using Pandas, then name the axes:
    .. plot::
        :context: close-figs
        >>> x, y = np.random.randn(2, 300)
        >>> g = (sns.jointplot(x, y, kind="hex", stat_func=None)
        ...         .set_axis_labels("x", "y"))
    Draw a smaller figure with more space devoted to the marginal plots:
    .. plot::
        :context: close-figs
        >>> g = sns.jointplot("total_bill", "tip", data=tips,
        ...                   size=5, ratio=3, color="g")
    Pass keyword arguments down to the underlying plots:
    .. plot::
        :context: close-figs
        >>> g = sns.jointplot("petal_length", "sepal_length", data=iris,
        ...                   marginal_kws=dict(bins=15, rug=True),
        ...                   annot_kws=dict(stat="r"),
        ...                   s=40, edgecolor="w", linewidth=1)
    """
    # Set up empty default kwarg dicts
    if joint_kws is None:
        joint_kws = {}
    joint_kws.update(kwargs)
    if marginal_kws is None:
        marginal_kws = {}
    if annot_kws is None:
        annot_kws = {}
    # Make a colormap based off the plot color
    if color is None:
        color = color_palette()[0]
    color_rgb = mpl.colors.colorConverter.to_rgb(color)
    colors = [set_hls_values(color_rgb, l=l) for l in np.linspace(1, 0, 12)]
    cmap = blend_palette(colors, as_cmap=True)
    # Initialize the JointGrid object
    grid = JointGrid(x, y, data, dropna=dropna,
                     size=size, ratio=ratio, space=space,
                     xlim=xlim, ylim=ylim)
    # Plot the data using the grid
    if kind == "scatter":
        joint_kws.setdefault("color", color)
        grid.plot_joint(plt.scatter, **joint_kws)
        marginal_kws.setdefault("kde", False)
        marginal_kws.setdefault("color", color)
        grid.plot_marginals(distplot, **marginal_kws)
    elif kind.startswith("hex"):
        x_bins = _freedman_diaconis_bins(grid.x)
        y_bins = _freedman_diaconis_bins(grid.y)
        gridsize = int(np.mean([x_bins, y_bins]))
        joint_kws.setdefault("gridsize", gridsize)
        joint_kws.setdefault("cmap", cmap)
        grid.plot_joint(plt.hexbin, **joint_kws)
        marginal_kws.setdefault("kde", False)
        marginal_kws.setdefault("color", color)
        grid.plot_marginals(distplot, **marginal_kws)
    elif kind.startswith("kde"):
        joint_kws.setdefault("shade", True)
        joint_kws.setdefault("cmap", cmap)
        grid.plot_joint(kdeplot, **joint_kws)
        marginal_kws.setdefault("shade", True)
        marginal_kws.setdefault("color", color)
        grid.plot_marginals(kdeplot, **marginal_kws)
    elif kind.startswith("reg"):
        from .linearmodels import regplot
        marginal_kws.setdefault("color", color)
        grid.plot_marginals(distplot, **marginal_kws)
        joint_kws.setdefault("color", color)
        grid.plot_joint(regplot, **joint_kws)
    elif kind.startswith("resid"):
        from .linearmodels import residplot
        joint_kws.setdefault("color", color)
        grid.plot_joint(residplot, **joint_kws)
        x, y = grid.ax_joint.collections[0].get_offsets().T
        marginal_kws.setdefault("color", color)
        marginal_kws.setdefault("kde", False)
        distplot(x, ax=grid.ax_marg_x, **marginal_kws)
        distplot(y, vertical=True, fit=stats.norm, ax=grid.ax_marg_y,
                 **marginal_kws)
        stat_func = None
    else:
        msg = "kind must be either 'scatter', 'reg', 'resid', 'kde', or 'hex'"
        raise ValueError(msg)
    if stat_func is not None:
        grid.annotate(stat_func, **annot_kws)
    return grid
| bsd-3-clause |
| mattgiguere/scikit-learn | examples/feature_selection/plot_feature_selection.py | 249 | 2827 |
	"""
===============================
Univariate Feature Selection
===============================
An example showing univariate feature selection.
Noisy (non informative) features are added to the iris data and
univariate feature selection is applied. For each feature, we plot the
p-values for the univariate feature selection and the corresponding
weights of an SVM. We can see that univariate feature selection
selects the informative features and that these have larger SVM weights.
In the total set of features, only the first 4 are significant. We
can see that they have the highest score with univariate feature
selection. The SVM assigns a large weight to one of these features, but also
selects many of the non-informative features. Applying univariate feature
selection before the SVM increases the SVM weight attributed to the
significant features, and thus improves classification.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
from sklearn.feature_selection import SelectPercentile, f_classif
###############################################################################
# import some data to play with
# The iris dataset
iris = datasets.load_iris()
# Some noisy data not correlated
E = np.random.uniform(0, 0.1, size=(len(iris.data), 20))
# Add the noisy data to the informative features
X = np.hstack((iris.data, E))
y = iris.target
###############################################################################
plt.figure(1)
plt.clf()
X_indices = np.arange(X.shape[-1])
###############################################################################
# Univariate feature selection with F-test for feature scoring
# We use the default selection function: the 10% most significant features
selector = SelectPercentile(f_classif, percentile=10)
selector.fit(X, y)
scores = -np.log10(selector.pvalues_)
scores /= scores.max()
plt.bar(X_indices - .45, scores, width=.2,
        label=r'Univariate score ($-Log(p_{value})$)', color='g')
###############################################################################
# Compare to the weights of an SVM
clf = svm.SVC(kernel='linear')
clf.fit(X, y)
svm_weights = (clf.coef_ ** 2).sum(axis=0)
svm_weights /= svm_weights.max()
plt.bar(X_indices - .25, svm_weights, width=.2, label='SVM weight', color='r')
clf_selected = svm.SVC(kernel='linear')
clf_selected.fit(selector.transform(X), y)
svm_weights_selected = (clf_selected.coef_ ** 2).sum(axis=0)
svm_weights_selected /= svm_weights_selected.max()
plt.bar(X_indices[selector.get_support()] - .05, svm_weights_selected,
        width=.2, label='SVM weights after selection', color='b')
plt.title("Comparing feature selection")
plt.xlabel('Feature number')
plt.yticks(())
plt.axis('tight')
plt.legend(loc='upper right')
plt.show()
| bsd-3-clause |
| antoan2/incubator-mxnet | example/ssd/detect/detector.py | 30 | 7112 |
	# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import mxnet as mx
import numpy as np
from timeit import default_timer as timer
from dataset.testdb import TestDB
from dataset.iterator import DetIter
class Detector(object):
    """
    SSD detector which holds a detection network and wraps the detection API
    Parameters:
    ----------
    symbol : mx.Symbol
        detection network Symbol
    model_prefix : str
        name prefix of trained model
    epoch : int
        load epoch of trained model
    data_shape : int
        input data resize shape
    mean_pixels : tuple of float
        (mean_r, mean_g, mean_b)
    batch_size : int
        batch size used when running detection
    ctx : mx.ctx
        device to use, if None, use mx.cpu() as default context
    """
    def __init__(self, symbol, model_prefix, epoch, data_shape, mean_pixels, \
                 batch_size=1, ctx=None):
        self.ctx = ctx
        if self.ctx is None:
            self.ctx = mx.cpu()
        load_symbol, args, auxs = mx.model.load_checkpoint(model_prefix, epoch)
        if symbol is None:
            symbol = load_symbol
        self.mod = mx.mod.Module(symbol, label_names=None, context=self.ctx)
        if not isinstance(data_shape, tuple):
            data_shape = (data_shape, data_shape)
        self.data_shape = data_shape
        self.mod.bind(data_shapes=[('data', (batch_size, 3, data_shape[0], data_shape[1]))])
        self.mod.set_params(args, auxs)
        self.mean_pixels = mean_pixels
    def detect(self, det_iter, show_timer=False):
        """
        detect all images in iterator
        Parameters:
        ----------
        det_iter : DetIter
            iterator for all testing images
        show_timer : Boolean
            whether to print out detection exec time
        Returns:
        ----------
        list of detection results
        """
        num_images = det_iter._size
        if not isinstance(det_iter, mx.io.PrefetchingIter):
            det_iter = mx.io.PrefetchingIter(det_iter)
        start = timer()
        detections = self.mod.predict(det_iter).asnumpy()
        time_elapsed = timer() - start
        if show_timer:
            print("Detection time for {} images: {:.4f} sec".format(
                num_images, time_elapsed))
        result = []
        for i in range(detections.shape[0]):
            det = detections[i, :, :]
            res = det[np.where(det[:, 0] >= 0)[0]]
            result.append(res)
        return result
    def im_detect(self, im_list, root_dir=None, extension=None, show_timer=False):
        """
        wrapper for detecting multiple images
        Parameters:
        ----------
        im_list : str or list of str
            image path or list of image paths
        root_dir : str
            directory of input images, optional if image path already
            has full directory information
        extension : str
            image extension, e.g. ".jpg", optional
        Returns:
        ----------
        list of detection results in format [det0, det1...], det is in
        format np.array([id, score, xmin, ymin, xmax, ymax]...)
        """
        test_db = TestDB(im_list, root_dir=root_dir, extension=extension)
        test_iter = DetIter(test_db, 1, self.data_shape, self.mean_pixels,
                            is_train=False)
        return self.detect(test_iter, show_timer)
    def visualize_detection(self, img, dets, classes=[], thresh=0.6):
        """
        visualize detections in one image
        Parameters:
        ----------
        img : numpy.array
            image, in bgr format
        dets : numpy.array
            ssd detections, numpy.array([[id, score, x1, y1, x2, y2]...])
            each row is one object
        classes : tuple or list of str
            class names
        thresh : float
            score threshold
        """
        import matplotlib.pyplot as plt
        import random
        plt.imshow(img)
        height = img.shape[0]
        width = img.shape[1]
        colors = dict()
        for i in range(dets.shape[0]):
            cls_id = int(dets[i, 0])
            if cls_id >= 0:
                score = dets[i, 1]
                if score > thresh:
                    if cls_id not in colors:
                        colors[cls_id] = (random.random(), random.random(), random.random())
                    xmin = int(dets[i, 2] * width)
                    ymin = int(dets[i, 3] * height)
                    xmax = int(dets[i, 4] * width)
                    ymax = int(dets[i, 5] * height)
                    rect = plt.Rectangle((xmin, ymin), xmax - xmin,
                                         ymax - ymin, fill=False,
                                         edgecolor=colors[cls_id],
                                         linewidth=3.5)
                    plt.gca().add_patch(rect)
                    class_name = str(cls_id)
                    if classes and len(classes) > cls_id:
                        class_name = classes[cls_id]
                    plt.gca().text(xmin, ymin - 2,
                                    '{:s} {:.3f}'.format(class_name, score),
                                    bbox=dict(facecolor=colors[cls_id], alpha=0.5),
                                    fontsize=12, color='white')
        plt.show()
    def detect_and_visualize(self, im_list, root_dir=None, extension=None,
                             classes=[], thresh=0.6, show_timer=False):
        """
        wrapper for im_detect and visualize_detection
        Parameters:
        ----------
        im_list : list of str or str
            image path or list of image paths
        root_dir : str or None
            directory of input images, optional if image path already
            has full directory information
        extension : str or None
            image extension, e.g. ".jpg", optional
        Returns:
        ----------
        None, the detections are displayed with matplotlib
        """
        import cv2
        dets = self.im_detect(im_list, root_dir, extension, show_timer=show_timer)
        if not isinstance(im_list, list):
            im_list = [im_list]
        assert len(dets) == len(im_list)
        for k, det in enumerate(dets):
            img = cv2.imread(im_list[k])
            img[:, :, (0, 1, 2)] = img[:, :, (2, 1, 0)]
            self.visualize_detection(img, det, classes, thresh)
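# ----------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original module).
# The checkpoint prefix, epoch, class names and image path below are
# placeholders and do not refer to real files shipped with this example.
if __name__ == '__main__':
    detector = Detector(None, 'model/ssd_300', 0, data_shape=300,
                        mean_pixels=(123, 117, 104), ctx=mx.cpu())
    # run detection on a single image and draw the boxes above the threshold
    detector.detect_and_visualize('street.jpg', classes=['person', 'car'],
                                  thresh=0.5, show_timer=True)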
 | 
	apache-2.0 | 
| 
	xray/xray | 
	xarray/tests/test_accessor_str.py | 
	1 | 
	25563 | 
	# Tests for the `str` accessor are derived from the original
# pandas string accessor tests.
# For reference, here is a copy of the pandas copyright notice:
# (c) 2011-2012, Lambda Foundry, Inc. and PyData Development Team
# All rights reserved.
# Copyright (c) 2008-2011 AQR Capital Management, LLC
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#     * Redistributions of source code must retain the above copyright
#        notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
#        copyright notice, this list of conditions and the following
#        disclaimer in the documentation and/or other materials provided
#        with the distribution.
#     * Neither the name of the copyright holder nor the names of any
#        contributors may be used to endorse or promote products derived
#        from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
import numpy as np
import pytest
import xarray as xr
from . import assert_equal, requires_dask
@pytest.fixture(params=[np.str_, np.bytes_])
def dtype(request):
    return request.param
@requires_dask
def test_dask():
    import dask.array as da
    arr = da.from_array(["a", "b", "c"], chunks=-1)
    xarr = xr.DataArray(arr)
    result = xarr.str.len().compute()
    expected = xr.DataArray([1, 1, 1])
    assert_equal(result, expected)
def test_count(dtype):
    values = xr.DataArray(["foo", "foofoo", "foooofooofommmfoo"]).astype(dtype)
    result = values.str.count("f[o]+")
    expected = xr.DataArray([1, 2, 4])
    assert_equal(result, expected)
def test_contains(dtype):
    values = xr.DataArray(["Foo", "xYz", "fOOomMm__fOo", "MMM_"]).astype(dtype)
    # case insensitive using regex
    result = values.str.contains("FOO|mmm", case=False)
    expected = xr.DataArray([True, False, True, True])
    assert_equal(result, expected)
    # case insensitive without regex
    result = values.str.contains("foo", regex=False, case=False)
    expected = xr.DataArray([True, False, True, False])
    assert_equal(result, expected)
def test_starts_ends_with(dtype):
    values = xr.DataArray(["om", "foo_nom", "nom", "bar_foo", "foo"]).astype(dtype)
    result = values.str.startswith("foo")
    expected = xr.DataArray([False, True, False, False, True])
    assert_equal(result, expected)
    result = values.str.endswith("foo")
    expected = xr.DataArray([False, False, False, True, True])
    assert_equal(result, expected)
def test_case(dtype):
    da = xr.DataArray(["SOme word"]).astype(dtype)
    capitalized = xr.DataArray(["Some word"]).astype(dtype)
    lowered = xr.DataArray(["some word"]).astype(dtype)
    swapped = xr.DataArray(["soME WORD"]).astype(dtype)
    titled = xr.DataArray(["Some Word"]).astype(dtype)
    uppered = xr.DataArray(["SOME WORD"]).astype(dtype)
    assert_equal(da.str.capitalize(), capitalized)
    assert_equal(da.str.lower(), lowered)
    assert_equal(da.str.swapcase(), swapped)
    assert_equal(da.str.title(), titled)
    assert_equal(da.str.upper(), uppered)
def test_replace(dtype):
    values = xr.DataArray(["fooBAD__barBAD"]).astype(dtype)
    result = values.str.replace("BAD[_]*", "")
    expected = xr.DataArray(["foobar"]).astype(dtype)
    assert_equal(result, expected)
    result = values.str.replace("BAD[_]*", "", n=1)
    expected = xr.DataArray(["foobarBAD"]).astype(dtype)
    assert_equal(result, expected)
    s = xr.DataArray(["A", "B", "C", "Aaba", "Baca", "", "CABA", "dog", "cat"]).astype(
        dtype
    )
    result = s.str.replace("A", "YYY")
    expected = xr.DataArray(
        ["YYY", "B", "C", "YYYaba", "Baca", "", "CYYYBYYY", "dog", "cat"]
    ).astype(dtype)
    assert_equal(result, expected)
    result = s.str.replace("A", "YYY", case=False)
    expected = xr.DataArray(
        ["YYY", "B", "C", "YYYYYYbYYY", "BYYYcYYY", "", "CYYYBYYY", "dog", "cYYYt"]
    ).astype(dtype)
    assert_equal(result, expected)
    result = s.str.replace("^.a|dog", "XX-XX ", case=False)
    expected = xr.DataArray(
        ["A", "B", "C", "XX-XX ba", "XX-XX ca", "", "XX-XX BA", "XX-XX ", "XX-XX t"]
    ).astype(dtype)
    assert_equal(result, expected)
def test_replace_callable():
    values = xr.DataArray(["fooBAD__barBAD"])
    # test with callable
    repl = lambda m: m.group(0).swapcase()
    result = values.str.replace("[a-z][A-Z]{2}", repl, n=2)
    exp = xr.DataArray(["foObaD__baRbaD"])
    assert_equal(result, exp)
    # test regex named groups
    values = xr.DataArray(["Foo Bar Baz"])
    pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
    repl = lambda m: m.group("middle").swapcase()
    result = values.str.replace(pat, repl)
    exp = xr.DataArray(["bAR"])
    assert_equal(result, exp)
def test_replace_unicode():
    # flags + unicode
    values = xr.DataArray([b"abcd,\xc3\xa0".decode("utf-8")])
    expected = xr.DataArray([b"abcd, \xc3\xa0".decode("utf-8")])
    pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
    result = values.str.replace(pat, ", ")
    assert_equal(result, expected)
def test_replace_compiled_regex(dtype):
    values = xr.DataArray(["fooBAD__barBAD"]).astype(dtype)
    # test with compiled regex
    pat = re.compile(dtype("BAD[_]*"))
    result = values.str.replace(pat, "")
    expected = xr.DataArray(["foobar"]).astype(dtype)
    assert_equal(result, expected)
    result = values.str.replace(pat, "", n=1)
    expected = xr.DataArray(["foobarBAD"]).astype(dtype)
    assert_equal(result, expected)
    # case and flags provided to str.replace will have no effect
    # and will produce warnings
    values = xr.DataArray(["fooBAD__barBAD__bad"]).astype(dtype)
    pat = re.compile(dtype("BAD[_]*"))
    with pytest.raises(ValueError, match="case and flags cannot be"):
        result = values.str.replace(pat, "", flags=re.IGNORECASE)
    with pytest.raises(ValueError, match="case and flags cannot be"):
        result = values.str.replace(pat, "", case=False)
    with pytest.raises(ValueError, match="case and flags cannot be"):
        result = values.str.replace(pat, "", case=True)
    # test with callable
    values = xr.DataArray(["fooBAD__barBAD"]).astype(dtype)
    repl = lambda m: m.group(0).swapcase()
    pat = re.compile(dtype("[a-z][A-Z]{2}"))
    result = values.str.replace(pat, repl, n=2)
    expected = xr.DataArray(["foObaD__baRbaD"]).astype(dtype)
    assert_equal(result, expected)
def test_replace_literal(dtype):
    # GH16808 literal replace (regex=False vs regex=True)
    values = xr.DataArray(["f.o", "foo"]).astype(dtype)
    expected = xr.DataArray(["bao", "bao"]).astype(dtype)
    result = values.str.replace("f.", "ba")
    assert_equal(result, expected)
    expected = xr.DataArray(["bao", "foo"]).astype(dtype)
    result = values.str.replace("f.", "ba", regex=False)
    assert_equal(result, expected)
    # Cannot do a literal replace if given a callable repl or compiled
    # pattern
    callable_repl = lambda m: m.group(0).swapcase()
    compiled_pat = re.compile("[a-z][A-Z]{2}")
    msg = "Cannot use a callable replacement when regex=False"
    with pytest.raises(ValueError, match=msg):
        values.str.replace("abc", callable_repl, regex=False)
    msg = "Cannot use a compiled regex as replacement pattern with regex=False"
    with pytest.raises(ValueError, match=msg):
        values.str.replace(compiled_pat, "", regex=False)
def test_repeat(dtype):
    values = xr.DataArray(["a", "b", "c", "d"]).astype(dtype)
    result = values.str.repeat(3)
    expected = xr.DataArray(["aaa", "bbb", "ccc", "ddd"]).astype(dtype)
    assert_equal(result, expected)
def test_match(dtype):
    # New match behavior introduced in 0.13
    values = xr.DataArray(["fooBAD__barBAD", "foo"]).astype(dtype)
    result = values.str.match(".*(BAD[_]+).*(BAD)")
    expected = xr.DataArray([True, False])
    assert_equal(result, expected)
    values = xr.DataArray(["fooBAD__barBAD", "foo"]).astype(dtype)
    result = values.str.match(".*BAD[_]+.*BAD")
    expected = xr.DataArray([True, False])
    assert_equal(result, expected)
def test_empty_str_methods():
    empty = xr.DataArray(np.empty(shape=(0,), dtype="U"))
    empty_str = empty
    empty_int = xr.DataArray(np.empty(shape=(0,), dtype=int))
    empty_bool = xr.DataArray(np.empty(shape=(0,), dtype=bool))
    empty_bytes = xr.DataArray(np.empty(shape=(0,), dtype="S"))
    assert_equal(empty_str, empty.str.title())
    assert_equal(empty_int, empty.str.count("a"))
    assert_equal(empty_bool, empty.str.contains("a"))
    assert_equal(empty_bool, empty.str.startswith("a"))
    assert_equal(empty_bool, empty.str.endswith("a"))
    assert_equal(empty_str, empty.str.lower())
    assert_equal(empty_str, empty.str.upper())
    assert_equal(empty_str, empty.str.replace("a", "b"))
    assert_equal(empty_str, empty.str.repeat(3))
    assert_equal(empty_bool, empty.str.match("^a"))
    assert_equal(empty_int, empty.str.len())
    assert_equal(empty_int, empty.str.find("a"))
    assert_equal(empty_int, empty.str.rfind("a"))
    assert_equal(empty_str, empty.str.pad(42))
    assert_equal(empty_str, empty.str.center(42))
    assert_equal(empty_str, empty.str.slice(stop=1))
    assert_equal(empty_str, empty.str.slice(step=1))
    assert_equal(empty_str, empty.str.strip())
    assert_equal(empty_str, empty.str.lstrip())
    assert_equal(empty_str, empty.str.rstrip())
    assert_equal(empty_str, empty.str.wrap(42))
    assert_equal(empty_str, empty.str.get(0))
    assert_equal(empty_str, empty_bytes.str.decode("ascii"))
    assert_equal(empty_bytes, empty.str.encode("ascii"))
    assert_equal(empty_str, empty.str.isalnum())
    assert_equal(empty_str, empty.str.isalpha())
    assert_equal(empty_str, empty.str.isdigit())
    assert_equal(empty_str, empty.str.isspace())
    assert_equal(empty_str, empty.str.islower())
    assert_equal(empty_str, empty.str.isupper())
    assert_equal(empty_str, empty.str.istitle())
    assert_equal(empty_str, empty.str.isnumeric())
    assert_equal(empty_str, empty.str.isdecimal())
    assert_equal(empty_str, empty.str.capitalize())
    assert_equal(empty_str, empty.str.swapcase())
    table = str.maketrans("a", "b")
    assert_equal(empty_str, empty.str.translate(table))
def test_ismethods(dtype):
    values = ["A", "b", "Xy", "4", "3A", "", "TT", "55", "-", "  "]
    str_s = xr.DataArray(values).astype(dtype)
    alnum_e = [True, True, True, True, True, False, True, True, False, False]
    alpha_e = [True, True, True, False, False, False, True, False, False, False]
    digit_e = [False, False, False, True, False, False, False, True, False, False]
    space_e = [False, False, False, False, False, False, False, False, False, True]
    lower_e = [False, True, False, False, False, False, False, False, False, False]
    upper_e = [True, False, False, False, True, False, True, False, False, False]
    title_e = [True, False, True, False, True, False, False, False, False, False]
    assert_equal(str_s.str.isalnum(), xr.DataArray(alnum_e))
    assert_equal(str_s.str.isalpha(), xr.DataArray(alpha_e))
    assert_equal(str_s.str.isdigit(), xr.DataArray(digit_e))
    assert_equal(str_s.str.isspace(), xr.DataArray(space_e))
    assert_equal(str_s.str.islower(), xr.DataArray(lower_e))
    assert_equal(str_s.str.isupper(), xr.DataArray(upper_e))
    assert_equal(str_s.str.istitle(), xr.DataArray(title_e))
def test_isnumeric():
    # 0x00bc: ¼ VULGAR FRACTION ONE QUARTER
    # 0x2605: ★ not number
    # 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
    # 0xFF13: ３ Em 3
    values = ["A", "3", "¼", "★", "፸", "３", "four"]
    s = xr.DataArray(values)
    numeric_e = [False, True, True, False, True, True, False]
    decimal_e = [False, True, False, False, False, True, False]
    assert_equal(s.str.isnumeric(), xr.DataArray(numeric_e))
    assert_equal(s.str.isdecimal(), xr.DataArray(decimal_e))
def test_len(dtype):
    values = ["foo", "fooo", "fooooo", "fooooooo"]
    result = xr.DataArray(values).astype(dtype).str.len()
    expected = xr.DataArray([len(x) for x in values])
    assert_equal(result, expected)
def test_find(dtype):
    values = xr.DataArray(["ABCDEFG", "BCDEFEF", "DEFGHIJEF", "EFGHEF", "XXX"])
    values = values.astype(dtype)
    result = values.str.find("EF")
    assert_equal(result, xr.DataArray([4, 3, 1, 0, -1]))
    expected = xr.DataArray([v.find(dtype("EF")) for v in values.values])
    assert_equal(result, expected)
    result = values.str.rfind("EF")
    assert_equal(result, xr.DataArray([4, 5, 7, 4, -1]))
    expected = xr.DataArray([v.rfind(dtype("EF")) for v in values.values])
    assert_equal(result, expected)
    result = values.str.find("EF", 3)
    assert_equal(result, xr.DataArray([4, 3, 7, 4, -1]))
    expected = xr.DataArray([v.find(dtype("EF"), 3) for v in values.values])
    assert_equal(result, expected)
    result = values.str.rfind("EF", 3)
    assert_equal(result, xr.DataArray([4, 5, 7, 4, -1]))
    expected = xr.DataArray([v.rfind(dtype("EF"), 3) for v in values.values])
    assert_equal(result, expected)
    result = values.str.find("EF", 3, 6)
    assert_equal(result, xr.DataArray([4, 3, -1, 4, -1]))
    expected = xr.DataArray([v.find(dtype("EF"), 3, 6) for v in values.values])
    assert_equal(result, expected)
    result = values.str.rfind("EF", 3, 6)
    assert_equal(result, xr.DataArray([4, 3, -1, 4, -1]))
    xp = xr.DataArray([v.rfind(dtype("EF"), 3, 6) for v in values.values])
    assert_equal(result, xp)
def test_index(dtype):
    s = xr.DataArray(["ABCDEFG", "BCDEFEF", "DEFGHIJEF", "EFGHEF"]).astype(dtype)
    result = s.str.index("EF")
    assert_equal(result, xr.DataArray([4, 3, 1, 0]))
    result = s.str.rindex("EF")
    assert_equal(result, xr.DataArray([4, 5, 7, 4]))
    result = s.str.index("EF", 3)
    assert_equal(result, xr.DataArray([4, 3, 7, 4]))
    result = s.str.rindex("EF", 3)
    assert_equal(result, xr.DataArray([4, 5, 7, 4]))
    result = s.str.index("E", 4, 8)
    assert_equal(result, xr.DataArray([4, 5, 7, 4]))
    result = s.str.rindex("E", 0, 5)
    assert_equal(result, xr.DataArray([4, 3, 1, 4]))
    with pytest.raises(ValueError):
        result = s.str.index("DE")
def test_pad(dtype):
    values = xr.DataArray(["a", "b", "c", "eeeee"]).astype(dtype)
    result = values.str.pad(5, side="left")
    expected = xr.DataArray(["    a", "    b", "    c", "eeeee"]).astype(dtype)
    assert_equal(result, expected)
    result = values.str.pad(5, side="right")
    expected = xr.DataArray(["a    ", "b    ", "c    ", "eeeee"]).astype(dtype)
    assert_equal(result, expected)
    result = values.str.pad(5, side="both")
    expected = xr.DataArray(["  a  ", "  b  ", "  c  ", "eeeee"]).astype(dtype)
    assert_equal(result, expected)
def test_pad_fillchar(dtype):
    values = xr.DataArray(["a", "b", "c", "eeeee"]).astype(dtype)
    result = values.str.pad(5, side="left", fillchar="X")
    expected = xr.DataArray(["XXXXa", "XXXXb", "XXXXc", "eeeee"]).astype(dtype)
    assert_equal(result, expected)
    result = values.str.pad(5, side="right", fillchar="X")
    expected = xr.DataArray(["aXXXX", "bXXXX", "cXXXX", "eeeee"]).astype(dtype)
    assert_equal(result, expected)
    result = values.str.pad(5, side="both", fillchar="X")
    expected = xr.DataArray(["XXaXX", "XXbXX", "XXcXX", "eeeee"]).astype(dtype)
    assert_equal(result, expected)
    msg = "fillchar must be a character, not str"
    with pytest.raises(TypeError, match=msg):
        result = values.str.pad(5, fillchar="XY")
def test_translate():
    values = xr.DataArray(["abcdefg", "abcc", "cdddfg", "cdefggg"])
    table = str.maketrans("abc", "cde")
    result = values.str.translate(table)
    expected = xr.DataArray(["cdedefg", "cdee", "edddfg", "edefggg"])
    assert_equal(result, expected)
def test_center_ljust_rjust(dtype):
    values = xr.DataArray(["a", "b", "c", "eeeee"]).astype(dtype)
    result = values.str.center(5)
    expected = xr.DataArray(["  a  ", "  b  ", "  c  ", "eeeee"]).astype(dtype)
    assert_equal(result, expected)
    result = values.str.ljust(5)
    expected = xr.DataArray(["a    ", "b    ", "c    ", "eeeee"]).astype(dtype)
    assert_equal(result, expected)
    result = values.str.rjust(5)
    expected = xr.DataArray(["    a", "    b", "    c", "eeeee"]).astype(dtype)
    assert_equal(result, expected)
def test_center_ljust_rjust_fillchar(dtype):
    values = xr.DataArray(["a", "bb", "cccc", "ddddd", "eeeeee"]).astype(dtype)
    result = values.str.center(5, fillchar="X")
    expected = xr.DataArray(["XXaXX", "XXbbX", "Xcccc", "ddddd", "eeeeee"])
    assert_equal(result, expected.astype(dtype))
    result = values.str.ljust(5, fillchar="X")
    expected = xr.DataArray(["aXXXX", "bbXXX", "ccccX", "ddddd", "eeeeee"])
    assert_equal(result, expected.astype(dtype))
    result = values.str.rjust(5, fillchar="X")
    expected = xr.DataArray(["XXXXa", "XXXbb", "Xcccc", "ddddd", "eeeeee"])
    assert_equal(result, expected.astype(dtype))
    # If fillchar is not a character, normal str raises TypeError
    # 'aaa'.ljust(5, 'XY')
    # TypeError: must be char, not str
    template = "fillchar must be a character, not {dtype}"
    with pytest.raises(TypeError, match=template.format(dtype="str")):
        values.str.center(5, fillchar="XY")
    with pytest.raises(TypeError, match=template.format(dtype="str")):
        values.str.ljust(5, fillchar="XY")
    with pytest.raises(TypeError, match=template.format(dtype="str")):
        values.str.rjust(5, fillchar="XY")
def test_zfill(dtype):
    values = xr.DataArray(["1", "22", "aaa", "333", "45678"]).astype(dtype)
    result = values.str.zfill(5)
    expected = xr.DataArray(["00001", "00022", "00aaa", "00333", "45678"])
    assert_equal(result, expected.astype(dtype))
    result = values.str.zfill(3)
    expected = xr.DataArray(["001", "022", "aaa", "333", "45678"])
    assert_equal(result, expected.astype(dtype))
def test_slice(dtype):
    arr = xr.DataArray(["aafootwo", "aabartwo", "aabazqux"]).astype(dtype)
    result = arr.str.slice(2, 5)
    exp = xr.DataArray(["foo", "bar", "baz"]).astype(dtype)
    assert_equal(result, exp)
    for start, stop, step in [(0, 3, -1), (None, None, -1), (3, 10, 2), (3, 0, -1)]:
        try:
            result = arr.str[start:stop:step]
            expected = xr.DataArray([s[start:stop:step] for s in arr.values])
            assert_equal(result, expected.astype(dtype))
        except IndexError:
            print(f"failed on {start}:{stop}:{step}")
            raise
def test_slice_replace(dtype):
    da = lambda x: xr.DataArray(x).astype(dtype)
    values = da(["short", "a bit longer", "evenlongerthanthat", ""])
    expected = da(["shrt", "a it longer", "evnlongerthanthat", ""])
    result = values.str.slice_replace(2, 3)
    assert_equal(result, expected)
    expected = da(["shzrt", "a zit longer", "evznlongerthanthat", "z"])
    result = values.str.slice_replace(2, 3, "z")
    assert_equal(result, expected)
    expected = da(["shzort", "a zbit longer", "evzenlongerthanthat", "z"])
    result = values.str.slice_replace(2, 2, "z")
    assert_equal(result, expected)
    expected = da(["shzort", "a zbit longer", "evzenlongerthanthat", "z"])
    result = values.str.slice_replace(2, 1, "z")
    assert_equal(result, expected)
    expected = da(["shorz", "a bit longez", "evenlongerthanthaz", "z"])
    result = values.str.slice_replace(-1, None, "z")
    assert_equal(result, expected)
    expected = da(["zrt", "zer", "zat", "z"])
    result = values.str.slice_replace(None, -2, "z")
    assert_equal(result, expected)
    expected = da(["shortz", "a bit znger", "evenlozerthanthat", "z"])
    result = values.str.slice_replace(6, 8, "z")
    assert_equal(result, expected)
    expected = da(["zrt", "a zit longer", "evenlongzerthanthat", "z"])
    result = values.str.slice_replace(-10, 3, "z")
    assert_equal(result, expected)
def test_strip_lstrip_rstrip(dtype):
    values = xr.DataArray(["  aa   ", " bb \n", "cc  "]).astype(dtype)
    result = values.str.strip()
    expected = xr.DataArray(["aa", "bb", "cc"]).astype(dtype)
    assert_equal(result, expected)
    result = values.str.lstrip()
    expected = xr.DataArray(["aa   ", "bb \n", "cc  "]).astype(dtype)
    assert_equal(result, expected)
    result = values.str.rstrip()
    expected = xr.DataArray(["  aa", " bb", "cc"]).astype(dtype)
    assert_equal(result, expected)
def test_strip_lstrip_rstrip_args(dtype):
    values = xr.DataArray(["xxABCxx", "xx BNSD", "LDFJH xx"]).astype(dtype)
    rs = values.str.strip("x")
    xp = xr.DataArray(["ABC", " BNSD", "LDFJH "]).astype(dtype)
    assert_equal(rs, xp)
    rs = values.str.lstrip("x")
    xp = xr.DataArray(["ABCxx", " BNSD", "LDFJH xx"]).astype(dtype)
    assert_equal(rs, xp)
    rs = values.str.rstrip("x")
    xp = xr.DataArray(["xxABC", "xx BNSD", "LDFJH "]).astype(dtype)
    assert_equal(rs, xp)
def test_wrap():
    # test values are: two words less than width, two words equal to width,
    # two words greater than width, one word less than width, one word
    # equal to width, one word greater than width, multiple tokens with
    # trailing whitespace equal to width
    values = xr.DataArray(
        [
            "hello world",
            "hello world!",
            "hello world!!",
            "abcdefabcde",
            "abcdefabcdef",
            "abcdefabcdefa",
            "ab ab ab ab ",
            "ab ab ab ab a",
            "\t",
        ]
    )
    # expected values
    expected = xr.DataArray(
        [
            "hello world",
            "hello world!",
            "hello\nworld!!",
            "abcdefabcde",
            "abcdefabcdef",
            "abcdefabcdef\na",
            "ab ab ab ab",
            "ab ab ab ab\na",
            "",
        ]
    )
    result = values.str.wrap(12, break_long_words=True)
    assert_equal(result, expected)
    # test with pre and post whitespace (non-unicode), NaN, and non-ascii
    # Unicode
    values = xr.DataArray(["  pre  ", "\xac\u20ac\U00008000 abadcafe"])
    expected = xr.DataArray(["  pre", "\xac\u20ac\U00008000 ab\nadcafe"])
    result = values.str.wrap(6)
    assert_equal(result, expected)
def test_wrap_kwargs_passed():
    # GH4334
    values = xr.DataArray("  hello world  ")
    result = values.str.wrap(7)
    expected = xr.DataArray("  hello\nworld")
    assert_equal(result, expected)
    result = values.str.wrap(7, drop_whitespace=False)
    expected = xr.DataArray("  hello\n world\n  ")
    assert_equal(result, expected)
def test_get(dtype):
    values = xr.DataArray(["a_b_c", "c_d_e", "f_g_h"]).astype(dtype)
    result = values.str[2]
    expected = xr.DataArray(["b", "d", "g"]).astype(dtype)
    assert_equal(result, expected)
    # bounds testing
    values = xr.DataArray(["1_2_3_4_5", "6_7_8_9_10", "11_12"]).astype(dtype)
    # positive index
    result = values.str[5]
    expected = xr.DataArray(["_", "_", ""]).astype(dtype)
    assert_equal(result, expected)
    # negative index
    result = values.str[-6]
    expected = xr.DataArray(["_", "8", ""]).astype(dtype)
    assert_equal(result, expected)
def test_get_default(dtype):
    # GH4334
    values = xr.DataArray(["a_b", "c", ""]).astype(dtype)
    result = values.str.get(2, "default")
    expected = xr.DataArray(["b", "default", "default"]).astype(dtype)
    assert_equal(result, expected)
def test_encode_decode():
    data = xr.DataArray(["a", "b", "a\xe4"])
    encoded = data.str.encode("utf-8")
    decoded = encoded.str.decode("utf-8")
    assert_equal(data, decoded)
def test_encode_decode_errors():
    encodeBase = xr.DataArray(["a", "b", "a\x9d"])
    msg = (
        r"'charmap' codec can't encode character '\\x9d' in position 1:"
        " character maps to <undefined>"
    )
    with pytest.raises(UnicodeEncodeError, match=msg):
        encodeBase.str.encode("cp1252")
    f = lambda x: x.encode("cp1252", "ignore")
    result = encodeBase.str.encode("cp1252", "ignore")
    expected = xr.DataArray([f(x) for x in encodeBase.values.tolist()])
    assert_equal(result, expected)
    decodeBase = xr.DataArray([b"a", b"b", b"a\x9d"])
    msg = (
        "'charmap' codec can't decode byte 0x9d in position 1:"
        " character maps to <undefined>"
    )
    with pytest.raises(UnicodeDecodeError, match=msg):
        decodeBase.str.decode("cp1252")
    f = lambda x: x.decode("cp1252", "ignore")
    result = decodeBase.str.decode("cp1252", "ignore")
    expected = xr.DataArray([f(x) for x in decodeBase.values.tolist()])
    assert_equal(result, expected)
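def test_chained_methods_sketch(dtype):
    # Supplementary sketch (not from the upstream test suite): accessor calls
    # return DataArrays, so they chain like ordinary Python string methods.
    values = xr.DataArray(["  ab  ", "  cd  "]).astype(dtype)
    result = values.str.strip().str.upper()
    expected = xr.DataArray(["AB", "CD"]).astype(dtype)
    assert_equal(result, expected)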
 | 
	apache-2.0 | 
| 
	zakkum42/Bosch | 
	src/04-model/bosch_full_autoencoder_from_dir_with_fit.py | 
	2 | 
	22153 | 
	import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import pickle
import os
import math
import glob
from sklearn.metrics import mean_squared_error
from include.dataset_fnames import generate_station_data_fname, generate_data_fname, generate_response_data_fname, train_categorical_onehot_filename, train_station_date_filename
from include.feature_lists import numeric_features, numeric_missing_features_list, numeric_features_to_work_on, categoric_features
import theano
import theano.tensor as T
import keras
from keras import backend as K
from keras.layers import Input, Dense, Dropout, BatchNormalization
from keras.models import Model, Sequential
from keras.callbacks import Callback
#from keras.losses import mean_squared_error
from keras.optimizers import adam, adadelta
from keras.regularizers import L1L2
from BoschNNModel1 import BoschNNModel, BoschNNInput, BoschNNOutput, BoschNNCallback
from sklearn.model_selection import KFold
from keras.callbacks import History, EarlyStopping
import random
from datetime import datetime
cv_splits = 3
top_epoch_count = 30
def batch_count_csv(dirname):
    fnames = glob.glob1(dirname, "train_numeric_*")
    return len(fnames)
def batch_count_npz(dirname):
    fnames = glob.glob1(dirname, "train*.npz")
    return len(fnames)
def bosch_data_generator_from_csv(dirname, indices, shuffle=False, use_categoric_features=False, use_date_features=False):
    fname = generate_data_fname(sample_type='train', data_type='numeric')
    numeric_columns = pd.read_csv(fname, nrows=2).columns
    numeric_fnames = sorted(glob.glob1(dirname, "train_numeric_*"))
    
    if use_categoric_features:
        fname = train_categorical_onehot_filename
        categoric_columns = pd.read_csv(fname, nrows=2).columns
        categoric_fnames = sorted(glob.glob1(dirname, "train_categorical_*"))
    if use_date_features:
        fname = train_station_date_filename
        station_date_columns = pd.read_csv(fname, nrows=2).columns
        station_date_fnames = sorted(glob.glob1(dirname, "train_station_date_*"))
        
    if shuffle:
        random.shuffle(indices)
    
    for list_index in range(len(indices)):
        t0 = datetime.now()
        numeric_fname = os.path.join(dirname, numeric_fnames[indices[list_index]])
        numeric_df = pd.read_csv(numeric_fname, names=numeric_columns, index_col='Id')
        del numeric_df['Response']
#         print datetime.now() - t0
        
        if use_categoric_features:
            t0 = datetime.now()
            categoric_fname = os.path.join(dirname, categoric_fnames[indices[list_index]])
            categoric_df = pd.read_csv(categoric_fname, names=categoric_columns, index_col='Id')
            numeric_df = numeric_df.join(categoric_df, how='inner')
#             print datetime.now() - t0
            del categoric_df
        if use_date_features:
            t0 = datetime.now()
            station_date_fname = os.path.join(dirname, station_date_fnames[indices[list_index]])
            station_date_df = pd.read_csv(station_date_fname, names=station_date_columns, index_col='Id')
            station_date_df = station_date_df / 1719.0 # Normalization
            numeric_df = numeric_df.join(station_date_df, how='inner')
#             print datetime.now() - t0
            del station_date_df
 
        yield numeric_df.values, numeric_df.values, list_index+1
        del numeric_df
def bosch_data_generator_from_npz(dirname, indices, shuffle=False, use_categoric_features=False, use_date_features=False):
    assert use_categoric_features, "Compressed numeric-only values not implemented."
    # TODO: compressed date features are not yet supported
    assert not use_date_features, "Compressed date values not implemented."
    
    fname = generate_data_fname(sample_type='train', data_type='numeric')
    numeric_columns = pd.read_csv(fname, nrows=2).columns
    
    if use_categoric_features:
        fname = train_categorical_onehot_filename
        categoric_columns = pd.read_csv(fname, nrows=2).columns
    compressed_fnames = sorted(glob.glob1(dirname, "train_numeric+categoric_*.npz"))
        
    if shuffle:
        random.shuffle(indices)
    
    for list_index in range(len(indices)):
        t0 = datetime.now()
        compressed_fname = os.path.join(dirname, compressed_fnames[indices[list_index]])
        dataz = np.load(compressed_fname)
        data = dataz['data']
#         print "Loaded in", datetime.now() - t0
         
        yield data, data, list_index+1
        del dataz
        del data
        
def load_anomaly_data(use_categoric_features, use_date_features=False): # default added so evaluate_model/evaluate2 can call with one argument
    fname = generate_data_fname(sample_type='train', data_type='numeric')
    numeric_columns = pd.read_csv(fname, nrows=2).columns
    numeric_fname = "d:/Kaggle_ws/Bosch/input/train_numeric_headless_1.csv"
    numeric_df = pd.read_csv(numeric_fname, names=numeric_columns, index_col='Id')
    del numeric_df['Response']
        
    if use_categoric_features:
        fname = train_categorical_onehot_filename
        categoric_columns = pd.read_csv(fname, nrows=2).columns
        categoric_fname = "d:/Kaggle_ws/Bosch/input/train_categorical_onehot_headless_1.csv"
        categoric_df = pd.read_csv(categoric_fname, names=categoric_columns, index_col='Id')
        numeric_df = numeric_df.join(categoric_df, how='inner')
        del categoric_df
    if use_date_features:
        fname = train_station_date_filename
        station_date_columns = pd.read_csv(fname, nrows=2).columns
        station_date_fname = "d:/Kaggle_ws/Bosch/input/train_station_date_headless_1.csv"
        station_date_df = pd.read_csv(station_date_fname, names=station_date_columns, index_col='Id')
        station_date_df = station_date_df / 1719.0 # Normalization
        numeric_df = numeric_df.join(station_date_df, how='inner')
        del station_date_df
    
    return numeric_df.values
def my_mean_squared_error(y_true, y_pred):
    mask = T.invert(T.isnan(y_true))
    
    y_true = y_true[mask.nonzero()]
    y_pred = y_pred[mask.nonzero()]
 
    return K.mean(K.square(y_pred - y_true), axis=-1)
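# Plain-NumPy sketch of the same masked loss (illustration only, not used by
# the Keras model): NaNs in the target mark missing sensor readings and are
# excluded from the mean squared error.
def masked_mse_numpy(y_true, y_pred):
    mask = np.invert(np.isnan(y_true))
    return np.mean(np.square(y_pred[mask] - y_true[mask]))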
# [968, 484, 32, 16, 10, 16, 32, 484, 968]
def load_model(layer_list):
    print "Building network with layers", layer_list
    cols = layer_list[0]
    model = Sequential() # was BoschNNModel()
    
# Create Input Layer
    inputLayer = BoschNNInput(units=layer_list[1], input_shape=(cols,), name="first", activation='relu')
# Add layers to model
    model.add(inputLayer)
    for i in range(2, len(layer_list) -1):
        model.add(BatchNormalization())
        model.add(Dense(layer_list[i], name='hidden_'+str(i), activation='relu', bias_regularizer=L1L2(l1=0.01)))
# # Add Dropout Layer
#     model.add(Dropout(name="Dropout", rate=0.5))
# Create Output Layer
#     outputLayer = BoschNNOutput(model, cols, name='last', activation='linear')
    outputLayer = Dense(cols, name='last', activation='linear')
    model.add(outputLayer)
    return model
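# Example (illustrative only): layer_list = [968, 484, 32, 16, 32, 484, 968]
# builds a 484-unit input layer, hidden layers of 32, 16, 32 and 484 units,
# and a 968-unit linear output. The first entry is the input width; the
# output width is also taken from it, so the trailing entry is kept only to
# make the list read symmetrically.
#     model = load_model([968, 484, 32, 16, 32, 484, 968])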
def train_autoencoder_model(dirname, model_name, hidden_layers, use_categoric_features=False, use_date_features=False, use_compressed=False):
    fname = generate_data_fname(sample_type='train', data_type='numeric')
    numeric_columns = pd.read_csv(fname, nrows=2).columns
    cols = len(numeric_columns) -1 -1 # one for 'Id', one for 'Response'
    if use_categoric_features:
        fname = train_categorical_onehot_filename
        categoric_columns = pd.read_csv(fname, nrows=2).columns
        cols = cols + len(categoric_columns) -1 # one for 'Id' 
    if use_date_features:
        fname = train_station_date_filename
        station_date_columns = pd.read_csv(fname, nrows=2).columns
        cols = cols + len(station_date_columns) -1 # one for 'Id' 
    
    anomaly_df = load_anomaly_data(use_categoric_features, use_date_features)
    layer_list = [cols] + eval(hidden_layers) + [cols]
# Load model from somewhere
    boschAutoencoderModel = load_model(layer_list)
# Compile model
    boschAutoencoderModel.compile(optimizer='adam', loss=my_mean_squared_error)
 
# this is what we have in the model
    boschAutoencoderModel.summary()
      
# Initialize Callback for weight processing    
#     boschNNCallback = BoschNNCallback()
      
#     boschModel.fit(X, y, epochs=10, batch_size=1, shuffle=False, verbose=True, callbacks=[boschNNCallback])
      
#     boschAutoencoderModel.fit(X_train, X_train,
#                     epochs=5,
#                     batch_size=1,
#                     shuffle=False,
# #                     callbacks=[boschNNCallback],
#                     validation_data=(X_test, X_test))
    batch_count = -1 # No of files to load at every CV
    if not use_compressed:
        batch_count = batch_count_csv(dirname)
    else:
        batch_count = batch_count_npz(dirname)
    print batch_count
    kf = KFold(n_splits=cv_splits)
    
    earlystoping = EarlyStopping(monitor='val_loss', min_delta=1e-6, patience=3, verbose=1, mode='auto')
    
    final_mse = 0.0
    best_cv_mse = 99999
    cv_count = 1
    for train_indices, test_indices in kf.split(range(batch_count)):
        print "----------------------------------------------------------------------"
        print "CV:", cv_count, "/", cv_splits
        
# Rewind model!
# Load model from somewhere
        boschAutoencoderModel = load_model(layer_list)
        
# Compile model
#         optimizer = adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.001)
#         boschAutoencoderModel.compile(optimizer='adadelta', loss=my_mean_squared_error)
        optimizer = adadelta(lr=1.0)
        boschAutoencoderModel.compile(optimizer=optimizer, loss=my_mean_squared_error)
# Go 3 times on each batch
        for top_epoch, _ in enumerate(range(top_epoch_count),1):
            print "Top epoch:", top_epoch, "of", top_epoch_count
            if not use_compressed:
                generator = bosch_data_generator_from_csv(dirname, train_indices, shuffle=True, use_categoric_features=use_categoric_features, use_date_features=use_date_features)
            else:
                generator = bosch_data_generator_from_npz(dirname, train_indices, shuffle=True, use_categoric_features=use_categoric_features, use_date_features=use_date_features)
                
            history = History()
            for X, y, file_index in generator:
                print "Set", file_index, "of", len(train_indices)
                print "LR:", boschAutoencoderModel.optimizer.lr.get_value()
                boschAutoencoderModel.fit(X, y, 
                                        batch_size=4092,
                                        epochs=3, 
#                                         verbose=2,
#                                         validation_split=0.33,
                                        shuffle=True,
#                                         callbacks=[history, earlystoping]
                                        callbacks=[history]
                                        )
               
                print cv_count, top_epoch, file_index
                print history.history
                del X
                del y
#                 new_lr = np.float32(0.97 * boschAutoencoderModel.optimizer.lr.get_value())
#                 boschAutoencoderModel.optimizer.lr.set_value(new_lr)
            print "Validating top_epoch", top_epoch,
            if not use_compressed:
                validation_generator = bosch_data_generator_from_csv(dirname, test_indices, use_categoric_features=use_categoric_features, use_date_features=use_date_features)
            else:
                validation_generator = bosch_data_generator_from_npz(dirname, test_indices, use_categoric_features=use_categoric_features, use_date_features=use_date_features)
                
            for X, y, _ in validation_generator:
                predictions = boschAutoencoderModel.predict(X)
                
                y_pred = predictions[np.invert(np.isnan(X)).nonzero()]
                y_true = y[np.invert(np.isnan(X)).nonzero()]
     
                mse_ = mean_squared_error(y_true, y_pred)
                print "mse_:", mse_
        fname_model = os.path.join(model_name + "_CV_" + str(cv_count) + ".h5")
        print "Saving model to", fname_model, "."
        boschAutoencoderModel.save(fname_model)
        print "Validating..."
        cv_mse = 0.0
        total = 0.0
        if not use_compressed:
            validation_generator = bosch_data_generator_from_csv(dirname, test_indices, use_categoric_features=use_categoric_features, use_date_features=use_date_features)
        else:
            validation_generator = bosch_data_generator_from_npz(dirname, test_indices, use_categoric_features=use_categoric_features, use_date_features=use_date_features)
            
        for X, y, _ in validation_generator:
            predictions = boschAutoencoderModel.predict(X)
            
            y_pred = predictions[np.invert(np.isnan(X)).nonzero()]
            y_true = y[np.invert(np.isnan(X)).nonzero()]
 
            counts = y_true.shape[0]
            mse_ = mean_squared_error(y_true, y_pred)
            cv_mse += counts * mse_ 
            total += counts
#             
#             mask = np.invert(np.isnan(X))
#             predictions = boschAutoencoderModel.predict(X)
#             y_pred = predictions[mask.nonzero()]
#             
#             del X
#             del predictions
# 
#             y_true = y[mask.nonzero()]
# 
#             del y
#             del mask
#             
#             counts = y_true.shape[0]
#             mse_ = mean_squared_error(y_true, y_pred)
#             cv_mse += counts * mse_ 
#             total += counts
#             
#             del y_true
#             del y_pred
        
        cv_mse = cv_mse / total    
        print "CV MSE:", cv_mse
        if cv_mse < best_cv_mse:
            best_cv_mse = cv_mse
            fname_model = os.path.join(model_name + "_best_model" + ".h5")
            print "New best CV MSE. Saving model to", fname_model, "."
            boschAutoencoderModel.save(fname_model)
            predictions = boschAutoencoderModel.predict(anomaly_df)
            y_pred = predictions[np.invert(np.isnan(anomaly_df)).nonzero()]
            y_true = anomaly_df[np.invert(np.isnan(anomaly_df)).nonzero()]
            a_mse_ = mean_squared_error(y_true, y_pred)
            print "Anomaly error:", a_mse_
            
        final_mse +=  cv_mse
        cv_count += 1
    
    print "Final MSE:", final_mse / cv_splits 
    return
def load_encoder_model(model_fname, bottleneck):
    print "Loading", model_fname,
    ae_model = keras.models.load_model(
        model_fname, 
        custom_objects={"BoschNNInput":BoschNNInput, "my_mean_squared_error":my_mean_squared_error})
 
    bottleneck_layer = 'hidden_' + str(bottleneck)
    encoder_model = Model(inputs=ae_model.input, outputs=ae_model.get_layer(bottleneck_layer).output)
    print "Done."
    
    return encoder_model
def load_autoencoder_model(model_fname):
    print "Loading", model_fname,
    ae_model = keras.models.load_model(
        model_fname, 
        custom_objects={"BoschNNInput":BoschNNInput, "my_mean_squared_error":my_mean_squared_error})
 
    print "Done."
    
    return ae_model
def evaluate_model(model_fname, use_categoric_features=False):
    boschAutoencoderModel = keras.models.load_model(
        model_fname, 
        custom_objects={"BoschNNInput":BoschNNInput, "my_mean_squared_error":my_mean_squared_error})
 
    boschAutoencoderModel.summary()
    anomaly_df = load_anomaly_data(use_categoric_features)
    predictions = boschAutoencoderModel.predict(anomaly_df)
    mask = np.isnan(anomaly_df)
    y_pred = predictions[np.invert(mask).nonzero()]
    y_true = anomaly_df[np.invert(mask).nonzero()]
    a_mse_ = mean_squared_error(y_true, y_pred)
# Is it?
#     predictions = boschAutoencoderModel.predict(anomaly_df)
#     predictions[mask] = 0
#     anomaly_df[mask] = 0
#     a_mse_ = mean_squared_error(anomaly_df, predictions)
    
    print "Anomaly error:", a_mse_
    return
def evaluate2(model_fname, use_categoric_features=False):
    anomaly_df = load_anomaly_data(use_categoric_features)
    boschAutoencoderModel = keras.models.load_model(
        model_fname, 
        custom_objects={"BoschNNInput":BoschNNInput, "my_mean_squared_error":my_mean_squared_error})
 
    boschAutoencoderModel.summary()
    print "Anomaly error:", a_mse_
    return
def predict_with_ae_model(dirname, model_fname, use_categoric_features=False, use_date_features=False, use_compressed=False):
    ae_model = load_autoencoder_model(model_fname) 
 
    ae_model.summary()
    batch_count = batch_count_csv(dirname)
    generator = bosch_data_generator_from_csv(dirname, range(batch_count), use_categoric_features=use_categoric_features, use_date_features=use_date_features)
  
    error_list = []
    for X, y, file_index in generator:
        print "Set", file_index, "of", batch_count, 
        for row_index in range(X.shape[0]):
            Xr = np.reshape(X[row_index], (1, X.shape[1]))
            yr = np.reshape(y[row_index], (1, X.shape[1]))
            mask = np.isnan(Xr)
            predictions = ae_model.predict(Xr)
            y_pred = predictions[np.invert(mask).nonzero()]
            y_true = yr[np.invert(mask).nonzero()]
            if len(y_true) > 0: # Check for all input is NaN!
                a_mse_ = mean_squared_error(y_true, y_pred)
            else:
                a_mse_ = 0.0 # every value in this row is NaN; record a zero error
            error_list.append(a_mse_)
            if ((row_index % 1000) == 0): print "*", 
        print
           
    np.save('errors_normal.npy', np.asmatrix(error_list, dtype='float32'))
    pd.Series(data=error_list).to_csv('errors_normal.csv')    
    
    X = load_anomaly_data(use_categoric_features, use_date_features)
    y = X
    
    error_list = []
    for row_index in range(X.shape[0]):
        Xr = np.reshape(X[row_index], (1, X.shape[1]))
        yr = np.reshape(y[row_index], (1, X.shape[1]))
        mask = np.isnan(Xr)
        predictions = ae_model.predict(Xr)
        y_pred = predictions[np.invert(mask).nonzero()]
        y_true = yr[np.invert(mask).nonzero()]
        if len(y_true) > 0: # Check for all input is NaN!
            a_mse_ = mean_squared_error(y_true, y_pred)
        else:
            a_mse_ = 0.0 # every value in this row is NaN; record a zero error
        error_list.append(a_mse_)
        if ((row_index % 1000) == 0): print "*", 
    print
        
    np.save('errors_anomaly.npy', np.asmatrix(error_list, dtype='float32'))
    pd.Series(data=error_list).to_csv('errors_anomaly.csv')    
   
    return
def predict_with_encoder_model(dirname, model_fname, bottleneck, use_categoric_features=False, use_date_features=False, use_compressed=False):
    encoder_model = load_encoder_model(model_fname, bottleneck=bottleneck) 
    encoder_model.summary()
    output_feature_count = encoder_model.layers[-1].output_shape[1]
    
  
    col_names = ['col_'+str(i) for i in range(output_feature_count)]
    
    normal_df = pd.DataFrame(columns=col_names, dtype='float32')
 
    batch_count = batch_count_csv(dirname)
    if (not use_compressed):
        generator = bosch_data_generator_from_csv(dirname, range(batch_count), use_categoric_features=use_categoric_features, use_date_features=use_date_features)
    else:
        generator = bosch_data_generator_from_npz(dirname, range(batch_count), use_categoric_features=use_categoric_features, use_date_features=use_date_features)
 
    for X, _, file_index in generator:
        print "Set", file_index, "of", batch_count 
        predictions = encoder_model.predict(X)
        normal_df = normal_df.append(pd.DataFrame(data=predictions, columns=col_names, dtype='float32'))
    
    normal_df['Response'] = 0    
 
    X = load_anomaly_data(use_categoric_features, use_date_features)
    predictions = encoder_model.predict(X)
    anomaly_df = pd.DataFrame(data=predictions, columns=col_names, dtype='float32')
    anomaly_df['Response'] = 1
    
    data_df = normal_df.append(anomaly_df)
    data_df.to_csv("encoder_output.csv", index=False)
        
    return
if __name__ == '__main__':
# Experiment 1 NO
#     predict_with_encoder_model("bs30000", "experiment_1_no/BAE_model_cols_4-8_numeric_only_best_model.h5", 6, use_categoric_features=False, use_compressed=False)
# Experiment 2 NC 
#     predict_with_encoder_model("bs30000", "experiment_2_nc/BAE_model_cols_4-8_best_model.h5", 6, use_categoric_features=True, use_compressed=True)
# Experiment 3 NC 
#     predict_with_ae_model("bs30000", "experiment_5_nd_2-4/BAE_model_cols_2-4_numeric+date_best_model.h5", use_categoric_features=False, use_date_features=True, use_compressed=False)
#     predict_with_encoder_model("bs30000", "experiment_5_nd_2-4/BAE_model_cols_2-4_numeric+date_best_model.h5", 6, use_categoric_features=False, use_date_features=True, use_compressed=False)
#     exit (-1)
# Experiment 6 ND 
#     predict_with_ae_model("bs30000", "experiment_8_nc_4-8-16/BAE_model_cols_4-8-16_numeric+date_best_model.h5", use_categoric_features=True, use_date_features=True, use_compressed=False)
#     predict_with_encoder_model("bs30000", "experiment_8_nc_4-8-16/BAE_model_cols_4-8-16_numeric+date_best_model.h5", 4, use_categoric_features=True, use_date_features=True, use_compressed=False)
#     exit (-1)
    train_autoencoder_model('bs30000', 
                             'BAE_model_cols_4-8-16_numeric+date', 
#                             '[cols/4, cols/8, 64, 32, 16, 6, 16, 32, 64, cols/8, cols/4]', # 15 M
#                            '[cols/2, cols/4, 64, 32, 16, 6, 16, 32, 64, cols/4, cols/2]', # 30M 
                            '[cols/4, cols/8, cols/16, 32, cols/16, cols/8, cols/4]',
                             use_categoric_features=True,
                             use_date_features=True,
                             use_compressed=False)
#   fit2:  train_autoencoder_model('bs50000', '[cols/2, cols/4, cols/8, 64, 32, 16, 6, 16, 32, 64, cols/8, cols/4, cols/2]')
 | 
	apache-2.0 | 
| 
	MarineLasbleis/SnowLayer | 
	Test2.py | 
	1 | 
	7839 | 
	#!/usr/bin/python
# Time-stamp: <2015-10-19 10:40:15 marine>
## Project : Snow in the F-layer
## Subproject : computation of the equilibrium state and stability of it
## Author : Marine Lasbleis
# external libraries
import scipy.io as io
import math
import matplotlib.pyplot as plt
import numpy as np
#files
import systemdiff
import figures
import eqResol
from param import *
test_fitddy = 0  # if 1, plot figures for testing the value of ddy
figure=1
geometry="cart"
print geometry
######################
######################
####  Load seismic observations
######################
######################
Observations = io.loadmat('/Users/marine/ownCloud/Research/PREM/AK_PREM.mat')
Ks_PREM = Observations['Ks_PREM'] * 1e9
alpha_AK = Observations['alpha_AK'] * 1000.
alpha_PREM = Observations['alpha_PREM']
r_AK = Observations['r_AK'] * 1000.
r_PREM = Observations['r_PREM'] * 1000.
rho_AK = Observations['rho_AK'] * 1000.
rho_PREM = Observations['rho_PREM']
hminP=(r_PREM>=1221e3).argmax()
hmaxP=(r_PREM>=1221e3+d).argmax()
Vp_PREM=alpha_PREM[hminP+1:hmaxP+1]
radius_PREM=r_PREM[hminP+1:hmaxP+1]
hmaxA=(r_AK<=1200.5e3).argmax()
hminA=(r_AK<=1220e3+d).argmax()
Vp_AK=alpha_AK[hminA-1:hmaxA-1]
radius_AK=r_AK[hminA-1:hmaxA-1]
r_PREM2_1=np.linspace(0.,1010.0e3,10)
alpha_PREM2_1=11.2622-6.364*(r_PREM2_1/6371.e3)**2
r_PREM2_2=np.linspace(1010.0e3,1221.5e3,10)
alpha_PREM2_2=11.3041-1.2730*(r_PREM2_2/6371.e3)
r_PREM2_3=np.linspace(1221.5e3,1621.5e3,10)
alpha_PREM2_3=4.0354+82.008*(r_PREM2_3/6371.e3)-347.769*(r_PREM2_3/6371.e3)**2+468.786*(r_PREM2_3/6371.e3)**3.
r_PREM2=np.concatenate((r_PREM2_1,r_PREM2_2,r_PREM2_3))
alpha_PREM2=np.concatenate((alpha_PREM2_1,alpha_PREM2_2,alpha_PREM2_3))*1000.
radius_PREM2=r_PREM2[20:30]
Vp_PREM2=alpha_PREM2[20:30]
KPREM=np.array([13047, 12888, 12679, 12464])
radius=np.array([1221.5e3, 1300.0e3,1400.e3,1500.e3])
ric=np.linspace(0,3500e3,30)
Kprem_labrosse2015=(K0-K0*Kprim0*(ric**2./Lrho**2+4./5.*ric**4./Lrho**4))
rho_Labrosse2015=rho0*(1-ric**2/Lrho**2-Arho*ric**4/Lrho**4)
K_labrosse2003=1777e9*rho_Labrosse2015/7.5e3*(np.log(rho_Labrosse2015/7.5e3)+1)
K_approx=KPREM[0]-(KPREM[1]-KPREM[0])/(radius[1]-radius[0])*radius[0]+(KPREM[1]-KPREM[0])/(radius[1]-radius[0])*ric
K_approx=K_approx*1.e8
rhoH=rho0*(1-rICp**2/Lrho**2-Arho*rICp**4/Lrho**4)
drhoTad=rhoH*alpha*(-0.32e-3)*(ric-rICp)#(rhoH)**(1.-gam)*alpha*rho0**gam*TL0*2.*gam * ric**2/Lrho**2.
drhoP=-rhoH**2*g*(ric-rICp)/(KPREM[0]*1e8)
rho_test=rhoH+drhoTad+drhoP
def calcK(hicb,rICB=rICp):
    """Return the value of the bulk modulus with a linear approximation of PREM at ICB
    hicb in m
    """
    ric=rICB+hicb
    return (KPREM[0]-(KPREM[1]-KPREM[0])/(radius[1]-radius[0])*radius[0]+(KPREM[1]-KPREM[0])/(radius[1]-radius[0])*ric)*1e8
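# Quick sanity check of the linear approximation (a sketch: assuming rICp in
# param is the PREM ICB radius of 1221.5e3 m, calcK(0.) reduces to
# KPREM[0]*1e8 = 1.3047e12 Pa, the value marked by the scatter point on the
# Ks panel below).
print 'calcK(0) = {0:.4e} Pa (expected ~1.3047e12 Pa at the ICB)'.format(calcK(0.))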
f,axa =plt.subplots(2,3)
axa[0,0].plot(r_PREM/rICp,alpha_PREM,r_AK/rICp,alpha_AK,r_PREM2/rICp,alpha_PREM2)
axa[0,0].set_title('Vp (m/s)')
axa[0,1].plot(radius_PREM/rICp,Vp_PREM,radius_AK/rICp,Vp_AK,radius_PREM2/rICp,Vp_PREM2)
axa[0,1].set_title('ZOOM - Vp (m/s)')
axa[0,2].plot(ric/rICp,Kprem_labrosse2015,r_PREM/rICp, Ks_PREM,ric/rICp,K_approx, r_PREM/rICp, alpha_PREM**2*rho_PREM, ric/rICp, K_labrosse2003, ric/rICp, 1293e9*np.ones(30))
axa[0,2].set_title('Ks (Pa)')
axa[0,2].scatter(1.,1.3047e12)
axa[0,0].axis([0,2,9000.,12000.])
axa[0,2].axis([0.5,3.,0.9e12,1.8e12])
axa[0,0].set_xlabel('r/r_icb')
axa[0,1].set_xlabel('r/r_icb')
axa[1,0].plot(r_PREM/rICp,rho_PREM,r_AK/rICp,rho_AK,ric/rICp,rho_Labrosse2015,ric/rICp,rho_test)
axa[1,0].set_title('Rho (kg/m^3)')
axa[1,1].plot(r_PREM/rICp,rho_PREM,r_AK/rICp,rho_AK,ric/rICp, rho_Labrosse2015,ric/rICp,rho_test)
axa[1,1].set_title('ZOOM - Rho (kg/m^3)')
axa[1,0].axis([0,2,11000.,13500.])
axa[1,1].axis([0.95,1.35,11800.,12200.])
axa[1,0].set_xlabel('r/r_icb')
axa[1,1].set_xlabel('r/r_icb')
print rho_test[0], rho_Labrosse2015[0]
rhoH = 12530
rhoD = 12060
######################
######################
####  Compute the density and Vp profile from solving the equations
######################
######################
def SolvingEquations_DimensionalForms(BoundConditions,PeT, Mx, Mp, Veff, gamma, X0=X0, d=d, rICp=rICp, rhoH=rhoH,alpha=alpha, rhoD=rhoD, g=g, hminus=hmin,hmaxi=hmax,dh=dt,geom="cart",AbsTole=AbsTol,RelTole=AbsTol):
    X , dXdh , phiVs , h = eqResol.resolutionSystem(BoundConditions, hminus, hmaxi, dh, PeT, Mx, Mp, Veff, gamma, geom,Atol=AbsTole,Rtol=RelTole)
    x=X0*X
    z=h*d
    T=Tliq0*(1-Mp*h-X*Mx)
    DT=T-T[-1] # temperature variation in the layer compared to T[ICB]
    drhoP=-rhoH**2.*g*z/calcK(z)
    drhoT=-rhoH*alpha*DT#*(Mp*h+X*Mx)
    rhoL=(rhoD-(1-x[0])*rhoH-drhoT[0]-drhoP[0])/x[0]
    # print  rhoL
    rhoL2=x[0]/(1/(rhoD-drhoT[0]-drhoP[1])-(1-x[0])/rhoH)
    ## print rhoL
    
    rho_new=x*rhoL+(1-x)*rhoH+drhoT+drhoP
    Vp_new=np.sqrt(calcK(z)/rho_new)
    # ax5.plot(r/1e3,Vp_new)
    
    # equation 41 Gubbins 2008 (1/rho=1/rho1+1/rho2)
    rho_new2=1./(x/rhoL2+(1-x)/rhoH)+drhoT+drhoP
    Vp_new2=np.sqrt(calcK(z)/rho_new2)
    # ax5.plot(z/1e3,Vp_new2)
    return z, x, phiVs, T, rho_new, Vp_new
###########################################
###########################################
###########################################
###########################################
###########################################
###########################################
#### Opening of the figures (if needed)
if test_fitddy :     # best value of ddy to fit the seismic values 
    fig, (ax1,ax2) = plt.subplots(1, 2, sharey=True)
    fig5, ax5 = plt.subplots()
figk, axk = plt.subplots()
#### Dimensionless parameters
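#### (A reading of the definitions below, not additional physics: the loop
####  sweeps k over 1x-100x its reference value from param; PeT and PeX are
####  the resulting thermal and compositional Peclet-like numbers, Mx and Mp
####  the non-dimensional liquidus slopes appearing in T=Tliq0*(1-Mp*h-X*Mx),
####  and Veff the freezing rate rdot scaled by Vs0.)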
for k in k*np.linspace(1.,100.,10):
    # PeT, MX, MP, Veff, gamma = 1.e6 , 0.17 , 0.016 , 1.e-7, 7.2
    PeT=Vs0*d/k
    PeX=Vs0*d/lambdaX
    Mx=X0*mX/Tliq0
    Mp=rho0*g*mp*d/Tliq0
    Veff=rdot/Vs0
    gamma=7.2
    print 'PeT {0:.3e}'.format(PeT), 'PeX {0:.3e}'.format(PeX) , 'gamma ', gamma, 'Veff {0:.3e}'.format(Veff), 'Mp {0:.3e}'.format(Mp), 'Mx {0:.3e}'.format(Mx)
    ecartVp=np.ones(Nddy) # discrepancy between Vp calculated here and Vp_AK at ICB+ (to find the best fit)
    i=0
    DDY=np.linspace(-0.01,3,Nddy)
    for ddy in DDY:
        #### Boundary conditions at h=hmin
        BoundConditions = [1. , ddy , 0. ]
        #### Resolution of the equations
        [z, x, phiVs, T, rho_new, Vp_new]= SolvingEquations_DimensionalForms(BoundConditions,PeT, Mx, Mp, Veff, gamma)
        r=rICp+z
        #### Some plots
        if test_fitddy :
            ax1.plot(x/X0,z/1.e3)
            ax2.plot(phiVs,z/1.e3)
            ax5.plot(r/1e3,Vp_new)
        ecartVp[i]=(Vp_new[-1]-Vp_AK[-1])
        i=i+1
    a=np.argmin(ecartVp**2.)
    poly=np.polyfit(DDY[a-2:a+2],ecartVp[a-2:a+2],3)
    racines=np.roots(poly)
    ddy_OK = [racine for racine in racines if racine>DDY[a-2] and racine<DDY[a+2] ]
    ddy_OK=ddy_OK[0]
    #### Resolution of the equations with the good value for ddy
    [z, x, phiVs, T, rho_new, Vp_new]= SolvingEquations_DimensionalForms([1. , ddy_OK , 0. ],PeT, Mx, Mp, Veff, gamma, geom=geometry)
    axk.plot(z,Vp_new)
    if test_fitddy :
        ax5.plot(radius_PREM/1e3,Vp_PREM,radius_AK/1e3,Vp_AK,radius_PREM2/1e3,Vp_PREM2)
        ax5.set_title('F-layer - Vp (m/s)')
        ax5.scatter(radius_AK[-1]/1e3,Vp_AK[-1])
        #ax5.plot((radius_PREM-rICp)/1.e3,Vp_PREM,(radius_AK-rICp)/1.e3,Vp_AK)
        #ax5.scatter((radius_AK[-1]-rICp)/1.e3,Vp_AK[-1])
        ax1.set_ylabel('height above ICB (km)')
        f6,ax6=plt.subplots()
        ax6.plot(DDY,ecartVp)
        ax1.plot(x/X0,z/1.e3,'ro')
        ax2.plot(phiVs,z/1.e3,'ro')
        ax5.plot(r/1e3,Vp_new,'ro')
        ax6.scatter(ddy_OK,(Vp_new[-1]-Vp_AK[-1]))
    
    
#### Figures
if test_fitddy or figure:
    plt.show()
#figures.plotXY(X,h,'X','h')
#figures.plotXY_2(X,h, phiVs,h,0,'X','h','phiVs','h')
   
 | 
	apache-2.0 | 
| 
	DonBeo/scikit-learn | 
	examples/calibration/plot_calibration.py | 
	225 | 
	4795 | 
	"""
======================================
Probability calibration of classifiers
======================================
When performing classification you often want to predict not only
the class label, but also the associated probability. This probability
gives you some measure of confidence in the prediction. However, not all
classifiers provide well-calibrated probabilities: some are over-confident
while others are under-confident. Thus, a separate calibration of the predicted
probabilities is often desirable as a postprocessing step. This example illustrates
two different methods for this calibration and evaluates the quality of the
returned probabilities using the Brier score
(see http://en.wikipedia.org/wiki/Brier_score).
Compared are the estimated probability using a Gaussian naive Bayes classifier
without calibration, with a sigmoid calibration, and with a non-parametric
isotonic calibration. One can observe that only the non-parametric model is able
to provide a probability calibration that returns probabilities close to the
expected 0.5 for most of the samples belonging to the middle cluster with
heterogeneous labels. This results in a significantly improved Brier score.
"""
print(__doc__)
# Author: Mathieu Blondel <[email protected]>
#         Alexandre Gramfort <[email protected]>
#         Balazs Kegl <[email protected]>
#         Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from sklearn.datasets import make_blobs
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import brier_score_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.cross_validation import train_test_split
n_samples = 50000
n_bins = 3  # use 3 bins for calibration_curve as we have 3 clusters here
# Generate 3 blobs with 2 classes where the second blob contains
# half positive samples and half negative samples. Probability in this
# blob is therefore 0.5.
centers = [(-5, -5), (0, 0), (5, 5)]
X, y = make_blobs(n_samples=n_samples, n_features=2, cluster_std=1.0,
                  centers=centers, shuffle=False, random_state=42)
y[:n_samples // 2] = 0
y[n_samples // 2:] = 1
sample_weight = np.random.RandomState(42).rand(y.shape[0])
# split train, test for calibration
X_train, X_test, y_train, y_test, sw_train, sw_test = \
    train_test_split(X, y, sample_weight, test_size=0.9, random_state=42)
# Gaussian Naive-Bayes with no calibration
clf = GaussianNB()
clf.fit(X_train, y_train)  # GaussianNB itself does not support sample-weights
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with isotonic calibration
clf_isotonic = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_isotonic.fit(X_train, y_train, sw_train)
prob_pos_isotonic = clf_isotonic.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with sigmoid calibration
clf_sigmoid = CalibratedClassifierCV(clf, cv=2, method='sigmoid')
clf_sigmoid.fit(X_train, y_train, sw_train)
prob_pos_sigmoid = clf_sigmoid.predict_proba(X_test)[:, 1]
print("Brier scores: (the smaller the better)")
clf_score = brier_score_loss(y_test, prob_pos_clf, sw_test)
print("No calibration: %1.3f" % clf_score)
clf_isotonic_score = brier_score_loss(y_test, prob_pos_isotonic, sw_test)
print("With isotonic calibration: %1.3f" % clf_isotonic_score)
clf_sigmoid_score = brier_score_loss(y_test, prob_pos_sigmoid, sw_test)
print("With sigmoid calibration: %1.3f" % clf_sigmoid_score)
###############################################################################
# Plot the data and the predicted probabilities
plt.figure()
y_unique = np.unique(y)
colors = cm.rainbow(np.linspace(0.0, 1.0, y_unique.size))
for this_y, color in zip(y_unique, colors):
    this_X = X_train[y_train == this_y]
    this_sw = sw_train[y_train == this_y]
    plt.scatter(this_X[:, 0], this_X[:, 1], s=this_sw * 50, c=color, alpha=0.5,
                label="Class %s" % this_y)
plt.legend(loc="best")
plt.title("Data")
plt.figure()
order = np.lexsort((prob_pos_clf, ))
plt.plot(prob_pos_clf[order], 'r', label='No calibration (%1.3f)' % clf_score)
plt.plot(prob_pos_isotonic[order], 'g', linewidth=3,
         label='Isotonic calibration (%1.3f)' % clf_isotonic_score)
plt.plot(prob_pos_sigmoid[order], 'b', linewidth=3,
         label='Sigmoid calibration (%1.3f)' % clf_sigmoid_score)
plt.plot(np.linspace(0, y_test.size, 51)[1::2],
         y_test[order].reshape(25, -1).mean(1),
         'k', linewidth=3, label=r'Empirical')
plt.ylim([-0.05, 1.05])
plt.xlabel("Instances sorted according to predicted probability "
           "(uncalibrated GNB)")
plt.ylabel("P(y=1)")
plt.legend(loc="upper left")
plt.title("Gaussian naive Bayes probabilities")
plt.show()
 | 
	bsd-3-clause | 
| 
	potash/scikit-learn | 
	examples/cluster/plot_agglomerative_clustering_metrics.py | 
	402 | 
	4492 | 
	"""
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vectors. Indeed, the difference between metrics is
usually more pronounced in high dimension (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional to one another. The cosine
distance is invariant to a scaling of the data; as a result, it cannot
distinguish these two waveforms. Thus, even with no noise, clustering
using this distance will not separate out waveforms 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. As a result, the
l1 norm of this noise (i.e. "cityblock" distance) is much smaller than its
l2 norm ("euclidean" distance). This can be seen on the inter-class
distance matrices: the values on the diagonal, that characterize the
spread of the class, are much bigger for the Euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate waveforms 1 and 2 at all,
so the clustering puts them in the same cluster.
"""
# Author: Gael Varoquaux
# License: BSD 3-Clause or CC-0
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
np.random.seed(0)
# Generate waveform data
n_features = 2000
t = np.pi * np.linspace(0, 1, n_features)
def sqr(x):
    return np.sign(np.cos(x))
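# A tiny numerical illustration of the scale-invariance point made in the
# docstring (a sketch): the cosine distance between a waveform and a scaled
# copy of it is ~0, whereas the euclidean distance grows with the scale.
_w = sqr(6 * t).reshape(1, -1)
print("cosine(w, 3w)    = %s" % pairwise_distances(_w, 3 * _w, metric="cosine"))
print("euclidean(w, 3w) = %s" % pairwise_distances(_w, 3 * _w, metric="euclidean"))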
X = list()
y = list()
for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]):
    for _ in range(30):
        phase_noise = .01 * np.random.normal()
        amplitude_noise = .04 * np.random.normal()
        additional_noise = 1 - 2 * np.random.rand(n_features)
        # Make the noise sparse
        additional_noise[np.abs(additional_noise) < .997] = 0
        X.append(12 * ((a + amplitude_noise)
                 * (sqr(6 * (t + phi + phase_noise)))
                 + additional_noise))
        y.append(i)
X = np.array(X)
y = np.array(y)
n_clusters = 3
labels = ('Waveform 1', 'Waveform 2', 'Waveform 3')
# Plot the ground-truth labelling
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c, n in zip(range(n_clusters), 'rgb',
                   labels):
    lines = plt.plot(X[y == l].T, c=c, alpha=.5)
    lines[0].set_label(n)
plt.legend(loc='best')
plt.axis('tight')
plt.axis('off')
plt.suptitle("Ground truth", size=20)
# Plot the distances
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
    avg_dist = np.zeros((n_clusters, n_clusters))
    plt.figure(figsize=(5, 4.5))
    for i in range(n_clusters):
        for j in range(n_clusters):
            avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j],
                                                metric=metric).mean()
    avg_dist /= avg_dist.max()
    for i in range(n_clusters):
        for j in range(n_clusters):
            plt.text(i, j, '%5.3f' % avg_dist[i, j],
                     verticalalignment='center',
                     horizontalalignment='center')
    plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2,
               vmin=0)
    plt.xticks(range(n_clusters), labels, rotation=45)
    plt.yticks(range(n_clusters), labels)
    plt.colorbar()
    plt.suptitle("Interclass %s distances" % metric, size=18)
    plt.tight_layout()
# Plot clustering results
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
    model = AgglomerativeClustering(n_clusters=n_clusters,
                                    linkage="average", affinity=metric)
    model.fit(X)
    plt.figure()
    plt.axes([0, 0, 1, 1])
    for l, c in zip(np.arange(model.n_clusters), 'rgbk'):
        plt.plot(X[model.labels_ == l].T, c=c, alpha=.5)
    plt.axis('tight')
    plt.axis('off')
    plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20)
plt.show()
 | 
	bsd-3-clause | 
| 
	bzero/arctic | 
	tests/integration/tickstore/test_ts_read.py | 
	3 | 
	19456 | 
	from datetime import datetime as dt
from mock import patch
import numpy as np
from numpy.testing.utils import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas as pd
from pandas.tseries.index import DatetimeIndex
import pytest
import pytz
from arctic.date import DateRange, mktz, CLOSED_CLOSED, CLOSED_OPEN, OPEN_CLOSED, OPEN_OPEN
from arctic.exceptions import NoDataFoundException
def test_read(tickstore_lib):
    data = [{'ASK': 1545.25,
                  'ASKSIZE': 1002.0,
                  'BID': 1545.0,
                  'BIDSIZE': 55.0,
                  'CUMVOL': 2187387.0,
                  'DELETED_TIME': 0,
                  'INSTRTYPE': 'FUT',
                  'PRICE': 1545.0,
                  'SIZE': 1.0,
                  'TICK_STATUS': 0,
                  'TRADEHIGH': 1561.75,
                  'TRADELOW': 1537.25,
                  'index': 1185076787070},
                 {'CUMVOL': 354.0,
                  'DELETED_TIME': 0,
                  'PRICE': 1543.75,
                  'SIZE': 354.0,
                  'TRADEHIGH': 1543.75,
                  'TRADELOW': 1543.75,
                  'index': 1185141600600}]
    tickstore_lib.write('FEED::SYMBOL', data)
    df = tickstore_lib.read('FEED::SYMBOL', columns=['BID', 'ASK', 'PRICE'])
    assert_array_equal(df['ASK'].values, np.array([1545.25, np.nan]))
    assert_array_equal(df['BID'].values, np.array([1545, np.nan]))
    assert_array_equal(df['PRICE'].values, np.array([1545, 1543.75]))
    assert_array_equal(df.index.values, np.array(['2007-07-22T04:59:47.070000000+0100',
                                                   '2007-07-22T23:00:00.600000000+0100'], dtype='datetime64[ns]'))
    assert tickstore_lib._collection.find_one()['c'] == 2
def test_read_symbol_as_column(tickstore_lib):
    data = [{'ASK': 1545.25,
                  'index': 1185076787070},
                 {'CUMVOL': 354.0,
                  'index': 1185141600600}]
    tickstore_lib.write('FEED::SYMBOL', data)
    df = tickstore_lib.read('FEED::SYMBOL', columns=['SYMBOL'])
    assert all(df['SYMBOL'].values == ['FEED::SYMBOL'])
def test_read_multiple_symbols(tickstore_lib):
    data1 = [{'ASK': 1545.25,
                  'ASKSIZE': 1002.0,
                  'BID': 1545.0,
                  'BIDSIZE': 55.0,
                  'CUMVOL': 2187387.0,
                  'DELETED_TIME': 0,
                  'INSTRTYPE': 'FUT',
                  'PRICE': 1545.0,
                  'SIZE': 1.0,
                  'TICK_STATUS': 0,
                  'TRADEHIGH': 1561.75,
                  'TRADELOW': 1537.25,
                  'index': 1185076787070}, ]
    data2 = [{'CUMVOL': 354.0,
                  'DELETED_TIME': 0,
                  'PRICE': 1543.75,
                  'SIZE': 354.0,
                  'TRADEHIGH': 1543.75,
                  'TRADELOW': 1543.75,
                  'index': 1185141600600}]
    tickstore_lib.write('BAR', data2)
    tickstore_lib.write('FOO', data1)
    df = tickstore_lib.read(['FOO', 'BAR'], columns=['BID', 'ASK', 'PRICE'])
    assert all(df['SYMBOL'].values == ['FOO', 'BAR'])
    assert_array_equal(df['ASK'].values, np.array([1545.25, np.nan]))
    assert_array_equal(df['BID'].values, np.array([1545, np.nan]))
    assert_array_equal(df['PRICE'].values, np.array([1545, 1543.75]))
    assert_array_equal(df.index.values, np.array(['2007-07-22T04:59:47.070000000+0100',
                                                   '2007-07-22T23:00:00.600000000+0100'], dtype='datetime64[ns]'))
    assert tickstore_lib._collection.find_one()['c'] == 1
@pytest.mark.parametrize('chunk_size', [1, 100])
def test_read_all_cols_all_dtypes(tickstore_lib, chunk_size):
    data = [{'f': 0.1,
            'of': 0.2,
            's': 's',
            'os': 'os',
            'l': 1,
            'ol': 2,
            'index': dt(1970, 1, 1, tzinfo=mktz('UTC')),
            },
            {'f': 0.3,
            'nf': 0.4,
            's': 't',
            'ns': 'ns',
            'l': 3,
            'nl': 4,
            'index': dt(1970, 1, 1, 0, 0, 1, tzinfo=mktz('UTC')),
            },
            ]
    tickstore_lib.chunk_size = 3
    tickstore_lib.write('sym', data)
    df = tickstore_lib.read('sym', columns=None)
    # The below is probably more trouble than it's worth, but we *should*
    # be able to roundtrip data and get the same answer...
    # Ints become floats
    data[0]['l'] = float(data[0]['l'])
    # Treat missing strings as None
    data[0]['ns'] = None
    data[1]['os'] = None
    index = DatetimeIndex([dt(1970, 1, 1, tzinfo=mktz('UTC')),
                         dt(1970, 1, 1, 0, 0, 1, tzinfo=mktz('UTC'))],
                        )
    index.tz = mktz()
    expected = pd.DataFrame(data, index=index)
    expected = expected[df.columns]
    assert_frame_equal(expected, df, check_names=False)
DUMMY_DATA = [
              {'a': 1.,
               'b': 2.,
               'index': dt(2013, 1, 1, tzinfo=mktz('Europe/London'))
               },
              {'b': 3.,
               'c': 4.,
               'index': dt(2013, 1, 2, tzinfo=mktz('Europe/London'))
               },
              {'b': 5.,
               'c': 6.,
               'index': dt(2013, 1, 3, tzinfo=mktz('Europe/London'))
               },
              {'b': 7.,
               'c': 8.,
               'index': dt(2013, 1, 4, tzinfo=mktz('Europe/London'))
               },
              {'b': 9.,
               'c': 10.,
               'index': dt(2013, 1, 5, tzinfo=mktz('Europe/London'))
               },
              ]
def test_date_range(tickstore_lib):
    tickstore_lib.write('SYM', DUMMY_DATA)
    df = tickstore_lib.read('SYM', date_range=DateRange(20130101, 20130103), columns=None)
    assert_array_equal(df['a'].values, np.array([1, np.nan, np.nan]))
    assert_array_equal(df['b'].values, np.array([2., 3., 5.]))
    assert_array_equal(df['c'].values, np.array([np.nan, 4., 6.]))
    tickstore_lib.delete('SYM')
    # Chunk every 3 symbols and lets have some fun
    tickstore_lib.chunk_size = 3
    tickstore_lib.write('SYM', DUMMY_DATA)
    with patch.object(tickstore_lib._collection, 'find', side_effect=tickstore_lib._collection.find) as f:
        df = tickstore_lib.read('SYM', date_range=DateRange(20130101, 20130103), columns=None)
        assert_array_equal(df['b'].values, np.array([2., 3., 5.]))
        assert tickstore_lib._collection.find(f.call_args_list[-1][0][0]).count() == 1
        df = tickstore_lib.read('SYM', date_range=DateRange(20130102, 20130103), columns=None)
        assert_array_equal(df['b'].values, np.array([3., 5.]))
        assert tickstore_lib._collection.find(f.call_args_list[-1][0][0]).count() == 1
        df = tickstore_lib.read('SYM', date_range=DateRange(20130103, 20130103), columns=None)
        assert_array_equal(df['b'].values, np.array([5.]))
        assert tickstore_lib._collection.find(f.call_args_list[-1][0][0]).count() == 1
        df = tickstore_lib.read('SYM', date_range=DateRange(20130102, 20130104), columns=None)
        assert_array_equal(df['b'].values, np.array([3., 5., 7.]))
        assert tickstore_lib._collection.find(f.call_args_list[-1][0][0]).count() == 2
        df = tickstore_lib.read('SYM', date_range=DateRange(20130102, 20130105), columns=None)
        assert_array_equal(df['b'].values, np.array([3., 5., 7., 9.]))
        assert tickstore_lib._collection.find(f.call_args_list[-1][0][0]).count() == 2
        df = tickstore_lib.read('SYM', date_range=DateRange(20130103, 20130104), columns=None)
        assert_array_equal(df['b'].values, np.array([5., 7.]))
        assert tickstore_lib._collection.find(f.call_args_list[-1][0][0]).count() == 2
        df = tickstore_lib.read('SYM', date_range=DateRange(20130103, 20130105), columns=None)
        assert_array_equal(df['b'].values, np.array([5., 7., 9.]))
        assert tickstore_lib._collection.find(f.call_args_list[-1][0][0]).count() == 2
        df = tickstore_lib.read('SYM', date_range=DateRange(20130104, 20130105), columns=None)
        assert_array_equal(df['b'].values, np.array([7., 9.]))
        assert tickstore_lib._collection.find(f.call_args_list[-1][0][0]).count() == 1
        # Test the different open-closed behaviours
        df = tickstore_lib.read('SYM', date_range=DateRange(20130104, 20130105, CLOSED_CLOSED), columns=None)
        assert_array_equal(df['b'].values, np.array([7., 9.]))
        df = tickstore_lib.read('SYM', date_range=DateRange(20130104, 20130105, CLOSED_OPEN), columns=None)
        assert_array_equal(df['b'].values, np.array([7.]))
        df = tickstore_lib.read('SYM', date_range=DateRange(20130104, 20130105, OPEN_CLOSED), columns=None)
        assert_array_equal(df['b'].values, np.array([9.]))
        df = tickstore_lib.read('SYM', date_range=DateRange(20130104, 20130105, OPEN_OPEN), columns=None)
        assert_array_equal(df['b'].values, np.array([]))
def test_date_range_end_not_in_range(tickstore_lib):
    DUMMY_DATA = [
                  {'a': 1.,
                   'b': 2.,
                   'index': dt(2013, 1, 1, tzinfo=mktz('Europe/London'))
                   },
                  {'b': 3.,
                   'c': 4.,
                   'index': dt(2013, 1, 2, 10, 1, tzinfo=mktz('Europe/London'))
                   },
                  ]
    tickstore_lib.chunk_size = 1
    tickstore_lib.write('SYM', DUMMY_DATA)
    with patch.object(tickstore_lib._collection, 'find', side_effect=tickstore_lib._collection.find) as f:
        df = tickstore_lib.read('SYM', date_range=DateRange(20130101, dt(2013, 1, 2, 9, 0)), columns=None)
        assert_array_equal(df['b'].values, np.array([2.]))
        assert tickstore_lib._collection.find(f.call_args_list[-1][0][0]).count() == 1
@pytest.mark.parametrize('tz_name', ['UTC',
                                     'Europe/London',  # Sometimes ahead of UTC
                                     'America/New_York',  # Behind UTC
                                      ])
def test_date_range_default_timezone(tickstore_lib, tz_name):
    """
    We assume naive datetimes are user-local
    """
    DUMMY_DATA = [
                  {'a': 1.,
                   'b': 2.,
                   'index': dt(2013, 1, 1, tzinfo=mktz(tz_name))
                   },
                  # Half-way through the year
                  {'b': 3.,
                   'c': 4.,
                   'index': dt(2013, 7, 1, tzinfo=mktz(tz_name))
                   },
                  ]
    with patch('arctic.date._mktz.DEFAULT_TIME_ZONE_NAME', tz_name):
        tickstore_lib.chunk_size = 1
        tickstore_lib.write('SYM', DUMMY_DATA)
        df = tickstore_lib.read('SYM', date_range=DateRange(20130101, 20130701), columns=None)
        assert len(df) == 2
        assert df.index[1] == dt(2013, 7, 1, tzinfo=mktz(tz_name))
        assert df.index.tz == mktz(tz_name)
        df = tickstore_lib.read('SYM', date_range=DateRange(20130101, 20130101), columns=None)
        assert len(df) == 1
        df = tickstore_lib.read('SYM', date_range=DateRange(20130701, 20130701), columns=None)
        assert len(df) == 1
def test_date_range_no_bounds(tickstore_lib):
    DUMMY_DATA = [
                  {'a': 1.,
                   'b': 2.,
                   'index': dt(2013, 1, 1, tzinfo=mktz('Europe/London'))
                   },
                  {'a': 3.,
                   'b': 4.,
                   'index': dt(2013, 1, 30, tzinfo=mktz('Europe/London'))
                   },
                  {'b': 5.,
                   'c': 6.,
                   'index': dt(2013, 2, 2, 10, 1, tzinfo=mktz('Europe/London'))
                   },
                  ]
    tickstore_lib.chunk_size = 1
    tickstore_lib.write('SYM', DUMMY_DATA)
    # 1) No start, no end
    df = tickstore_lib.read('SYM', columns=None)
    assert_array_equal(df['b'].values, np.array([2., 4.]))
    # 1.2) Start before the real start
    df = tickstore_lib.read('SYM', date_range=DateRange(20121231), columns=None)
    assert_array_equal(df['b'].values, np.array([2., 4.]))
    # 2.1) Only go one month out
    df = tickstore_lib.read('SYM', date_range=DateRange(20130101), columns=None)
    assert_array_equal(df['b'].values, np.array([2., 4.]))
    # 2.2) Only go one month out
    df = tickstore_lib.read('SYM', date_range=DateRange(20130102), columns=None)
    assert_array_equal(df['b'].values, np.array([4.]))
    # 3) No start
    df = tickstore_lib.read('SYM', date_range=DateRange(end=20130102), columns=None)
    assert_array_equal(df['b'].values, np.array([2.]))
    # 4) Outside bounds
    df = tickstore_lib.read('SYM', date_range=DateRange(end=20131212), columns=None)
    assert_array_equal(df['b'].values, np.array([2., 4., 5.]))
def test_date_range_BST(tickstore_lib):
    DUMMY_DATA = [
                  {'a': 1.,
                   'b': 2.,
                   'index': dt(2013, 6, 1, 12, 00, tzinfo=mktz('Europe/London'))
                   },
                  {'a': 3.,
                   'b': 4.,
                   'index': dt(2013, 6, 1, 13, 00, tzinfo=mktz('Europe/London'))
                   },
                  ]
    tickstore_lib.chunk_size = 1
    tickstore_lib.write('SYM', DUMMY_DATA)
    df = tickstore_lib.read('SYM', columns=None)
    assert_array_equal(df['b'].values, np.array([2., 4.]))
#     df = tickstore_lib.read('SYM', columns=None, date_range=DateRange(dt(2013, 6, 1, 12),
#                                                                       dt(2013, 6, 1, 13)))
#     assert_array_equal(df['b'].values, np.array([2., 4.]))
    df = tickstore_lib.read('SYM', columns=None, date_range=DateRange(dt(2013, 6, 1, 12, tzinfo=mktz('Europe/London')),
                                                                            dt(2013, 6, 1, 13, tzinfo=mktz('Europe/London'))))
    assert_array_equal(df['b'].values, np.array([2., 4.]))
    df = tickstore_lib.read('SYM', columns=None, date_range=DateRange(dt(2013, 6, 1, 12, tzinfo=mktz('UTC')),
                                                                            dt(2013, 6, 1, 13, tzinfo=mktz('UTC'))))
    assert_array_equal(df['b'].values, np.array([4., ]))
def test_read_no_data(tickstore_lib):
    with pytest.raises(NoDataFoundException):
        tickstore_lib.read('missing_sym', DateRange(20131212, 20131212))
def test_write_no_tz(tickstore_lib):
    DUMMY_DATA = [
                  {'a': 1.,
                   'b': 2.,
                   'index': dt(2013, 6, 1, 12, 00)
                   }]
    with pytest.raises(ValueError):
        tickstore_lib.write('SYM', DUMMY_DATA)
def test_read_out_of_order(tickstore_lib):
    DUMMY_DATA = [
                  {'a': 1.,
                   'b': 2.,
                   'index': dt(2013, 6, 1, 12, 00, tzinfo=mktz('UTC'))
                   },
                  {'a': 3.,
                   'b': 4.,
                   'index': dt(2013, 6, 1, 11, 00, tzinfo=mktz('UTC'))  # Out-of-order
                   },
                  {'a': 3.,
                   'b': 4.,
                   'index': dt(2013, 6, 1, 13, 00, tzinfo=mktz('UTC'))
                   },
                  ]
    tickstore_lib.chunk_size = 3
    tickstore_lib.write('SYM', DUMMY_DATA)
    tickstore_lib.read('SYM', columns=None)
    assert len(tickstore_lib.read('SYM', columns=None, date_range=DateRange(dt(2013, 6, 1, tzinfo=mktz('UTC')), dt(2013, 6, 2, tzinfo=mktz('UTC'))))) == 3
    assert len(tickstore_lib.read('SYM', columns=None, date_range=DateRange(dt(2013, 6, 1, tzinfo=mktz('UTC')), dt(2013, 6, 1, 12, tzinfo=mktz('UTC'))))) == 2
def test_read_longs(tickstore_lib):
    DUMMY_DATA = [
                  {'a': 1,
                   'index': dt(2013, 6, 1, 12, 00, tzinfo=mktz('Europe/London'))
                   },
                  {
                   'b': 4,
                   'index': dt(2013, 6, 1, 13, 00, tzinfo=mktz('Europe/London'))
                   },
                  ]
    tickstore_lib.chunk_size = 3
    tickstore_lib.write('SYM', DUMMY_DATA)
    tickstore_lib.read('SYM', columns=None)
    read = tickstore_lib.read('SYM', columns=None, date_range=DateRange(dt(2013, 6, 1), dt(2013, 6, 2)))
    assert read['a'][0] == 1
    assert np.isnan(read['b'][0])
def test_read_with_image(tickstore_lib):
    DUMMY_DATA = [
              {'a': 1.,
               'index': dt(2013, 1, 1, 11, 00, tzinfo=mktz('Europe/London'))
               },
              {
               'b': 4.,
               'index': dt(2013, 1, 1, 12, 00, tzinfo=mktz('Europe/London'))
               },
              ]
    # Add an image
    tickstore_lib.write('SYM', DUMMY_DATA)
    tickstore_lib._collection.update_one({},
                                     {'$set':
                                      {'im': {'i':
                                              {'a': 37.,
                                               'c': 2.,
                                               },
                                              't': dt(2013, 1, 1, 10, tzinfo=mktz('Europe/London'))
                                              }
                                       }
                                      }
                                     )
    dr = DateRange(dt(2013, 1, 1), dt(2013, 1, 2))
    # tickstore_lib.read('SYM', columns=None)
    df = tickstore_lib.read('SYM', columns=None, date_range=dr)
    assert df['a'][0] == 1
    # Read with the image as well - all columns
    df = tickstore_lib.read('SYM', columns=None, date_range=dr, include_images=True)
    assert set(df.columns) == set(('a', 'b', 'c'))
    assert_array_equal(df['a'].values, np.array([37, 1, np.nan]))
    assert_array_equal(df['b'].values, np.array([np.nan, np.nan, 4]))
    assert_array_equal(df['c'].values, np.array([2, np.nan, np.nan]))
    assert df.index[0] == dt(2013, 1, 1, 10, tzinfo=mktz('Europe/London'))
    assert df.index[1] == dt(2013, 1, 1, 11, tzinfo=mktz('Europe/London'))
    assert df.index[2] == dt(2013, 1, 1, 12, tzinfo=mktz('Europe/London'))
    # Read just columns from the updates
    df = tickstore_lib.read('SYM', columns=('a', 'b'), date_range=dr, include_images=True)
    assert set(df.columns) == set(('a', 'b'))
    assert_array_equal(df['a'].values, np.array([37, 1, np.nan]))
    assert_array_equal(df['b'].values, np.array([np.nan, np.nan, 4]))
    assert df.index[0] == dt(2013, 1, 1, 10, tzinfo=mktz('Europe/London'))
    assert df.index[1] == dt(2013, 1, 1, 11, tzinfo=mktz('Europe/London'))
    assert df.index[2] == dt(2013, 1, 1, 12, tzinfo=mktz('Europe/London'))
    
    # Read one column from the updates
    df = tickstore_lib.read('SYM', columns=('a',), date_range=dr, include_images=True)
    assert set(df.columns) == set(('a',))
    assert_array_equal(df['a'].values, np.array([37, 1, np.nan]))
    assert df.index[0] == dt(2013, 1, 1, 10, tzinfo=mktz('Europe/London'))
    assert df.index[1] == dt(2013, 1, 1, 11, tzinfo=mktz('Europe/London'))
    assert df.index[2] == dt(2013, 1, 1, 12, tzinfo=mktz('Europe/London'))
    # Read just the image column
    df = tickstore_lib.read('SYM', columns=['c'], date_range=dr, include_images=True)
    assert set(df.columns) == set(['c'])
    assert_array_equal(df['c'].values, np.array([2, np.nan, np.nan]))
    assert df.index[0] == dt(2013, 1, 1, 10, tzinfo=mktz('Europe/London'))
    assert df.index[1] == dt(2013, 1, 1, 11, tzinfo=mktz('Europe/London'))
    assert df.index[2] == dt(2013, 1, 1, 12, tzinfo=mktz('Europe/London'))
 | 
	lgpl-2.1 | 
| 
	pwwang/bioprocs | 
	bioprocs/scripts/tumhet/pPyClone.py | 
	1 | 
	12468 | 
	import cmdy
from pysam import VariantFile
from diot import Diot
from os import path, environ
from bioprocs.utils import logger, shell2 as shell
from bioprocs.utils.tsvio2 import TsvReader, TsvWriter, TsvRecord
inmuts   = {{i.muts | quote}}
incnvs   = {{i.cnvs | quote}}
outdir   = {{o.outdir | quote}}
pyclone  = {{args.pyclone | quote}}
bcftools = {{args.bcftools | quote}}
bedtools = {{args.bedtools | quote}}
params   = {{args.params | repr}}
nthread  = {{args.nthread | int }}
refgene  = {{args.refgene | quote }}
mutctrl  = {{args.mutctrl | repr}}
cnctrl   = {{args.cnctrl | repr}}
env = environ.copy()
env.update(dict(
	OPENBLAS_NUM_THREADS = str(nthread),
	OMP_NUM_THREADS      = str(nthread),
	NUMEXPR_NUM_THREADS  = str(nthread),
	MKL_NUM_THREADS      = str(nthread)
) if nthread else {})
shell.load_config(pyclone = dict(
	_exe = pyclone,
	_cwd = outdir,
	_env = env
), bcftools = bcftools, bedtools = bedtools)
def defaultCNs(chrom, gt):
	# normal_cn, minor_cn, major_cn
	ret = [2, 0, 2]
	if gt == 'AA':
		ret[0] = 1 if chrom == 'chrY' else 2 # sex information needed for chrX
		ret[1] = 0
		ret[2] = ret[0]
	elif gt == 'AB':
		ret[0] = 2
		ret[1] = 1
		ret[2] = 1
	else: # BB
		ret[0] = 1 if chrom == 'chrY' else 2
		ret[1] = ret[0]
		ret[2] = 0
	return ret
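# For example (a sketch of the defaults above): defaultCNs('chr1', 'AB')
# gives [2, 1, 1] (diploid, heterozygous), while defaultCNs('chrY', 'AA')
# gives [1, 0, 1] since chrY is treated as haploid; chrX would need sex
# information, as noted in the comment above.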
def singleSampleMutVCF2BED(vcffiles):
	"""
	Convert single(paired)-sample VCFs to a BED file like:
	#CHROM  START   END	    NAME      GENOTYPE REFCOUNTS VARCOUNTS CASE    VARFREQ
	chr1    70820   70820   chr:70820 BB       1083      1996      NA12156 0.648
	"""
	ret = {}
	for vcffile in vcffiles:
		samples = shell.bcftools.query(l = vcffile).strip().splitlines()
		assert len(samples) <= 2
		samidx  = 0 if len(samples) == 1 or mutctrl in (1, -1, None) else 1
		sample  = samples[samidx]
		ret[sample] = path.join(outdir, sample + '.muts.bed')
		writer = TsvWriter(ret[sample])
		writer.cnames = ['CHROM', 'START', 'END', 'NAME', 'GENOTYPE', 'REFCOUNTS', 'VARCOUNTS', 'CASE', 'VARFREQ']
		for line in shell.bcftools.query(
			_ = vcffile,
			s = sample,
			f = r'[%CHROM\t%POS\t%POS\t%CHROM:%POS\t%GT\t%AD{0}\t%AD{1}\t%SAMPLE\t%AF\n]'):
			items = line.split('\t')
			if items[4] in ('0/0', '0|0'):
				items[4] = 'AA'
			elif items[4] in ('0/1', '0|1'):
				items[4] = 'AB'
			elif items[4] in ('1/1', '1|1'):
				items[4] = 'BB'
			writer.write(items)
		writer.close()
	return ret
def multiSampleMutVCF2BED(vcffile):
	ret = {}
	writers = {}
	samples = shell.bcftools.query(l = vcffile).strip().splitlines()
	samples = [sample for i, sample in enumerate(samples)
		if mutctrl is None or i != (mutctrl if mutctrl >= 0 else len(samples) + mutctrl)]
	for sample in samples:
		ret[sample] = path.join(outdir, sample + '.muts.bed')
		writer = TsvWriter(ret[sample])
		writer.cnames = ['CHROM', 'START', 'END', 'NAME', 'GENOTYPE', 'REFCOUNTS', 'VARCOUNTS', 'CASE', 'VARFREQ']
		writers[sample] = writer
	for line in shell.bcftools.query(
		_ = vcffile,
		s = ','.join(samples),
		f = r'[%CHROM\t%POS\t%POS\t%CHROM:%POS\t%GT\t%AD{0}\t%AD{1}\t%SAMPLE\t%AF\n]'):
		items = line.split('\t')
		writer = writers[items[7]]
		if items[4] in ('0/0', '0|0'):
			items[4] = 'AA'
		elif items[4] in ('0/1', '0|1'):
			items[4] = 'AB'
		elif items[4] in ('1/1', '1|1'):
			items[4] = 'BB'
		writer.write(items)
	for writer in writers.values():
		writer.close()
	return ret
def MAF2BED(maffile):
	reader = TsvReader(maffile)
	if 't_alt_count' not in reader.cnames:
		raise ValueError('t_alt_count not found in MAF file.')
	if 't_ref_count' not in reader.cnames:
		raise ValueError('t_ref_count not found in MAF file.')
	ret = {}
	writers = {}
	for r in reader:
		r.CHROM = r.Chromosome
		r.START = r.Start_Position
		r.END   = r.End_Position
		r.NAME  = '%s:%s' % (r.CHROM, r.START)
		r.GENOTYPE = 'AA' if r.Tumor_Seq_Allele1 == r.Tumor_Seq_Allele2 \
						and r.Tumor_Seq_Allele1 == r.Reference_Allele else \
					 'BB' if r.Tumor_Seq_Allele1 == r.Tumor_Seq_Allele2 \
						and r.Tumor_Seq_Allele1 != r.Reference_Allele else 'AB'
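		# AA: both tumor alleles equal the reference allele; BB: both equal the
		# same non-reference allele; AB: heterozygous.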
		r.REFCOUNTS = r.t_ref_count
		r.VARCOUNTS = r.t_alt_count
		r.CASE      = r.Tumor_Sample_Barcode
		try:
			varcount  = float(r.VARCOUNTS)
			refcount  = float(r.REFCOUNTS)
			depth     = float(r.get('t_depth', 0))
			if depth == 0:
				depth = varcount + refcount
			r.VARFREQ = varcount / depth
		except (ValueError, TypeError, ZeroDivisionError):
			logger.warning('Variant %s drop due to unknown t_ref_count(%s) or t_alt_count(%s)' % (
				r.NAME,
				r.t_ref_count,
				r.t_alt_count
			))
			continue
		if r.CASE not in ret:
			ret[r.CASE] = path.join(outdir, r.CASE + '.muts.bed')
			writers[r.CASE] = TsvWriter(ret[r.CASE])
			writers[r.CASE].cnames = ['CHROM', 'START', 'END', 'NAME', 'GENOTYPE', 'REFCOUNTS', 'VARCOUNTS', 'CASE', 'VARFREQ']
		writers[r.CASE].write(r)
	for writer in writers.values():
		writer.close()
	reader.close()
	return ret
def PyCloneMutTSV2BED(pcfile):
	ret = {}
	writers = {}
	reader = TsvReader(pcfile)
	for r in reader:
		if ':' not in r.mutation_id:
			raise ValueError("`mutation_id` should end with `<chr>:<pos>`")
		chr, pos = r.mutation_id.split(':')[-2:]
		r.CHROM     = chr
		r.START     = pos
		r.END       = pos
		r.NAME      = '%s:%s' % (chr, pos)
		r.GENOTYPE  = r.genotype
		r.REFCOUNTS = r.ref_counts
		r.VARCOUNTS = r.var_counts
		r.CASE      = r.variant_case
		r.VARFREQ   = r.variant_freq
		if r.CASE not in ret:
			ret[r.CASE] = path.join(outdir, r.CASE + '.muts.bed')
			writers[r.CASE] = TsvWriter(ret[r.CASE])
			writers[r.CASE].cnames = ['CHROM', 'START', 'END', 'NAME', 'GENOTYPE', 'REFCOUNTS', 'VARCOUNTS', 'CASE', 'VARFREQ']
		writers[r.CASE].write(r)
	for writer in writers.values():
		writer.close()
	reader.close()
	return ret
def singleSampleCnVCF2BED(vcffiles):
	"""
	Convert single(paired)-sample VCFs to a BED file like:
	#CHROM  START   END	    GENOTYPE NORMAL_CN MINOR_CN MAJOR_CN CASE
	chr1    70820   70820   BB       2         0        2        NA12156
	"""
	ret = {}
	for vcffile in vcffiles:
		samples = shell.bcftools.query(l = vcffile).strip().splitlines()
		assert len(samples) <= 2
		samidx  = 0 if len(samples) == 1 or cnctrl in (1, -1, None) else 1
		sample  = samples[samidx]
		ret[sample] = path.join(outdir, sample + '.cn.bed')
		writer = TsvWriter(ret[sample])
		writer.cnames = ['CHROM', 'START', 'END', 'GENOTYPE', 'NORMAL_CN', 'MINOR_CN', 'MAJOR_CN', 'CASE']
		for line in shell.bcftools.query(
			_ = vcffile,
			s = sample,
			f = r'[%CHROM\t%POS\t%POS\t%GT\t2\t0\t2\t%SAMPLE\n]'):
			items = line.split('\t')
			if items[3] in ('0/0', '0|0'):
				items[3] = 'AA'
			elif items[3] in ('0/1', '0|1'):
				items[3] = 'AB'
			elif items[3] in ('1/1', '1|1'):
				items[3] = 'BB'
			items[4], items[5], items[6] = defaultCNs(items[0], items[3])
			writer.write(items)
		writer.close()
	return ret
def multiSampleCnVCF2BED(vcffile):
	ret = {}
	writers = {}
	samples = shell.bcftools.query(l = vcffile).strip().splitlines()
	samples = [sample for i, sample in enumerate(samples)
		if cnctrl is None or i != (cnctrl if cnctrl >= 0 else len(samples) + cnctrl)]
	for sample in samples:
		ret[sample] = path.join(outdir, sample + '.cn.bed')
		writer = TsvWriter(ret[sample])
		writer.cnames = ['CHROM', 'START', 'END', 'GENOTYPE', 'NORMAL_CN', 'MINOR_CN', 'MAJOR_CN', 'CASE']
		writers[sample] = writer
	for line in shell.bcftools.query(
		_ = vcffile,
		s = ','.join(samples),
		f = r'[%CHROM\t%POS\t%POS\t%GT\t2\t0\t2\t%SAMPLE\n]'):
		items = line.split('\t')
		if items[3] in ('0/0', '0|0'):
			items[3] = 'AA'
		elif items[3] in ('0/1', '0|1'):
			items[3] = 'AB'
		elif items[3] in ('1/1', '1|1'):
			items[3] = 'BB'
		items[4], items[5], items[6] = defaultCNs(items[0], items[3])
		writer = writers[items[7]]
		writer.write(items)
	for writer in writers.values():
		writer.close()
	return ret
def PyCloneCnTSV2BED(pcfile):
	reader = TsvReader(pcfile)
	ret = {}
	writers = {}
	for r in reader:
		if ':' not in r.mutation_id:
			raise ValueError("`mutation_id` should end with `<chr>:<pos>`")
		chr, pos = r.mutation_id.split(':')[-2:]
		r.CHROM     = chr
		r.START     = pos
		r.END       = pos
		r.GENOTYPE  = r.genotype
		r.CASE      = r.variant_case
		r.NORMAL_CN = r.normal_cn
		r.MINOR_CN  = r.minor_cn
		r.MAJOR_CN  = r.major_cn
		if r.CASE not in ret:
			ret[r.CASE] = path.join(outdir, r.CASE + '.cn.bed')
			writers[r.CASE] = TsvWriter(ret[r.CASE])
			writers[r.CASE].cnames = ['CHROM', 'START', 'END', 'GENOTYPE', 'NORMAL_CN', 'MINOR_CN', 'MAJOR_CN', 'CASE']
		writers[r.CASE].write(r)
	for writer in writers.values():
		writer.close()
	reader.close()
	return ret
def GetPyCloneTsv(mutfile, outfile, cnfile = None):
	# mutbed:
	# #CHROM  START   END	    NAME      GENOTYPE REFCOUNTS VARCOUNTS CASE    VARFREQ
	# cnbed:
	# #CHROM  START   END	    GENOTYPE NORMAL_CN MINOR_CN MAJOR_CN CASE
	# outfile:
	# mutation_id	ref_counts	var_counts	normal_cn	minor_cn	major_cn	variant_case	variant_freq	genotype
	writer = TsvWriter(outfile)
	writer.cnames = ['mutation_id', 'ref_counts', 'var_counts', 'normal_cn', 'minor_cn', 'major_cn', 'variant_case', 'variant_freq', 'genotype']
	writer.writeHead()
	if not cnfile:
		reader = TsvReader(mutfile)
		reader.cnames = ['CHROM', 'START', 'END', 'NAME', 'GENOTYPE', 'REFCOUNTS', 'VARCOUNTS', 'CASE', 'VARFREQ']
		for r in reader:
			r.mutation_id = r.NAME
			r.ref_counts = r.REFCOUNTS
			r.var_counts = r.VARCOUNTS
			r.normal_cn, r.minor_cn, r.major_cn = defaultCNs(r.CHROM, r.GENOTYPE)
			r.variant_case = r.CASE
			r.variant_freq = r.VARFREQ
			r.genotype = r.GENOTYPE
			writer.write(r)
		writer.close()
		return
	for line in shell.bedtools.intersect(
		a = mutfile,
		b = cnfile,
		loj = True):
		# CHROM  START END NAME GENOTYPE REFCOUNTS VARCOUNTS CASE VARFREQ
		# 0      1     2   3    4        5         6         7    8
		# CHROM START END GENOTYPE NORMAL_CN MINOR_CN MAJOR_CN CASE
		# 9     10    11  12       13        14       15       16
		parts = line.split('\t')
		rec = TsvRecord()
		rec.mutation_id = parts[3]
		rec.ref_counts = parts[5]
		rec.var_counts = parts[6]
		rec.variant_case = parts[7]
		rec.variant_freq = parts[8]
		rec.genotype = parts[4]
		if parts[9] == '.':
			rec.normal_cn, rec.minor_cn, rec.major_cn = defaultCNs(parts[0], parts[4])
		else:
			rec.normal_cn, rec.minor_cn, rec.major_cn = parts[13:16]
		writer.write(rec)
	writer.close()
params  = Diot(in_files = [])
mutbeds = {}
cnbeds  = {}
if path.isfile(inmuts):
	if inmuts.endswith('.vcf') or inmuts.endswith('.vcf.gz'):
		mutbeds = multiSampleMutVCF2BED(inmuts)
	elif inmuts.endswith('.maf') or inmuts.endswith('.maf.gz'):
		mutbeds = MAF2BED(inmuts)
	else:
		mutbeds = PyCloneMutTSV2BED(inmuts)
else:
	inmuts = inmuts.split(',')
	mutbeds = singleSampleMutVCF2BED(inmuts)
if incnvs and path.isfile(incnvs):
	if incnvs.endswith('.vcf') or incnvs.endswith('.vcf.gz'):
		cnbeds = multiSampleCnVCF2BED(incnvs)
	else:
		cnbeds = PyCloneCnTSV2BED(incnvs)
elif incnvs:
	incnvs = incnvs.split(',')
	cnbeds = singleSampleCnVCF2BED(incnvs)
for sample, mutbed in mutbeds.items():
	pcfile = path.join(outdir, sample + '.tsv')
	params.in_files.append(pcfile)
	GetPyCloneTsv(mutbed, pcfile, cnbeds.get(sample))
#PyClone run_analysis_pipeline --in_files A.tsv B.tsv C.tsv --working_dir pyclone_analysis
# create matplotlibrc file
with open(path.join(outdir, 'matplotlibrc'), 'w') as f:
	f.write('backend: Agg\n')
params.working_dir      = outdir
params.prior            = 'total_copy_number'
params.plot_file_format = 'svg'
params._env             = env
params._raise = False
c = shell.fg.pyclone.run_analysis_pipeline(**params)
if c.rc != 0:
	# Let it go if 'No mutations found in common across samples'
	exit(0)
# annotate tables/loci.tsv with genes
"""
mutation_id	sample_id	cluster_id	cellular_prevalence	cellular_prevalence_std	variant_allele_frequency
chr10:102776870	AK_nevus_1	0	0.656	0.127	0.417
chr10:102776870	AK_nevus_2	0	0.432	0.126	0.333
"""
locifile = path.join(outdir, 'tables', 'loci.tsv')
locifilebed = path.join(outdir, 'tables', 'loci.bed')
reader = TsvReader(locifile)
writer = TsvWriter(locifilebed)
writer.cnames = ['CHR', 'START', 'END'] + reader.cnames
for r in reader:
	r.CHR, r.START = r.mutation_id.split(':')
	r.END = r.START
	writer.write(r)
reader.close()
writer.close()
writer = TsvWriter(locifile)
writer.cnames = reader.cnames + ['gene']
writer.writeHead()
for line in shell.bedtools.intersect(a = locifilebed, b = refgene, loj = True, _iter = True):
	parts = line.split('\t')
	r = parts[3:9] + [parts[17].split('; ')[0][9:-1]]
	writer.write(r)
writer.close()
 | 
	mit | 
| 
	ch3ll0v3k/scikit-learn | 
	sklearn/ensemble/voting_classifier.py | 
	178 | 
	8006 | 
	"""
Soft Voting/Majority Rule classifier.
This module contains a Soft Voting/Majority Rule classifier for
classification estimators.
"""
# Authors: Sebastian Raschka <[email protected]>,
#          Gilles Louppe <[email protected]>
#
# Licence: BSD 3 clause
import numpy as np
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import TransformerMixin
from ..base import clone
from ..preprocessing import LabelEncoder
from ..externals import six
class VotingClassifier(BaseEstimator, ClassifierMixin, TransformerMixin):
    """Soft Voting/Majority Rule classifier for unfitted estimators.
    Read more in the :ref:`User Guide <voting_classifier>`.
    Parameters
    ----------
    estimators : list of (string, estimator) tuples
        Invoking the `fit` method on the `VotingClassifier` will fit clones
        of those original estimators that will be stored in the class attribute
        `self.estimators_`.
    voting : str, {'hard', 'soft'} (default='hard')
        If 'hard', uses predicted class labels for majority rule voting.
        Else if 'soft', predicts the class label based on the argmax of
        the sums of the predicted probabilities, which is recommended for
        an ensemble of well-calibrated classifiers.
    weights : array-like, shape = [n_classifiers], optional (default=`None`)
        Sequence of weights (`float` or `int`) to weight the occurrences of
        predicted class labels (`hard` voting) or class probabilities
        before averaging (`soft` voting). Uses uniform weights if `None`.
    Attributes
    ----------
    classes_ : array-like, shape = [n_predictions]
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.linear_model import LogisticRegression
    >>> from sklearn.naive_bayes import GaussianNB
    >>> from sklearn.ensemble import RandomForestClassifier
    >>> clf1 = LogisticRegression(random_state=1)
    >>> clf2 = RandomForestClassifier(random_state=1)
    >>> clf3 = GaussianNB()
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> y = np.array([1, 1, 1, 2, 2, 2])
    >>> eclf1 = VotingClassifier(estimators=[
    ...         ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard')
    >>> eclf1 = eclf1.fit(X, y)
    >>> print(eclf1.predict(X))
    [1 1 1 2 2 2]
    >>> eclf2 = VotingClassifier(estimators=[
    ...         ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
    ...         voting='soft')
    >>> eclf2 = eclf2.fit(X, y)
    >>> print(eclf2.predict(X))
    [1 1 1 2 2 2]
    >>> eclf3 = VotingClassifier(estimators=[
    ...        ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
    ...        voting='soft', weights=[2,1,1])
    >>> eclf3 = eclf3.fit(X, y)
    >>> print(eclf3.predict(X))
    [1 1 1 2 2 2]
    >>>
    """
    def __init__(self, estimators, voting='hard', weights=None):
        self.estimators = estimators
        self.named_estimators = dict(estimators)
        self.voting = voting
        self.weights = weights
    def fit(self, X, y):
        """ Fit the estimators.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like, shape = [n_samples]
            Target values.
        Returns
        -------
        self : object
        """
        if isinstance(y, np.ndarray) and len(y.shape) > 1 and y.shape[1] > 1:
            raise NotImplementedError('Multilabel and multi-output'
                                      ' classification is not supported.')
        if self.voting not in ('soft', 'hard'):
            raise ValueError("Voting must be 'soft' or 'hard'; got (voting=%r)"
                             % self.voting)
        if self.weights and len(self.weights) != len(self.estimators):
            raise ValueError('Number of classifiers and weights must be equal'
                             '; got %d weights, %d estimators'
                             % (len(self.weights), len(self.estimators)))
        self.le_ = LabelEncoder()
        self.le_.fit(y)
        self.classes_ = self.le_.classes_
        self.estimators_ = []
        for name, clf in self.estimators:
            fitted_clf = clone(clf).fit(X, self.le_.transform(y))
            self.estimators_.append(fitted_clf)
        return self
    def predict(self, X):
        """ Predict class labels for X.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.
        Returns
        ----------
        maj : array-like, shape = [n_samples]
            Predicted class labels.
        """
        if self.voting == 'soft':
            maj = np.argmax(self.predict_proba(X), axis=1)
        else:  # 'hard' voting
            predictions = self._predict(X)
            maj = np.apply_along_axis(lambda x:
                                      np.argmax(np.bincount(x,
                                                weights=self.weights)),
                                      axis=1,
                                      arr=predictions)
        maj = self.le_.inverse_transform(maj)
        return maj
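    # Worked example of the 'hard' rule above (a sketch): with three
    # classifiers predicting encoded labels [0, 1, 1] and weights [2, 1, 1],
    # np.bincount([0, 1, 1], weights=[2, 1, 1]) gives [2., 2.]; argmax breaks
    # the tie in favour of the first (lowest encoded) class, so the ensemble
    # predicts class 0.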
    def _collect_probas(self, X):
        """Collect results from clf.predict calls. """
        return np.asarray([clf.predict_proba(X) for clf in self.estimators_])
    def _predict_proba(self, X):
        """Predict class probabilities for X in 'soft' voting """
        avg = np.average(self._collect_probas(X), axis=0, weights=self.weights)
        return avg
    @property
    def predict_proba(self):
        """Compute probabilities of possible outcomes for samples in X.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.
        Returns
        ----------
        avg : array-like, shape = [n_samples, n_classes]
            Weighted average probability for each class per sample.
        """
        if self.voting == 'hard':
            raise AttributeError("predict_proba is not available when"
                                 " voting=%r" % self.voting)
        return self._predict_proba
    def transform(self, X):
        """Return class labels or probabilities for X for each estimator.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.
        Returns
        -------
        If `voting='soft'`:
          array-like = [n_classifiers, n_samples, n_classes]
            Class probabilities calculated by each classifier.
        If `voting='hard'`:
          array-like = [n_classifiers, n_samples]
            Class labels predicted by each classifier.
        """
        if self.voting == 'soft':
            return self._collect_probas(X)
        else:
            return self._predict(X)
    def get_params(self, deep=True):
        """Return estimator parameter names for GridSearch support"""
        if not deep:
            return super(VotingClassifier, self).get_params(deep=False)
        else:
            out = super(VotingClassifier, self).get_params(deep=False)
            out.update(self.named_estimators.copy())
            for name, step in six.iteritems(self.named_estimators):
                for key, value in six.iteritems(step.get_params(deep=True)):
                    out['%s__%s' % (name, key)] = value
            return out
    def _predict(self, X):
        """Collect results from clf.predict calls. """
        return np.asarray([clf.predict(X) for clf in self.estimators_]).T
 | 
	bsd-3-clause | 
| 
	uditsharma7/Machine-Learning | 
	MLP_Classifier/mlp.py | 
	1 | 
	3408 | 
	import numpy as np
from numpy import ravel
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from pybrain.datasets import ClassificationDataSet
from pybrain.utilities import percentError
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer
trndata=ClassificationDataSet(64,nb_classes=10)
tstdata=ClassificationDataSet(64,nb_classes=10)
#tstdata = SupervisedDataSet( 64, 10)
train = open('optdigits_tra.csv','r')
test = open('optdigits_tes.csv','r')
outdata=()
for line in train.readlines():
	data = [int(x) for x in line.strip().split(',') if x != '']
	indata =  tuple(data[:64])
	outdata = tuple(data[64:])
	trndata.addSample(indata,outdata)
Y_test=[]
for line in test.readlines():
	data = [int(x) for x in line.strip().split(',') if x != '']
	Y_test.append(data[64:])
	indata =  tuple(data[:64])
	outdata = tuple(data[64:])
	tstdata.addSample(indata,outdata)
trndata._convertToOneOfMany( )
tstdata._convertToOneOfMany( )
NUM_EPOCHS =100 
NUM_HIDDEN_UNITS = 25
n = buildNetwork(trndata.indim,NUM_HIDDEN_UNITS,10,trndata.outdim,bias=True)
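# buildNetwork(64, 25, 10, 10) builds a feed-forward network: a 64-unit input
# layer, two hidden layers of 25 and 10 units, and a 10-unit output layer
# (one unit per digit class), with a bias unit (bias=True).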
trainer = BackpropTrainer(n,trndata,learningrate=0.01,momentum=0.7,verbose=True)
#trainer.trainUntilConvergence(trndata,NUM_EPOCHS)
for i in range(NUM_EPOCHS):
	trainer.train()
Y_pred = []
Y_pred=(np.array([(n.activate(x)).argmax() for x, _ in tstdata]))
Y_test=np.array(Y_test)
Y_pred=np.array(Y_pred)
Y_test=Y_test.ravel()
print Y_test
print Y_pred
print "Confusion Matrix"
cm = confusion_matrix(Y_test, Y_pred)
print cm
print "Accuracy on test set: %7.4f" % (100*accuracy_score(Y_test,Y_pred)) 
c0_a = 0
c0_p = 0
c1_a = 0
c1_p = 0
c2_a = 0
c2_p = 0
c3_a = 0
c3_p = 0
c4_a = 0
c4_p = 0
c5_a = 0
c5_p = 0
c6_a = 0
c6_p = 0
c7_a = 0
c7_p = 0
c8_a = 0
c8_p = 0
c9_a = 0
c9_p = 0
print
for i in range(len(Y_test)):
    if Y_test[i] == 0:
        c0_a += 1
        if Y_pred[i] == 0:
            c0_p += 1
    elif Y_test[i]==1:
        c1_a +=1
        if Y_pred[i] == 1:
            c1_p += 1
    elif Y_test[i]==2:
        c2_a +=1
        if Y_pred[i] == 2:
            c2_p += 1
    elif Y_test[i]==3:
        c3_a +=1
        if Y_pred[i] == 3:
            c3_p += 1
    elif Y_test[i]==4:
        c4_a +=1
        if Y_pred[i] == 4:
            c4_p += 1
    elif Y_test[i]==5:
        c5_a +=1
        if Y_pred[i] == 5:
            c5_p += 1
    elif Y_test[i]==6:
        c6_a +=1
        if Y_pred[i] == 6:
            c6_p += 1
    elif Y_test[i]==7:
        c7_a +=1
        if Y_pred[i] == 7:
            c7_p += 1
    elif Y_test[i]==8:
        c8_a +=1
        if Y_pred[i] == 8:
            c8_p += 1
    elif Y_test[i]==9:
        c9_a +=1
        if Y_pred[i] == 9:
            c9_p += 1
print
if c0_a !=0:
    print "Class 0 Accuracy: %f"%(c0_p*100.0/c0_a)
if c1_a !=0:
    print "Class 1 Accuracy: %f"%(c1_p*100.0/c1_a)
if c2_a !=0:
    print "Class 2 Accuracy: %f"%(c2_p*100.0/c2_a)
if c3_a !=0:
    print "Class 3 Accuracy: %f"%(c3_p*100.0/c3_a)
if c4_a !=0:
    print "Class 4 Accuracy: %f"%(c4_p*100.0/c4_a)
if c5_a !=0:
    print "Class 5 Accuracy: %f"%(c5_p*100.0/c5_a)
if c6_a !=0:
    print "Class 6 Accuracy: %f"%(c6_p*100.0/c6_a)
if c7_a !=0:
    print "Class 7 Accuracy: %f"%(c7_p*100.0/c7_a)
if c8_a !=0:
    print "Class 8 Accuracy: %f"%(c8_p*100.0/c8_a)
if c9_a !=0:
    print "Class 9 Accuracy: %f"%(c9_p*100.0/c9_a)
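# Added sketch (not in the original script): the same per-class accuracies can
# be read directly off the confusion matrix computed above (rows = true class,
# columns = predicted class), assuming all ten digit classes appear in Y_test
# so that row index equals class label.
row_totals = cm.sum(axis=1)
for cls in range(len(row_totals)):
    if row_totals[cls] != 0:
        print "Class %d Accuracy (from confusion matrix): %f" % (cls, cm[cls, cls] * 100.0 / row_totals[cls])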
 | 
	gpl-3.0 | 
| 
	anoopkunchukuttan/transliterator | 
	src/cfilt/transliteration/phonetic_sim.py | 
	1 | 
	11004 | 
	#Copyright Anoop Kunchukuttan 2015 - present
# 
#This file is part of the IITB Unsupervised Transliterator 
#
#IITB Unsupervised Transliterator is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#IITB Unsupervised Transliterator  is distributed in the hope that it will be useful, 
#but WITHOUT ANY WARRANTY; without even the implied warranty of 
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the 
#GNU General Public License for more details. 
#
#You should have received a copy of the GNU General Public License 
#along with IITB Unsupervised Transliterator.   If not, see <http://www.gnu.org/licenses/>.
from indicnlp import loader
from indicnlp import langinfo
from indicnlp.script.indic_scripts import * 
import numpy as np
import gzip
import pandas as pd
import codecs,sys 
from cfilt.transliteration.analysis import align
def equal(v1,v2): 
    return 0.0 if  np.sum( xor_vectors(v1, v2)) > 0  else 1.0
def dice(v1,v2):
    dotprod=2*float(np.dot( v1, v2.T ))
    return dotprod/float(len(v1)+len(v2))
def jaccard(v1,v2):
    dotprod=float(np.dot( v1, v2.T ))
    return dotprod/float(len(v1)+len(v2)-dotprod)
def cosine(v1,v2):
    dotprod=float(np.dot( v1, v2.T ))
    norm1=float(np.dot( v1, v1.T ))
    norm2=float(np.dot( v2, v2.T ))
    return ((dotprod)/(np.sqrt(norm1*norm2)+0.00001))
def dotprod(v1,v2): 
    return float(np.dot( v1, v2.T ))
def sim1(v1,v2,base=5.0): 
    return np.power(base,dotprod(v1,v2)) 
def softmax(v1,v2): 
    return sim1(v1,v2,np.e)
def sim2(v1,v2,base=5.0): 
    ## Weight vector
    # phonetic_weight_vector=np.array([
    #     60.0,60.0,60.0,60.0,60.0,60.0,
    #     1.0,1.0,
    #     30.0,30.0,30.0,
    #     40.0,40.0,
    #     50.0,50.0,50.0,50.0,50.0,
    #     40.0,40.0,40.0,40.0,40.0,
    #     5.0,5.0,
    #     10.0,10.0,
    #     10.0,10.0,
    # ])
    
    phonetic_weight_vector=np.array([
        #6.0,6.0,6.0,6.0,6.0,6.0,
        0.01,0.01,0.01,0.01,0.01,0.01,
        0.1,0.1,
        3.0,3.0,3.0,
        4.0,4.0,
        5.0,5.0,5.0,5.0,5.0,
        4.0,4.0,4.0,4.0,4.0,
        0.5,0.5,
        1.0,1.0,
        1.0,1.0,
    ])
    v1_weighted=np.multiply(v1,phonetic_weight_vector)
    dotprod=float(np.dot( v1_weighted, v2.T ))
    #return np.power(base,dotprod) 
    return dotprod
def accumulate_vectors(v1,v2): 
    """
    not a commutative operation
    """
    if is_consonant(v1) and is_halant(v2): 
        v1[PVIDX_BT_HALANT]=1
        return v1
    elif is_consonant(v1) and is_nukta(v2): 
        v1[PVIDX_BT_NUKTA]=1
        return v1
    elif is_consonant(v1) and is_dependent_vowel(v2): 
        return or_vectors(v1,v2)
    elif is_anusvaar(v1) and is_consonant(v2): 
        return or_vectors(v1,v2)
    else: 
        return invalid_vector()
def create_similarity_matrix(sim_func,slang,tlang,normalize=True):
    dim=langinfo.COORDINATED_RANGE_END_INCLUSIVE-langinfo.COORDINATED_RANGE_START_INCLUSIVE+1    
    sim_mat=np.zeros((dim,dim))    
    for offset1 in xrange(langinfo.COORDINATED_RANGE_START_INCLUSIVE, langinfo.COORDINATED_RANGE_END_INCLUSIVE+1): 
        v1=get_phonetic_feature_vector(offset_to_char(offset1,slang),slang)
        for offset2 in xrange(langinfo.COORDINATED_RANGE_START_INCLUSIVE, langinfo.COORDINATED_RANGE_END_INCLUSIVE+1): 
            v2=get_phonetic_feature_vector(offset_to_char(offset2,tlang),tlang)
            sim_mat[offset1,offset2]=sim_func(v1,v2)
    if normalize: 
        sums=np.sum(sim_mat, axis=1)
        sim_mat=(sim_mat.transpose()/sums).transpose()
    return sim_mat
#def score_phonetic_alignment(srcw,tgtw,slang,tlang,sim_matrix,mismatch_p=-0.2,gap_start_p=-0.05,gap_extend_p=0.0):
#
#    score_mat=np.zeros((len(srcw)+1,len(tgtw)+1))
#    soff=[ get_offset(c,slang) for c in srcw ]
#    toff=[ get_offset(c,tlang) for c in tgtw ]
#
#    score_mat[:,0]=np.array([si*gap_start_p for si in xrange(score_mat.shape[0])])
#    score_mat[0,:]=np.array([ti*gap_start_p for ti in xrange(score_mat.shape[1])])
#
#    for si,sc in enumerate(soff,1): 
#        for ti,tc in enumerate(toff,1): 
#            score_mat[si,ti]= max(
#                    score_mat[si-1,ti-1]+(sim_matrix[sc,tc] if ( sc>=0 and tc>=0 and sc<sim_matrix.shape[0] and tc<sim_matrix.shape[1]) else mismatch_p),
#                    score_mat[si,ti-1]+gap_start_p,
#                    score_mat[si-1,ti]+gap_start_p,
#                )
#    return score_mat[-1,-1]/float(max(len(srcw),len(tgtw)))
def score_phonetic_alignment(srcw,tgtw,slang,tlang,sim_matrix,mismatch_p=-0.2,gap_start_p=-0.05,gap_extend_p=0.0):
    score_mat=np.zeros((len(srcw)+1,len(tgtw)+1))
    score_mat[:,0]=np.array([si*gap_start_p for si in xrange(score_mat.shape[0])])
    score_mat[0,:]=np.array([ti*gap_start_p for ti in xrange(score_mat.shape[1])])
    for si,sc in enumerate(srcw,1): 
        for ti,tc in enumerate(tgtw,1): 
            so=get_offset(sc,slang)
            to=get_offset(tc,tlang)
            score_mat[si,ti]= max(
                    score_mat[si-1,ti-1]+(sim_matrix[so,to] if ( so>=0 and to>=0 and so<sim_matrix.shape[0] and to<sim_matrix.shape[1]) else mismatch_p),
                    score_mat[si,ti-1]+gap_start_p,
                    score_mat[si-1,ti]+gap_start_p,
                )
    return score_mat[-1,-1]/float(max(len(srcw),len(tgtw)))
def score_ned_similarity(srcw,tgtw,slang,tlang,w_del=1.0,w_ins=1.0,w_sub=1.0):
    score_mat=np.zeros((len(srcw)+1,len(tgtw)+1))
    score_mat[:,0]=np.array([si*w_del for si in xrange(score_mat.shape[0])])
    score_mat[0,:]=np.array([ti*w_ins for ti in xrange(score_mat.shape[1])])
    for si,sc in enumerate(srcw,1): 
        for ti,tc in enumerate(tgtw,1): 
            so=get_offset(sc,slang)
            to=get_offset(tc,tlang)
            if in_coordinated_range_offset(so) and in_coordinated_range_offset(to) and so==to: 
                score_mat[si,ti]=score_mat[si-1,ti-1]
            elif not (in_coordinated_range_offset(so) or in_coordinated_range_offset(to)) and sc==tc: 
                score_mat[si,ti]=score_mat[si-1,ti-1]
            else: 
                score_mat[si,ti]= min(
                    score_mat[si-1,ti-1]+w_sub,
                    score_mat[si,ti-1]+w_ins,
                    score_mat[si-1,ti]+w_del,
                )
    return 1.0-score_mat[-1,-1]/float(max(len(srcw),len(tgtw)))
def score_lcsr(srcw,tgtw,slang,tlang):
    score_mat=np.zeros((len(srcw)+1,len(tgtw)+1))
    for si,sc in enumerate(srcw,1): 
        for ti,tc in enumerate(tgtw,1): 
            so=get_offset(sc,slang)
            to=get_offset(tc,tlang)
            if in_coordinated_range_offset(so) and in_coordinated_range_offset(to) and so==to: 
                score_mat[si,ti]=score_mat[si-1,ti-1]+1.0
            elif not (in_coordinated_range_offset(so) or in_coordinated_range_offset(to)) and sc==tc: 
                score_mat[si,ti]=score_mat[si-1,ti-1]+1.0
            else: 
                score_mat[si,ti]= max(
                    score_mat[si,ti-1],
                    score_mat[si-1,ti])
    return score_mat[-1,-1]/float(max(len(srcw),len(tgtw)))
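# Added illustrative note (not part of the original module): the scorers above
# all normalise the final dynamic-programming cell by the longer word's length.
# A hedged usage sketch, assuming indicnlp has been initialised via
# loader.load() and that 'hi'/'pa' are valid language codes (as in __main__
# below); the source/target words are left as placeholders on purpose:
#
#   sim_mat = create_similarity_matrix(cosine, 'hi', 'pa', normalize=False)
#   score_phonetic_alignment(srcw, tgtw, 'hi', 'pa', sim_mat)
#   score_lcsr(srcw, tgtw, 'hi', 'pa')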
def iterate_phrase_table(phrase_table_fname): 
    with gzip.open(phrase_table_fname,'r') as phrase_table_file: 
        phrase_table=codecs.getreader('utf-8')(phrase_table_file)
        for line in phrase_table: 
            yield line.split(u' ||| ')
def add_charsim_alignment_phrase_table(phrase_table_fname,
                                        out_phrase_table_fname,
                                        src_lang,tgt_lang): 
    with gzip.open(out_phrase_table_fname,'wb') as out_phrase_table_file: 
        out_phrase_table=codecs.getwriter('utf-8')(out_phrase_table_file) 
        for fields in iterate_phrase_table(phrase_table_fname): 
            src_words= fields[0].strip().replace(u' ',u'')
            tgt_words= fields[1].strip().replace(u' ',u'')
            feat_values=[ float(x) for x in fields[2].strip().split(u' ') ]
            feat_values.extend([ 
                score_ned_similarity(src_words,tgt_words,src_lang,tgt_lang),
                score_lcsr(src_words,tgt_words,src_lang,tgt_lang),
                ])
            fields[2]=u' '+u' '.join([str(x) for x in feat_values])+u' '
            out_phrase_table.write(u' ||| '.join(fields))
def add_phonetic_alignment_phrase_table(phrase_table_fname,
                                        out_phrase_table_fname,
                                        src_lang,tgt_lang,
                                        similarity_metrics=[(sim1,True),(cosine,False),(softmax,True)],
                                        mismatch_p=0.0,gap_start_p=0.0): 
    sim_mats=[create_similarity_matrix(metric_func,src_lang,tgt_lang,normalize) for metric_func, normalize in similarity_metrics]
    with gzip.open(out_phrase_table_fname,'wb') as out_phrase_table_file: 
        out_phrase_table=codecs.getwriter('utf-8')(out_phrase_table_file) 
        for fields in iterate_phrase_table(phrase_table_fname): 
            #assert(len(fields)>=5)
            src_words= fields[0].strip().replace(u' ',u'')
            tgt_words= fields[1].strip().replace(u' ',u'')
            feat_values=[ float(x) for x in fields[2].strip().split(u' ') ]
            for (metric,_),sim_mat in zip(similarity_metrics,sim_mats): 
                score=score_phonetic_alignment(src_words,tgt_words,src_lang,tgt_lang,sim_mat,
                        0.0 if metric==equal else mismatch_p,0.0 if metric==equal else gap_start_p)
                feat_values.append(score)
            feat_values.extend([ 
                #score_ned_similarity(src_words,tgt_words,src_lang,tgt_lang),
                score_lcsr(src_words,tgt_words,src_lang,tgt_lang),
                ])
            fields[2]=u' '+u' '.join([str(x) for x in feat_values])+u' '
            out_phrase_table.write(u' ||| '.join(fields))
            ## lexical weighting - all wrong
            #alignments=[ [ int(x) for x in ap.strip().split(u'-') ]  for ap in fields[3].strip().split(u' ') ]
            #c_tpos={}
            #for spos, tpos in alignments: 
            #    c_tpos[tpos]=c_tpos.get(tpos,0.0)+1.0
            #score=0.0
            #for si, sw in enumerate(src_words): 
            #    term=0.0
            #    c=0.0
            #    for ti, tw in enumerate(tgt_words): 
            #        if [si,ti] in alignments: 
            #            c+=1.0
            #            term+=score_phonetic_alignment(src_words[si],tgt_words[ti],src_lang,tgt_lang,sim_mat)
            #    term=0.0 if c==0.0 else term/c
            #    score*=term
            # average 
            #score=0.0
            #fo
if __name__ == '__main__': 
    loader.load()
    #create_similarity_matrix(sim1,'hi','pa')
    add_phonetic_alignment_phrase_table(*sys.argv[1:])
    #add_charsim_alignment_phrase_table(*sys.argv[1:])
 | 
	gpl-3.0 | 
| 
	RayMick/scikit-learn | 
	sklearn/decomposition/tests/test_kernel_pca.py | 
	57 | 
	8062 | 
	import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import (assert_array_almost_equal, assert_less,
                                   assert_equal, assert_not_equal,
                                   assert_raises)
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.metrics.pairwise import rbf_kernel
def test_kernel_pca():
    rng = np.random.RandomState(0)
    X_fit = rng.random_sample((5, 4))
    X_pred = rng.random_sample((2, 4))
    def histogram(x, y, **kwargs):
        # Histogram kernel implemented as a callable.
        assert_equal(kwargs, {})    # no kernel_params that we didn't ask for
        return np.minimum(x, y).sum()
    for eigen_solver in ("auto", "dense", "arpack"):
        for kernel in ("linear", "rbf", "poly", histogram):
            # histogram kernel produces singular matrix inside linalg.solve
            # XXX use a least-squares approximation?
            inv = not callable(kernel)
            # transform fit data
            kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
                             fit_inverse_transform=inv)
            X_fit_transformed = kpca.fit_transform(X_fit)
            X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
            assert_array_almost_equal(np.abs(X_fit_transformed),
                                      np.abs(X_fit_transformed2))
            # non-regression test: previously, gamma would be 0 by default,
            # forcing all eigenvalues to 0 under the poly kernel
            assert_not_equal(X_fit_transformed.size, 0)
            # transform new data
            X_pred_transformed = kpca.transform(X_pred)
            assert_equal(X_pred_transformed.shape[1],
                         X_fit_transformed.shape[1])
            # inverse transform
            if inv:
                X_pred2 = kpca.inverse_transform(X_pred_transformed)
                assert_equal(X_pred2.shape, X_pred.shape)
def test_invalid_parameters():
    assert_raises(ValueError, KernelPCA, 10, fit_inverse_transform=True,
                  kernel='precomputed')
def test_kernel_pca_sparse():
    rng = np.random.RandomState(0)
    X_fit = sp.csr_matrix(rng.random_sample((5, 4)))
    X_pred = sp.csr_matrix(rng.random_sample((2, 4)))
    for eigen_solver in ("auto", "arpack"):
        for kernel in ("linear", "rbf", "poly"):
            # transform fit data
            kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
                             fit_inverse_transform=False)
            X_fit_transformed = kpca.fit_transform(X_fit)
            X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
            assert_array_almost_equal(np.abs(X_fit_transformed),
                                      np.abs(X_fit_transformed2))
            # transform new data
            X_pred_transformed = kpca.transform(X_pred)
            assert_equal(X_pred_transformed.shape[1],
                         X_fit_transformed.shape[1])
            # inverse transform
            # X_pred2 = kpca.inverse_transform(X_pred_transformed)
            # assert_equal(X_pred2.shape, X_pred.shape)
def test_kernel_pca_linear_kernel():
    rng = np.random.RandomState(0)
    X_fit = rng.random_sample((5, 4))
    X_pred = rng.random_sample((2, 4))
    # for a linear kernel, kernel PCA should find the same projection as PCA
    # modulo the sign (direction)
    # fit only the first four components: fifth is near zero eigenvalue, so
    # can be trimmed due to roundoff error
    assert_array_almost_equal(
        np.abs(KernelPCA(4).fit(X_fit).transform(X_pred)),
        np.abs(PCA(4).fit(X_fit).transform(X_pred)))
def test_kernel_pca_n_components():
    rng = np.random.RandomState(0)
    X_fit = rng.random_sample((5, 4))
    X_pred = rng.random_sample((2, 4))
    for eigen_solver in ("dense", "arpack"):
        for c in [1, 2, 4]:
            kpca = KernelPCA(n_components=c, eigen_solver=eigen_solver)
            shape = kpca.fit(X_fit).transform(X_pred).shape
            assert_equal(shape, (2, c))
def test_remove_zero_eig():
    X = np.array([[1 - 1e-30, 1], [1, 1], [1, 1 - 1e-20]])
    # n_components=None (default) => remove_zero_eig is True
    kpca = KernelPCA()
    Xt = kpca.fit_transform(X)
    assert_equal(Xt.shape, (3, 0))
    kpca = KernelPCA(n_components=2)
    Xt = kpca.fit_transform(X)
    assert_equal(Xt.shape, (3, 2))
    kpca = KernelPCA(n_components=2, remove_zero_eig=True)
    Xt = kpca.fit_transform(X)
    assert_equal(Xt.shape, (3, 0))
def test_kernel_pca_precomputed():
    rng = np.random.RandomState(0)
    X_fit = rng.random_sample((5, 4))
    X_pred = rng.random_sample((2, 4))
    for eigen_solver in ("dense", "arpack"):
        X_kpca = KernelPCA(4, eigen_solver=eigen_solver).\
            fit(X_fit).transform(X_pred)
        X_kpca2 = KernelPCA(
            4, eigen_solver=eigen_solver, kernel='precomputed').fit(
                np.dot(X_fit, X_fit.T)).transform(np.dot(X_pred, X_fit.T))
        X_kpca_train = KernelPCA(
            4, eigen_solver=eigen_solver,
            kernel='precomputed').fit_transform(np.dot(X_fit, X_fit.T))
        X_kpca_train2 = KernelPCA(
            4, eigen_solver=eigen_solver, kernel='precomputed').fit(
                np.dot(X_fit, X_fit.T)).transform(np.dot(X_fit, X_fit.T))
        assert_array_almost_equal(np.abs(X_kpca),
                                  np.abs(X_kpca2))
        assert_array_almost_equal(np.abs(X_kpca_train),
                                  np.abs(X_kpca_train2))
def test_kernel_pca_invalid_kernel():
    rng = np.random.RandomState(0)
    X_fit = rng.random_sample((2, 4))
    kpca = KernelPCA(kernel="tototiti")
    assert_raises(ValueError, kpca.fit, X_fit)
def test_gridsearch_pipeline():
    # Test if we can do a grid-search to find parameters to separate
    # circles with a perceptron model.
    X, y = make_circles(n_samples=400, factor=.3, noise=.05,
                        random_state=0)
    kpca = KernelPCA(kernel="rbf", n_components=2)
    pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
    param_grid = dict(kernel_pca__gamma=2. ** np.arange(-2, 2))
    grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
    grid_search.fit(X, y)
    assert_equal(grid_search.best_score_, 1)
def test_gridsearch_pipeline_precomputed():
    # Test if we can do a grid-search to find parameters to separate
    # circles with a perceptron model using a precomputed kernel.
    X, y = make_circles(n_samples=400, factor=.3, noise=.05,
                        random_state=0)
    kpca = KernelPCA(kernel="precomputed", n_components=2)
    pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
    param_grid = dict(Perceptron__n_iter=np.arange(1, 5))
    grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
    X_kernel = rbf_kernel(X, gamma=2.)
    grid_search.fit(X_kernel, y)
    assert_equal(grid_search.best_score_, 1)
def test_nested_circles():
    # Test the linear separability of the first 2D KPCA transform
    X, y = make_circles(n_samples=400, factor=.3, noise=.05,
                        random_state=0)
    # 2D nested circles are not linearly separable
    train_score = Perceptron().fit(X, y).score(X, y)
    assert_less(train_score, 0.8)
    # Project the circles data into the first 2 components of a RBF Kernel
    # PCA model.
    # Note that the gamma value is data dependent. If this test breaks
    # and the gamma value has to be updated, the Kernel PCA example will
    # have to be updated too.
    kpca = KernelPCA(kernel="rbf", n_components=2,
                     fit_inverse_transform=True, gamma=2.)
    X_kpca = kpca.fit_transform(X)
    # The data is perfectly linearly separable in that space
    train_score = Perceptron().fit(X_kpca, y).score(X_kpca, y)
    assert_equal(train_score, 1.0)
 | 
	bsd-3-clause | 
| 
	CompPhysics/ComputationalPhysics2 | 
	doc/Programs/BoltzmannMachines/VMC/python/qdotnint.py | 
	2 | 
	5849 | 
	# 2-electron VMC code for 2dim quantum dot with importance sampling
# No Coulomb interaction
# Using gaussian rng for new positions and Metropolis-Hastings
# Energy minimization using standard gradient descent 
# Common imports
import os
# Where to save the figures and data files
PROJECT_ROOT_DIR = "Results"
FIGURE_ID = "Results/FigureFiles"
if not os.path.exists(PROJECT_ROOT_DIR):
    os.mkdir(PROJECT_ROOT_DIR)
if not os.path.exists(FIGURE_ID):
    os.makedirs(FIGURE_ID)
def image_path(fig_id):
    return os.path.join(FIGURE_ID, fig_id)
def save_fig(fig_id):
    plt.savefig(image_path(fig_id) + ".png", format='png')
from math import exp, sqrt
from random import random, seed, normalvariate
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import sys
from numba import jit
from scipy.optimize import minimize
# Trial wave function for the 2-electron quantum dot in two dims
def WaveFunction(r,alpha):
    r1 = r[0,0]**2 + r[0,1]**2
    r2 = r[1,0]**2 + r[1,1]**2
    return exp(-0.5*alpha*(r1+r2))
# Local energy  for the 2-electron quantum dot in two dims, using analytical local energy
def LocalEnergy(r,alpha):
    
    r1 = (r[0,0]**2 + r[0,1]**2)
    r2 = (r[1,0]**2 + r[1,1]**2)
    return 0.5*(1-alpha*alpha)*(r1 + r2) +2.0*alpha
# Derivative of wave function ansatz as function of variational parameters
def DerivativeWFansatz(r,alpha):
    
    r1 = (r[0,0]**2 + r[0,1]**2)
    r2 = (r[1,0]**2 + r[1,1]**2)
    WfDer = -0.5*(r1+r2)
    return  WfDer
# Setting up the quantum force for the two-electron quantum dot, recall that it is a vector
def QuantumForce(r,alpha):
    qforce = np.zeros((NumberParticles,Dimension), np.double)
    qforce[0,:] = -2*r[0,:]*alpha
    qforce[1,:] = -2*r[1,:]*alpha
    return qforce
    
# Computing the derivative of the energy and the energy 
# jit decorator tells Numba to compile this function.
# The argument types will be inferred by Numba when function is called.
@jit
def EnergyMinimization(alpha):
    NumberMCcycles= 1000
    # Parameters in the Fokker-Planck simulation of the quantum force
    D = 0.5
    TimeStep = 0.05
    # positions
    PositionOld = np.zeros((NumberParticles,Dimension), np.double)
    PositionNew = np.zeros((NumberParticles,Dimension), np.double)
    # Quantum force
    QuantumForceOld = np.zeros((NumberParticles,Dimension), np.double)
    QuantumForceNew = np.zeros((NumberParticles,Dimension), np.double)
    # seed for rng generator 
    seed()
    energy = 0.0
    DeltaE = 0.0
    EnergyDer = 0.0
    DeltaPsi = 0.0
    DerivativePsiE = 0.0
    #Initial position
    for i in range(NumberParticles):
        for j in range(Dimension):
            PositionOld[i,j] = normalvariate(0.0,1.0)*sqrt(TimeStep)
    wfold = WaveFunction(PositionOld,alpha)
    QuantumForceOld = QuantumForce(PositionOld,alpha)
    #Loop over MC MCcycles
    for MCcycle in range(NumberMCcycles):
        #Trial position moving one particle at the time
        for i in range(NumberParticles):
            for j in range(Dimension):
                PositionNew[i,j] = PositionOld[i,j]+normalvariate(0.0,1.0)*sqrt(TimeStep)+\
                                       QuantumForceOld[i,j]*TimeStep*D
            wfnew = WaveFunction(PositionNew,alpha)
            QuantumForceNew = QuantumForce(PositionNew,alpha)
            GreensFunction = 0.0
            for j in range(Dimension):
                GreensFunction += 0.5*(QuantumForceOld[i,j]+QuantumForceNew[i,j])*\
	                              (D*TimeStep*0.5*(QuantumForceOld[i,j]-QuantumForceNew[i,j])-\
                                      PositionNew[i,j]+PositionOld[i,j])
      
            GreensFunction = 1.0#exp(GreensFunction)
            ProbabilityRatio = GreensFunction*wfnew**2/wfold**2
            #Metropolis-Hastings test to see whether we accept the move
            if random() <= ProbabilityRatio:
                for j in range(Dimension):
                    PositionOld[i,j] = PositionNew[i,j]
                    QuantumForceOld[i,j] = QuantumForceNew[i,j]
                wfold = wfnew
        DeltaE = LocalEnergy(PositionOld,alpha)
        DerPsi = DerivativeWFansatz(PositionOld,alpha)
        DeltaPsi +=DerPsi
        energy += DeltaE
        DerivativePsiE += DerPsi*DeltaE
            
    # We calculate mean values
    energy /= NumberMCcycles
    DerivativePsiE /= NumberMCcycles
    DeltaPsi /= NumberMCcycles
    EnergyDer  = 2*(DerivativePsiE-DeltaPsi*energy)
    return energy, EnergyDer
#Here starts the main program with variable declarations
NumberParticles = 2
Dimension = 2
# guess for variational parameters
x0 = 0.5
# Set up iteration using stochastic gradient method
Energy =0 ; EnergyDer = 0
Energy, EnergyDer = EnergyMinimization(x0)
# No adaptive search for a minimum
eta = 0.5
Niterations = 50
Energies = np.zeros(Niterations)
EnergyDerivatives = np.zeros(Niterations)
AlphaValues = np.zeros(Niterations)
Totiterations = np.zeros(Niterations)
for iter in range(Niterations):
    gradients = EnergyDer
    x0 -= eta*gradients
    Energy, EnergyDer = EnergyMinimization(x0)
    Energies[iter] = Energy
    EnergyDerivatives[iter] = EnergyDer
    AlphaValues[iter] = x0
    Totiterations[iter] = iter
plt.subplot(2, 1, 1)
plt.plot(Totiterations, Energies, 'o-')
plt.title('Energy and energy derivatives')
plt.ylabel('Dimensionless energy')
plt.subplot(2, 1, 2)
plt.plot(Totiterations, EnergyDerivatives, '.-')
plt.xlabel(r'$\mathrm{Iterations}$', fontsize=15)
plt.ylabel('Energy derivative')
save_fig("QdotNonint")
plt.show()
#nice printout with Pandas
import pandas as pd
from pandas import DataFrame
data ={'Alpha':AlphaValues, 'Energy':Energies,'Derivative':EnergyDerivatives}
frame = pd.DataFrame(data)
print(frame)
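# Added sanity check (sketch, not from the original script): with the
# analytical local energy defined above, 0.5*(1 - alpha**2)*(r1 + r2) + 2*alpha,
# the choice alpha = 1 yields the constant 2.0 for any configuration, i.e. the
# exact non-interacting ground-state energy that the gradient-descent loop
# should approach (AlphaValues -> 1, Energies -> 2).
check_positions = np.array([[0.3, -0.2], [1.1, 0.7]])
print("Local energy at alpha=1 (expected 2.0):", LocalEnergy(check_positions, 1.0))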
 | 
	cc0-1.0 | 
| 
	MartialD/hyperspy | 
	hyperspy/tests/drawing/test_plot_signal_tools.py | 
	2 | 
	3018 | 
	# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of  HyperSpy.
#
#  HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
#  HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with  HyperSpy.  If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import pytest
import matplotlib.pyplot as plt
from hyperspy import signals, components1d
from hyperspy._signals.signal1d import BackgroundRemoval
from hyperspy.signal_tools import ImageContrastEditor
BASELINE_DIR = "plot_signal_tools"
DEFAULT_TOL = 2.0
STYLE_PYTEST_MPL = 'default'
@pytest.mark.mpl_image_compare(baseline_dir=BASELINE_DIR,
                               tolerance=DEFAULT_TOL, style=STYLE_PYTEST_MPL)
def test_plot_BackgroundRemoval():
    pl = components1d.PowerLaw()
    pl.A.value = 1e10
    pl.r.value = 3
    s = signals.Signal1D(pl.function(np.arange(100, 200)))
    s.axes_manager[0].offset = 100
    br = BackgroundRemoval(s,
                           background_type='Power Law',
                           polynomial_order=2,
                           fast=True,
                           plot_remainder=True,
                           show_progressbar=None)
    br.span_selector.set_initial((105, 115))
    br.span_selector.onmove_callback()
    return br.signal._plot.signal_plot.figure
@pytest.mark.mpl_image_compare(baseline_dir=BASELINE_DIR,
                               tolerance=DEFAULT_TOL, style=STYLE_PYTEST_MPL)
@pytest.mark.parametrize("gamma", (0.7, 1.2))
@pytest.mark.parametrize("saturated_pixels", (0.3, 0.5))
def test_plot_contrast_editor(gamma, saturated_pixels):
    np.random.seed(1)
    data = np.random.random(size=(10, 10, 100, 100))*1000
    data += np.arange(10*10*100*100).reshape((10, 10, 100, 100))
    s = signals.Signal2D(data)
    s.plot(gamma=gamma, saturated_pixels=saturated_pixels)
    ceditor = ImageContrastEditor(s._plot.signal_plot)
    assert ceditor.gamma == gamma
    assert ceditor.saturated_pixels == saturated_pixels
    return plt.gcf()
@pytest.mark.parametrize("norm", ("linear", "log", "power", "symlog"))
def test_plot_contrast_editor_norm(norm):
    np.random.seed(1)
    data = np.random.random(size=(100, 100))*1000
    data += np.arange(100*100).reshape((100, 100))
    s = signals.Signal2D(data)
    s.plot(norm=norm)
    ceditor = ImageContrastEditor(s._plot.signal_plot)
    if norm == "log":
        # test log with negative numbers
        s2 = s - 5E3
        s2.plot(norm=norm)
        ceditor2 = ImageContrastEditor(s2._plot.signal_plot)
    assert ceditor.norm == norm.capitalize()
 | 
	gpl-3.0 | 
| 
	adammenges/statsmodels | 
	statsmodels/sandbox/nonparametric/kdecovclass.py | 
	33 | 
	5703 | 
	'''subclassing kde
Author: josef pktd
'''
import numpy as np
import scipy
from scipy import stats
import matplotlib.pylab as plt
class gaussian_kde_set_covariance(stats.gaussian_kde):
    '''
    from Anne Archibald in mailinglist:
    http://www.nabble.com/Width-of-the-gaussian-in-stats.kde.gaussian_kde---td19558924.html#a19558924
    '''
    def __init__(self, dataset, covariance):
        self.covariance = covariance
        scipy.stats.gaussian_kde.__init__(self, dataset)
    def _compute_covariance(self):
        self.inv_cov = np.linalg.inv(self.covariance)
        self._norm_factor = np.sqrt(np.linalg.det(2*np.pi*self.covariance)) * self.n
class gaussian_kde_covfact(stats.gaussian_kde):
    def __init__(self, dataset, covfact = 'scotts'):
        self.covfact = covfact
        scipy.stats.gaussian_kde.__init__(self, dataset)
    def _compute_covariance_(self):
        '''not used'''
        self.inv_cov = np.linalg.inv(self.covariance)
        self._norm_factor = np.sqrt(np.linalg.det(2*np.pi*self.covariance)) * self.n
    def covariance_factor(self):
        if self.covfact in ['sc', 'scotts']:
            return self.scotts_factor()
        if self.covfact in ['si', 'silverman']:
            return self.silverman_factor()
        elif self.covfact:
            return float(self.covfact)
        else:
            raise ValueError('covariance factor has to be scotts, silverman or a number')
    def reset_covfact(self, covfact):
        self.covfact = covfact
        self.covariance_factor()
        self._compute_covariance()
def plotkde(covfact):
    gkde.reset_covfact(covfact)
    kdepdf = gkde.evaluate(ind)
    plt.figure()
    # plot histogram of sample
    plt.hist(xn, bins=20, normed=1)
    # plot estimated density
    plt.plot(ind, kdepdf, label='kde', color="g")
    # plot data generating density
    plt.plot(ind, alpha * stats.norm.pdf(ind, loc=mlow) +
                  (1-alpha) * stats.norm.pdf(ind, loc=mhigh),
                  color="r", label='DGP: normal mix')
    plt.title('Kernel Density Estimation - ' + str(gkde.covfact))
    plt.legend()
from numpy.testing import assert_array_almost_equal, \
               assert_almost_equal, assert_
def test_kde_1d():
    np.random.seed(8765678)
    n_basesample = 500
    xn = np.random.randn(n_basesample)
    xnmean = xn.mean()
    xnstd = xn.std(ddof=1)
    print(xnmean, xnstd)
    # get kde for original sample
    gkde = stats.gaussian_kde(xn)
    # evaluate the density function for the kde for some points
    xs = np.linspace(-7,7,501)
    kdepdf = gkde.evaluate(xs)
    normpdf = stats.norm.pdf(xs, loc=xnmean, scale=xnstd)
    print('MSE', np.sum((kdepdf - normpdf)**2))
    print('axabserror', np.max(np.abs(kdepdf - normpdf)))
    intervall = xs[1] - xs[0]
    assert_(np.sum((kdepdf - normpdf)**2)*intervall < 0.01)
    #assert_array_almost_equal(kdepdf, normpdf, decimal=2)
    print(gkde.integrate_gaussian(0.0, 1.0))
    print(gkde.integrate_box_1d(-np.inf, 0.0))
    print(gkde.integrate_box_1d(0.0, np.inf))
    print(gkde.integrate_box_1d(-np.inf, xnmean))
    print(gkde.integrate_box_1d(xnmean, np.inf))
    assert_almost_equal(gkde.integrate_box_1d(xnmean, np.inf), 0.5, decimal=1)
    assert_almost_equal(gkde.integrate_box_1d(-np.inf, xnmean), 0.5, decimal=1)
    assert_almost_equal(gkde.integrate_box(xnmean, np.inf), 0.5, decimal=1)
    assert_almost_equal(gkde.integrate_box(-np.inf, xnmean), 0.5, decimal=1)
    assert_almost_equal(gkde.integrate_kde(gkde),
                        (kdepdf**2).sum()*intervall, decimal=2)
    assert_almost_equal(gkde.integrate_gaussian(xnmean, xnstd**2),
                        (kdepdf*normpdf).sum()*intervall, decimal=2)
##    assert_almost_equal(gkde.integrate_gaussian(0.0, 1.0),
##                        (kdepdf*normpdf).sum()*intervall, decimal=2)
if __name__ == '__main__':
    # generate a sample
    n_basesample = 1000
    np.random.seed(8765678)
    alpha = 0.6 #weight for (prob of) lower distribution
    mlow, mhigh = (-3,3)  #mean locations for gaussian mixture
    xn =  np.concatenate([mlow + np.random.randn(int(alpha * n_basesample)),
                       mhigh + np.random.randn(int((1-alpha) * n_basesample))])
    # get kde for original sample
    #gkde = stats.gaussian_kde(xn)
    gkde = gaussian_kde_covfact(xn, 0.1)
    # evaluate the density function for the kde for some points
    ind = np.linspace(-7,7,101)
    kdepdf = gkde.evaluate(ind)
    plt.figure()
    # plot histogram of sample
    plt.hist(xn, bins=20, normed=1)
    # plot estimated density
    plt.plot(ind, kdepdf, label='kde', color="g")
    # plot data generating density
    plt.plot(ind, alpha * stats.norm.pdf(ind, loc=mlow) +
                  (1-alpha) * stats.norm.pdf(ind, loc=mhigh),
                  color="r", label='DGP: normal mix')
    plt.title('Kernel Density Estimation')
    plt.legend()
    gkde = gaussian_kde_covfact(xn, 'scotts')
    kdepdf = gkde.evaluate(ind)
    plt.figure()
    # plot histogram of sample
    plt.hist(xn, bins=20, normed=1)
    # plot estimated density
    plt.plot(ind, kdepdf, label='kde', color="g")
    # plot data generating density
    plt.plot(ind, alpha * stats.norm.pdf(ind, loc=mlow) +
                  (1-alpha) * stats.norm.pdf(ind, loc=mhigh),
                  color="r", label='DGP: normal mix')
    plt.title('Kernel Density Estimation')
    plt.legend()
    #plt.show()
    for cv in ['scotts', 'silverman', 0.05, 0.1, 0.5]:
        plotkde(cv)
    test_kde_1d()
    np.random.seed(8765678)
    n_basesample = 1000
    xn = np.random.randn(n_basesample)
    xnmean = xn.mean()
    xnstd = xn.std(ddof=1)
    # get kde for original sample
    gkde = stats.gaussian_kde(xn)
 | 
	bsd-3-clause | 
| 
	eredmiles/Fraud-Corruption-Detection-Data-Science-Pipeline-DSSG2015 | 
	WorldBank2015/Code/data_pipeline_src/rank_gen.py | 
	2 | 
	7813 | 
	import csv
from IPython.display import Image
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sklearn
from sklearn import cross_validation
from sklearn import ensemble
from sklearn import metrics
import seaborn as sns
import re
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import train_test_split
import sys
sys.stdout = open('rank_list.csv', 'w')
# generating features dataframe:
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-i','--input_file',help='Contract data file')
args = parser.parse_args()
df_supplier = pd.read_csv(args.input_file);
#Subsetting on only main allegation outcomes
df_supplier_= df_supplier[(df_supplier['allegation_outcome'] == 'Substantiated') |
                           (df_supplier['allegation_outcome']== 'Unfounded') | 
                           (df_supplier['allegation_outcome']== 'Unsubstantiated')]
#remove duplicate columns from sql merging
cols_fixed = []
for col in df_supplier_.columns:
    pattern_y = re.compile('.*_y')
    pattern_x = re.compile('.*_x')
    if pattern_y.match(col):
        df_supplier_.drop(col,axis=1,inplace=True)
    elif pattern_x.match(col):
        cols_fixed.append(col[:-2])
    else:
        cols_fixed.append(col)
df_supplier_.columns = cols_fixed
#setup feature groups
col_group_names = ['supplier_major_sectors','supplier_major_sectors_anon',
                   'supplier_countries','supplier_countries_anon',
                   'supplier_regions','supplier_regions_anon',
                   'network']
col_groups = [ ['major_sector_cum_contracts_.*','major_sector_percent_contracts_.*','.*_sector_dominance'], 
               ['sector_dominance_\d+','sector_percent_\d+'],  
               ['country_cum_contracts_.*','country_percent_contracts_.*','.*_country_dominance'],
               ['country_dominance_\d+','country_percent_\d+'],
               ['region_cum_contracts_.*','region_percent_contracts_.*','.*_region_dominance'],
               ['region_dominance_\d+','region_percent_\d+'],
               ['.*centrality.*','.*giant_component.*','.*neighbor.*','.*betweeness.*','.*dist_invest.*']]
col_group_dict = {}
for i,col_group in enumerate(col_groups):
    col_list = []
    for regexp in col_group:
        pattern = re.compile(regexp)
        for col in df_supplier_.columns:
            if pattern.match(col) and col not in col_list:
                col_list.append(col)
    col_group_dict[col_group_names[i]] = col_list
col_group_dict['country_specific'] = ['business_disclosure_index',
                                      'firms_competing_against_informal_firms_perc',
                                      'payments_to_public_officials_perc',
                                      'do_not_report_all_sales_perc',
                                      'legal_rights_index',
                                      'time_to_enforce_contract',
                                      'bribes_to_tax_officials_perc',
                                      'property_rights_rule_governance_rating',
                                      'transparency_accountability_corruption_rating',
                                      'gdp_per_capita',
                                      'primary_school_graduation_perc',
                                      'gini_index',
                                      'unemployment_perc',
                                      'country_mean',
                                      'year_mean',
                                      'business_disclosure_index_mean',
                                      'firms_competing_against_informal_firms_perc_mean',
                                      'payments_to_public_officials_perc_mean',
                                      'do_not_report_all_sales_perc_mean',
                                      'legal_rights_index_mean',
                                      'time_to_enforce_contract_mean',
                                      'bribes_to_tax_officials_perc_mean',
                                      'property_rights_rule_governance_rating_mean',
                                      'transparency_accountability_corruption_rating_mean',
                                      'gdp_per_capita_mean',
                                      'primary_school_graduation_perc_mean',
                                      'gini_index_mean',
                                      'unemployment_perc_mean']
col_group_dict['contract'] = ['major_sector',
                              'proc_categ',
                              'proc_meth',
                              'domestic_pref_allwed',
                              'domestic_pref_affect',
                              'date_diff',
                              'price_escaltn_flag',
                              '#_supp_awd',
                              'project_proportion',
                              'amount_standardized']
col_group_dict['contract_country'] = ['region',
                                      'country',  
                                      'supp_ctry']
col_group_dict['project'] = ['project_total_amount']
#select feature groups
#col_set = ['contract','project','network','supplier_major_sectors','supplier_countries_anon','supplier_regions']
col_set = ['supplier_major_sectors','supplier_major_sectors_anon',
           'supplier_countries','supplier_countries_anon',
           'supplier_regions','supplier_regions_anon',
           'network','contract','project']
col_selection = []
for cset in col_set:
    col_selection += col_group_dict[cset]
 
df_new = df_supplier_[col_selection]
#df_new.drop('region',axis=1,inplace=True)
#select labels
label = df_supplier_['outcome_of_overall_investigation_when_closed']
print ('labels data', label.shape)
y = label.copy()
y.replace('Substantiated',1,inplace=True)
y.replace('Unsubstantiated',0,inplace=True)
y.replace('Unfounded',0,inplace=True)
#make dummy variables from categoricals
categorical = df_new.select_dtypes(include=[object])
for col in categorical.columns:
    #print(categorical[col])
    #print (col)
    if  categorical[col].nunique() > 2:
        dummy_features = pd.get_dummies(categorical[col]) 
        dummy_features.columns = ['is_' + '_'.join(c.split()).lower() for c in dummy_features.columns]
        df_new.drop(col,axis=1,inplace=True)
        df_new = df_new.merge(dummy_features,left_index=True,right_index=True)
# 
#fill NaNs/drop columns with all null and handle NaNs
df_new.replace([np.inf, -np.inf], np.nan,inplace=True);
for col in df_new.columns:
    null_count = df_new[col].isnull().sum()
    percent_of_null = 100.0 * null_count / len(df_new[col])
    if percent_of_null == 100.0:
        df_new.drop(col, axis=1, inplace=True)
        #print ('dropping',col)
    elif null_count >0:
        df_new[col+'_is_null'] = df_new[col].isnull()
       # df_new = df_new.merge(col+'isnull',left_index=True,right_index=True)
        df_new[col].fillna(-99999.99,inplace=True)
x_train,x_test,y_train,y_test = train_test_split(df_new,y,test_size = 0.2)
clf_rf = RandomForestClassifier(n_estimators=100,max_depth=80)
clf_rf.fit(x_train,y_train)
y_pred = []
prob_score = clf_rf.predict_proba(x_train)
a = prob_score[:,1]
for idx,item in enumerate(a):
    if item>= 0.55:
        item = 1
    else:
        item =0
    y_pred.append(item)
prob_score = [];
for idx,item in enumerate(x_test):
    a = clf_rf.predict_proba(item)
    prob_score.append([a[:,1], idx])
prob_score.sort()
b = prob_score[::-1]
b = np.array(b)
index = b.T[1]
column = ['wb_contract_number','supplier','major_sector_x']
for i in index:
    for item in column:
        print str(df_supplier.iloc[i][item]) + ',',
    print ""
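# Hedged alternative (added as a comment only, so the CSV output above is left
# unchanged): the per-row ranking loop can be vectorised with a single
# predict_proba call, assuming x_test preserves the row order of the slice
# taken from df_supplier:
#
#   scores = clf_rf.predict_proba(x_test)[:, 1]
#   for i in np.argsort(scores)[::-1]:
#       print ','.join(str(df_supplier.iloc[i][c]) for c in column)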
 | 
	mit | 
| 
	theoryno3/scikit-learn | 
	examples/model_selection/plot_roc.py | 
	146 | 
	3697 | 
	"""
=======================================
Receiver Operating Characteristic (ROC)
=======================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
ROC curves are typically used in binary classification to study the output of
a classifier. In order to extend ROC curve and ROC area to multi-class
or multi-label classification, it is necessary to binarize the output. One ROC
curve can be drawn per label, but one can also draw a ROC curve by considering
each element of the label indicator matrix as a binary prediction
(micro-averaging).
.. note::
    See also :func:`sklearn.metrics.roc_auc_score`,
             :ref:`example_model_selection_plot_roc_crossval.py`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
                                                    random_state=0)
# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
                                 random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
    fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
    roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
# Plot of a ROC curve for a specific class
plt.figure()
plt.plot(fpr[2], tpr[2], label='ROC curve (area = %0.2f)' % roc_auc[2])
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
# Plot ROC curve
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
         label='micro-average ROC curve (area = {0:0.2f})'
               ''.format(roc_auc["micro"]))
for i in range(n_classes):
    plt.plot(fpr[i], tpr[i], label='ROC curve of class {0} (area = {1:0.2f})'
                                   ''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
 | 
	bsd-3-clause | 
| 
	Juanlu001/aquagpusph | 
	examples/3D/spheric_testcase10_waveimpact/cMake/plot_p.py | 
	2 | 
	5919 | 
	#******************************************************************************
#                                                                             *
#              *    **   *  *   *                           *                 *
#             * *  *  *  *  *  * *                          *                 *
#            ***** *  *  *  * *****  **  ***  *  *  ** ***  ***               *
#            *   * *  *  *  * *   * *  * *  * *  * *   *  * *  *              *
#            *   * *  *  *  * *   * *  * *  * *  *   * *  * *  *              *
#            *   *  ** *  **  *   *  *** ***   *** **  ***  *  *              *
#                                      * *             *                      *
#                                    **  *             *                      *
#                                                                             *
#******************************************************************************
#                                                                             *
#  This file is part of AQUAgpusph, a free CFD program based on SPH.          *
#  Copyright (C) 2012  Jose Luis Cercos Pita <[email protected]>               *
#                                                                             *
#  AQUAgpusph is free software: you can redistribute it and/or modify         *
#  it under the terms of the GNU General Public License as published by       *
#  the Free Software Foundation, either version 3 of the License, or          *
#  (at your option) any later version.                                        *
#                                                                             *
#  AQUAgpusph is distributed in the hope that it will be useful,              *
#  but WITHOUT ANY WARRANTY; without even the implied warranty of             *
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the              *
#  GNU General Public License for more details.                               *
#                                                                             *
#  You should have received a copy of the GNU General Public License          *
#  along with AQUAgpusph.  If not, see <http://www.gnu.org/licenses/>.        *
#                                                                             *
#******************************************************************************
import sys
import os
from os import path
import numpy as np
try:
    from PyQt4 import QtGui
except:
    try:
        from PySide import QtGui
    except:
        raise ImportError("PyQt4 or PySide is required to use this tool")
try:
    from matplotlib.figure import Figure
    from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
except:
    raise ImportError("matplotlib is required to use this tool")
class FigureController(FigureCanvas):
    """Matplotlib figure widget controller"""
    def __init__(self):
        """Constructor"""
        # Create the figure in the canvas
        self.fig = Figure()
        self.ax = self.fig.add_subplot(111)
        FigureCanvas.__init__(self, self.fig)
        # generates first "empty" plot
        FNAME = path.join('@EXAMPLE_DEST_DIR@', 'lateral_water_1x.txt')
        T,P,A,DADT,_,_ = np.loadtxt(FNAME,
                                    delimiter='\t',
                                    skiprows=1,
                                    unpack=True)
        self.exp_t = T
        self.exp_p = 100.0 * P
        self.exp_line, = self.ax.plot(self.exp_t,
                                      self.exp_p,
                                      label=r'$p_{Exp}$',
                                      color="red",
                                      linewidth=1.0)
        self.t = [0.0]
        self.p = [0.0]
        self.line, = self.ax.plot(self.t,
                                  self.p,
                                  label=r'$p_{SPH}$',
                                  color="black",
                                  linewidth=1.0)
        # Set some options
        self.ax.grid()
        self.ax.legend(loc='best')
        self.ax.set_xlim(0, 5)
        self.ax.set_ylim(-1000, 5000)
        self.ax.set_autoscale_on(False)
        self.ax.set_xlabel(r"$t \, [\mathrm{s}]$", fontsize=21)
        self.ax.set_ylabel(r"$p \, [\mathrm{Pa}]$", fontsize=21)
        # force the figure redraw
        self.fig.canvas.draw()
        # call the update method (to speed-up visualization)
        self.timerEvent(None)
        # start timer, trigger event every 1000 millisecs (=1sec)
        self.timer = self.startTimer(1000)
    def readFile(self, filepath):
        """ Read and extract data from a file
        :param filepath File ot read
        """
        abspath = filepath
        if not path.isabs(filepath):
            abspath = path.join(path.dirname(path.abspath(__file__)), filepath)
        # Read the file by lines
        f = open(abspath, "r")
        lines = f.readlines()
        f.close()
        data = []
        for l in lines[1:-1]:  # Skip the header and the last line, which may be unready
            l = l.strip()
            while l.find('  ') != -1:
                l = l.replace('  ', ' ')
            fields = l.split(' ')
            try:
                data.append(map(float, fields))
            except:
                continue
        # Transpose the data
        return map(list, zip(*data))
    def timerEvent(self, evt):
        """Custom timerEvent code, called at timer event receive"""
        # Read and plot the new data
        data = self.readFile('sensors.out')
        self.t = data[0]
        self.p = data[1]
        self.line.set_data(self.t, self.p)
        # Redraw
        self.fig.canvas.draw()
if __name__ == '__main__':
    app = QtGui.QApplication(sys.argv)
    widget = FigureController()
    widget.setWindowTitle("Pressure")
    widget.show()
    sys.exit(app.exec_())
 | 
	gpl-3.0 | 
| 
	yunfeilu/scikit-learn | 
	sklearn/tests/test_metaestimators.py | 
	226 | 
	4954 | 
	"""Common tests for metaestimators"""
import functools
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.externals.six import iterkeys
from sklearn.datasets import make_classification
from sklearn.utils.testing import assert_true, assert_false, assert_raises
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.feature_selection import RFE, RFECV
from sklearn.ensemble import BaggingClassifier
class DelegatorData(object):
    def __init__(self, name, construct, skip_methods=(),
                 fit_args=make_classification()):
        self.name = name
        self.construct = construct
        self.fit_args = fit_args
        self.skip_methods = skip_methods
DELEGATING_METAESTIMATORS = [
    DelegatorData('Pipeline', lambda est: Pipeline([('est', est)])),
    DelegatorData('GridSearchCV',
                  lambda est: GridSearchCV(
                      est, param_grid={'param': [5]}, cv=2),
                  skip_methods=['score']),
    DelegatorData('RandomizedSearchCV',
                  lambda est: RandomizedSearchCV(
                      est, param_distributions={'param': [5]}, cv=2, n_iter=1),
                  skip_methods=['score']),
    DelegatorData('RFE', RFE,
                  skip_methods=['transform', 'inverse_transform', 'score']),
    DelegatorData('RFECV', RFECV,
                  skip_methods=['transform', 'inverse_transform', 'score']),
    DelegatorData('BaggingClassifier', BaggingClassifier,
                  skip_methods=['transform', 'inverse_transform', 'score',
                                'predict_proba', 'predict_log_proba', 'predict'])
]
def test_metaestimator_delegation():
    # Ensures specified metaestimators have methods iff subestimator does
    def hides(method):
        @property
        def wrapper(obj):
            if obj.hidden_method == method.__name__:
                raise AttributeError('%r is hidden' % obj.hidden_method)
            return functools.partial(method, obj)
        return wrapper
    class SubEstimator(BaseEstimator):
        def __init__(self, param=1, hidden_method=None):
            self.param = param
            self.hidden_method = hidden_method
        def fit(self, X, y=None, *args, **kwargs):
            self.coef_ = np.arange(X.shape[1])
            return True
        def _check_fit(self):
            if not hasattr(self, 'coef_'):
                raise RuntimeError('Estimator is not fit')
        @hides
        def inverse_transform(self, X, *args, **kwargs):
            self._check_fit()
            return X
        @hides
        def transform(self, X, *args, **kwargs):
            self._check_fit()
            return X
        @hides
        def predict(self, X, *args, **kwargs):
            self._check_fit()
            return np.ones(X.shape[0])
        @hides
        def predict_proba(self, X, *args, **kwargs):
            self._check_fit()
            return np.ones(X.shape[0])
        @hides
        def predict_log_proba(self, X, *args, **kwargs):
            self._check_fit()
            return np.ones(X.shape[0])
        @hides
        def decision_function(self, X, *args, **kwargs):
            self._check_fit()
            return np.ones(X.shape[0])
        @hides
        def score(self, X, *args, **kwargs):
            self._check_fit()
            return 1.0
    methods = [k for k in iterkeys(SubEstimator.__dict__)
               if not k.startswith('_') and not k.startswith('fit')]
    methods.sort()
    for delegator_data in DELEGATING_METAESTIMATORS:
        delegate = SubEstimator()
        delegator = delegator_data.construct(delegate)
        for method in methods:
            if method in delegator_data.skip_methods:
                continue
            assert_true(hasattr(delegate, method))
            assert_true(hasattr(delegator, method),
                        msg="%s does not have method %r when its delegate does"
                            % (delegator_data.name, method))
            # delegation before fit raises an exception
            assert_raises(Exception, getattr(delegator, method),
                          delegator_data.fit_args[0])
        delegator.fit(*delegator_data.fit_args)
        for method in methods:
            if method in delegator_data.skip_methods:
                continue
            # smoke test delegation
            getattr(delegator, method)(delegator_data.fit_args[0])
        for method in methods:
            if method in delegator_data.skip_methods:
                continue
            delegate = SubEstimator(hidden_method=method)
            delegator = delegator_data.construct(delegate)
            assert_false(hasattr(delegate, method))
            assert_false(hasattr(delegator, method),
                         msg="%s has method %r when its delegate does not"
                             % (delegator_data.name, method))
 | 
	bsd-3-clause | 
| 
	kprestel/PyInvestment | 
	pytech/decorators/decorators.py | 
	2 | 
	4087 | 
	from functools import wraps
import pandas as pd
from arctic.chunkstore.chunkstore import ChunkStore
import pytech.utils as utils
from pytech.mongo import ARCTIC_STORE, BarStore
from pytech.utils.exceptions import InvalidStoreError, PyInvestmentKeyError
from pandas.tseries.offsets import BDay
from pytech.data._holders import DfLibName
def memoize(obj):
    """Memoize functions so they don't have to be reevaluated."""
    cache = obj.cache = {}
    @wraps(obj)
    def memoizer(*args, **kwargs):
        key = str(args) + str(kwargs)
        if key not in cache:
            cache[key] = obj(*args, **kwargs)
        return cache[key]
    return memoizer
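# --- Added usage sketch (illustrative only, not part of the original module). ---
# The cache key is str(args) + str(kwargs), so a second call with identical
# arguments returns the cached value instead of re-running the body. The
# function name ``slow_add`` below is hypothetical.
#
#     @memoize
#     def slow_add(a, b):
#         return a + b
#
#     slow_add(1, 2)  # computed and stored in slow_add.cache
#     slow_add(1, 2)  # served from the cache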
def optional_arg_decorator(fn):
    """Used to **only** to wrap decorators that take optional arguments."""
    def wrapped_decorator(*args):
        if len(args) == 1 and callable(args[0]):
            return fn(args[0])
        else:
            def real_decorator(decoratee):
                return fn(decoratee, *args)
            return real_decorator
    return wrapped_decorator
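# --- Added usage sketch (illustrative only, not part of the original module). ---
# ``optional_arg_decorator`` lets a decorator be applied either bare or with
# arguments. The decorator name ``tag`` below is hypothetical.
#
#     @optional_arg_decorator
#     def tag(fn, label=None):
#         fn.label = label
#         return fn
#
#     @tag            # applied bare: called as tag(fn), label stays None
#     def a(): pass
#
#     @tag('greedy')  # applied with an argument: called as tag(fn, 'greedy')
#     def b(): pass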
def write_chunks(chunk_size='D', remove_ticker=True):
    """
    Used to wrap functions that return :class:`pd.DataFrame`s and writes the
    output to a :class:`ChunkStore`. It is required that the the wrapped
    function contains a column called 'ticker' to use as the key in the db.
    :param lib_name: The name of the library to write the
        :class:`pd.DataFrame` to.
    :param chunk_size: The chunk size to use options are:
        * D = Days
        * M = Months
        * Y = Years
    :param remove_ticker: If true the ticker column will be deleted before the
        :class:`pd.DataFrame` is returned, otherwise it will remain which is
        going to use more memory than required.
    :return: The output of the original function.
    """
    def wrapper(f):
        @wraps(f)
        def eval_and_write(*args, **kwargs):
            df_lib_name = f(*args, **kwargs)
            df = df_lib_name.df
            lib_name = df_lib_name.lib_name
            try:
                # TODO: make this use the fast scalar getter
                ticker = df[utils.TICKER_COL][0]
                # ticker = df.at[0, pd_utils.TICKER_COL]
            except KeyError:
                raise PyInvestmentKeyError(
                    'Decorated functions are required to add a column '
                    f'{utils.TICKER_COL} that contains the ticker.')
            if remove_ticker:
                # should this be saved?
                df.drop(utils.TICKER_COL, axis=1, inplace=True)
            # this is a workaround for a flaw in the arctic DateChunker.
            if 'date' not in df.columns or 'date' not in df.index.names:
                if df.index.dtype == pd.to_datetime(['2017']).dtype:
                    df.index.name = 'date'
                else:
                    raise ValueError('df must be datetime indexed or have a '
                                     'column named "date".')
            if lib_name not in ARCTIC_STORE.list_libraries():
                # create the lib if it does not already exist
                ARCTIC_STORE.initialize_library(lib_name,
                                                BarStore.LIBRARY_TYPE)
            lib = ARCTIC_STORE[lib_name]
            if not isinstance(lib, ChunkStore):
                raise InvalidStoreError(required=ChunkStore,
                                        provided=type(lib))
            else:
                lib.update(ticker, df, chunk_size=chunk_size, upsert=True)
            df.index.freq = BDay()
            return DfLibName(df, lib_name)
        return eval_and_write
    return wrapper
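# --- Added usage sketch (illustrative only, not part of the original module). ---
# The wrapped function must return a DfLibName whose DataFrame is datetime
# indexed and carries a ticker column; the decorator then persists it to the
# named Arctic library. ``load_bars`` and the 'bars' library are hypothetical.
#
#     @write_chunks(chunk_size='D')
#     def load_bars(ticker):
#         df = ...  # datetime-indexed frame containing a ticker column
#         return DfLibName(df, lib_name='bars')
#
#     bars = load_bars('AAPL').df  # also written to ARCTIC_STORE['bars']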
class lazy_property(object):
    """
    Used for lazy evaluation of an obj attr.
    Property should represent non-mutable data, as it replaces itself.
    """
    def __init__(self, f):
        self.f = f
        self.func_name = f.__name__
    def __get__(self, obj, cls):
        if obj is None:
            return None
        val = self.f(obj)
        setattr(obj, self.func_name, val)
        return val
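# --- Added usage sketch (illustrative only, not part of the original module). ---
# On first access the descriptor calls ``f`` and then replaces itself with the
# computed value on the instance, so later accesses are plain attribute reads.
# The class ``Portfolio`` below is hypothetical.
#
#     class Portfolio(object):
#         @lazy_property
#         def holdings(self):
#             return expensive_query()  # runs only once per instance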
 | 
	mit | 
| 
	chenyyx/scikit-learn-doc-zh | 
	examples/zh/ensemble/plot_gradient_boosting_regression.py | 
	58 | 
	2510 | 
	"""
============================
Gradient Boosting regression
============================
Demonstrate Gradient Boosting on the Boston housing dataset.
This example fits a Gradient Boosting model with least squares loss and
500 regression trees of depth 4.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
# #############################################################################
# Load data
boston = datasets.load_boston()
X, y = shuffle(boston.data, boston.target, random_state=13)
X = X.astype(np.float32)
offset = int(X.shape[0] * 0.9)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
# #############################################################################
# Fit regression model
params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 2,
          'learning_rate': 0.01, 'loss': 'ls'}
clf = ensemble.GradientBoostingRegressor(**params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
print("MSE: %.4f" % mse)
# #############################################################################
# Plot training deviance
# compute test set deviance
test_score = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_predict(X_test)):
    test_score[i] = clf.loss_(y_test, y_pred)
plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.title('Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, clf.train_score_, 'b-',
         label='Training Set Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-',
         label='Test Set Deviance')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')
# #############################################################################
# Plot feature importance
feature_importance = clf.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
plt.subplot(1, 2, 2)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, boston.feature_names[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.show()
 | 
	gpl-3.0 | 
| 
	abbeymiles/aima-python | 
	submissions/Ottenlips/myNN.py | 
	10 | 
	4316 | 
	from sklearn import datasets
from sklearn.neural_network import MLPClassifier
import traceback
from submissions.Ottenlips import billionaires
class DataFrame:
    data = []
    feature_names = []
    target = []
    target_names = []
bill = DataFrame()
list_of_billionaire = billionaires.get_billionaires()
def billtarget(billions):
    if billions<3.0:
        return 1
    else:
        return 0
for billionaires in list_of_billionaire:
    # print(billionaires['wealth']['type'])
    # print(billionaires)
    bill.target.append(billtarget(billionaires['wealth']['worth in billions']))
    # bill.target.append(billionaires['wealth']['how']['inherited'])
    bill.data.append([
        float(billionaires['demographics']['age']),
        float(billionaires['location']['gdp']),
        float(billionaires['rank']),
    ])
bill.feature_names = [
    'age',
    'gdp of origin country',
    'rank',
]
bill.target_names = [
    'very rich',
    'less rich',
]
'''
Make a custom classifier.
'''
mlpc = MLPClassifier(
            hidden_layer_sizes = (10,),
            # activation = 'relu',
            solver='sgd',
            #alpha = 0.0001,
            # batch_size='auto',
            learning_rate = 'adaptive',
            # power_t = 0.5,
            max_iter = 100, # 200,
            # shuffle = True,
            # random_state = None,
            # tol = 1e-4,
            # verbose = True,
            # warm_start = False,
            # momentum = 0.9,
            # nesterovs_momentum = True,
            # early_stopping = False,
            # validation_fraction = 0.1,
            # beta_1 = 0.9,
            # beta_2 = 0.999,
            # epsilon = 1e-8,
)
mlpcTwo = MLPClassifier(
            hidden_layer_sizes = (1000,),
            # activation = 'relu',
            solver='sgd',
            #alpha = 0.0001,
            # batch_size='auto',
            learning_rate = 'adaptive',
            # power_t = 0.5,
            max_iter = 1000, # 200,
            shuffle = True,
            # random_state = None,
            # tol = 1e-4,
            # verbose = True,
            # warm_start = False,
            # momentum = 0.9,
            # nesterovs_momentum = True,
            # early_stopping = False,
            # validation_fraction = 0.1,
            # beta_1 = 0.9,
            # beta_2 = 0.999,
            # epsilon = 1e-8,
)
billScaled = DataFrame()
def setupScales(grid):
    global min, max
    min = list(grid[0])
    max = list(grid[0])
    for row in range(1, len(grid)):
        for col in range(len(grid[row])):
            cell = grid[row][col]
            if cell < min[col]:
                min[col] = cell
            if cell > max[col]:
                max[col] = cell
def scaleGrid(grid):
    newGrid = []
    for row in range(len(grid)):
        newRow = []
        for col in range(len(grid[row])):
            try:
                cell = grid[row][col]
                scaled = (cell - min[col]) \
                         / (max[col] - min[col])
                newRow.append(scaled)
            except Exception:
                # skip cells that cannot be scaled (e.g. a constant column)
                pass
        newGrid.append(newRow)
    return newGrid
setupScales(bill.data)
billScaled.data = scaleGrid(bill.data)
billScaled.feature_names = bill.feature_names
billScaled.target = bill.target
billScaled.target_names = bill.target_names
Examples = {
    'BillMLPC': {
        'frame': bill,
        'mlpc': mlpc,
    },
    'BillMLPCTwo': {
        'frame': bill,
        'mlpc': mlpcTwo,
    },
    'BillScaled': {
        'frame': billScaled,
    },
    'Bill': {'frame': bill},
}
#
# billTwo = DataFrame()
#
#
#
# for billionaires in list_of_billionaire:
#     # print(billionaires['wealth']['type'])
#     #print(billionaires)
#     billTwo.target.append(float(billionaires['wealth']['worth in billions']))
#     # bill.target.append(billionaires['wealth']['how']['inherited'])
#     billTwo.data.append([
#         float(billionaires['location']['gdp']),
#         float(billionaires['rank']),
#         float(billionaires['demographics']['age']),
#     ])
#
#
#
#
# billTwo.feature_names = [
#         'gdp of origin country',
#         'rank',
#         'age',
#     ]
#
# billTwo.target_names = [
#         'worth',
#     ] | 
	mit | 
| 
	thilbern/scikit-learn | 
	examples/plot_johnson_lindenstrauss_bound.py | 
	134 | 
	7452 | 
	"""
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is controlled by
the fact that `p` defines an eps-embedding with good probability,
as defined by:
  (1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components to guarantee the eps-embedding is
given by:
  n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3)
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increases logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that increasing the admissible distortion ``eps``
drastically reduces the minimal number of dimensions ``n_components`` for
a given number of samples ``n_samples``.
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text documents (TF-IDF word frequencies) dataset:
- for the digits dataset, some 8x8 gray-level pixel data for 500
  handwritten digit pictures are randomly projected to spaces with various
  larger numbers of dimensions ``n_components``.
- for the 20 newsgroups dataset, some 500 documents with 100k
  features in total are projected using a sparse random matrix to smaller
  Euclidean spaces with various values for the target number of dimensions
  ``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
  and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed shape (due to the hard limit of zero
ratio on the left, as distances are always positive), while for larger
values of ``n_components`` the distortion is controlled and the distances
are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousand dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset, which only has 64
features in the input space, does not make sense: it does not allow for
dimensionality reduction in this case.
On the twenty newsgroups dataset, on the other hand, the dimensionality can
be decreased from 56436 down to 10000 while reasonably preserving pairwise
distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
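# Added sketch: a quick numeric check of the bound quoted in the docstring,
# for a hypothetical case of 500 samples and eps = 0.1. The closed-form
# expression and johnson_lindenstrauss_min_dim agree up to rounding, giving
# a few thousand components.
_n, _eps = 500, 0.1
_bound = 4 * np.log(_n) / (_eps ** 2 / 2 - _eps ** 3 / 3)
print("JL bound for n_samples=%d, eps=%.1f: %d (closed form %.1f)"
      % (_n, _eps, johnson_lindenstrauss_min_dim(_n, eps=_eps), _bound))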
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
    min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
    plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
    min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
    plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
    # Need an internet connection hence not enabled by default
    data = fetch_20newsgroups_vectorized().data[:500]
else:
    data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
      % (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
    t0 = time()
    rp = SparseRandomProjection(n_components=n_components)
    projected_data = rp.fit_transform(data)
    print("Projected %d samples from %d to %d in %0.3fs"
          % (n_samples, n_features, n_components, time() - t0))
    if hasattr(rp, 'components_'):
        n_bytes = rp.components_.data.nbytes
        n_bytes += rp.components_.indices.nbytes
        print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
    projected_dists = euclidean_distances(
        projected_data, squared=True).ravel()[nonzero]
    plt.figure()
    plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)
    plt.xlabel("Pairwise squared distances in original space")
    plt.ylabel("Pairwise squared distances in projected space")
    plt.title("Pairwise distances distribution for n_components=%d" %
              n_components)
    cb = plt.colorbar()
    cb.set_label('Sample pairs counts')
    rates = projected_dists / dists
    print("Mean distances rate: %0.2f (%0.2f)"
          % (np.mean(rates), np.std(rates)))
    plt.figure()
    plt.hist(rates, bins=50, normed=True, range=(0., 2.))
    plt.xlabel("Squared distances rate: projected / original")
    plt.ylabel("Distribution of samples pairs")
    plt.title("Histogram of pairwise distance rates for n_components=%d" %
              n_components)
    # TODO: compute the expected value of eps and add them to the previous plot
    # as vertical lines / region
plt.show()
 | 
	bsd-3-clause | 
| 
	icexelloss/spark | 
	examples/src/main/python/sql/arrow.py | 
	1 | 
	5042 | 
	#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A simple example demonstrating Arrow in Spark.
Run with:
  ./bin/spark-submit examples/src/main/python/sql/arrow.py
"""
from __future__ import print_function
from pyspark.sql import SparkSession
from pyspark.sql.utils import require_minimum_pandas_version, require_minimum_pyarrow_version
require_minimum_pandas_version()
require_minimum_pyarrow_version()
def dataframe_with_arrow_example(spark):
    # $example on:dataframe_with_arrow$
    import numpy as np
    import pandas as pd
    # Enable Arrow-based columnar data transfers
    spark.conf.set("spark.sql.execution.arrow.pyspark.enabled", "true")
    # Generate a Pandas DataFrame
    pdf = pd.DataFrame(np.random.rand(100, 3))
    # Create a Spark DataFrame from a Pandas DataFrame using Arrow
    df = spark.createDataFrame(pdf)
    # Convert the Spark DataFrame back to a Pandas DataFrame using Arrow
    result_pdf = df.select("*").toPandas()
    # $example off:dataframe_with_arrow$
    print("Pandas DataFrame result statistics:\n%s\n" % str(result_pdf.describe()))
def scalar_pandas_udf_example(spark):
    # $example on:scalar_pandas_udf$
    import pandas as pd
    from pyspark.sql.functions import col, pandas_udf
    from pyspark.sql.types import LongType
    # Declare the function and create the UDF
    def multiply_func(a, b):
        return a * b
    multiply = pandas_udf(multiply_func, returnType=LongType())
    # The function for a pandas_udf should be able to execute with local Pandas data
    x = pd.Series([1, 2, 3])
    print(multiply_func(x, x))
    # 0    1
    # 1    4
    # 2    9
    # dtype: int64
    # Create a Spark DataFrame, 'spark' is an existing SparkSession
    df = spark.createDataFrame(pd.DataFrame(x, columns=["x"]))
    # Execute function as a Spark vectorized UDF
    df.select(multiply(col("x"), col("x"))).show()
    # +-------------------+
    # |multiply_func(x, x)|
    # +-------------------+
    # |                  1|
    # |                  4|
    # |                  9|
    # +-------------------+
    # $example off:scalar_pandas_udf$
def grouped_map_pandas_udf_example(spark):
    # $example on:grouped_map_pandas_udf$
    from pyspark.sql.functions import pandas_udf, PandasUDFType
    df = spark.createDataFrame(
        [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
        ("id", "v"))
    @pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP)
    def subtract_mean(pdf):
        # pdf is a pandas.DataFrame
        v = pdf.v
        return pdf.assign(v=v - v.mean())
    df.groupby("id").apply(subtract_mean).show()
    # +---+----+
    # | id|   v|
    # +---+----+
    # |  1|-0.5|
    # |  1| 0.5|
    # |  2|-3.0|
    # |  2|-1.0|
    # |  2| 4.0|
    # +---+----+
    # $example off:grouped_map_pandas_udf$
def grouped_agg_pandas_udf_example(spark):
    # $example on:grouped_agg_pandas_udf$
    from pyspark.sql.functions import pandas_udf, PandasUDFType
    from pyspark.sql import Window
    df = spark.createDataFrame(
        [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
        ("id", "v"))
    @pandas_udf("double", PandasUDFType.GROUPED_AGG)
    def mean_udf(v):
        return v.mean()
    df.groupby("id").agg(mean_udf(df['v'])).show()
    # +---+-----------+
    # | id|mean_udf(v)|
    # +---+-----------+
    # |  1|        1.5|
    # |  2|        6.0|
    # +---+-----------+
    w = Window \
        .partitionBy('id') \
        .rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)
    df.withColumn('mean_v', mean_udf(df['v']).over(w)).show()
    # +---+----+------+
    # | id|   v|mean_v|
    # +---+----+------+
    # |  1| 1.0|   1.5|
    # |  1| 2.0|   1.5|
    # |  2| 3.0|   6.0|
    # |  2| 5.0|   6.0|
    # |  2|10.0|   6.0|
    # +---+----+------+
    # $example off:grouped_agg_pandas_udf$
if __name__ == "__main__":
    spark = SparkSession \
        .builder \
        .appName("Python Arrow-in-Spark example") \
        .getOrCreate()
    print("Running Pandas to/from conversion example")
    dataframe_with_arrow_example(spark)
    print("Running pandas_udf scalar example")
    scalar_pandas_udf_example(spark)
    print("Running pandas_udf grouped map example")
    grouped_map_pandas_udf_example(spark)
    spark.stop()
 | 
	apache-2.0 | 
| 
	peastman/deepchem | 
	contrib/pubchem_dataset/create_assay_overview.py | 
	6 | 
	39119 | 
	import pandas as pd
import os
import pickle
import array
from bisect import bisect_left
import gzip
import time
import shutil
import deepchem
import requests
import argparse
import numpy as np
data_dir = deepchem.utils.get_data_dir()
sdf_dir = os.path.join(data_dir, "Data")
class PCBADatsetBuilder:
  def __init__(self):
    self.pcba_128_assay_list = "PCBA-1030,PCBA-1379,PCBA-1452,PCBA-1454,PCBA-1457,PCBA-1458,PCBA-1460,PCBA-1461,PCBA-1468,PCBA-1469,PCBA-1471,PCBA-1479,PCBA-1631,PCBA-1634,PCBA-1688,PCBA-1721,PCBA-2100,PCBA-2101,PCBA-2147,PCBA-2242,PCBA-2326,PCBA-2451,PCBA-2517,PCBA-2528,PCBA-2546,PCBA-2549,PCBA-2551,PCBA-2662,PCBA-2675,PCBA-2676,PCBA-411,PCBA-463254,PCBA-485281,PCBA-485290,PCBA-485294,PCBA-485297,PCBA-485313,PCBA-485314,PCBA-485341,PCBA-485349,PCBA-485353,PCBA-485360,PCBA-485364,PCBA-485367,PCBA-492947,PCBA-493208,PCBA-504327,PCBA-504332,PCBA-504333,PCBA-504339,PCBA-504444,PCBA-504466,PCBA-504467,PCBA-504706,PCBA-504842,PCBA-504845,PCBA-504847,PCBA-504891,PCBA-540276,PCBA-540317,PCBA-588342,PCBA-588453,PCBA-588456,PCBA-588579,PCBA-588590,PCBA-588591,PCBA-588795,PCBA-588855,PCBA-602179,PCBA-602233,PCBA-602310,PCBA-602313,PCBA-602332,PCBA-624170,PCBA-624171,PCBA-624173,PCBA-624202,PCBA-624246,PCBA-624287,PCBA-624288,PCBA-624291,PCBA-624296,PCBA-624297,PCBA-624417,PCBA-651635,PCBA-651644,PCBA-651768,PCBA-651965,PCBA-652025,PCBA-652104,PCBA-652105,PCBA-652106,PCBA-686970,PCBA-686978,PCBA-686979,PCBA-720504,PCBA-720532,PCBA-720542,PCBA-720551,PCBA-720553,PCBA-720579,PCBA-720580,PCBA-720707,PCBA-720708,PCBA-720709,PCBA-720711,PCBA-743255,PCBA-743266,PCBA-875,PCBA-881,PCBA-883,PCBA-884,PCBA-885,PCBA-887,PCBA-891,PCBA-899,PCBA-902,PCBA-903,PCBA-904,PCBA-912,PCBA-914,PCBA-915,PCBA-924,PCBA-925,PCBA-926,PCBA-927,PCBA-938,PCBA-995".split(
        ',')
    self.pcba_146_assay_list = "PCBA-1030,PCBA-1379,PCBA-1452,PCBA-1454,PCBA-1457,PCBA-1458,PCBA-1460,PCBA-1461,PCBA-1468,PCBA-1469,PCBA-1471,PCBA-1479,PCBA-1631,PCBA-1634,PCBA-1688,PCBA-1721,PCBA-2100,PCBA-2101,PCBA-2147,PCBA-2242,PCBA-2326,PCBA-2451,PCBA-2517,PCBA-2528,PCBA-2546,PCBA-2549,PCBA-2551,PCBA-2662,PCBA-2675,PCBA-2676,PCBA-411,PCBA-463254,PCBA-485281,PCBA-485290,PCBA-485294,PCBA-485297,PCBA-485313,PCBA-485314,PCBA-485341,PCBA-485349,PCBA-485353,PCBA-485360,PCBA-485364,PCBA-485367,PCBA-492947,PCBA-493208,PCBA-504327,PCBA-504332,PCBA-504333,PCBA-504339,PCBA-504444,PCBA-504466,PCBA-504467,PCBA-504706,PCBA-504842,PCBA-504845,PCBA-504847,PCBA-504891,PCBA-540276,PCBA-540317,PCBA-588342,PCBA-588453,PCBA-588456,PCBA-588579,PCBA-588590,PCBA-588591,PCBA-588795,PCBA-588855,PCBA-602179,PCBA-602233,PCBA-602310,PCBA-602313,PCBA-602332,PCBA-624170,PCBA-624171,PCBA-624173,PCBA-624202,PCBA-624246,PCBA-624287,PCBA-624288,PCBA-624291,PCBA-624296,PCBA-624297,PCBA-624417,PCBA-651635,PCBA-651644,PCBA-651768,PCBA-651965,PCBA-652025,PCBA-652104,PCBA-652105,PCBA-652106,PCBA-686970,PCBA-686978,PCBA-686979,PCBA-720504,PCBA-720532,PCBA-720542,PCBA-720551,PCBA-720553,PCBA-720579,PCBA-720580,PCBA-720707,PCBA-720708,PCBA-720709,PCBA-720711,PCBA-743255,PCBA-743266,PCBA-875,PCBA-881,PCBA-883,PCBA-884,PCBA-885,PCBA-887,PCBA-891,PCBA-899,PCBA-902,PCBA-903,PCBA-904,PCBA-912,PCBA-914,PCBA-915,PCBA-924,PCBA-925,PCBA-926,PCBA-927,PCBA-938,PCBA-995,PCBA-686971,PCBA-504834,PCBA-588856,PCBA-720533,PCBA-1865,PCBA-651820,PCBA-923,PCBA-493014,PCBA-504648,PCBA-624418,PCBA-1159614,PCBA-2289,PCBA-1159524,PCBA-1463,PCBA-504832,PCBA-540256,PCBA-485298,PCBA-2685".split(
        ',')
    self.pcba_2475_assay_list = "PCBA-1259344,PCBA-588834,PCBA-1159536,PCBA-1259321,PCBA-1259320,PCBA-1259256,PCBA-1259255,PCBA-1259253,PCBA-1259252,PCBA-1159605,PCBA-1159604,PCBA-1259244,PCBA-1259243,PCBA-1259242,PCBA-1259241,PCBA-720687,PCBA-720675,PCBA-720674,PCBA-1224890,PCBA-1224889,PCBA-1224888,PCBA-1224887,PCBA-1224886,PCBA-1224885,PCBA-1224884,PCBA-1224883,PCBA-1224882,PCBA-1224881,PCBA-1224880,PCBA-1224879,PCBA-1224878,PCBA-1224877,PCBA-1224876,PCBA-1224875,PCBA-1224874,PCBA-1224873,PCBA-1224872,PCBA-1224871,PCBA-1224870,PCBA-1224869,PCBA-1224868,PCBA-1224867,PCBA-1224862,PCBA-1224861,PCBA-1224860,PCBA-1224859,PCBA-1224858,PCBA-1224857,PCBA-1224856,PCBA-1224855,PCBA-1224854,PCBA-1224853,PCBA-1224863,PCBA-1224847,PCBA-1224846,PCBA-1224845,PCBA-1224844,PCBA-1224843,PCBA-1224839,PCBA-1224838,PCBA-1224837,PCBA-1224836,PCBA-1224835,PCBA-1224823,PCBA-1224822,PCBA-1224821,PCBA-1224820,PCBA-1224819,PCBA-1224818,PCBA-1159614,PCBA-1159513,PCBA-1159512,PCBA-1159511,PCBA-1159510,PCBA-1382,PCBA-1159577,PCBA-1159574,PCBA-1159573,PCBA-1159572,PCBA-1159571,PCBA-1159570,PCBA-1159569,PCBA-1159568,PCBA-1159567,PCBA-1159566,PCBA-1117284,PCBA-1159553,PCBA-1159552,PCBA-1159551,PCBA-1117274,PCBA-1117272,PCBA-1117271,PCBA-720691,PCBA-1053202,PCBA-1159529,PCBA-1159527,PCBA-1053204,PCBA-1053203,PCBA-1159526,PCBA-1159525,PCBA-1159524,PCBA-1117265,PCBA-1053181,PCBA-1159521,PCBA-1159520,PCBA-1053169,PCBA-1053167,PCBA-1159517,PCBA-1159516,PCBA-1159515,PCBA-1053141,PCBA-1053140,PCBA-1053134,PCBA-1053132,PCBA-1053121,PCBA-1053120,PCBA-977620,PCBA-977612,PCBA-977609,PCBA-977617,PCBA-977616,PCBA-977615,PCBA-743509,PCBA-743507,PCBA-743497,PCBA-743483,PCBA-743481,PCBA-743440,PCBA-743417,PCBA-743413,PCBA-743403,PCBA-743399,PCBA-743381,PCBA-743434,PCBA-743422,PCBA-743373,PCBA-1117362,PCBA-1117361,PCBA-1117358,PCBA-1117359,PCBA-743372,PCBA-743296,PCBA-743284,PCBA-743425,PCBA-743234,PCBA-743231,PCBA-743229,PCBA-743450,PCBA-743423,PCBA-743404,PCBA-743400,PCBA-743389,PCBA-743384,PCBA-743186,PCBA-743183,PCBA-743175,PCBA-743181,PCBA-743172,PCBA-743167,PCBA-1117295,PCBA-743154,PCBA-743153,PCBA-743125,PCBA-743124,PCBA-743408,PCBA-743360,PCBA-743357,PCBA-743316,PCBA-743312,PCBA-743311,PCBA-743308,PCBA-743307,PCBA-743305,PCBA-743304,PCBA-743303,PCBA-743302,PCBA-743298,PCBA-743159,PCBA-743131,PCBA-743129,PCBA-743128,PCBA-743123,PCBA-743095,PCBA-720728,PCBA-743115,PCBA-743111,PCBA-743104,PCBA-743102,PCBA-743097,PCBA-743068,PCBA-743062,PCBA-743022,PCBA-743026,PCBA-743016,PCBA-720715,PCBA-720714,PCBA-720696,PCBA-720695,PCBA-720673,PCBA-720672,PCBA-720671,PCBA-720651,PCBA-720649,PCBA-743195,PCBA-743187,PCBA-743179,PCBA-743178,PCBA-743171,PCBA-743170,PCBA-743161,PCBA-1117277,PCBA-743083,PCBA-720622,PCBA-743225,PCBA-743224,PCBA-743223,PCBA-743222,PCBA-743221,PCBA-743220,PCBA-743218,PCBA-743217,PCBA-743215,PCBA-743213,PCBA-743212,PCBA-743211,PCBA-743210,PCBA-743209,PCBA-743203,PCBA-743202,PCBA-743194,PCBA-743191,PCBA-743094,PCBA-743086,PCBA-743085,PCBA-743084,PCBA-743081,PCBA-720590,PCBA-743080,PCBA-743079,PCBA-743075,PCBA-743074,PCBA-743069,PCBA-743066,PCBA-743065,PCBA-743064,PCBA-743042,PCBA-743041,PCBA-743040,PCBA-743036,PCBA-743035,PCBA-743033,PCBA-743015,PCBA-743014,PCBA-743012,PCBA-720693,PCBA-720692,PCBA-720686,PCBA-720685,PCBA-720684,PCBA-720683,PCBA-720682,PCBA-720681,PCBA-720680,PCBA-720679,PCBA-720678,PCBA-720635,PCBA-720634,PCBA-651634,PCBA-651633,PCBA-651632,PCBA-651631,PCBA-743110,PCBA-743058,PCBA-743057,PCBA-743056,PCBA-743055,PCBA-1053205,PCBA-720595,PCBA-720593,PCBA-720568,PCBA-720567,PCBA-720562,PCBA-1053185,PCBA-10531
84,PCBA-1053183,PCBA-1053174,PCBA-1053173,PCBA-651917,PCBA-651734,PCBA-624284,PCBA-624063,PCBA-602455,PCBA-602241,PCBA-624078,PCBA-1053144,PCBA-1053143,PCBA-743244,PCBA-743146,PCBA-743142,PCBA-1053127,PCBA-1053126,PCBA-1053125,PCBA-1053124,PCBA-1053122,PCBA-1053119,PCBA-1053118,PCBA-1053117,PCBA-1053115,PCBA-1035475,PCBA-686993,PCBA-743342,PCBA-977607,PCBA-977606,PCBA-977605,PCBA-686969,PCBA-686967,PCBA-686962,PCBA-686961,PCBA-623995,PCBA-743479,PCBA-743478,PCBA-743477,PCBA-743472,PCBA-743471,PCBA-743470,PCBA-743464,PCBA-743453,PCBA-743452,PCBA-743441,PCBA-743446,PCBA-743444,PCBA-743416,PCBA-743415,PCBA-743412,PCBA-743402,PCBA-743396,PCBA-743395,PCBA-743394,PCBA-686932,PCBA-686917,PCBA-686916,PCBA-686915,PCBA-652285,PCBA-652283,PCBA-652282,PCBA-652276,PCBA-743327,PCBA-743326,PCBA-743325,PCBA-652250,PCBA-652227,PCBA-743343,PCBA-743341,PCBA-743340,PCBA-743329,PCBA-652222,PCBA-652198,PCBA-652196,PCBA-743339,PCBA-652207,PCBA-743336,PCBA-652179,PCBA-652170,PCBA-652287,PCBA-652286,PCBA-652165,PCBA-652161,PCBA-743319,PCBA-743317,PCBA-743314,PCBA-652177,PCBA-652265,PCBA-652123,PCBA-652112,PCBA-743297,PCBA-743295,PCBA-743294,PCBA-743293,PCBA-743292,PCBA-743291,PCBA-743288,PCBA-2675,PCBA-743049,PCBA-652060,PCBA-652059,PCBA-720608,PCBA-720605,PCBA-720624,PCBA-720607,PCBA-720602,PCBA-720598,PCBA-743276,PCBA-743275,PCBA-743197,PCBA-743150,PCBA-743149,PCBA-743145,PCBA-743144,PCBA-743048,PCBA-743047,PCBA-743046,PCBA-743045,PCBA-743044,PCBA-743043,PCBA-743021,PCBA-743020,PCBA-519,PCBA-743267,PCBA-743266,PCBA-652173,PCBA-489002,PCBA-720701,PCBA-743262,PCBA-743260,PCBA-743259,PCBA-652172,PCBA-743255,PCBA-743254,PCBA-651977,PCBA-651976,PCBA-489003,PCBA-743245,PCBA-652046,PCBA-652043,PCBA-624288,PCBA-651913,PCBA-651912,PCBA-720726,PCBA-652289,PCBA-720727,PCBA-651875,PCBA-651872,PCBA-651855,PCBA-651853,PCBA-651849,PCBA-651842,PCBA-651874,PCBA-651862,PCBA-743059,PCBA-651790,PCBA-651788,PCBA-652183,PCBA-652180,PCBA-652175,PCBA-651775,PCBA-651920,PCBA-651996,PCBA-743019,PCBA-652164,PCBA-652140,PCBA-720729,PCBA-686933,PCBA-651753,PCBA-652211,PCBA-652194,PCBA-720724,PCBA-720711,PCBA-720709,PCBA-720708,PCBA-720707,PCBA-651760,PCBA-720697,PCBA-720690,PCBA-652077,PCBA-652034,PCBA-652033,PCBA-652032,PCBA-651676,PCBA-651670,PCBA-720659,PCBA-720653,PCBA-720652,PCBA-720650,PCBA-720646,PCBA-720645,PCBA-720512,PCBA-720636,PCBA-720632,PCBA-651947,PCBA-651605,PCBA-651642,PCBA-720597,PCBA-720591,PCBA-720589,PCBA-720588,PCBA-720587,PCBA-720586,PCBA-720584,PCBA-720579,PCBA-720580,PCBA-720578,PCBA-720577,PCBA-720576,PCBA-720575,PCBA-720573,PCBA-720572,PCBA-624496,PCBA-624495,PCBA-720569,PCBA-720537,PCBA-720570,PCBA-720564,PCBA-687026,PCBA-687023,PCBA-686931,PCBA-686930,PCBA-686929,PCBA-686928,PCBA-652239,PCBA-624500,PCBA-624460,PCBA-651841,PCBA-651816,PCBA-720565,PCBA-720553,PCBA-720551,PCBA-687040,PCBA-651837,PCBA-651836,PCBA-651809,PCBA-624473,PCBA-624458,PCBA-720548,PCBA-720542,PCBA-651835,PCBA-720538,PCBA-720534,PCBA-624439,PCBA-624425,PCBA-624410,PCBA-624409,PCBA-720541,PCBA-720540,PCBA-720536,PCBA-720535,PCBA-720533,PCBA-720532,PCBA-720528,PCBA-720527,PCBA-720526,PCBA-720525,PCBA-720524,PCBA-720523,PCBA-720522,PCBA-720519,PCBA-651840,PCBA-651839,PCBA-720518,PCBA-720517,PCBA-652280,PCBA-652275,PCBA-651863,PCBA-651829,PCBA-651807,PCBA-720514,PCBA-720513,PCBA-720498,PCBA-651854,PCBA-651845,PCBA-2517,PCBA-651878,PCBA-720507,PCBA-720506,PCBA-652019,PCBA-624373,PCBA-720504,PCBA-720503,PCBA-720502,PCBA-720501,PCBA-720500,PCBA-720499,PCBA-720497,PCBA-720496,PCBA-720495,PCBA-720494,PCBA-720493,PCBA-686947,PCBA-651795,PCBA-651773,PCBA
-651772,PCBA-651771,PCBA-651770,PCBA-651591,PCBA-651588,PCBA-651586,PCBA-651585,PCBA-651584,PCBA-624492,PCBA-624490,PCBA-624489,PCBA-624488,PCBA-624440,PCBA-624430,PCBA-624429,PCBA-624428,PCBA-624427,PCBA-624426,PCBA-624364,PCBA-624368,PCBA-624366,PCBA-624363,PCBA-624362,PCBA-720491,PCBA-720490,PCBA-651577,PCBA-624324,PCBA-624316,PCBA-624315,PCBA-687032,PCBA-687031,PCBA-687030,PCBA-687029,PCBA-687028,PCBA-687027,PCBA-624299,PCBA-624290,PCBA-624289,PCBA-686948,PCBA-687022,PCBA-624275,PCBA-624270,PCBA-687020,PCBA-624259,PCBA-687017,PCBA-687013,PCBA-687005,PCBA-687004,PCBA-687003,PCBA-687002,PCBA-687001,PCBA-687000,PCBA-686999,PCBA-686998,PCBA-686997,PCBA-686994,PCBA-686991,PCBA-686985,PCBA-686984,PCBA-686980,PCBA-686979,PCBA-686978,PCBA-651752,PCBA-624376,PCBA-624375,PCBA-624374,PCBA-624372,PCBA-624369,PCBA-624367,PCBA-624365,PCBA-624361,PCBA-624360,PCBA-624359,PCBA-624391,PCBA-624389,PCBA-686971,PCBA-686970,PCBA-686960,PCBA-686959,PCBA-686957,PCBA-652193,PCBA-624205,PCBA-624177,PCBA-624176,PCBA-624175,PCBA-624164,PCBA-624163,PCBA-624174,PCBA-624075,PCBA-624074,PCBA-624073,PCBA-624072,PCBA-686920,PCBA-624107,PCBA-624106,PCBA-624105,PCBA-624104,PCBA-624056,PCBA-624055,PCBA-624049,PCBA-624048,PCBA-624047,PCBA-624046,PCBA-624045,PCBA-624034,PCBA-624027,PCBA-624020,PCBA-624018,PCBA-624016,PCBA-624014,PCBA-624012,PCBA-624011,PCBA-624023,PCBA-624019,PCBA-624006,PCBA-623998,PCBA-623993,PCBA-623991,PCBA-652252,PCBA-624094,PCBA-624093,PCBA-623985,PCBA-623981,PCBA-623969,PCBA-623965,PCBA-652244,PCBA-652242,PCBA-652241,PCBA-623973,PCBA-623972,PCBA-623970,PCBA-623966,PCBA-623951,PCBA-623950,PCBA-623912,PCBA-652208,PCBA-623945,PCBA-623938,PCBA-623904,PCBA-623903,PCBA-623899,PCBA-623897,PCBA-623894,PCBA-623887,PCBA-623885,PCBA-623881,PCBA-652156,PCBA-623883,PCBA-623876,PCBA-623873,PCBA-623864,PCBA-623863,PCBA-623875,PCBA-652145,PCBA-623934,PCBA-623930,PCBA-652135,PCBA-624029,PCBA-624024,PCBA-652128,PCBA-652127,PCBA-652121,PCBA-652116,PCBA-651579,PCBA-651563,PCBA-624474,PCBA-623895,PCBA-623880,PCBA-602414,PCBA-602408,PCBA-652106,PCBA-652105,PCBA-652104,PCBA-602394,PCBA-652102,PCBA-652101,PCBA-602391,PCBA-602373,PCBA-602371,PCBA-602370,PCBA-602366,PCBA-602362,PCBA-623947,PCBA-588775,PCBA-602308,PCBA-602306,PCBA-602285,PCBA-652062,PCBA-652058,PCBA-652057,PCBA-652053,PCBA-652047,PCBA-602269,PCBA-602268,PCBA-652042,PCBA-652041,PCBA-652040,PCBA-602407,PCBA-602316,PCBA-602309,PCBA-488949,PCBA-652025,PCBA-652016,PCBA-652015,PCBA-652023,PCBA-602288,PCBA-602258,PCBA-602256,PCBA-652006,PCBA-652005,PCBA-602317,PCBA-651989,PCBA-602242,PCBA-602190,PCBA-602189,PCBA-602187,PCBA-602186,PCBA-602185,PCBA-602184,PCBA-651971,PCBA-651970,PCBA-651968,PCBA-624162,PCBA-651967,PCBA-540355,PCBA-2769,PCBA-2768,PCBA-2756,PCBA-2755,PCBA-2754,PCBA-1926,PCBA-1919,PCBA-651965,PCBA-651713,PCBA-651712,PCBA-624479,PCBA-624476,PCBA-602227,PCBA-602225,PCBA-602223,PCBA-602222,PCBA-602221,PCBA-602220,PCBA-602219,PCBA-602218,PCBA-602216,PCBA-602214,PCBA-602165,PCBA-602164,PCBA-651956,PCBA-602161,PCBA-602160,PCBA-602158,PCBA-651939,PCBA-651937,PCBA-602129,PCBA-602121,PCBA-602126,PCBA-651848,PCBA-651823,PCBA-651595,PCBA-651593,PCBA-588842,PCBA-651745,PCBA-651675,PCBA-651820,PCBA-588828,PCBA-588826,PCBA-651818,PCBA-651817,PCBA-651815,PCBA-651814,PCBA-651813,PCBA-651812,PCBA-602310,PCBA-651804,PCBA-651802,PCBA-651793,PCBA-651791,PCBA-651789,PCBA-651784,PCBA-651768,PCBA-651778,PCBA-651777,PCBA-588810,PCBA-651758,PCBA-651757,PCBA-651755,PCBA-651754,PCBA-651751,PCBA-651749,PCBA-588771,PCBA-651743,PCBA-651741,PCBA-588776,PCBA-651700,PCBA-588777,PCBA-5887
54,PCBA-651720,PCBA-588757,PCBA-588756,PCBA-588751,PCBA-588743,PCBA-588741,PCBA-588715,PCBA-588712,PCBA-588711,PCBA-651717,PCBA-651709,PCBA-651705,PCBA-651697,PCBA-588724,PCBA-651693,PCBA-651692,PCBA-651684,PCBA-588673,PCBA-651683,PCBA-651680,PCBA-651673,PCBA-651672,PCBA-588634,PCBA-588632,PCBA-588629,PCBA-651657,PCBA-651635,PCBA-588631,PCBA-588630,PCBA-588628,PCBA-588626,PCBA-588624,PCBA-588553,PCBA-588548,PCBA-651644,PCBA-602404,PCBA-602400,PCBA-588530,PCBA-588529,PCBA-651630,PCBA-602427,PCBA-602356,PCBA-602334,PCBA-588503,PCBA-588495,PCBA-588480,PCBA-602434,PCBA-588717,PCBA-588714,PCBA-588707,PCBA-588696,PCBA-588688,PCBA-588680,PCBA-588679,PCBA-588678,PCBA-588594,PCBA-588570,PCBA-588558,PCBA-588557,PCBA-588556,PCBA-602425,PCBA-602133,PCBA-602131,PCBA-588671,PCBA-588593,PCBA-588588,PCBA-588415,PCBA-651600,PCBA-651599,PCBA-588426,PCBA-588425,PCBA-651597,PCBA-588392,PCBA-588390,PCBA-588404,PCBA-588396,PCBA-588394,PCBA-588388,PCBA-588387,PCBA-588385,PCBA-588384,PCBA-588365,PCBA-588363,PCBA-651570,PCBA-651569,PCBA-651568,PCBA-651567,PCBA-651565,PCBA-651564,PCBA-651561,PCBA-624394,PCBA-602464,PCBA-651559,PCBA-651558,PCBA-588399,PCBA-588374,PCBA-588372,PCBA-588371,PCBA-588331,PCBA-588330,PCBA-588329,PCBA-588324,PCBA-624503,PCBA-624501,PCBA-624493,PCBA-624491,PCBA-540363,PCBA-624487,PCBA-540353,PCBA-540352,PCBA-540350,PCBA-540348,PCBA-540347,PCBA-540339,PCBA-540360,PCBA-540354,PCBA-540338,PCBA-624455,PCBA-588846,PCBA-588845,PCBA-588844,PCBA-588840,PCBA-588321,PCBA-624418,PCBA-624417,PCBA-540318,PCBA-540316,PCBA-540315,PCBA-540314,PCBA-540312,PCBA-624405,PCBA-624404,PCBA-624403,PCBA-624395,PCBA-624385,PCBA-624384,PCBA-624383,PCBA-624382,PCBA-588449,PCBA-540266,PCBA-540264,PCBA-504943,PCBA-504939,PCBA-540323,PCBA-624351,PCBA-624330,PCBA-624343,PCBA-624347,PCBA-624344,PCBA-624337,PCBA-624336,PCBA-624335,PCBA-624322,PCBA-624317,PCBA-624332,PCBA-624331,PCBA-624329,PCBA-624328,PCBA-624327,PCBA-624326,PCBA-540322,PCBA-624312,PCBA-624308,PCBA-602251,PCBA-504837,PCBA-624305,PCBA-588435,PCBA-504831,PCBA-504828,PCBA-504820,PCBA-504818,PCBA-624300,PCBA-624298,PCBA-624297,PCBA-624296,PCBA-624291,PCBA-624287,PCBA-624285,PCBA-624274,PCBA-624273,PCBA-624265,PCBA-624261,PCBA-624258,PCBA-624254,PCBA-624253,PCBA-624252,PCBA-624251,PCBA-624250,PCBA-624249,PCBA-624248,PCBA-624247,PCBA-624246,PCBA-504826,PCBA-504823,PCBA-624245,PCBA-624244,PCBA-624243,PCBA-602338,PCBA-588802,PCBA-588770,PCBA-504569,PCBA-504566,PCBA-1690,PCBA-1689,PCBA-624241,PCBA-624173,PCBA-504937,PCBA-624207,PCBA-504789,PCBA-504788,PCBA-624202,PCBA-624172,PCBA-624171,PCBA-624170,PCBA-624166,PCBA-624161,PCBA-624160,PCBA-504891,PCBA-504769,PCBA-624147,PCBA-624146,PCBA-624145,PCBA-588377,PCBA-588373,PCBA-624134,PCBA-624133,PCBA-624132,PCBA-504755,PCBA-624116,PCBA-624044,PCBA-624032,PCBA-624031,PCBA-624030,PCBA-588647,PCBA-588639,PCBA-588611,PCBA-588609,PCBA-588607,PCBA-588605,PCBA-588575,PCBA-504703,PCBA-504702,PCBA-504687,PCBA-504685,PCBA-2566,PCBA-504674,PCBA-504655,PCBA-624089,PCBA-624087,PCBA-602437,PCBA-602435,PCBA-602433,PCBA-602431,PCBA-504911,PCBA-504910,PCBA-504909,PCBA-504903,PCBA-504901,PCBA-504898,PCBA-504897,PCBA-504667,PCBA-504666,PCBA-602136,PCBA-588857,PCBA-588447,PCBA-588443,PCBA-588437,PCBA-504860,PCBA-504857,PCBA-504854,PCBA-504853,PCBA-504852,PCBA-504654,PCBA-504650,PCBA-504649,PCBA-624002,PCBA-602179,PCBA-504713,PCBA-623996,PCBA-623994,PCBA-623992,PCBA-623989,PCBA-623978,PCBA-623955,PCBA-588572,PCBA-588555,PCBA-623861,PCBA-602469,PCBA-504684,PCBA-504683,PCBA-504682,PCBA-504646,PCBA-504645,PCBA-504597,PCBA-504588,PCBA-602374,PCBA-6
02372,PCBA-602367,PCBA-504572,PCBA-602478,PCBA-602477,PCBA-602476,PCBA-602475,PCBA-602474,PCBA-504642,PCBA-504640,PCBA-504576,PCBA-504575,PCBA-504574,PCBA-504573,PCBA-504571,PCBA-504570,PCBA-504564,PCBA-504562,PCBA-504561,PCBA-504556,PCBA-504551,PCBA-504535,PCBA-504533,PCBA-504695,PCBA-504694,PCBA-504693,PCBA-504563,PCBA-504560,PCBA-504559,PCBA-504557,PCBA-504555,PCBA-504553,PCBA-504524,PCBA-504504,PCBA-504502,PCBA-504526,PCBA-504518,PCBA-504516,PCBA-504509,PCBA-504508,PCBA-504485,PCBA-602376,PCBA-602304,PCBA-602257,PCBA-602389,PCBA-602388,PCBA-602386,PCBA-602384,PCBA-602382,PCBA-602380,PCBA-602378,PCBA-602377,PCBA-602375,PCBA-602332,PCBA-602369,PCBA-602368,PCBA-602365,PCBA-602364,PCBA-602361,PCBA-504450,PCBA-504449,PCBA-602358,PCBA-602357,PCBA-602350,PCBA-602296,PCBA-588620,PCBA-588608,PCBA-588606,PCBA-588604,PCBA-588563,PCBA-504440,PCBA-602328,PCBA-602326,PCBA-602313,PCBA-602298,PCBA-588401,PCBA-492949,PCBA-602293,PCBA-602292,PCBA-588583,PCBA-588581,PCBA-588568,PCBA-588566,PCBA-588564,PCBA-540371,PCBA-540368,PCBA-540365,PCBA-540349,PCBA-504889,PCBA-504870,PCBA-504868,PCBA-504867,PCBA-504433,PCBA-504432,PCBA-504530,PCBA-504395,PCBA-504394,PCBA-504393,PCBA-504388,PCBA-504409,PCBA-504360,PCBA-504353,PCBA-504347,PCBA-504367,PCBA-504363,PCBA-504358,PCBA-504349,PCBA-504341,PCBA-602208,PCBA-588637,PCBA-504503,PCBA-504484,PCBA-504352,PCBA-504335,PCBA-504633,PCBA-504631,PCBA-504413,PCBA-504331,PCBA-504325,PCBA-504323,PCBA-493250,PCBA-493249,PCBA-602263,PCBA-493239,PCBA-493238,PCBA-493237,PCBA-493235,PCBA-493234,PCBA-493230,PCBA-493228,PCBA-493227,PCBA-493226,PCBA-493225,PCBA-493213,PCBA-602259,PCBA-504608,PCBA-504604,PCBA-504599,PCBA-504931,PCBA-493198,PCBA-493196,PCBA-493195,PCBA-493193,PCBA-493181,PCBA-493180,PCBA-493176,PCBA-588412,PCBA-2576,PCBA-2533,PCBA-493167,PCBA-504390,PCBA-493215,PCBA-493150,PCBA-493149,PCBA-493147,PCBA-493145,PCBA-493142,PCBA-493141,PCBA-493139,PCBA-493137,PCBA-493135,PCBA-493134,PCBA-493133,PCBA-493132,PCBA-493126,PCBA-602236,PCBA-602235,PCBA-602234,PCBA-493130,PCBA-493112,PCBA-602233,PCBA-493095,PCBA-493092,PCBA-602217,PCBA-602215,PCBA-493099,PCBA-493082,PCBA-493081,PCBA-602211,PCBA-602210,PCBA-588780,PCBA-588779,PCBA-602198,PCBA-602188,PCBA-493089,PCBA-493080,PCBA-493069,PCBA-493064,PCBA-493060,PCBA-602204,PCBA-602202,PCBA-602201,PCBA-602200,PCBA-602199,PCBA-493093,PCBA-493053,PCBA-493051,PCBA-493050,PCBA-493037,PCBA-602191,PCBA-602176,PCBA-493015,PCBA-493013,PCBA-602168,PCBA-602167,PCBA-602166,PCBA-449756,PCBA-449750,PCBA-449749,PCBA-434945,PCBA-2631,PCBA-2630,PCBA-2519,PCBA-2398,PCBA-588355,PCBA-540304,PCBA-602127,PCBA-588856,PCBA-493038,PCBA-588855,PCBA-493113,PCBA-588851,PCBA-588849,PCBA-588848,PCBA-588847,PCBA-492954,PCBA-588827,PCBA-588811,PCBA-588505,PCBA-588504,PCBA-540276,PCBA-588809,PCBA-588799,PCBA-588795,PCBA-489039,PCBA-489038,PCBA-489037,PCBA-489036,PCBA-489011,PCBA-588790,PCBA-588783,PCBA-504792,PCBA-588727,PCBA-488985,PCBA-588763,PCBA-504415,PCBA-504359,PCBA-588742,PCBA-588719,PCBA-488976,PCBA-588720,PCBA-488958,PCBA-588689,PCBA-588681,PCBA-588524,PCBA-588359,PCBA-540334,PCBA-492960,PCBA-488913,PCBA-488908,PCBA-488948,PCBA-488934,PCBA-488914,PCBA-488897,PCBA-488891,PCBA-588603,PCBA-588601,PCBA-588600,PCBA-588599,PCBA-588598,PCBA-588591,PCBA-588590,PCBA-588586,PCBA-588579,PCBA-488830,PCBA-488828,PCBA-588554,PCBA-588525,PCBA-588498,PCBA-488858,PCBA-488843,PCBA-488820,PCBA-488803,PCBA-463199,PCBA-435010,PCBA-588547,PCBA-588546,PCBA-588545,PCBA-588544,PCBA-588541,PCBA-588538,PCBA-588537,PCBA-588535,PCBA-588533,PCBA-588532,PCBA-588526,PCBA-488811,PCBA-4888
10,PCBA-488805,PCBA-488804,PCBA-588516,PCBA-588515,PCBA-588514,PCBA-588513,PCBA-588502,PCBA-588481,PCBA-2423,PCBA-2400,PCBA-2388,PCBA-2387,PCBA-2327,PCBA-504501,PCBA-504497,PCBA-504492,PCBA-504488,PCBA-588463,PCBA-588456,PCBA-540257,PCBA-540254,PCBA-488797,PCBA-488770,PCBA-588453,PCBA-588451,PCBA-588442,PCBA-588440,PCBA-588439,PCBA-588382,PCBA-588379,PCBA-588434,PCBA-588429,PCBA-504673,PCBA-504671,PCBA-504670,PCBA-504665,PCBA-504664,PCBA-504641,PCBA-504517,PCBA-504514,PCBA-504512,PCBA-504489,PCBA-493251,PCBA-488782,PCBA-588411,PCBA-588406,PCBA-588400,PCBA-588398,PCBA-588397,PCBA-588378,PCBA-504927,PCBA-504500,PCBA-588361,PCBA-588349,PCBA-588348,PCBA-588347,PCBA-588345,PCBA-588344,PCBA-588343,PCBA-588342,PCBA-488827,PCBA-488808,PCBA-488795,PCBA-488792,PCBA-588341,PCBA-588340,PCBA-588339,PCBA-504539,PCBA-463185,PCBA-463184,PCBA-504362,PCBA-540327,PCBA-540362,PCBA-463109,PCBA-540359,PCBA-540356,PCBA-540346,PCBA-540343,PCBA-504840,PCBA-540335,PCBA-540326,PCBA-540288,PCBA-540317,PCBA-463080,PCBA-463077,PCBA-463076,PCBA-489004,PCBA-488901,PCBA-504834,PCBA-485352,PCBA-504832,PCBA-540298,PCBA-540297,PCBA-540296,PCBA-540256,PCBA-540280,PCBA-540279,PCBA-540271,PCBA-540270,PCBA-540269,PCBA-540268,PCBA-540259,PCBA-540258,PCBA-540255,PCBA-504659,PCBA-504658,PCBA-540252,PCBA-540246,PCBA-504944,PCBA-504942,PCBA-504941,PCBA-504932,PCBA-449733,PCBA-504895,PCBA-504882,PCBA-435031,PCBA-435029,PCBA-504865,PCBA-504861,PCBA-504850,PCBA-504848,PCBA-504847,PCBA-504845,PCBA-504843,PCBA-504842,PCBA-504841,PCBA-492994,PCBA-492987,PCBA-492996,PCBA-488950,PCBA-488943,PCBA-488932,PCBA-488931,PCBA-488930,PCBA-488909,PCBA-488907,PCBA-488905,PCBA-488870,PCBA-488868,PCBA-488867,PCBA-488866,PCBA-488848,PCBA-488844,PCBA-488836,PCBA-488809,PCBA-488807,PCBA-488802,PCBA-463074,PCBA-504806,PCBA-504724,PCBA-434967,PCBA-434957,PCBA-434935,PCBA-434930,PCBA-434923,PCBA-504765,PCBA-434946,PCBA-504763,PCBA-504762,PCBA-504756,PCBA-463142,PCBA-463081,PCBA-2838,PCBA-2802,PCBA-504730,PCBA-504729,PCBA-504728,PCBA-504727,PCBA-504726,PCBA-504725,PCBA-504723,PCBA-504722,PCBA-504719,PCBA-488935,PCBA-488925,PCBA-488842,PCBA-488826,PCBA-488819,PCBA-463227,PCBA-463105,PCBA-434981,PCBA-485287,PCBA-485285,PCBA-485278,PCBA-485277,PCBA-2822,PCBA-2820,PCBA-504706,PCBA-2812,PCBA-2788,PCBA-2791,PCBA-504701,PCBA-504699,PCBA-504697,PCBA-504689,PCBA-504672,PCBA-504544,PCBA-485295,PCBA-463251,PCBA-463250,PCBA-463107,PCBA-504648,PCBA-488854,PCBA-488851,PCBA-488850,PCBA-488849,PCBA-488838,PCBA-488832,PCBA-488821,PCBA-504549,PCBA-504542,PCBA-493003,PCBA-434951,PCBA-434938,PCBA-2744,PCBA-2742,PCBA-2740,PCBA-504637,PCBA-504636,PCBA-504548,PCBA-504453,PCBA-504447,PCBA-504446,PCBA-2748,PCBA-493002,PCBA-2843,PCBA-2750,PCBA-2739,PCBA-2738,PCBA-504609,PCBA-504565,PCBA-2684,PCBA-2678,PCBA-2649,PCBA-2644,PCBA-504547,PCBA-504546,PCBA-504536,PCBA-493094,PCBA-504467,PCBA-504466,PCBA-504465,PCBA-504444,PCBA-504320,PCBA-504318,PCBA-504316,PCBA-504315,PCBA-504314,PCBA-493247,PCBA-493243,PCBA-493242,PCBA-493233,PCBA-493229,PCBA-489005,PCBA-485288,PCBA-2537,PCBA-2102,PCBA-1903,PCBA-881,PCBA-852,PCBA-728,PCBA-716,PCBA-493197,PCBA-2474,PCBA-504397,PCBA-449748,PCBA-2573,PCBA-2565,PCBA-2564,PCBA-504364,PCBA-504339,PCBA-504333,PCBA-504332,PCBA-504329,PCBA-504327,PCBA-493194,PCBA-504322,PCBA-504313,PCBA-493248,PCBA-493177,PCBA-493240,PCBA-493231,PCBA-493218,PCBA-434941,PCBA-434937,PCBA-493214,PCBA-493212,PCBA-493210,PCBA-493208,PCBA-493206,PCBA-493205,PCBA-493204,PCBA-493203,PCBA-493201,PCBA-493200,PCBA-493199,PCBA-493192,PCBA-493191,PCBA-493188,PCBA-493185,PCBA-493182,PCBA-493179,PC
BA-2347,PCBA-493174,PCBA-493170,PCBA-493169,PCBA-493168,PCBA-493166,PCBA-493165,PCBA-493054,PCBA-493052,PCBA-493049,PCBA-493045,PCBA-493100,PCBA-493155,PCBA-493153,PCBA-488837,PCBA-493107,PCBA-493106,PCBA-493102,PCBA-435004,PCBA-493085,PCBA-493083,PCBA-493078,PCBA-493074,PCBA-493073,PCBA-493071,PCBA-493068,PCBA-493067,PCBA-493066,PCBA-493065,PCBA-1666,PCBA-1655,PCBA-1450,PCBA-449726,PCBA-435027,PCBA-488923,PCBA-488921,PCBA-488892,PCBA-488884,PCBA-488882,PCBA-488876,PCBA-488799,PCBA-488793,PCBA-449737,PCBA-449736,PCBA-449727,PCBA-435032,PCBA-435024,PCBA-435018,PCBA-435011,PCBA-2335,PCBA-2500,PCBA-2497,PCBA-2496,PCBA-2483,PCBA-2475,PCBA-2466,PCBA-2397,PCBA-2359,PCBA-2348,PCBA-2337,PCBA-2334,PCBA-2285,PCBA-2284,PCBA-2801,PCBA-2686,PCBA-2682,PCBA-2654,PCBA-2468,PCBA-2442,PCBA-493020,PCBA-493014,PCBA-2799,PCBA-2798,PCBA-1941,PCBA-1535,PCBA-1958,PCBA-1957,PCBA-1750,PCBA-1749,PCBA-1659,PCBA-1618,PCBA-1512,PCBA-485345,PCBA-492998,PCBA-489010,PCBA-434942,PCBA-492961,PCBA-1569,PCBA-489041,PCBA-489026,PCBA-489022,PCBA-492959,PCBA-492952,PCBA-492950,PCBA-489034,PCBA-489020,PCBA-488890,PCBA-492948,PCBA-489033,PCBA-489006,PCBA-488833,PCBA-489040,PCBA-489025,PCBA-489018,PCBA-492947,PCBA-488791,PCBA-489043,PCBA-489014,PCBA-488773,PCBA-489035,PCBA-489032,PCBA-489027,PCBA-2840,PCBA-2839,PCBA-2834,PCBA-2831,PCBA-2640,PCBA-489024,PCBA-489023,PCBA-488920,PCBA-489012,PCBA-488903,PCBA-2238,PCBA-489008,PCBA-489007,PCBA-485353,PCBA-485284,PCBA-1056,PCBA-1701,PCBA-1538,PCBA-2354,PCBA-485367,PCBA-488983,PCBA-488982,PCBA-488981,PCBA-2101,PCBA-488966,PCBA-2784,PCBA-1017,PCBA-488953,PCBA-2197,PCBA-2185,PCBA-488906,PCBA-488904,PCBA-488888,PCBA-488886,PCBA-488880,PCBA-488879,PCBA-488878,PCBA-488875,PCBA-488874,PCBA-488873,PCBA-485368,PCBA-488863,PCBA-488861,PCBA-488860,PCBA-2705,PCBA-1970,PCBA-488840,PCBA-488835,PCBA-463135,PCBA-2561,PCBA-2113,PCBA-488817,PCBA-488816,PCBA-488815,PCBA-488800,PCBA-488783,PCBA-463211,PCBA-434936,PCBA-434931,PCBA-488789,PCBA-488788,PCBA-488785,PCBA-488752,PCBA-488745,PCBA-463120,PCBA-2743,PCBA-2530,PCBA-485364,PCBA-485360,PCBA-485349,PCBA-485341,PCBA-485313,PCBA-463256,PCBA-2597,PCBA-2596,PCBA-2595,PCBA-2592,PCBA-2590,PCBA-2588,PCBA-2401,PCBA-2704,PCBA-2693,PCBA-2683,PCBA-2635,PCBA-2633,PCBA-2610,PCBA-2525,PCBA-2518,PCBA-2511,PCBA-2396,PCBA-485314,PCBA-485298,PCBA-485297,PCBA-485294,PCBA-485290,PCBA-2662,PCBA-2480,PCBA-2453,PCBA-2446,PCBA-485281,PCBA-463217,PCBA-2568,PCBA-2567,PCBA-2515,PCBA-2514,PCBA-463254,PCBA-2634,PCBA-2547,PCBA-2499,PCBA-2581,PCBA-463229,PCBA-463220,PCBA-463214,PCBA-463206,PCBA-463205,PCBA-463204,PCBA-463203,PCBA-463191,PCBA-2346,PCBA-2332,PCBA-2463,PCBA-2460,PCBA-463127,PCBA-449761,PCBA-449755,PCBA-463106,PCBA-435009,PCBA-435002,PCBA-2819,PCBA-2808,PCBA-2752,PCBA-2664,PCBA-2532,PCBA-463097,PCBA-463096,PCBA-2753,PCBA-463088,PCBA-449766,PCBA-434955,PCBA-435026,PCBA-434968,PCBA-1335,PCBA-449762,PCBA-1769,PCBA-1341,PCBA-1340,PCBA-1339,PCBA-1337,PCBA-1336,PCBA-1334,PCBA-449764,PCBA-449745,PCBA-1333,PCBA-435023,PCBA-2823,PCBA-449754,PCBA-449753,PCBA-1405,PCBA-959,PCBA-958,PCBA-945,PCBA-944,PCBA-942,PCBA-923,PCBA-912,PCBA-907,PCBA-900,PCBA-897,PCBA-896,PCBA-892,PCBA-890,PCBA-889,PCBA-875,PCBA-1519,PCBA-1379,PCBA-995,PCBA-994,PCBA-993,PCBA-989,PCBA-988,PCBA-987,PCBA-986,PCBA-985,PCBA-984,PCBA-983,PCBA-982,PCBA-981,PCBA-980,PCBA-979,PCBA-978,PCBA-977,PCBA-976,PCBA-975,PCBA-974,PCBA-973,PCBA-972,PCBA-971,PCBA-970,PCBA-969,PCBA-968,PCBA-967,PCBA-966,PCBA-965,PCBA-964,PCBA-963,PCBA-962,PCBA-961,PCBA-960,PCBA-955,PCBA-948,PCBA-947,PCBA-946,PCBA-943,PCBA-939,PCBA-938,PCBA-934,PCBA-93
3,PCBA-931,PCBA-930,PCBA-926,PCBA-925,PCBA-924,PCBA-922,PCBA-921,PCBA-918,PCBA-917,PCBA-916,PCBA-915,PCBA-914,PCBA-910,PCBA-904,PCBA-903,PCBA-902,PCBA-899,PCBA-895,PCBA-891,PCBA-887,PCBA-885,PCBA-884,PCBA-883,PCBA-1026,PCBA-1023,PCBA-434932,PCBA-1376,PCBA-1047,PCBA-1045,PCBA-1028,PCBA-1015,PCBA-856,PCBA-854,PCBA-851,PCBA-435019,PCBA-434958,PCBA-1744,PCBA-435014,PCBA-2326,PCBA-434997,PCBA-434987,PCBA-2311,PCBA-2307,PCBA-2298,PCBA-2296,PCBA-2295,PCBA-2217,PCBA-434976,PCBA-434954,PCBA-434947,PCBA-2603,PCBA-2758,PCBA-2821,PCBA-2538,PCBA-2795,PCBA-2794,PCBA-2787,PCBA-2786,PCBA-2785,PCBA-2451,PCBA-2167,PCBA-2763,PCBA-2762,PCBA-2745,PCBA-2741,PCBA-2734,PCBA-2733,PCBA-2730,PCBA-2729,PCBA-2695,PCBA-2115,PCBA-2111,PCBA-2110,PCBA-2100,PCBA-2712,PCBA-2711,PCBA-2708,PCBA-2701,PCBA-2696,PCBA-2685,PCBA-2680,PCBA-2677,PCBA-2676,PCBA-2486,PCBA-2673,PCBA-2671,PCBA-2669,PCBA-2668,PCBA-2667,PCBA-2666,PCBA-2660,PCBA-2425,PCBA-2381,PCBA-1491,PCBA-1489,PCBA-2613,PCBA-2458,PCBA-2457,PCBA-2456,PCBA-2452,PCBA-2510,PCBA-2594,PCBA-2591,PCBA-2585,PCBA-2572,PCBA-1721,PCBA-2559,PCBA-2551,PCBA-2549,PCBA-2528,PCBA-1030,PCBA-2546,PCBA-2508,PCBA-2507,PCBA-2364,PCBA-2353,PCBA-2173,PCBA-1708,PCBA-1707,PCBA-2501,PCBA-2035,PCBA-2015,PCBA-2454,PCBA-2450,PCBA-2467,PCBA-411,PCBA-2441,PCBA-2422,PCBA-2403,PCBA-2395,PCBA-2195,PCBA-1540,PCBA-2419,PCBA-2414,PCBA-2409,PCBA-2402,PCBA-2244,PCBA-1650,PCBA-1621,PCBA-2429,PCBA-2410,PCBA-1916,PCBA-2391,PCBA-2390,PCBA-1981,PCBA-1863,PCBA-2384,PCBA-2382,PCBA-1985,PCBA-1850,PCBA-2294,PCBA-2323,PCBA-2289,PCBA-1751,PCBA-2286,PCBA-2279,PCBA-1543,PCBA-1541,PCBA-2267,PCBA-2265,PCBA-2263,PCBA-2257,PCBA-1455,PCBA-2253,PCBA-2252,PCBA-2251,PCBA-2242,PCBA-1466,PCBA-2224,PCBA-2213,PCBA-2212,PCBA-2210,PCBA-2208,PCBA-2003,PCBA-2002,PCBA-1999,PCBA-1994,PCBA-1990,PCBA-1988,PCBA-2180,PCBA-2179,PCBA-2160,PCBA-2147,PCBA-2120,PCBA-2112,PCBA-2107,PCBA-2096,PCBA-2010,PCBA-2089,PCBA-2081,PCBA-2080,PCBA-2077,PCBA-2075,PCBA-2051,PCBA-2044,PCBA-2037,PCBA-2027,PCBA-2020,PCBA-2019,PCBA-1868,PCBA-2009,PCBA-1983,PCBA-1975,PCBA-1973,PCBA-1972,PCBA-1969,PCBA-1626,PCBA-1964,PCBA-1960,PCBA-1959,PCBA-1956,PCBA-1872,PCBA-1948,PCBA-1891,PCBA-1944,PCBA-1936,PCBA-1935,PCBA-1934,PCBA-1933,PCBA-1915,PCBA-1914,PCBA-1913,PCBA-1902,PCBA-1900,PCBA-1897,PCBA-1896,PCBA-1895,PCBA-1890,PCBA-1889,PCBA-1888,PCBA-1886,PCBA-1884,PCBA-1883,PCBA-1882,PCBA-1877,PCBA-1876,PCBA-1871,PCBA-1869,PCBA-1865,PCBA-1733,PCBA-1634,PCBA-1631,PCBA-1821,PCBA-1816,PCBA-1815,PCBA-1493,PCBA-1492,PCBA-1461,PCBA-1795,PCBA-1771,PCBA-1770,PCBA-1753,PCBA-1740,PCBA-1739,PCBA-1736,PCBA-1735,PCBA-1731,PCBA-1730,PCBA-1727,PCBA-1725,PCBA-1724,PCBA-1723,PCBA-1705,PCBA-1699,PCBA-1692,PCBA-1691,PCBA-1688,PCBA-1687,PCBA-1686,PCBA-1682,PCBA-1660,PCBA-1641,PCBA-1619,PCBA-1627,PCBA-1253,PCBA-1573,PCBA-1572,PCBA-1571,PCBA-1570,PCBA-1568,PCBA-1567,PCBA-1471,PCBA-1562,PCBA-1559,PCBA-1558,PCBA-1534,PCBA-1518,PCBA-1516,PCBA-1487,PCBA-1479,PCBA-1469,PCBA-1468,PCBA-1465,PCBA-1460,PCBA-1463,PCBA-1458,PCBA-1457,PCBA-1394,PCBA-1454,PCBA-1452,PCBA-1445,PCBA-1444,PCBA-1431,PCBA-1437,PCBA-1435,PCBA-1442,PCBA-1259,PCBA-846,PCBA-1215,PCBA-1421,PCBA-1420,PCBA-1419,PCBA-1418,PCBA-1417,PCBA-1414,PCBA-1412,PCBA-787,PCBA-721,PCBA-691,PCBA-679,PCBA-711,PCBA-1324,PCBA-1399,PCBA-1398,PCBA-1397,PCBA-1396,PCBA-1392,PCBA-1272,PCBA-1252,PCBA-1361,PCBA-1330,PCBA-1328,PCBA-1327,PCBA-1322,PCBA-1320,PCBA-1275,PCBA-927,PCBA-1288,PCBA-1284,PCBA-1279,PCBA-1278,PCBA-1277,PCBA-1250,PCBA-1249,PCBA-1225,PCBA-1223,PCBA-1221,PCBA-1200,PCBA-1198,PCBA-1197,PCBA-1196,PCBA-1000,PCBA-1134,PCBA-1068,PCBA-832,PCBA-820,PCBA-825,PC
BA-724,PCBA-935,PCBA-830,PCBA-949,PCBA-826,PCBA-801,PCBA-737,PCBA-733,PCBA-715,PCBA-714,PCBA-713,PCBA-831,PCBA-523,PCBA-790,PCBA-1013,PCBA-718".split(
        ",")
  def create_cid_list(self, assays_to_parse):
    """Find the union of all compounds tested across one or more assays
    """
    assay_paths = list()
    cid_list = np.array(list(), dtype=np.int64)
    assay_no = 0
    for path, dirs, filenames in os.walk(sdf_dir):
      for dir in dirs:
        # Each directory holds a range of assay results
        joined_path = os.path.join(sdf_dir, dir)
        for path, dirs, filenames in os.walk(joined_path):
          for filename in filenames:
            assay_name = "PCBA-" + filename.replace(".csv", "")
            if assay_name not in assays_to_parse:
              continue
            file_path = os.path.join(joined_path, filename)
            df = pd.read_csv(
                file_path, usecols=["PUBCHEM_CID", "PUBCHEM_ACTIVITY_OUTCOME"])
            df = df.dropna()
            df["PUBCHEM_CID"] = df["PUBCHEM_CID"].astype(np.int64)
            assay_paths.append(file_path)
            cid_list = np.append(cid_list, df["PUBCHEM_CID"].as_matrix())
            assay_no = assay_no + 1
            if assay_no % 100 == 0:
              print(
                  "Parsed: {0} of: {1}".format(assay_no, len(assays_to_parse)))
    print("Convert to CID set")
    cid_set = np.unique(cid_list)
    return assay_paths, cid_set
  def create_overview_146(self):
    assay_list = self.pcba_146_assay_list
    self.create_assay_file(assays_to_parse=assay_list, file_name="pcba_146.csv")
  def create_overview_128(self):
    assay_list = self.pcba_128_assay_list
    self.create_assay_file(assays_to_parse=assay_list, file_name="pcba_128.csv")
  def create_overview_for_gene(self, gene_symbol):
    assays_url = "https://pubchem.ncbi.nlm.nih.gov/rest/pug/assay/target/genesymbol/{0}/aids/TXT".format(
        gene_symbol)
    r = requests.get(assays_url)
    assays_to_parse = [
        "PCBA-" + str(x) for x in r.text.split('\n') if len(x) > 0
    ]
    file_name = "pcba_{0}.csv".format(gene_symbol)
    self.create_assay_file(assays_to_parse=assays_to_parse, file_name=file_name)
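  # Editor's note (assumption, not from the original source): the code above relies
  # on the PUG REST "aids/TXT" endpoint returning one assay ID per line as plain
  # text, so a hypothetical response of "1030\n1379\n" would be turned into
  # ["PCBA-1030", "PCBA-1379"] before being handed to create_assay_file.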
  def create_overview_2475(self):
    '''
    Reflects the results of query (1[TotalSidCount] : 1000000000[TotalSidCount] AND 5[ActiveSidCount] : 10000000000[ActiveSidCount] AND 0[TargetCount] : 1[TargetCount] AND "small molecule"[filt] AND "doseresponse"[filt] )
    :return:
    '''
    assays_to_parse = self.pcba_2475_assay_list
    self.create_assay_file(
        assays_to_parse=assays_to_parse, file_name="pcba_2475.csv")
  def create_assay_file(self, assays_to_parse, file_name):
    cid_start = time.time()
    assay_paths, cid_ref_list = self.create_cid_list(assays_to_parse)
    cid_end = time.time()
    print("CID length is: {0}, created in: {1} hours".format(
        cid_ref_list.size, (cid_end - cid_start) / 3600))
    print("Creating overview of {0} assays".format(len(assay_paths)))
    path_final = os.path.join(data_dir, file_name)
    assay_results = list()
    assay_names = list()
    cid_len = cid_ref_list.size
    all_assay_start = time.time()
    for assay_path in assay_paths:
      assay_start = time.time()
      filename = os.path.basename(assay_path)
      assay_name = "PCBA-" + filename.replace(".csv", "")
      print("Looking at: {0}".format(assay_name))
      df = pd.read_csv(
          assay_path, usecols=["PUBCHEM_CID", "PUBCHEM_ACTIVITY_OUTCOME"])
      df = df.dropna(subset=["PUBCHEM_CID", "PUBCHEM_ACTIVITY_OUTCOME"])
      if len(df.index) == 0:
        continue
      df["IS_ACTIVE"] = df["PUBCHEM_ACTIVITY_OUTCOME"] == "Active"
      df = df.rename(columns={'IS_ACTIVE': assay_name})
      df["PUBCHEM_CID"] = df["PUBCHEM_CID"].astype(int)
      df[assay_name] = df[assay_name].astype(int)
      df = df.set_index("PUBCHEM_CID")
      df = df[~df.index.duplicated(keep='last')]
      assay_results_array = array.array('i', (-1 for i in range(0, cid_len)))
      print(assay_path)
      for i in range(0, cid_len):
        cid = cid_ref_list[i]
        if cid in df.index:
          val = df.get_value(cid, assay_name)
        else:
          # Just write NA
          val = -1
        assay_results_array[i] = val
      assay_names.append(assay_name)
      assay_results.append(assay_results_array)
      assay_end = time.time()
      print("Parsed: {0} in {1} seconds".format(assay_name, assay_end -
                                                assay_start))
    # Now, write out the results csv, going line by line through all molecule results
    assay_results_len = len(assay_results)
    all_assay_end = time.time()
    print("Parsed all assays in: {} hours".format((
        all_assay_end - all_assay_start) / 3600))
    smiles_start = time.time()
    print("Reading in smiles info")
    with open(os.path.join(data_dir, "pubchemsmiles_tuple.pickle"), "rb") as f:
      keys, values = pickle.load(f)
    header_line = list()
    header_line.append("mol_id")
    header_line.append(",smiles")
    for assay_name in assay_names:
      header_line.append(",")
      header_line.append(assay_name)
    header_line_txt = "".join(header_line)
    f_final = open(path_final, "w+")
    f_final.write(header_line_txt + "\n")
    for i in range(0, cid_len):
      cid = cid_ref_list[i]
      # printing the mol_id
      line_for_comp = "CID" + str(cid)
      # printing the SMILES
      bisect_pos = bisect_left(keys, cid, 0)
      cid_pos = bisect_pos if bisect_pos != len(
          keys) and keys[bisect_pos] == cid else -1
      if cid_pos == -1:
        continue
      line_for_comp += "," + str(values[cid_pos])
      for j in range(0, assay_results_len):
        val = assay_results[j][i]
        if val == -1:
          line_for_comp += ","
        else:
          line_for_comp += "," + str(val)
      f_final.write(line_for_comp + "\n")
    f_final.close()
    # Now gzip it
    with open(path_final, 'rb') as f_in:
      with gzip.open(path_final + ".gz", 'wb') as f_out:
        shutil.copyfileobj(f_in, f_out)
    # Now remove the intermediate csv
    os.remove(path_final)
    smiles_end = time.time()
    print("Smiles joined and gzip in: {} hours".format((
        smiles_end - smiles_start) / 3600))
    print("Finished creating dataset: {} in: {} hours".format(
        file_name, (smiles_end - all_assay_start) / 3600))
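  # Editor's note -- illustrative only, not generated output: the CSV assembled in
  # create_assay_file has a header of the form "mol_id,smiles,PCBA-XXXX,..." and one
  # row per CID, e.g. a made-up row "CID12345,CC(=O)Oc1ccccc1C(=O)O,1,,0" where 1/0
  # encode active/inactive calls and an empty field stands for a missing (-1) result.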
parser = argparse.ArgumentParser(
    description='Deepchem dataset builder for PCBA datasets')
parser.add_argument(
    '-d',
    action='store',
    dest='dataset_name',
    default="",
    help='Choice of dataset: pcba_128, pcba_146, pcba_2475')
parser.add_argument(
    '-g',
    action='store',
    dest='gene_arg',
    default=None,
    help='Name of gene to create a dataset for')
args = parser.parse_args()
pcba_builder = PCBADatsetBuilder()
if args.dataset_name == "pcba_128":
  pcba_builder.create_overview_128()
elif args.dataset_name == "pcba_146":
  pcba_builder.create_overview_146()
elif args.dataset_name == "pcba_2475":
  pcba_builder.create_overview_2475()
elif args.gene_arg is not None:
  pcba_builder.create_overview_for_gene(args.gene_arg)
else:
  parser.print_help()
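# Editor's usage sketch (the script's file name is not given in the source, so
# "pcba_dataset_builder.py" below is a placeholder):
#   python pcba_dataset_builder.py -d pcba_128    # build the 128-assay overview
#   python pcba_dataset_builder.py -d pcba_2475   # build the 2475-assay overview
#   python pcba_dataset_builder.py -g ERBB2       # overview for one gene symbol (example symbol)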
 | 
	mit | 
| 
	shangwuhencc/scikit-learn | 
	examples/plot_multilabel.py | 
	236 | 
	4157 | 
	# Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
    - pick the number of labels: n ~ Poisson(n_labels)
    - n times, choose a class c: c ~ Multinomial(theta)
    - pick the document length: k ~ Poisson(length)
    - k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is more
than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen.  The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
    # get the separating hyperplane
    w = clf.coef_[0]
    a = -w[0] / w[1]
    xx = np.linspace(min_x - 5, max_x + 5)  # make sure the line is long enough
    yy = a * xx - (clf.intercept_[0]) / w[1]
    plt.plot(xx, yy, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
    if transform == "pca":
        X = PCA(n_components=2).fit_transform(X)
    elif transform == "cca":
        X = CCA(n_components=2).fit(X, Y).transform(X)
    else:
        raise ValueError
    min_x = np.min(X[:, 0])
    max_x = np.max(X[:, 0])
    min_y = np.min(X[:, 1])
    max_y = np.max(X[:, 1])
    classif = OneVsRestClassifier(SVC(kernel='linear'))
    classif.fit(X, Y)
    plt.subplot(2, 2, subplot)
    plt.title(title)
    zero_class = np.where(Y[:, 0])
    one_class = np.where(Y[:, 1])
    plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')
    plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
               facecolors='none', linewidths=2, label='Class 1')
    plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
               facecolors='none', linewidths=2, label='Class 2')
    plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
                    'Boundary\nfor class 1')
    plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
                    'Boundary\nfor class 2')
    plt.xticks(())
    plt.yticks(())
    plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
    plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
    if subplot == 2:
        plt.xlabel('First principal component')
        plt.ylabel('Second principal component')
        plt.legend(loc="upper left")
plt.figure(figsize=(8, 6))
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
                                      allow_unlabeled=True,
                                      random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
                                      allow_unlabeled=False,
                                      random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
 | 
	bsd-3-clause | 
| 
	rhiever/sklearn-benchmarks | 
	metafeatures/dataset_describe.py | 
	1 | 
	25613 | 
	""" 
Methods to describe attributes in a dataset. The last column
of a dataset is assumed to be the dependent variable.
Methods include, but are not restricted to:
 - Description of dataset as a whole
 - Description of individual attributes
 - Description of inter-relation between attributes.
Contact: Harsh Nisar GH: harshnisar
"""
import pandas as pd
import numpy as np
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from sklearn.decomposition import PCA
from scipy.stats import kurtosis, skew
class Dataset:
    """
    Initialize the dataset and give the user the option to set some
    defaults, e.g. the names of categorical columns.
    All public methods will provide one value per dataset.
    Private methods are for internal use.  
    prediction_type = {'regression'|'classification'}
    """
    df = None
    df_encoded = None
    categorical_cols = None
    dependent_col = None
    prediction_type = None
    independent_col = None
    def __init__(self, df, prediction_type = None, dependent_col = None,categorical_cols = None):
        
        self.df = df
        self._set_dependent_col(dependent_col)
        self._set_categorical_columns(categorical_cols)
        self._set_prediction_type(prediction_type)
        self.independent_col = list(set(self.df.columns.tolist()) - set([self.dependent_col]))
        self._categorical_column_encoder()
        
    def _set_dependent_col(self, dependent_col):
        """ if nothing given, set the last column in the frame as
        the dependent column."""
        if dependent_col == None:
            self.dependent_col = self.df.columns.tolist()[-1]
        elif dependent_col in self.df.columns.tolist():
            self.dependent_col = dependent_col
        else:
            raise ValueError
    def _set_prediction_type(self, prediction_type):
        """ See the dtype of the dependent_col and return
        either regression or classification 
        """
        if prediction_type == None:
            if self.dependent_col in self.df._get_numeric_data().columns.tolist():
                self.prediction_type = 'regression'
            else:
                self.prediction_type = 'classification'
        else:
            self.prediction_type = prediction_type
    def _set_categorical_columns(self, categorical_cols):
        #TODO: Need to test if the columns exist in the df
        #TODO: Add logic in case user doesn't specify the cols
        if categorical_cols is None:
            num_cols = self.df._get_numeric_data().columns
            cat_cols = list(set(self.df.columns) - set(num_cols) - set([self.dependent_col]))
            self.categorical_cols = cat_cols
            ## empty list in case of no categorical columns or categorical columns are
            ## already label encoded, as in the case of Randal's data.
            ## Assumption: In case of pre-processed data, all columns would be preprocessed
            ## and not just some. Hence, proceed with heuristics only if previous code 
            ## gave zero categorical_cols
            # print cat_cols
            if cat_cols == []:
                possible_cat_cols = []
                threshold_unique = 0.001*self.df.shape[0]
                # print threshold_unique
                for col in list(set(self.df.columns) - set([self.dependent_col])):
                    unique_col = list(self.df[col].unique())
                    unique_col.sort()
                    # print col, len(unique_col)
                    if len(unique_col) < threshold_unique:
                        possible_cat_cols.append(col)
                        continue
                    # print unique_col == range(0, len(unique_col), 1)
                    # print  isinstance(self.df[col][0], np.integer)
                    # If the unique values represent integers from 0 to N, then there
                    # is a high chance they were LabelEncoded using sklearn.
                    # This heavily relies on the way the experiment datasets were encoded.
                    # Not recommended for normal usage.
                    
                    if ((unique_col == list(range(0, len(unique_col), 1))) & (isinstance(self.df.iloc[0][col], np.integer))):
                        possible_cat_cols.append(col)
                        continue
                self.categorical_cols = list(set(possible_cat_cols))
        else:
            self.categorical_cols = categorical_cols
    
    def _categorical_column_encoder(self):
        """ Assumes all categorical variables are nominal and not
        ordinal """
        categorical_cols = self.categorical_cols
        
        self.df_encoded = self.df.copy()
        for col in categorical_cols:
            if len(self.df_encoded[col].unique())<=2:
                #this means, binary :- LabelEncode
                self.df_encoded[col] = LabelEncoder().fit_transform(self.df_encoded[col])
            else:
                # nominal - so make dummy"
                self.df_encoded = pd.get_dummies(self.df_encoded, columns=[col])
        
    def n_rows(self):
        return self.df.shape[0]
    def n_columns(self):
        """ Including dependent variable """
        return self.df.shape[1]
    def ratio_rowcol(self):
        """ rows/col including dependent variable """
        return self.df.shape[0]/self.df.shape[1]
    def n_categorical(self):
        """number of categorical variables excluding the dependent."""
        #todo: can be converted to ratio by total number of columns.
        return len(self.categorical_cols)
    def n_numerical(self):
        """number of categorical variables excluding the dependent."""
        #todo: can be converted to ratio by total number of columns.
        return self.n_columns() - self.n_categorical() - 1        
    def n_classes(self):
        """number of classes in the dependent columns. Only applicable
        for classfication problems. Returns NaN otherwise """
        if self.prediction_type == 'classification':
            return len(self.df[self.dependent_col].unique())
        else:
            return np.nan
    ## Post-encoding dimensional stats.
    #todo: n_cols_post_encoding
    #todo: ratop_rowcol_post_encoding
    #----------------------------------------------------------------------
    # Correlation related
    corr_with_dependent = None
    def _get_corr_with_dependent(self):
        """Called from init. Sets up data for correlation related meta-features.
        #todo: take-call - Should I make different classes/modules for
        different types of meta-features? Eg. Correlation, Entropy"""
        
        # Correlation with the dependent variable only makes sense for regression problems
        if self.prediction_type == 'regression':
            if self.corr_with_dependent is not None:
                return self.corr_with_dependent
            else:
                self.corr_with_dependent = self.df_encoded.corr()[self.dependent_col]
                self.corr_with_dependent = self.corr_with_dependent.loc[self.corr_with_dependent.index!=self.dependent_col]
                return self.corr_with_dependent
    def corr_with_dependent_abs_max(self):
        """ max absolute pearson correlation with dependent variable
        returns np.nan for classificaiton problems. Uses df_encoded
        ie dataframe with categorical columns encoded automatically.
        """
        if self.prediction_type == 'classification':
            return np.nan
        else:
            abs_corr_with_dependent = self._get_corr_with_dependent().abs()
            return abs_corr_with_dependent.max()
    
    def corr_with_dependent_abs_min(self):
        """ min absolute pearson correlation with dependent variable
        returns np.nan for classificaiton problems. Uses df_encoded
        ie dataframe with categorical columns encoded automatically.
        """
        if self.prediction_type == 'classification':
            return np.nan
        else:
            abs_corr_with_dependent = self._get_corr_with_dependent().abs()
            return abs_corr_with_dependent.min()    
    def corr_with_dependent_abs_mean(self):
        """ mean absolute pearson correlation with dependent variable
        returns np.nan for classificaiton problems. Uses df_encoded
        ie dataframe with categorical columns encoded automatically.
        """
        if self.prediction_type == 'classification':
            return np.nan
        else:
            abs_corr_with_dependent = self._get_corr_with_dependent().abs()
            return abs_corr_with_dependent.mean()    
    def corr_with_dependent_abs_median(self):
        """ median absolute pearson correlation with dependent variable
        returns np.nan for classificaiton problems. Uses df_encoded
        ie dataframe with categorical columns encoded automatically.
        """
        if self.prediction_type == 'classification':
            return np.nan
        else:
            abs_corr_with_dependent = self._get_corr_with_dependent().abs()
            return abs_corr_with_dependent.median()    
    def corr_with_dependent_abs_std(self):
        """ std absolute pearson correlation with dependent variable
        returns np.nan for classificaiton problems. Uses df_encoded
        ie dataframe with categorical columns encoded automatically.
        """
        if self.prediction_type == 'classification':
            return np.nan
        else:
            abs_corr_with_dependent = self._get_corr_with_dependent().abs()
            return abs_corr_with_dependent.std(ddof = 1)    
    def corr_with_dependent_abs_25p(self):
        """ 25p absolute pearson correlation with dependent variable
        returns np.nan for classificaiton problems. Uses df_encoded
        ie dataframe with categorical columns encoded automatically.
        """
        if self.prediction_type == 'classification':
            return np.nan
        else:
            abs_corr_with_dependent = self._get_corr_with_dependent().abs()
            return np.nanpercentile(abs_corr_with_dependent, 25)   
    def corr_with_dependent_abs_75p(self):
        """ 75p absolute pearson correlation with dependent variable
        returns np.nan for classificaiton problems. Uses df_encoded
        ie dataframe with categorical columns encoded automatically.
        """
        if self.prediction_type == 'classification':
            return np.nan
        else:
            abs_corr_with_dependent = self._get_corr_with_dependent().abs()
            return np.nanpercentile(abs_corr_with_dependent, 75)
    #todo: try kurtosis and skew for correl values without abs.
    def corr_with_dependent_abs_kurtosis(self):
        """ kurtosis of absolute pearson correlation with dependent variable
        returns np.nan for classificaiton problems. Uses df_encoded
        ie dataframe with categorical columns encoded automatically.
        """
        from scipy.stats import kurtosis
        if self.prediction_type == 'classification':
            return np.nan
        else:
            abs_corr_with_dependent = self._get_corr_with_dependent().abs()
            return kurtosis(abs_corr_with_dependent, bias = False)
    def corr_with_dependent_abs_skew(self):
        """ skew of absolute pearson correlation with dependent variable
        returns np.nan for classificaiton problems. Uses df_encoded
        ie dataframe with categorical columns encoded automatically.
        """
        if self.prediction_type == 'classification':
            return np.nan
        else:
            abs_corr_with_dependent = self._get_corr_with_dependent().abs()
            return skew(abs_corr_with_dependent, bias = False)
    #----------------------------------------------------------------------
    # Class probability related
    class_probablities = None
    def _get_class_probablity(self):
        if self.class_probablities is None:
            dependent_col = self.df[self.dependent_col]
            class_counts = dependent_col.value_counts()
            self.class_probablities = class_counts/self.n_rows()
            return self.class_probablities
        else:
            return self.class_probablities
    def class_prob_min(self):
        if self.prediction_type=='regression':
            return np.nan
        else:
            class_probablities = self._get_class_probablity()
            return class_probablities.min()
    
    def class_prob_max(self):
        if self.prediction_type=='regression':
            return np.nan
        else:
            class_probablities = self._get_class_probablity()
            return class_probablities.max()
    
    def class_prob_std(self):
        if self.prediction_type=='regression':
            return np.nan
        else:
            class_probablities = self._get_class_probablity()
            return class_probablities.std(ddof = 1)    
    
    def class_prob_mean(self):
        if self.prediction_type=='regression':
            return np.nan
        else:
            class_probablities = self._get_class_probablity()
            return class_probablities.mean()    
    
    def class_prob_median(self):
        if self.prediction_type=='regression':
            return np.nan
        else:
            class_probablities = self._get_class_probablity()
            return class_probablities.median()
    #todo: add kurtosis and skew here too. There are usually few classes, so
    #it may not make sense.
    #----------------------------------------------------------------------
    # Symbols related - All the categorical columns
    symbol_counts_dict = None
    def _get_symbols_per_category(self):
        """
        Sets a dictionary with the number of symbols per categorical
        column, using categorical_cols info.
        """
        if self.symbol_counts_dict is None:
            self.symbol_counts_dict = {}
            for column in self.categorical_cols:
                self.symbol_counts_dict[column] = self.df[column].dropna().unique().shape[0]
            return self.symbol_counts_dict
        else:
            return self.symbol_counts_dict
    def symbols_mean(self):
        """ Average symbols per columns """
        symbol_counts_dict = self._get_symbols_per_category()
        ## None is for checking empty, no categorical columns
        
        if not symbol_counts_dict:
            return np.nan
        symbol_counts = list(symbol_counts_dict.values())
        return np.nanmean(symbol_counts)
    def symbols_std(self):
        """ std of symbols per columns """
        symbol_counts_dict = self._get_symbols_per_category()
        ## None is for checking empty, no categorical columns
        if not symbol_counts_dict:
            return np.nan
        symbol_counts = list(symbol_counts_dict.values())
        return np.nanstd(symbol_counts, ddof = 1)
    
    def symbols_min(self):
        """ Average symbols per columns """
        symbol_counts_dict = self._get_symbols_per_category()
        ## None is for checking empty, no categorical columns
        if not symbol_counts_dict:
            return np.nan
        symbol_counts = list(symbol_counts_dict.values())
        return np.min(symbol_counts)   
    def symbols_max(self):
        """ Average symbols per columns """
        symbol_counts_dict = self._get_symbols_per_category()
        ## None is for checking empty, no categorical columns
        if not symbol_counts_dict:
            return np.nan
        symbol_counts = list(symbol_counts_dict.values())
        return np.max(symbol_counts)
    def symbols_sum(self):
        """ Sum of symbols per column """
        symbol_counts_dict = self._get_symbols_per_category()
        ## None is for checking empty, no categorical columns
        if not symbol_counts_dict:
            return np.nan
        symbol_counts = list(symbol_counts_dict.values())
        return np.sum(symbol_counts)  
    def symbols_skew(self):
        from scipy.stats import skew
        symbol_counts_dict = self._get_symbols_per_category()
        ## None is for checking empty, no categorical columns
        if not symbol_counts_dict:
            return np.nan
        symbol_counts = list(symbol_counts_dict.values())
        return skew(symbol_counts, bias = False)  
    def symbols_kurtosis(self):
        from scipy.stats import kurtosis
        symbol_counts_dict = self._get_symbols_per_category()
        ## None is for checking empty, no categorical columns
        if not symbol_counts_dict:
            return np.nan
        symbol_counts = list(symbol_counts_dict.values())
        return kurtosis(symbol_counts, bias = False)  
    
    ##todo: Note we can evaluate symbol probabilities too.
    #----------------------------------------------------------------------
    # Kurtosis related - For all non-categorical columns
    kurtosis_dict = None
    def _get_kurtosis_per_num_column(self):
        """Sets an dictionary with kurtosis per numerical column"""
        if self.kurtosis_dict == None:
            self.kurtosis_dict = {}
            numerical_cols = list(set(self.independent_col) - set(self.categorical_cols)) 
            for column in numerical_cols:
                self.kurtosis_dict[column] = kurtosis(self.df[column].dropna(), bias = False)
            return self.kurtosis_dict
        else:
            return self.kurtosis_dict
    def kurtosis_mean(self):
        """ Mean kurtosis per columns """
        kurtosis_dict = self._get_kurtosis_per_num_column()
        ## None is for checking empty, no categorical columns
        
        if not kurtosis_dict:
            return np.nan
        
        kurtosisses = list(kurtosis_dict.values())
        return np.nanmean(kurtosisses)
    def kurtosis_median(self):
        """ Median kurtosis per columns """
        kurtosis_dict = self._get_kurtosis_per_num_column()
        ## None is for checking empty, no categorical columns
        
        if not kurtosis_dict:
            return np.nan
        
        kurtosisses = list(kurtosis_dict.values())
        return np.nanmedian(kurtosisses)
    def kurtosis_min(self):
        """ Min kurtosis per columns """
        kurtosis_dict = self._get_kurtosis_per_num_column()
        ## None is for checking empty, no categorical columns
        
        if not kurtosis_dict:
            return np.nan
        
        kurtosisses = list(kurtosis_dict.values())
        return np.min(kurtosisses)
    def kurtosis_max(self):
        """ Max kurtosis per columns """
        kurtosis_dict = self._get_kurtosis_per_num_column()
        ## None is for checking empty, no categorical columns
        
        if not kurtosis_dict:
            return np.nan
        
        kurtosisses = list(kurtosis_dict.values())
        return np.max(kurtosisses)
    def kurtosis_std(self):
        """ STD of kurtosis per columns """
        kurtosis_dict = self._get_kurtosis_per_num_column()
        ## None is for checking empty, no categorical columns
        
        if not kurtosis_dict:
            return np.nan
        
        kurtosisses = list(kurtosis_dict.values())
        return np.nanstd(kurtosisses)
    def kurtosis_kurtosis(self):
        """ Kurtosis of kurtosis per columns """
        kurtosis_dict = self._get_kurtosis_per_num_column()
        ## None is for checking empty, no categorical columns
        
        if not kurtosis_dict:
            return np.nan
        
        kurtosisses = list(kurtosis_dict.values())
        return kurtosis(kurtosisses, bias = False)
    
    def kurtosis_skew(self):
        """ skew of kurtosis per columns """
        kurtosis_dict = self._get_kurtosis_per_num_column()
        ## None is for checking empty, no categorical columns
        
        if not kurtosis_dict:
            return np.nan
        
        kurtosisses = list(kurtosis_dict.values())
        return skew(kurtosisses, bias = False)
    #----------------------------------------------------------------------
    # Skew related - For all non-categorical columns
    skew_dict = None
    def _get_skew_per_num_column(self):
        """Sets an dictionary with skew measure per numerical column"""
        if self.skew_dict == None:
            self.skew_dict = {}
            numerical_cols = list(set(self.independent_col) - set(self.categorical_cols)) 
            for column in numerical_cols:
                self.skew_dict[column] = skew(self.df[column].dropna(), bias = False)
            return self.skew_dict
        else:
            return self.skew_dict
    def skew_mean(self):
        """ Mean skew in all numerical columns """
        skew_dict = self._get_skew_per_num_column()
        ## Empty/None means there are no numerical columns
        
        if not skew_dict:
            return np.nan
        
        skews = list(skew_dict.values())
        return np.nanmean(skews)
    def skew_median(self):
        """ Median skew in all numerical columns """
        skew_dict = self._get_skew_per_num_column()
        ## Empty/None means there are no numerical columns
        
        if not skew_dict:
            return np.nan
        
        skews = list(skew_dict.values())
        return np.nanmedian(skews)
    def skew_min(self):
        """ Min skew in all numerical columns """
        skew_dict = self._get_skew_per_num_column()
        ## Empty/None means there are no numerical columns
        
        if not skew_dict:
            return np.nan
        
        skews = list(skew_dict.values())
        return np.min(skews)
    def skew_max(self):
        """ Min skew in all numerical columns """
        skew_dict = self._get_skew_per_num_column()
        ## None is for checking empty, no categorical columns
        
        if not skew_dict:
            return np.nan
        
        skews = list(skew_dict.values())
        return np.max(skews)
    def skew_std(self):
        """ std skew in all numerical columns """
        skew_dict = self._get_skew_per_num_column()
        ## Empty/None means there are no numerical columns
        
        if not skew_dict:
            return np.nan
        
        skews = list(skew_dict.values())
        return np.nanstd(skews)
    def skew_kurtosis(self):
        """ kurtosis of skew in all numerical columns """
        skew_dict = self._get_skew_per_num_column()
        ## Empty/None means there are no numerical columns
        
        if not skew_dict:
            return np.nan
        
        skews = list(skew_dict.values())
        return kurtosis(skews, bias = False)
    def skew_skew(self):
        """ skew of skew in all numerical columns """
        skew_dict = self._get_skew_per_num_column()
        ## Empty/None means there are no numerical columns
        
        if not skew_dict:
            return np.nan
        
        skews = list(skew_dict.values())
        return skew(skews, bias = False)
    #----------------------------------------------------------------------
    # PCA 
    _pca_components = None
    
    def _get_pca_components(self):
        """ Should work on dataframe with categorical variables encoded"""
        if self._pca_components is None:
            try:
                clf = PCA(copy = True)
                clf.fit(self.df_encoded[self.df_encoded.columns.drop(self.dependent_col)])
                self._pca_components = clf
                return self._pca_components
            except Exception as e:
                print(e, '\t Could not process PCA')
                self._pca_components = False
                return self._pca_components
        else:
            return self._pca_components
    def pca_fraction_95(self):
        pca_compenents = self._get_pca_components()
        if pca_compenents!=False:
            sum_variance = 0
            min_idx = 0
            for idx, ratio in enumerate(pca_compenents.explained_variance_ratio_):
                sum_variance = sum_variance + ratio
                min_idx = min_idx + 1
                if sum_variance >= 0.95:
                    return float(min_idx)/len(pca_compenents.explained_variance_ratio_)
                else:
                    continue
            return 1
        
        else:
            return np.nan
    #TODO: in effect, mean, max, min, std etc stats can be done on the variance explained ratios.
    # I feel they will be useful.
    #----------------------------------------------------------------------
    # Entropy of the dependent variable - Classification
    def entropy_dependent(self):
        """ Only for Classification problems for now """
        if self.prediction_type == 'classification':
            class_probablities = self._get_class_probablity()
            entropy_bench = -1 * class_probablities * np.log(class_probablities)
            entropy = entropy_bench.sum()
            self.entropy_ = entropy
            return entropy
        else:
            return np.nan
    def diversity_fraction(self):
        """ Only for Classification problems for now """
        if self.prediction_type == 'classification':
            class_probablities = self._get_class_probablity()
            entropy_bench = -1 * class_probablities * np.log(class_probablities)
            entropy = entropy_bench.sum()
            
            diversity = np.e**entropy
            diversity_fraction = diversity/class_probablities.shape[0]
            return diversity_fraction
        else:
            return np.nan
    #TODO: Can do plain diversity too without any normalization.
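#----------------------------------------------------------------------
# Editor's usage sketch: a minimal, self-contained illustration of how the
# Dataset class above can be driven. The toy frame and its column names are
# invented for this example and are not part of the original module.
if __name__ == '__main__':
    _toy = pd.DataFrame({
        'height': [1.2, 3.4, 2.2, 5.1, 4.0, 2.9],
        'color': ['red', 'blue', 'red', 'green', 'blue', 'red'],
        'label': ['a', 'b', 'a', 'b', 'a', 'b'],
    })
    _ds = Dataset(_toy, prediction_type='classification', dependent_col='label')
    # basic shape / type meta-features
    print(_ds.n_rows(), _ds.n_columns(), _ds.n_categorical(), _ds.n_classes())
    # class-balance meta-features (both classes have probability 0.5 here)
    print(_ds.class_prob_min(), _ds.class_prob_max())
    # entropy of the dependent variable, -sum(p * log(p)), and its diversity fraction
    print(_ds.entropy_dependent(), _ds.diversity_fraction())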
 | 
	mit | 
| 
	fabianvf/osf.io | 
	scripts/annotate_rsvps.py | 
	60 | 
	2256 | 
	"""Utilities for annotating workshop RSVP data.
Example ::
    import pandas as pd
    from scripts import annotate_rsvps
    frame = pd.read_csv('workshop.csv')
    annotated = annotate_rsvps.process(frame)
    annotated.to_csv('workshop-annotated.csv')
"""
import re
import logging
from dateutil.parser import parse as parse_date
from modularodm import Q
from modularodm.exceptions import ModularOdmException
from website.models import User, Node, NodeLog
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def find_by_email(email):
    try:
        return User.find_one(Q('username', 'iexact', email))
    except ModularOdmException:
        return None
def find_by_name(name):
    try:
        parts = re.split(r'\s+', name.strip())
    except:
        return None
    if len(parts) < 2:
        return None
    users = User.find(
        reduce(
            lambda acc, value: acc & value,
            [
                Q('fullname', 'icontains', part.decode('utf-8', 'ignore'))
                for part in parts
            ]
        )
    ).sort('-date_created')
    if not users:
        return None
    if len(users) > 1:
        logger.warn('Multiple users found for name {}'.format(name))
    return users[0]
def logs_since(user, date):
    return NodeLog.find(
        Q('user', 'eq', user._id) &
        Q('date', 'gt', date)
    )
def nodes_since(user, date):
    return Node.find(
        Q('creator', 'eq', user._id) &
        Q('date_created', 'gt', date)
    )
def process(frame):
    frame = frame.copy()
    frame['user_id'] = ''
    frame['user_logs'] = ''
    frame['user_nodes'] = ''
    frame['last_log'] = ''
    for idx, row in frame.iterrows():
        user = (
            find_by_email(row['Email address'].strip()) or
            find_by_name(row['Name'])
        )
        if user:
            date = parse_date(row['Workshop_date'])
            frame.loc[idx, 'user_id'] = user._id
            logs = logs_since(user, date)
            frame.loc[idx, 'user_logs'] = logs.count()
            frame.loc[idx, 'user_nodes'] = nodes_since(user, date).count()
            if logs:
                frame.loc[idx, 'last_log'] = logs.sort('-date')[0].date.strftime('%c')
    return frame
 | 
	apache-2.0 | 
| 
	rexshihaoren/scikit-learn | 
	examples/svm/plot_weighted_samples.py | 
	69 | 
	1942 | 
	"""
=====================
SVM: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
    # plot the decision function
    xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
    Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    # plot the line, the points, and the nearest vectors to the plane
    axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
    axis.scatter(X[:, 0], X[:, 1], c=Y, s=100 * sample_weight, alpha=0.9,
                 cmap=plt.cm.bone)
    axis.axis('off')
    axis.set_title(title)
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
Y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# for reference, first fit without class weights
# fit the model
clf_weights = svm.SVC()
clf_weights.fit(X, Y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, Y)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
                       "Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
                       "Modified weights")
plt.show()
 | 
	bsd-3-clause | 
| 
	jzt5132/scikit-learn | 
	examples/covariance/plot_robust_vs_empirical_covariance.py | 
	248 | 
	6359 | 
	r"""
=======================================
Robust vs Empirical covariance estimate
=======================================
The usual covariance maximum likelihood estimate is very sensitive to the
presence of outliers in the data set. In such a case, it would be better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set.
Minimum Covariance Determinant Estimator
----------------------------------------
The Minimum Covariance Determinant estimator is a robust, high-breakdown point
(i.e. it can be used to estimate the covariance matrix of highly contaminated
datasets, up to
:math:`\frac{n_\text{samples} - n_\text{features}-1}{2}` outliers) estimator of
covariance. The idea is to find
:math:`\frac{n_\text{samples} + n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant, yielding
a "pure" subset of observations from which to compute standards estimates of
location and covariance. After a correction step aiming at compensating the
fact that the estimates were learned from only a portion of the initial data,
we end up with robust estimates of the data set location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced by
P. J. Rousseeuw in [1]_.
Evaluation
----------
In this example, we compare the estimation errors that are made when using
various types of location and covariance estimates on contaminated Gaussian
distributed data sets:
- The mean and the empirical covariance of the full dataset, which break
  down as soon as there are outliers in the data set
- The robust MCD, that has a low error provided
  :math:`n_\text{samples} > 5n_\text{features}`
- The mean and the empirical covariance of the observations that are known
  to be good ones. This can be considered as a "perfect" MCD estimation,
  so one can trust our implementation by comparing to this case.
References
----------
.. [1] P. J. Rousseeuw. Least median of squares regression. J. Am
    Stat Ass, 79:871, 1984.
.. [2] Johanna Hardin, David M Rocke. Journal of Computational and
    Graphical Statistics. December 1, 2005, 14(4): 928-946.
.. [3] Zoubir A., Koivunen V., Chakhchoukh Y. and Muma M. (2012). Robust
    estimation in signal processing: A tutorial-style treatment of
    fundamental concepts. IEEE Signal Processing Magazine 29(4), 61-80.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.covariance import EmpiricalCovariance, MinCovDet
# example settings
n_samples = 80
n_features = 5
repeat = 10
range_n_outliers = np.concatenate(
    (np.linspace(0, n_samples / 8, 5),
     np.linspace(n_samples / 8, n_samples / 2, 5)[1:-1]))
# definition of arrays to store results
err_loc_mcd = np.zeros((range_n_outliers.size, repeat))
err_cov_mcd = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_full = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_full = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_pure = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_pure = np.zeros((range_n_outliers.size, repeat))
# computation
for i, n_outliers in enumerate(range_n_outliers):
    for j in range(repeat):
        rng = np.random.RandomState(i * j)
        # generate data
        X = rng.randn(n_samples, n_features)
        # add some outliers
        outliers_index = rng.permutation(n_samples)[:n_outliers]
        outliers_offset = 10. * \
            (np.random.randint(2, size=(n_outliers, n_features)) - 0.5)
        X[outliers_index] += outliers_offset
        inliers_mask = np.ones(n_samples).astype(bool)
        inliers_mask[outliers_index] = False
        # fit a Minimum Covariance Determinant (MCD) robust estimator to data
        mcd = MinCovDet().fit(X)
        # compare raw robust estimates with the true location and covariance
        err_loc_mcd[i, j] = np.sum(mcd.location_ ** 2)
        err_cov_mcd[i, j] = mcd.error_norm(np.eye(n_features))
        # compare estimators learned from the full data set with true
        # parameters
        err_loc_emp_full[i, j] = np.sum(X.mean(0) ** 2)
        err_cov_emp_full[i, j] = EmpiricalCovariance().fit(X).error_norm(
            np.eye(n_features))
        # compare with an empirical covariance learned from a pure data set
        # (i.e. "perfect" mcd)
        pure_X = X[inliers_mask]
        pure_location = pure_X.mean(0)
        pure_emp_cov = EmpiricalCovariance().fit(pure_X)
        err_loc_emp_pure[i, j] = np.sum(pure_location ** 2)
        err_cov_emp_pure[i, j] = pure_emp_cov.error_norm(np.eye(n_features))
# Display results
font_prop = matplotlib.font_manager.FontProperties(size=11)
plt.subplot(2, 1, 1)
plt.errorbar(range_n_outliers, err_loc_mcd.mean(1),
             yerr=err_loc_mcd.std(1) / np.sqrt(repeat),
             label="Robust location", color='m')
plt.errorbar(range_n_outliers, err_loc_emp_full.mean(1),
             yerr=err_loc_emp_full.std(1) / np.sqrt(repeat),
             label="Full data set mean", color='green')
plt.errorbar(range_n_outliers, err_loc_emp_pure.mean(1),
             yerr=err_loc_emp_pure.std(1) / np.sqrt(repeat),
             label="Pure data set mean", color='black')
plt.title("Influence of outliers on the location estimation")
plt.ylabel(r"Error ($||\mu - \hat{\mu}||_2^2$)")
plt.legend(loc="upper left", prop=font_prop)
plt.subplot(2, 1, 2)
x_size = range_n_outliers.size
plt.errorbar(range_n_outliers, err_cov_mcd.mean(1),
             yerr=err_cov_mcd.std(1),
             label="Robust covariance (mcd)", color='m')
plt.errorbar(range_n_outliers[:(x_size / 5 + 1)],
             err_cov_emp_full.mean(1)[:(x_size / 5 + 1)],
             yerr=err_cov_emp_full.std(1)[:(x_size / 5 + 1)],
             label="Full data set empirical covariance", color='green')
plt.plot(range_n_outliers[(x_size / 5):(x_size / 2 - 1)],
         err_cov_emp_full.mean(1)[(x_size / 5):(x_size / 2 - 1)], color='green',
         ls='--')
plt.errorbar(range_n_outliers, err_cov_emp_pure.mean(1),
             yerr=err_cov_emp_pure.std(1),
             label="Pure data set empirical covariance", color='black')
plt.title("Influence of outliers on the covariance estimation")
plt.xlabel("Amount of contamination (%)")
plt.ylabel("RMSE")
plt.legend(loc="upper center", prop=font_prop)
plt.show()
 | 
	bsd-3-clause | 
| 
	msultan/osprey | 
	osprey/utils.py | 
	3 | 
	12101 | 
	from __future__ import print_function, absolute_import, division
import warnings
import numpy as np
import scipy.sparse as sp
import os.path
import sys
import contextlib
import json
from datetime import datetime
from sklearn.pipeline import Pipeline
from six import StringIO
from .eval_scopes import import_all_estimators
from .trials import JSONEncoded
__all__ = ['dict_merge', 'in_directory', 'prepend_syspath',
           'Unbuffered', 'format_timedelta', 'current_pretty_time',
           'short_format_time', 'mock_module', 'join_quoted', 'expand_path',
           'is_msmbuilder_estimator', 'num_samples', 'check_arrays',
           'trials_to_dict']
def is_json_serializable(obj):
    """
    Checks whether the given object is JSON serializable.
    Returns
    -------
    Bool
    """
    try:
        json.dumps(obj)
        return True
    except TypeError:
        return False
def dict_merge(base, top):
    """Recursively merge two dictionaries, with the elements from `top`
    taking precedence over elements from `top`.
    Returns
    -------
    out : dict
        A new dict, containing the merged records.
    """
    out = dict(top)
    for key in base:
        if key in top:
            if isinstance(base[key], dict) and isinstance(top[key], dict):
                out[key] = dict_merge(base[key], top[key])
        else:
            out[key] = base[key]
    return out
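# Editor's note -- a small worked example of dict_merge (values invented):
#     dict_merge({'a': 1, 'b': {'x': 1}}, {'b': {'y': 2}, 'c': 3})
#     == {'a': 1, 'b': {'x': 1, 'y': 2}, 'c': 3}
# Nested dicts are merged recursively; for plain keys the value from `top` wins.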
@contextlib.contextmanager
def in_directory(path):
    """Context manager (with statement) that changes the current directory
    during the context.
    """
    curdir = os.path.abspath(os.curdir)
    os.chdir(path)
    yield
    os.chdir(curdir)
@contextlib.contextmanager
def prepend_syspath(path):
    """Contect manager (with statement) that prepends path to sys.path"""
    sys.path.insert(0, path)
    yield
    sys.path.pop(0)
class Unbuffered(object):
    # used to turn off output buffering
    # http://stackoverflow.com/questions/107705/python-output-buffering
    def __init__(self, stream):
        self.stream = stream
    def write(self, data):
        self.stream.write(data)
        self.stream.flush()
    def __getattr__(self, attr):
        return getattr(self.stream, attr)
def format_timedelta(td_object):
    """Format a timedelta object for display to users
    Returns
    -------
    str
    """
    def get_total_seconds(td):
        # timedelta.total_seconds not in py2.6
        return (td.microseconds +
                (td.seconds + td.days * 24 * 3600) * 1e6) / 1e6
    seconds = int(get_total_seconds(td_object))
    periods = [('year',    60*60*24*365),
               ('month',   60*60*24*30),
               ('day',     60*60*24),
               ('hour',    60*60),
               ('minute',  60),
               ('second',  1)]
    strings = []
    for period_name, period_seconds in periods:
        if seconds > period_seconds:
            period_value, seconds = divmod(seconds, period_seconds)
            if period_value == 1:
                strings.append("%s %s" % (period_value, period_name))
            else:
                strings.append("%s %ss" % (period_value, period_name))
    return ", ".join(strings)
def current_pretty_time():
    return datetime.now().strftime("%B %d, %Y %l:%M %p")
def _squeeze_time(t):
    """Remove .1s to the time under Windows: this is the time it take to
    stat files. This is needed to make results similar to timings under
    Unix, for tests
    """
    if sys.platform.startswith('win'):
        return max(0, t - .1)
    else:
        return t
def short_format_time(t):
    t = _squeeze_time(t)
    if t > 60:
        return "%4.1fmin" % (t / 60.)
    else:
        return " %5.1fs" % (t)
def mock_module(name):
    class MockModule(object):
        def __call__(self, *args, **kwargs):
            raise ImportError('no module named %s' % name)
        def __getattr__(self, *args, **kwargs):
            raise ImportError('no module named %s' % name)
    return MockModule()
def join_quoted(values, quote="'"):
    return ', '.join("%s%s%s" % (quote, e, quote) for e in values)
def expand_path(path, base='.'):
    path = os.path.expanduser(path)
    if not os.path.isabs(path):
        path = os.path.join(base, path)
    return path
def is_msmbuilder_estimator(estimator):
    try:
        import msmbuilder
    except ImportError:
        return False
    msmbuilder_estimators = import_all_estimators(msmbuilder).values()
    out = estimator.__class__ in msmbuilder_estimators
    if isinstance(estimator, Pipeline):
        out = any(step.__class__ in msmbuilder_estimators
                  for name, step in estimator.steps)
    return out
def _assert_all_finite(X):
    """Like assert_all_finite, but only for ndarray."""
    X = np.asanyarray(X)
    # First try an O(n) time, O(1) space solution for the common case that
    # everything is finite; fall back to O(n) space np.isfinite to prevent
    # false positives from overflow in sum method
    if (X.dtype.char in np.typecodes['AllFloat'] and
            not np.isfinite(X.sum()) and not np.isfinite(X).all()):
        raise ValueError("Input contains NaN, infinity"
                         " or a value too large for %r." % X.dtype)
def _warn_if_not_finite(X):
    """UserWarning if array contains non-finite elements"""
    X = np.asanyarray(X)
    # First try an O(n) time, O(1) space solution for the common case that
    # everything is finite; fall back to O(n) space np.isfinite to prevent
    # false positives from overflow in sum method
    if (X.dtype.char in np.typecodes['AllFloat'] and
            not np.isfinite(X.sum()) and not np.isfinite(X).all()):
        warnings.warn("Result contains NaN, infinity"
                      " or a value too large for %r." % X.dtype,
                      category=UserWarning)
def num_samples(x, is_nested=False):
    """Return number of samples in array-like x."""
    if hasattr(x, 'fit'):
        # Don't get num_samples from an ensemble's length!
        raise TypeError('Expected sequence or array-like, got '
                        'estimator %s' % x)
    if is_nested:
        return sum(num_samples(xx, is_nested=False) for xx in x)
    if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
        if hasattr(x, '__array__'):
            x = np.asarray(x)
        else:
            raise TypeError("Expected sequence or array-like, got %s" %
                            type(x))
    if hasattr(x, 'shape'):
        if len(x.shape) == 0:
            raise TypeError("Singleton array %r cannot be considered"
                            " a valid collection." % x)
        return x.shape[0]
    else:
        return len(x)
def check_arrays(*arrays, **options):
    """Check that all arrays have consistent first dimensions.
    Checks whether all objects in arrays have the same shape or length.
    By default lists and tuples are converted to numpy arrays.
    It is possible to enforce certain properties, such as dtype, contiguity
    and sparse matrix format (if a sparse matrix is passed).
    Converting lists to arrays can be disabled by setting ``allow_lists=True``.
    Lists can then contain arbitrary objects and are not checked for dtype,
    finiteness or anything else but length. Arrays are still checked
    and possibly converted.
    Parameters
    ----------
    *arrays : sequence of arrays or scipy.sparse matrices with same shape[0]
        Python lists or tuples occurring in arrays are converted to 1D numpy
        arrays, unless allow_lists is specified.
    sparse_format : 'csr', 'csc' or 'dense', None by default
        If not None, any scipy.sparse matrix is converted to
        Compressed Sparse Rows or Compressed Sparse Columns representations.
        If 'dense', an error is raised when a sparse array is
        passed.
    copy : boolean, False by default
        If copy is True, ensure that returned arrays are copies of the original
        (if not already converted to another format earlier in the process).
    check_ccontiguous : boolean, False by default
        Check that the arrays are C contiguous
    dtype : a numpy dtype instance, None by default
        Enforce a specific dtype.
    warn_nans : boolean, False by default
        Prints a warning if NaNs are present in the arrays; NaNs are then
        allowed (supersedes allow_nans)
    replace_nans : boolean, False by default
        Replace nans in the arrays with zeros
    allow_lists : bool
        Allow lists of arbitrary objects as input, just check their length.
        Disables
    allow_nans : boolean, False by default
        Allows nans in the arrays
    allow_nd : boolean, False by default
        Allows arrays of more than 2 dimensions.
    """
    sparse_format = options.pop('sparse_format', None)
    if sparse_format not in (None, 'csr', 'csc', 'dense'):
        raise ValueError('Unexpected sparse format: %r' % sparse_format)
    copy = options.pop('copy', False)
    check_ccontiguous = options.pop('check_ccontiguous', False)
    dtype = options.pop('dtype', None)
    warn_nans = options.pop('warn_nans', False)
    replace_nans = options.pop('replace_nans', False)
    allow_lists = options.pop('allow_lists', False)
    allow_nans = options.pop('allow_nans', False)
    allow_nd = options.pop('allow_nd', False)
    if options:
        raise TypeError("Unexpected keyword arguments: %r" % options.keys())
    if len(arrays) == 0:
        return None
    n_samples = num_samples(arrays[0])
    checked_arrays = []
    for array in arrays:
        array_orig = array
        if array is None:
            # special case: ignore optional y=None kwarg pattern
            checked_arrays.append(array)
            continue
        size = num_samples(array)
        if size != n_samples:
            raise ValueError("Found array with dim %d. Expected %d"
                             % (size, n_samples))
        if not allow_lists or hasattr(array, "shape"):
            if sp.issparse(array):
                if sparse_format == 'csr':
                    array = array.tocsr()
                elif sparse_format == 'csc':
                    array = array.tocsc()
                elif sparse_format == 'dense':
                    raise TypeError('A sparse matrix was passed, but dense '
                                    'data is required. Use X.toarray() to '
                                    'convert to a dense numpy array.')
                if check_ccontiguous:
                    array.data = np.ascontiguousarray(array.data, dtype=dtype)
                elif hasattr(array, 'data'):
                    array.data = np.asarray(array.data, dtype=dtype)
                elif array.dtype != dtype:
                    array = array.astype(dtype)
                if not allow_nans:
                    if hasattr(array, 'data'):
                        _assert_all_finite(array.data)
                    else:
                        _assert_all_finite(array.values())
            else:
                if check_ccontiguous:
                    array = np.ascontiguousarray(array, dtype=dtype)
                else:
                    array = np.asarray(array, dtype=dtype)
                if warn_nans:
                    allow_nans = True
                    _warn_if_not_finite(array)
                if replace_nans:
                    array = np.nan_to_num(array)
                if not allow_nans:
                    _assert_all_finite(array)
            if not allow_nd and array.ndim >= 3:
                raise ValueError("Found array with dim %d. Expected <= 2" %
                                 array.ndim)
        if copy and array is array_orig:
            array = array.copy()
        checked_arrays.append(array)
    return checked_arrays
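# Minimal usage sketch for check_arrays (hypothetical helper, not used
# elsewhere in this module): NaN replacement plus dtype enforcement on a
# mixed array/list input.
def _check_arrays_example():
    X = np.array([[1.0, 2.0], [3.0, np.nan]])
    y = [0, 1]
    # The list ``y`` is converted to an array, the NaN in ``X`` is replaced
    # by zero, and both outputs share the same first dimension.
    X_checked, y_checked = check_arrays(X, y, replace_nans=True,
                                        dtype=np.float64)
    return X_checked, y_checked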
def trials_to_dict(trials, columns):
    """Yield each trial row as a dict keyed by column name, decoding
    JSON-encoded columns back into Python objects."""
    for trial in trials:
        d = {}
        for i, item in enumerate(columns.items()):
            key, val = item
            new_val = trial[i]
            if isinstance(val.type, JSONEncoded):
                # Stored as a JSON string; decode it back to a Python object.
                new_val = json.load(StringIO(new_val))
            d[key] = new_val
        yield d
 | 
	apache-2.0 | 
| 
	poryfly/scikit-learn | 
	examples/model_selection/plot_roc_crossval.py | 
	247 | 
	3253 | 
	"""
=============================================================
Receiver Operating Characteristic (ROC) with cross validation
=============================================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality using cross-validation.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
This example shows the ROC response of different datasets, created from K-fold
cross-validation. Taking all of these curves, it is possible to calculate the
mean area under curve, and see the variance of the curve when the
training set is split into different subsets. This roughly shows how the
classifier output is affected by changes in the training data, and how
different the splits generated by K-fold cross-validation are from one another.
.. note::
    See also :func:`sklearn.metrics.auc_score`,
             :func:`sklearn.cross_validation.cross_val_score`,
             :ref:`example_model_selection_plot_roc.py`,
"""
print(__doc__)
import numpy as np
from scipy import interp
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import StratifiedKFold
###############################################################################
# Data IO and generation
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
X, y = X[y != 2], y[y != 2]
n_samples, n_features = X.shape
# Add noisy features
random_state = np.random.RandomState(0)
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
###############################################################################
# Classification and ROC analysis
# Run classifier with cross-validation and plot ROC curves
cv = StratifiedKFold(y, n_folds=6)
classifier = svm.SVC(kernel='linear', probability=True,
                     random_state=random_state)
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
all_tpr = []
for i, (train, test) in enumerate(cv):
    probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
    # Compute ROC curve and area under the curve
    fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
    mean_tpr += interp(mean_fpr, fpr, tpr)
    mean_tpr[0] = 0.0
    roc_auc = auc(fpr, tpr)
    plt.plot(fpr, tpr, lw=1, label='ROC fold %d (area = %0.2f)' % (i, roc_auc))
plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='Luck')
mean_tpr /= len(cv)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
plt.plot(mean_fpr, mean_tpr, 'k--',
         label='Mean ROC (area = %0.2f)' % mean_auc, lw=2)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
 | 
	bsd-3-clause | 
| 
	valexandersaulys/prudential_insurance_kaggle | 
	venv/lib/python2.7/site-packages/sklearn/decomposition/tests/test_fastica.py | 
	272 | 
	7798 | 
	"""
Test the fastica algorithm.
"""
import itertools
import warnings
import numpy as np
from scipy import stats
from nose.tools import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
    """ Centers and norms x **in place**
        Parameters
        -----------
        x: ndarray
            Array with an axis of observations (statistical units) measured on
            random variables.
        axis: int, optional
            Axis along which the mean and variance are calculated.
    """
    x = np.rollaxis(x, axis)
    x -= x.mean(axis=0)
    x /= x.std(axis=0)
def test_gs():
    # Test gram schmidt orthonormalization
    # generate a random orthogonal  matrix
    rng = np.random.RandomState(0)
    W, _, _ = np.linalg.svd(rng.randn(10, 10))
    w = rng.randn(10)
    _gs_decorrelation(w, W, 10)
    assert_less((w ** 2).sum(), 1.e-10)
    w = rng.randn(10)
    u = _gs_decorrelation(w, W, 5)
    tmp = np.dot(u, W.T)
    assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
    # Test the FastICA algorithm on very simple data.
    rng = np.random.RandomState(0)
    # scipy.stats uses the global RNG:
    np.random.seed(0)
    n_samples = 1000
    # Generate two sources:
    s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
    s2 = stats.t.rvs(1, size=n_samples)
    s = np.c_[s1, s2].T
    center_and_norm(s)
    s1, s2 = s
    # Mixing angle
    phi = 0.6
    mixing = np.array([[np.cos(phi), np.sin(phi)],
                       [np.sin(phi), -np.cos(phi)]])
    m = np.dot(mixing, s)
    if add_noise:
        m += 0.1 * rng.randn(2, 1000)
    center_and_norm(m)
    # function as fun arg
    def g_test(x):
        return x ** 3, (3 * x ** 2).mean(axis=-1)
    algos = ['parallel', 'deflation']
    nls = ['logcosh', 'exp', 'cube', g_test]
    whitening = [True, False]
    for algo, nl, whiten in itertools.product(algos, nls, whitening):
        if whiten:
            k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
            assert_raises(ValueError, fastica, m.T, fun=np.tanh,
                          algorithm=algo)
        else:
            X = PCA(n_components=2, whiten=True).fit_transform(m.T)
            k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
            assert_raises(ValueError, fastica, X, fun=np.tanh,
                          algorithm=algo)
        s_ = s_.T
        # Check that the mixing model described in the docstring holds:
        if whiten:
            assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
        center_and_norm(s_)
        s1_, s2_ = s_
        # Check to see if the sources have been estimated
        # in the wrong order
        if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
            s2_, s1_ = s_
        s1_ *= np.sign(np.dot(s1_, s1))
        s2_ *= np.sign(np.dot(s2_, s2))
        # Check that we have estimated the original sources
        if not add_noise:
            assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
            assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
        else:
            assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
            assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
    # Test FastICA class
    _, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
    ica = FastICA(fun=nl, algorithm=algo, random_state=0)
    sources = ica.fit_transform(m.T)
    assert_equal(ica.components_.shape, (2, 2))
    assert_equal(sources.shape, (1000, 2))
    assert_array_almost_equal(sources_fun, sources)
    assert_array_almost_equal(sources, ica.transform(m.T))
    assert_equal(ica.mixing_.shape, (2, 2))
    for fn in [np.tanh, "exp(-.5(x^2))"]:
        ica = FastICA(fun=fn, algorithm=algo, random_state=0)
        assert_raises(ValueError, ica.fit, m.T)
    assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
    m = [[0, 1], [1, 0]]
    # test for issue #697
    ica = FastICA(n_components=1, whiten=False, random_state=0)
    assert_warns(UserWarning, ica.fit, m)
    assert_true(hasattr(ica, 'mixing_'))
def test_non_square_fastica(add_noise=False):
    # Test the FastICA algorithm on very simple data.
    rng = np.random.RandomState(0)
    n_samples = 1000
    # Generate two sources:
    t = np.linspace(0, 100, n_samples)
    s1 = np.sin(t)
    s2 = np.ceil(np.sin(np.pi * t))
    s = np.c_[s1, s2].T
    center_and_norm(s)
    s1, s2 = s
    # Mixing matrix
    mixing = rng.randn(6, 2)
    m = np.dot(mixing, s)
    if add_noise:
        m += 0.1 * rng.randn(6, n_samples)
    center_and_norm(m)
    k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
    s_ = s_.T
    # Check that the mixing model described in the docstring holds:
    assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
    center_and_norm(s_)
    s1_, s2_ = s_
    # Check to see if the sources have been estimated
    # in the wrong order
    if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
        s2_, s1_ = s_
    s1_ *= np.sign(np.dot(s1_, s1))
    s2_ *= np.sign(np.dot(s2_, s2))
    # Check that we have estimated the original sources
    if not add_noise:
        assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
        assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
    # Test FastICA.fit_transform
    rng = np.random.RandomState(0)
    X = rng.random_sample((100, 10))
    for whiten, n_components in [[True, 5], [False, None]]:
        n_components_ = (n_components if n_components is not None else
                         X.shape[1])
        ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
        Xt = ica.fit_transform(X)
        assert_equal(ica.components_.shape, (n_components_, 10))
        assert_equal(Xt.shape, (100, n_components_))
        ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
        ica.fit(X)
        assert_equal(ica.components_.shape, (n_components_, 10))
        Xt2 = ica.transform(X)
        assert_array_almost_equal(Xt, Xt2)
def test_inverse_transform():
    # Test FastICA.inverse_transform
    n_features = 10
    n_samples = 100
    n1, n2 = 5, 10
    rng = np.random.RandomState(0)
    X = rng.random_sample((n_samples, n_features))
    expected = {(True, n1): (n_features, n1),
                (True, n2): (n_features, n2),
                (False, n1): (n_features, n2),
                (False, n2): (n_features, n2)}
    for whiten in [True, False]:
        for n_components in [n1, n2]:
            n_components_ = (n_components if n_components is not None else
                             X.shape[1])
            ica = FastICA(n_components=n_components, random_state=rng,
                          whiten=whiten)
            with warnings.catch_warnings(record=True):
                # catch "n_components ignored" warning
                Xt = ica.fit_transform(X)
            expected_shape = expected[(whiten, n_components_)]
            assert_equal(ica.mixing_.shape, expected_shape)
            X2 = ica.inverse_transform(Xt)
            assert_equal(X.shape, X2.shape)
            # reversibility test in non-reduction case
            if n_components == X.shape[1]:
                assert_array_almost_equal(X, X2)
 | 
	gpl-2.0 | 
| 
	aakashsinha19/Aspectus | 
	Master Code/classification.py | 
	2 | 
	4293 | 
	from matplotlib import pyplot as plt
import numpy as np
import os
import tensorflow as tf
import urllib2
from datasets import imagenet
from nets import vgg
from preprocessing import vgg_preprocessing
checkpoints_dir = '/home/aakash-sinha/checkpoints'
slim = tf.contrib.slim
# We need default size of image for a particular network.
# The network was trained on images of that size -- so we
# resize input image later in the code.
image_size = vgg.vgg_16.default_image_size
with tf.Graph().as_default():
   
    url = ("https://upload.wikimedia.org/wikipedia/commons/1/1f/Taj_Mahal_N-UP-A28-a.jpg")
    # Open specified url and load image as a string
    image_string = urllib2.urlopen(url).read()
    
    # Decode string into matrix with intensity values
    image = tf.image.decode_jpeg(image_string, channels=3)
    
    # Resize the input image, preserving the aspect ratio
    # and make a central crop of the resulted image.
    # The crop will be of the size of the default image size of
    # the network.
    processed_image = vgg_preprocessing.preprocess_image(image,
                                                         image_size,
                                                         image_size,
                                                         is_training=False)
    
    # Networks accept images in batches.
    # The first dimension usually represents the batch size.
    # In our case the batch size is one.
    processed_images  = tf.expand_dims(processed_image, 0)
    
    # Create the model, use the default arg scope to configure
    # the batch norm parameters. arg_scope is a very convenient
    # feature of slim library -- you can define default
    # parameters for layers -- like stride, padding etc.
    with slim.arg_scope(vgg.vgg_arg_scope()):
        logits, _ = vgg.vgg_16(processed_images,
                               num_classes=1000,
                               is_training=False)
    
    # In order to get probabilities we apply softmax on the output.
    probabilities = tf.nn.softmax(logits)
    
    # Create a function that reads the network weights
    # from the checkpoint file that you downloaded.
    # We will run it in session later.
    init_fn = slim.assign_from_checkpoint_fn(
        os.path.join(checkpoints_dir, 'vgg_16.ckpt'),
        slim.get_model_variables('vgg_16'))
    
    with tf.Session() as sess:
        
        # Load weights
        init_fn(sess)
        
        # We want to get predictions, image as numpy matrix
        # and resized and cropped piece that is actually
        # being fed to the network.
        np_image, network_input, probabilities = sess.run([image,
                                                           processed_image,
                                                           probabilities])
        probabilities = probabilities[0, 0:]
        sorted_inds = [i[0] for i in sorted(enumerate(-probabilities),
                                            key=lambda x:x[1])]
    
    # Show the downloaded image
    plt.figure()
    plt.imshow(np_image.astype(np.uint8))
    plt.suptitle("Input Image", fontsize=14, fontweight='bold')
    plt.axis('off')
    plt.show()
    
    # Show the image that is actually being fed to the network
    # The image was resized while preserving aspect ratio and then
    # cropped. After that, the mean pixel value was subtracted from
    # each pixel of that crop. We rescale the crop by its dynamic range so
    # that it can be displayed sensibly.
    plt.imshow( network_input / (network_input.max() - network_input.min()) )
    plt.suptitle("Segmentation using Classification",
                 fontsize=14, fontweight='bold')
    plt.axis('off')
    plt.show()
    names = imagenet.create_readable_names_for_imagenet_labels()
    for i in range(5):
        index = sorted_inds[i]
        # Now we print the top-5 predictions that the network gives us with
        # corresponding probabilities. Pay attention that the index with
        # class names is shifted by 1 -- this is because some networks
        # were trained on 1000 classes and others on 1001. VGG-16 was trained
        # on 1000 classes.
        print('Probability %0.2f => [%s]' % (probabilities[index], names[index+1]))
        
    res = slim.get_model_variables()
 | 
	apache-2.0 | 
| 
	markstoehr/phoneclassification | 
	local/load_compare_phone_sets_spec_kernel.py | 
	1 | 
	4667 | 
	import numpy as np
from sklearn import cross_validation, svm
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC
import argparse
parser = argparse.ArgumentParser("""Code to use cross-validation to assess the performance of kernel svms on pairwise-comparisons
""")
parser.add_argument('--phn_set1',type=str,nargs='+',help='first set of phones')
parser.add_argument('--phn_set2',type=str,nargs='+',help='second set of phones')
parser.add_argument('--data_path',type=str,help='path to where the phone sets are stored',default='/var/tmp/stoehr/phoneclassification/data')
parser.add_argument('--save_prefix',type=str,help='prefix for saving the accuracy and results table')
# parser.add_argument('--',type=,help='')
args = parser.parse_args()
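# Illustrative invocation (the phone labels and save prefix below are
# hypothetical; the flags match the parser defined above):
#
#   python local/load_compare_phone_sets_spec_kernel.py \
#       --phn_set1 aa ao --phn_set2 iy ih \
#       --data_path /var/tmp/stoehr/phoneclassification/data \
#       --save_prefix exp/aa_ao_vs_iy_ih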
def load_data_set(phn_set1,phn_set2, data_path, data_set):
    for phn_id,phn in enumerate(phn_set1):
        if phn_id == 0:
            X_data = np.load('%s/%s_%s_examples_S.npy' % (data_path,
                                                                   phn,data_set)
                                                                   )
            X_shape = X_data.shape[1:]
            X_data = X_data.reshape(len(X_data),
                                     np.prod(X_shape))
            n_data = len(X_data)
            y_data = np.zeros(n_data,dtype=np.intc)
        else:
            X = np.load('%s/%s_%s_examples_S.npy' % (data_path,
                                                                   phn,data_set)
                                                                   )
            X = X.reshape(X.shape[0],np.prod(X_shape))
            while X.shape[0] + n_data > X_data.shape[0]:
                new_X_data = np.zeros((2*X_data.shape[0],X_data.shape[1]))
                new_X_data[:n_data] = X_data[:n_data]
                X_data = new_X_data
                new_y_data = np.zeros(2*len(y_data),dtype=np.intc)
                new_y_data[:n_data] = y_data[:n_data]
                y_data = new_y_data
            X_data[n_data:
                   n_data + len(X)] = X
            y_data[n_data:
                   n_data + len(X)] = 0
            n_data += len(X)
    for phn_id,phn in enumerate(phn_set2):
        X = np.load('%s/%s_%s_examples_S.npy' % (data_path,
                                                                   phn,data_set)
                                                                   )
        X = X.reshape(X.shape[0],np.prod(X_shape))
        while X.shape[0] + n_data > X_data.shape[0]:
            new_X_data = np.zeros((2*X_data.shape[0],X_data.shape[1]))
            new_X_data[:n_data] = X_data[:n_data]
            X_data = new_X_data
            new_y_data = np.zeros(2*len(y_data),dtype=np.intc)
            new_y_data[:n_data] = y_data[:n_data]
            y_data = new_y_data
        X_data[n_data:
                n_data + len(X)] = X
        y_data[n_data:
                n_data + len(X)] = 1
        n_data += len(X)
    return  X_data[:n_data],y_data[:n_data]
X_train, y_train = load_data_set(args.phn_set1,args.phn_set2, args.data_path, 'train')
X_dev, y_dev = load_data_set(args.phn_set1,args.phn_set2, args.data_path, 'dev')
tuned_parameters = [{'kernel':['rbf'], 'gamma':[1e-2,1e-3,1e-4,1e-5,1e-1],
                     'C':[.1,1,10,.01,100]}]
print 'commencing training'
error_values = []
for gamma_id, gamma in enumerate([1e-4]):
    for C_id, C in enumerate([100]):
        clf = SVC(C=C,gamma=gamma,kernel='rbf',tol=0.00001,verbose=True)
        clf.fit(X_train,y_train)
        s = clf.score(X_dev,y_dev)
        print "C=%g\tgamma=%g\tscore=%g" % (C,gamma,s)
        error_values.append((gamma,C,s))
        if gamma_id == 0 and C_id ==0:
            best_score = s
            print "updated best score to %g" % best_score
            best_C = C
            best_gamma = gamma
        elif s > best_score:
            best_score = s
            print "updated best score to %g" % best_score
            best_C = C
            best_gamma = gamma
X = np.zeros((len(X_train) + len(X_dev),X_train.shape[1]),dtype=float)
y = np.zeros(len(X_train) + len(X_dev),dtype=int)
X[:len(X_train)] = X_train
X[len(X_train):] = X_dev
y[:len(y_train)] = y_train
y[len(y_train):] = y_dev
clf = SVC(C=best_C,gamma=best_gamma,kernel='rbf',tol=0.00001,verbose=True)
clf.fit(X,y)
X_test, y_test = load_data_set(args.phn_set1,args.phn_set2, args.data_path, 'core_test')
s = clf.score(X_test,
                   y_test)
open('%s_accuracy.txt' % args.save_prefix,'w').write(str(s))
print args.phn_set1,args.phn_set2, s
error_values = np.array(error_values)
np.save('%s_error_values.npy' % args.save_prefix,error_values)
 | 
	gpl-3.0 | 
| 
	andreabrambilla/libres | 
	python/res/enkf/export/gen_data_collector.py | 
	2 | 
	1956 | 
	import math
from pandas import DataFrame, MultiIndex
import numpy
from res.enkf import ErtImplType, EnKFMain, EnkfFs, RealizationStateEnum, GenKwConfig
from res.enkf.plot_data import EnsemblePlotGenData
from ecl.util.util import BoolVector
class GenDataCollector(object):
    @staticmethod
    def loadGenData(ert, case_name, key, report_step):
        """@type ert: EnKFMain
        @type case_name: str
        @type key: str
        @type report_step: int
        @rtype: DataFrame
        In the returned dataframe the gen_data element index runs along
        the rows, and the realisation index runs along the columns.
        """
        fs = ert.getEnkfFsManager().getFileSystem(case_name)
        realizations = fs.realizationList( RealizationStateEnum.STATE_HAS_DATA )
        config_node = ert.ensembleConfig().getNode(key)
        gen_data_config = config_node.getModelConfig()
        ensemble_data = EnsemblePlotGenData( config_node , fs , report_step )
        # The data size and active can only be inferred *after* the EnsembleLoad.
        data_size = gen_data_config.getDataSize( report_step )
        active_mask = gen_data_config.getActiveMask()
        data_array = numpy.empty(shape=(data_size , len(realizations)) , dtype=numpy.float64)
        data_array.fill( numpy.nan )
        for realization_index, realization_number in enumerate(realizations):
            realization_vector = ensemble_data[realization_number]
            if len(realization_vector) > 0: # Must check because of a bug when switching between cases with different states
                for data_index in range(data_size):
                    if active_mask[data_index]:
                        value = realization_vector[data_index]
                        data_array[data_index][realization_index] = value
        realizations = numpy.array(realizations)
        return DataFrame( data = data_array , columns = realizations )
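# Minimal usage sketch (hypothetical helper): ``ert`` is an initialised
# EnKFMain instance; the case name, key and report step are illustrative.
def _load_gen_data_example(ert):
    df = GenDataCollector.loadGenData(ert, "default", "SNAKE_OIL_OPR_DIFF", 199)
    # Rows: gen_data element index, columns: realisation number.
    return df.mean(axis=1)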
 | 
	gpl-3.0 | 
| 
	evan-magnusson/dynamic | 
	Data/Calibration/DepreciationParameters/Program/testing script.py | 
	2 | 
	7260 | 
	'''
-------------------------------------------------------------------------------
Date created: 4/15/2015
Last updated 4/15/2015
-------------------------------------------------------------------------------
This py-file tests the naics_processing.py program.
-------------------------------------------------------------------------------
    Packages:
-------------------------------------------------------------------------------
'''
import os.path
import numpy as np
import pandas as pd
#
import naics_processing as naics
'''
-------------------------------------------------------------------------------
The main script of the program:
-------------------------------------------------------------------------------
Testing the "load_naics" function.
Checks:
    1)  Recreate the list of naics codes using the tree. Check to see that this
        matches the input.
    2)  
-------------------------------------------------------------------------------
'''
def test_load_naics(path = None, messages = True):
    # Default path if none is specified:
    if path == None:
        path = os.getcwd()
        path = os.path.abspath(path + "\\data\\2012_NAICS_Codes.csv")
    # Using the function being tested to create a tree:
    cur_tree = naics.load_naics(path)
    # Replicating the codes in the input file:
    rep_codes = np.zeros(0)
    for ind in cur_tree.enum_inds:
        cur_codes = ind.data.dfs["Codes:"].iloc[:,0]
        rep_codes = np.append(rep_codes, cur_codes)
    rep_codes = rep_codes.astype(int)
    rep_codes = np.unique(rep_codes)
    rep_codes = np.sort(rep_codes)
    #
    orig_data = pd.read_csv(path).iloc[:,0]
    orig_codes = np.zeros(0)
    for i in xrange(0, len(orig_data)):
        cur_codes = str(orig_data[i]).split("-")
        orig_codes = np.append(orig_codes, cur_codes)
    orig_codes = orig_codes.astype(int)
    orig_codes = np.unique(orig_codes)
    orig_codes = np.sort(orig_codes)
    #
    rep_index = 0
    orig_index = 0
    matches = 0
    while((rep_index < len(rep_codes)) and (orig_index < len(orig_codes))):
        if(rep_codes[rep_index] == int(orig_codes[orig_index])):
            rep_index += 1
            orig_index += 1
            matches += 1
        elif(rep_codes[rep_index] <= orig_codes[orig_index]):
            rep_index += 1
        elif(rep_codes[rep_index] >= orig_codes[orig_index]):
            orig_index += 1
    if matches == len(orig_codes):
        if messages:
            print "\"load_naics\" passed test 1."
        return None
    else:
        mismatch = str(len(orig_codes) - matches)
        if messages:
            print "\"load_naics\" failed test 1. Mismatches:" + mismatch + "."
        return int(mismatch)
'''
-------------------------------------------------------------------------------
Prints out the contents of a tree.  Creates a csv file for each dataframe key.
Each line in the csv file has the contents of the df for a specific industry.
This allows the data to be manually checked in excel.
-------------------------------------------------------------------------------
'''
def print_tree_dfs(data_tree, out_path = None, data_types = None):
    if out_path == None:
        out_path = os.getcwd()
        out_path = os.path.abspath(out_path + "\\OUTPUT\\tests\\tree_data")
    #
    if data_types == None:
        data_types = data_tree.enum_inds[0].data.dfs.keys()
        data_types.remove("Codes:")
    #
    for i in data_types:
        cur_cols = data_tree.enum_inds[0].data.dfs[i].columns.values.tolist()
        cur_cols = ["Codes:"] + cur_cols
        cur_pd = np.zeros((0,len(cur_cols)))
        for j in xrange(0,len(data_tree.enum_inds)):
            cur_data = data_tree.enum_inds[j].data.dfs[i].iloc[0,:]
            if(np.sum((cur_data != np.zeros(len(cur_cols)-1))) == 0):
                continue
            cur_code = data_tree.enum_inds[j].data.dfs["Codes:"].iloc[0,0]
            cur_data = np.array([cur_code] + cur_data.tolist())
            
            cur_pd = np.vstack((cur_pd, cur_data))
        cur_pd = pd.DataFrame(cur_pd, columns = cur_cols)
        cur_pd.to_csv(out_path + "\\" + i + ".csv")
        
    
#'''
#-------------------------------------------------------------------------------
#Testing the "load_soi_corporate_data" function.
#Checks:
#    1)  
#    2)  
#-------------------------------------------------------------------------------
#'''
#def test_load_soi_corporate_data(data_tree, loaded = False, path = None, out_path = None):
#    # Default path if none is specified:
#    if path == None:
#        path = os.getcwd()
#        path = os.path.abspath(path + "\\data")
#    #
#    if out_path == None:
#        out_path = os.getcwd()
#        out_path = os.path.abspath(out_path + "\\OUTPUT\\tests")
#    #
#    if(not loaded):
#        naics.load_soi_corporate_data(data_tree, path)
#    #
#    corp_types = ["tot_corps", "s_corps", "c_corps"]
#    #
#    for i in corp_types:
#        cur_cols = data_tree.enum_inds[0].data.dfs[i].columns.values.tolist()
#        cur_cols = ["Codes:"] + cur_cols
#        cur_pd = np.zeros((0,len(cur_cols)))
#        for j in xrange(0,len(data_tree.enum_inds)):
#            cur_data = data_tree.enum_inds[j].data.dfs[i].iloc[0,:]
#            if(np.sum((cur_data != np.zeros(len(cur_cols)-1))) == 0):
#                continue
#            cur_code = data_tree.enum_inds[j].data.dfs["Codes:"].iloc[0,0]
#            cur_data = np.array([cur_code] + cur_data.tolist())
#            
#            cur_pd = np.vstack((cur_pd, cur_data))
#        cur_pd = pd.DataFrame(cur_pd, columns = cur_cols)
#        cur_pd.to_csv(out_path + "\\" + i + ".csv")
#    
#'''
#-------------------------------------------------------------------------------
#Testing the "load_soi_corporate_data" function.
#Checks:
#    1)  
#    2)  
#-------------------------------------------------------------------------------
#'''
#def test_load_soi_partner_data(data_tree, loaded = False, path = None, out_path = None):
#    # Default path if none is specified:
#    if path == None:
#        path = os.getcwd()
#        path = os.path.abspath(path + "\\data")
#    #
#    if out_path == None:
#        out_path = os.getcwd()
#        out_path = os.path.abspath(out_path + "\\OUTPUT\\tests")
#    #
#    if(not loaded):
#        naics.load_soi_partner_data(data_tree, path)
#    #
#    #corp_types = ["tot_corps", "s_corps", "c_corps"]
#    asset_types = ['PA_inc_loss', 'PA_assets', 'PA_types']
#    #
#    for i in asset_types:
#        cur_cols = data_tree.enum_inds[0].data.dfs[i].columns.values.tolist()
#        cur_cols = ["Codes:"] + cur_cols
#        cur_pd = np.zeros((0,len(cur_cols)))
#        for j in xrange(0,len(data_tree.enum_inds)):
#            cur_data = data_tree.enum_inds[j].data.dfs[i].iloc[0,:]
#            if(np.sum((cur_data != np.zeros(len(cur_cols)-1))) == 0):
#                continue
#            cur_code = data_tree.enum_inds[j].data.dfs["Codes:"].iloc[0,0]
#            cur_data = np.array([cur_code] + cur_data.tolist())
#            
#            cur_pd = np.vstack((cur_pd, cur_data))
#        cur_pd = pd.DataFrame(cur_pd, columns = cur_cols)
#        print cur_pd
#        cur_pd.to_csv(os.path.abspath(out_path + "\\" + i + ".csv"))
 | 
	mit | 
| 
	vikalibrate/FortesFit | 
	fortesfit/test_model.py | 
	1 | 
	1176 | 
	from sys import exit
import numpy as np
from scipy import integrate, constants, interpolate
import matplotlib.pyplot as plt
from fortesfit.FortesFit_Settings import FortesFit_Cosmology as cosmo
""" FortesFit compliant readin module for the FortesFit main test model.
	This is a flat SED in nuFnu with a variable monochromatic luminosity.
"""
# No readtemplates function
# ***********************************************************************************************
def		readin(parameters, redshift, templates=None):
	""" Given a specific parameter set and a redshift, return a flat SED in nuFnu 
		The parameters are :
			MonochromLuminosity: the monochromatic luminosity in log erg/s/cm^2
	"""
	
	wave_basic = 10**(-1.0 + np.arange(1001)*(4.0/1000.0))  #  base wavelengths are in microns
	wave = wave_basic*(1.0+redshift)  # Observed wavelength 
	template_orig = np.full(1001,1e45)
	scale_factor = 10**(parameters['MonochromLuminosity'] - 45.0)
	
	lfactor = 4.0*np.pi*(cosmo.luminosity_distance(redshift).value*3.0856e24)**2.0
	observedflux = (template_orig*scale_factor/lfactor)/wave
	sed = {'observed_wavelength':wave,'observed_flux':observedflux}
	
	return sed
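# Minimal usage sketch (hypothetical): evaluate the flat test SED at z = 1
# for MonochromLuminosity = 44.0 (log units, as described in the docstring).
def _readin_example():
	sed = readin({'MonochromLuminosity': 44.0}, 1.0)
	# 'observed_wavelength' is in microns (observed frame); the flux scales
	# with 10**(MonochromLuminosity - 45.0).
	return sed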
 | 
	mit | 
| 
	equialgo/scikit-learn | 
	sklearn/metrics/cluster/unsupervised.py | 
	8 | 
	10183 | 
	"""Unsupervised evaluation metrics."""
# Authors: Robert Layton <[email protected]>
#          Arnaud Fouchet <[email protected]>
#          Thierry Guillemot <[email protected]>
# License: BSD 3 clause
import numpy as np
from ...utils import check_random_state
from ...utils import check_X_y
from ...utils.fixes import bincount
from ..pairwise import pairwise_distances
from ...preprocessing import LabelEncoder
def check_number_of_labels(n_labels, n_samples):
    if not 1 < n_labels < n_samples:
        raise ValueError("Number of labels is %d. Valid values are 2 "
                         "to n_samples - 1 (inclusive)" % n_labels)
def silhouette_score(X, labels, metric='euclidean', sample_size=None,
                     random_state=None, **kwds):
    """Compute the mean Silhouette Coefficient of all samples.
    The Silhouette Coefficient is calculated using the mean intra-cluster
    distance (``a``) and the mean nearest-cluster distance (``b``) for each
    sample.  The Silhouette Coefficient for a sample is ``(b - a) / max(a,
    b)``.  To clarify, ``b`` is the distance between a sample and the nearest
    cluster that the sample is not a part of.
    Note that the Silhouette Coefficient is only defined if the number of labels
    is 2 <= n_labels <= n_samples - 1.
    This function returns the mean Silhouette Coefficient over all samples.
    To obtain the values for each sample, use :func:`silhouette_samples`.
    The best value is 1 and the worst value is -1. Values near 0 indicate
    overlapping clusters. Negative values generally indicate that a sample has
    been assigned to the wrong cluster, as a different cluster is more similar.
    Read more in the :ref:`User Guide <silhouette_coefficient>`.
    Parameters
    ----------
    X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
             [n_samples_a, n_features] otherwise
        Array of pairwise distances between samples, or a feature array.
    labels : array, shape = [n_samples]
         Predicted labels for each sample.
    metric : string, or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        allowed by :func:`metrics.pairwise.pairwise_distances
        <sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance
        array itself, use ``metric="precomputed"``.
    sample_size : int or None
        The size of the sample to use when computing the Silhouette Coefficient
        on a random subset of the data.
        If ``sample_size is None``, no sampling is used.
    random_state : integer or numpy.RandomState, optional
        The generator used to randomly select a subset of samples if
        ``sample_size is not None``. If an integer is given, it fixes the seed.
        Defaults to the global numpy random number generator.
    `**kwds` : optional keyword parameters
        Any further parameters are passed directly to the distance function.
        If using a scipy.spatial.distance metric, the parameters are still
        metric dependent. See the scipy docs for usage examples.
    Returns
    -------
    silhouette : float
        Mean Silhouette Coefficient for all samples.
    References
    ----------
    .. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
       Interpretation and Validation of Cluster Analysis". Computational
       and Applied Mathematics 20: 53-65.
       <http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
    .. [2] `Wikipedia entry on the Silhouette Coefficient
           <https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
    """
    if sample_size is not None:
        X, labels = check_X_y(X, labels, accept_sparse=['csc', 'csr'])
        random_state = check_random_state(random_state)
        indices = random_state.permutation(X.shape[0])[:sample_size]
        if metric == "precomputed":
            X, labels = X[indices].T[indices].T, labels[indices]
        else:
            X, labels = X[indices], labels[indices]
    return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
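# Usage sketch for the metric above (hypothetical helper; assumes KMeans and
# make_blobs from scikit-learn, with illustrative parameters).
def _silhouette_score_example():
    from sklearn.cluster import KMeans
    from sklearn.datasets import make_blobs
    X, _ = make_blobs(n_samples=100, centers=3, random_state=0)
    labels = KMeans(n_clusters=3, random_state=0).fit_predict(X)
    # A single float in [-1, 1]; values near 1 indicate compact,
    # well separated clusters.
    return silhouette_score(X, labels)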
def silhouette_samples(X, labels, metric='euclidean', **kwds):
    """Compute the Silhouette Coefficient for each sample.
    The Silhouette Coefficient is a measure of how well samples are clustered
    with samples that are similar to themselves. Clustering models with a high
    Silhouette Coefficient are said to be dense, where samples in the same
    cluster are similar to each other, and well separated, where samples in
    different clusters are not very similar to each other.
    The Silhouette Coefficient is calculated using the mean intra-cluster
    distance (``a``) and the mean nearest-cluster distance (``b``) for each
    sample.  The Silhouette Coefficient for a sample is ``(b - a) / max(a,
    b)``.
    Note that the Silhouette Coefficient is only defined if the number of labels
    is 2 <= n_labels <= n_samples - 1.
    This function returns the Silhouette Coefficient for each sample.
    The best value is 1 and the worst value is -1. Values near 0 indicate
    overlapping clusters.
    Read more in the :ref:`User Guide <silhouette_coefficient>`.
    Parameters
    ----------
    X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
             [n_samples_a, n_features] otherwise
        Array of pairwise distances between samples, or a feature array.
    labels : array, shape = [n_samples]
             label values for each sample
    metric : string, or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is
        the distance array itself, use "precomputed" as the metric.
    `**kwds` : optional keyword parameters
        Any further parameters are passed directly to the distance function.
        If using a ``scipy.spatial.distance`` metric, the parameters are still
        metric dependent. See the scipy docs for usage examples.
    Returns
    -------
    silhouette : array, shape = [n_samples]
        Silhouette Coefficient for each samples.
    References
    ----------
    .. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
       Interpretation and Validation of Cluster Analysis". Computational
       and Applied Mathematics 20: 53-65.
       <http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
    .. [2] `Wikipedia entry on the Silhouette Coefficient
       <https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
    """
    X, labels = check_X_y(X, labels, accept_sparse=['csc', 'csr'])
    le = LabelEncoder()
    labels = le.fit_transform(labels)
    check_number_of_labels(len(le.classes_), X.shape[0])
    distances = pairwise_distances(X, metric=metric, **kwds)
    unique_labels = le.classes_
    n_samples_per_label = bincount(labels, minlength=len(unique_labels))
    # For sample i, store the mean distance of the cluster to which
    # it belongs in intra_clust_dists[i]
    intra_clust_dists = np.zeros(distances.shape[0], dtype=distances.dtype)
    # For sample i, store the mean distance of the second closest
    # cluster in inter_clust_dists[i]
    inter_clust_dists = np.inf + intra_clust_dists
    for curr_label in range(len(unique_labels)):
        # Find inter_clust_dist for all samples belonging to the same
        # label.
        mask = labels == curr_label
        current_distances = distances[mask]
        # Leave out current sample.
        n_samples_curr_lab = n_samples_per_label[curr_label] - 1
        if n_samples_curr_lab != 0:
            intra_clust_dists[mask] = np.sum(
                current_distances[:, mask], axis=1) / n_samples_curr_lab
        # Now iterate over all other labels, finding the mean
        # cluster distance that is closest to every sample.
        for other_label in range(len(unique_labels)):
            if other_label != curr_label:
                other_mask = labels == other_label
                other_distances = np.mean(
                    current_distances[:, other_mask], axis=1)
                inter_clust_dists[mask] = np.minimum(
                    inter_clust_dists[mask], other_distances)
    sil_samples = inter_clust_dists - intra_clust_dists
    sil_samples /= np.maximum(intra_clust_dists, inter_clust_dists)
    # score 0 for clusters of size 1, according to the paper
    sil_samples[n_samples_per_label.take(labels) == 1] = 0
    return sil_samples
def calinski_harabaz_score(X, labels):
    """Compute the Calinski and Harabaz score.
    The score is defined as ratio between the within-cluster dispersion and
    the between-cluster dispersion.
    Read more in the :ref:`User Guide <calinski_harabaz_index>`.
    Parameters
    ----------
    X : array-like, shape (``n_samples``, ``n_features``)
        List of ``n_features``-dimensional data points. Each row corresponds
        to a single data point.
    labels : array-like, shape (``n_samples``,)
        Predicted labels for each sample.
    Returns
    -------
    score : float
        The resulting Calinski-Harabaz score.
    References
    ----------
    .. [1] `T. Calinski and J. Harabasz, 1974. "A dendrite method for cluster
       analysis". Communications in Statistics
       <http://www.tandfonline.com/doi/abs/10.1080/03610927408827101>`_
    """
    X, labels = check_X_y(X, labels)
    le = LabelEncoder()
    labels = le.fit_transform(labels)
    n_samples, _ = X.shape
    n_labels = len(le.classes_)
    check_number_of_labels(n_labels, n_samples)
    extra_disp, intra_disp = 0., 0.
    mean = np.mean(X, axis=0)
    for k in range(n_labels):
        cluster_k = X[labels == k]
        mean_k = np.mean(cluster_k, axis=0)
        extra_disp += len(cluster_k) * np.sum((mean_k - mean) ** 2)
        intra_disp += np.sum((cluster_k - mean_k) ** 2)
    return (1. if intra_disp == 0. else
            extra_disp * (n_samples - n_labels) /
            (intra_disp * (n_labels - 1.)))
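# Usage sketch for the metric above (hypothetical helper; same toy data idea
# as the silhouette sketch).
def _calinski_harabaz_example():
    from sklearn.cluster import KMeans
    from sklearn.datasets import make_blobs
    X, _ = make_blobs(n_samples=100, centers=3, random_state=0)
    labels = KMeans(n_clusters=3, random_state=0).fit_predict(X)
    # Higher values indicate larger between-cluster dispersion relative to
    # within-cluster dispersion.
    return calinski_harabaz_score(X, labels)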
 | 
	bsd-3-clause | 
| 
	ajerneck/thatsfordinner | 
	determine_topics.py | 
	1 | 
	1819 | 
	## select best number of topics using loglikelihood.
import collections
import itertools
import matplotlib.pyplot as plt
import numpy as np
import lda
import pandas as pd
import pickle
import psycopg2
import explore as e
import production as p
df = p.load_data()
df = p.clean_formatting(df)
df = p.remove_stopwords(df)
assert(type(df)) is pd.core.frame.DataFrame, "%r is not a DataFrame." % df
assert(df.shape) == (16526, 7), "Has the wrong shape."
vectorizer, features = p.extract_features(df, title=True)
ll = {}
ms = []
for k in range(5, 100, 5):
    print k
    mk  = lda.LDA(n_topics=k, random_state=0, n_iter=1000)
    mk.fit(features)
    ll[k] = mk.loglikelihood()
    ms.append(mk)
ll_5_100_1000 = ll
ms_5_100_1000 = ms
def plot_lls(ll, title, filename):
    ks = sorted(ll.keys())
    vs = [ll[k] for k in ks]
    plt.style.use('ggplot')
    plt.figure()
    plt.plot(ks, vs)
    plt.title(title)
    plt.savefig(filename)
plot_lls(ll_5_100_1000, 'Loglikelihood by number of topics', 'll-topics-5-100-1000.png')
# ll_5_100_200 and ll_20_40_400 hold loglikelihoods from earlier runs of the
# loop above with different topic ranges / iteration counts.
x1 = pd.DataFrame(ll_5_100_200.items(), columns=['topics','ll'])
x1['run'] = 1
x2 = pd.DataFrame(ll_20_40_400.items(), columns=['topics','ll'])
x2['run'] = 2
x3 = pd.DataFrame(ll_5_100_1000.items(), columns=['topics','ll'])
x3['run'] = 3
xx = pd.concat([x1, x2, x3])
xx = xx.sort('topics')
xx.index = xx['topics']
plt.figure()
colors = ['red','green','blue']
for n, g in xx.groupby('run'):
    plt.plot(g['topics'], g['ll'], color=colors[n-1])
plt.savefig('testing.png')
plt.figure()
g = xx[xx['run']==3]
g['ll'] = g['ll'] / 10000.0
plt.plot(g['topics'], g['ll'], color=colors[n-1])
plt.xlabel('Number of topics')
plt.ylabel('Model fit (loglikelihood)')
plt.savefig('loglikelihood-topics.png')
## I AM HERE: do a nice plot for the demo.
# xx.plot('x=')  # incomplete placeholder call
## save
ll_title = ll
 | 
	bsd-3-clause | 
| 
	rohanp11/IITIGNSSR | 
	src/tecvtime.py | 
	1 | 
	7190 | 
	# Imports
import os,copy
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from datetime import date, timedelta as td
# Function to check if leap year
def checkleap(year):
	return ((year % 400 == 0) or ((year % 4 == 0) and (year % 100 != 0)))
# Date of the year Conversion
def convert_date(day,month,year):
	if checkleap(year)==True:
		days = [31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335]
		if month == 1:
			return day
		else:
			return day+days[month-2]
	else:
		days = [31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334]
		if month == 1:
			return day
		else:
			return day+days[month-2]
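# Quick worked example: convert_date(1, 3, 2016) == 1 + 60 == 61, since 2016
# is a leap year (31 days in January + 29 in February precede 1 March).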
#Function to check presence of Nan			
def checknonan(df):
	for i in df:
		if np.isnan(i):
			return False
	return True
#Function to count number of Nan	
def countnan(df):
	count = 0
	for i in df:
		if np.isnan(i):
			count = count+1
	return count
#Function to convert time of week in seconds to hours
def gettime(times):
	hours = 0.0
	minutes = 0.0
	t = -1
	tm = []
	for each in times:
		if t!=each:
			minutes = minutes+1
			if minutes>60:
				hours = hours+1
				minutes = minutes%60
			t = each
		tm1 = float(hours+(float(minutes/60)))
		tm.append(tm1)
	return tm
#Function to check validity of dates
def validdt(start_date,start_month,start_year,end_date,end_month,end_year,date_from,date_upto):
    if start_year>end_year or (start_year<date_from.year or end_year>date_upto.year) :
        return False
    elif start_year==end_year and (start_year==date_from.year and end_year==date_upto.year) and (start_month>end_month or start_month<date_from.month or end_month>date_upto.month):
        return False
    elif start_year==end_year and (start_year==date_from.year and end_year==date_upto.year) and start_month==end_month and (start_month==date_from.month and end_month==date_upto.month) and (start_date>end_date or start_date<date_from.day or end_date>date_upto.day):
        return False
    return True
#Function to obtain range of dates
def daterange(start_date, end_date):
    for n in range(int ((end_date - start_date).days)):
        yield start_date + td(n)
#Function to convert folder name into a human-readable date
def conv_readdate(dt):
	dt_year = 2000+int(dt/1000)
	dt_doy = dt%1000
	t = date.fromordinal(date(dt_year, 1, 1).toordinal() + dt_doy - 1)
	return t
def main():
	#Check latest date of the data available
	os.chdir('/home/deathstroke/projects/IITI_GNSSR/data/')
	sub = [x for x in os.listdir('.') if os.path.isdir(x)]
	dt = max(sub)
	date_upto = conv_readdate(int(dt))
	os.chdir('/home/deathstroke/projects/IITI_GNSSR/iiti_gnssr/')
	#Check oldest date of the data available
	os.chdir('/home/deathstroke/projects/IITI_GNSSR/data/')
	sub = [x for x in os.listdir('.') if os.path.isdir(x)]
	dt = min(sub)
	date_from = conv_readdate(int(dt))
	os.chdir('/home/deathstroke/projects/IITI_GNSSR/iiti_gnssr/')
	print ("\nData available from %s to %s\n" %(date_from,date_upto))
	alpha=['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X']
	#Taking valid start and end dates as input from user
	validity = False
	while(validity!=True):
		start_date = int(input("Enter Start Date(dd):"))
		start_month = int(input("Enter Start Month(mm):"))
		start_year = int(input("Enter Start Year(yyyy):"))
		print ("\n")
		end_date = int(input("Enter End Date(dd):"))
		end_month = int(input("Enter End Month(mm):"))
		end_year = int(input("Enter End Year(yyyy):"))
		print ("\n")
		validity = validdt(start_date,start_month,start_year,end_date,end_month,end_year,date_from,date_upto)
		if validity == False:
			print ("\nPlease enter valid start and end dates\n")
	#Conversion into datetime format
	d1 = date(start_year,start_month,start_date)
	d2 = date(end_year,end_month,end_date+1)
	d3 = date(end_year,end_month,end_date)
	#Reading and storing data from different files
	frames = []
	for single_date in daterange(d1,d2):
		curr_date = str(convert_date(int(single_date.day),int(single_date.month),int(single_date.year)))
		curr_folder = str(str(int(single_date.year)%2000)+str(curr_date))
		for letter in alpha:
			try:
				filename = str('IITI'+curr_date+letter+'.'+'16_.ismr')
				with open('/home/deathstroke/projects/IITI_GNSSR/data/%s/%s' %(curr_folder,filename)) as f:
					df = pd.read_csv(f,usecols=[1,2,22],names=['time','svid','TEC'])
					frames.append(df)
			except (IOError):
				df1 = copy.deepcopy(frames[len(frames)-1])
				df1['time']=df['time']+3600
				tec = ['nan' for each in df1['time']]
				df1['TEC'] = tec
				frames.append(df1)
	result =pd.concat(frames)
	result['t-hrs'] = gettime(result['time'])
	dfm = result.groupby('svid')
	svid = set()
	for elements in result['svid']:
			svid.add(elements)
	svid1 = sorted(svid)
	cnt = 0
	while(cnt!=1):
		print (
	'''Choose the satellite constellation whose data is required:-
	1. GPS
	2. GLONASS
		'''
		)
		constl = int(input(">> "))
		if constl==1:
			for each in svid1:
				if each>37:
					svid.remove(each)
			svid2 = sorted(svid)
			n = 37
			constl = 'gps'
			cnt=1
		elif constl==2:
			for each in svid1:
				if each<38 or each>61:
					svid.remove(each)
			svid2 = sorted(svid)
			constl = 'glonass'
			n = 24
			cnt=1
		else:
			print ("\nPlease enter a valid input")
	#Calculating average data points for plotting
	sumtime = 0
	count = 0
	for each in svid2:
		dftemp = dfm.get_group(each)
		timedf = np.array(dftemp['time'])
		tecdf = np.array(dftemp['TEC'],dtype=float)
		sumtime = sumtime+(timedf.size-countnan(tecdf))
		count = count+1
	avg = sumtime/count
	val = avg
	#Counting the number of plots
	count = 0
	for each in svid2:
		dftemp = dfm.get_group(each)
		timedf = np.array(dftemp['t-hrs'])
		tecdf = np.array(dftemp['TEC'],dtype=float)
		if timedf.size-countnan(tecdf)>val:
			count = count +1
	#Plotting each satellite with datapoints greater than average
	clr = iter(plt.cm.rainbow(np.linspace(0,1,count)))
	handles = []
	for each in svid2:
		dftemp = dfm.get_group(each)
		timedf = np.array(dftemp['t-hrs'])
		tecdf = np.array(dftemp['TEC'],dtype=float)
		if timedf.size-countnan(tecdf)>val:
			cl = next(clr)
			plt.plot(timedf,tecdf,label='%d' %each,c=cl)
			handles.append(str(each))
	# Ensure that the axis ticks only show up on the bottom and left of the plot.  
	# Ticks on the right and top of the plot are generally unnecessary chartjunk.  
	ax = plt.subplot(111)    
	ax.spines["top"].set_visible(False)    
	# ax.spines["bottom"].set_visible(False)    
	ax.spines["right"].set_visible(False)    
	# ax.spines["left"].set_visible(False)
	# Ensure that the axis ticks only show up on the bottom and left of the plot.  
	# Ticks on the right and top of the plot are generally unnecessary chartjunk.  
	ax.get_xaxis().tick_bottom()  
	ax.get_yaxis().tick_left()
	plt.xlabel('Time in hours(0 is 5:30 AM IST on %s)' %d1,fontsize=16)
	plt.ylabel(r'Value of TEC(in TECU x $\frac{10^{16}}{m^2}$)',fontsize=16)
	plt.title('TEC data collected from %s constellation for %s to %s \nShowing satellites with %d+ datapoints' %(constl.upper(),d1,d3,val))
	plt.legend(bbox_to_anchor=(1, 1), loc='upper left',prop={'size':12}, borderaxespad=0.,frameon=False)
	plt.show()
if __name__=="__main__":
	main()
 | 
	mit | 
| 
	dvoytenk/FareHarrier | 
	app.py | 
	1 | 
	6509 | 
	import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
import pandas as pd
import numpy as np
from sklearn.externals import joblib
from darksky import get_forecast
import pendulum
import os
app = dash.Dash(__name__)
server = app.server
server.secret_key = os.environ.get('SECRET_KEY', 'my-secret-key')
app.title='FareHarrier'
stops=pd.read_pickle('stations.pkl')
stops=stops[:-2]
lats=stops['stop_lat']
lons=stops['stop_lon']
labels=list(stops['stop_name'])
stopnums=stops['parent_station'].astype('float')
rf = joblib.load('rf23.pkl') 
keys=['day_of_week','weeks','hr','stop_id','heat_index']
def serve_layout():
    return html.Div(
    [
    html.Div([
        html.Div(html.H1('Fare',style={'color':'orange','float':'right'}), className="six columns"),
        html.Div(html.H1('Harrier',style={'color':'black'}),className="row")]),
    
    
    html.Div(id='text-content'),
    
        html.H2('We use machine learning to send NYC taxi drivers to subway stations with long predicted wait times.'),
    html.Div(children='''
        Select a train direction and move the slider to predict wait times over the next 24 hours.
    '''),
        html.Div(children='''
        Hover over the circles to see station names and predicted wait times.
    '''),
        
    html.Br([]),
    html.Label('Train Direction:'),
    dcc.Dropdown(
        id='dirdropdown',
        options=[
            {'label': 'Uptown', 'value': 'uptown'},
            {'label': 'Downtown', 'value': 'downtown'},
        ],
        value='uptown'
    ),
    
    #hrs=[pendulum.now().add(hours=i).hour for i in range(0,24)]
    
      html.Label('Time of day'),
    dcc.Slider(
        id='numslider',
        min=0,
        max=23,
        #marks={i:str(i) for i in range(0,24)},
        marks={i:'' if i%2==1 else pendulum.now(tz='America/New_York').add(hours=i).strftime('%b \n %d \n %H:00') for i in range(0,24)},
        value=0,
        #vertical=True,
        #style={'width':'75%'},
    ),
    
      #html.Div(children='''    
    #'''),
    #html.Div(id='text-content'),
    html.Br([]),
    html.Br([]),
    html.Br([]),
    
    #the displaymodebar disables the save fig and plotly links
    #the undo button is disabled via css
    dcc.Graph(id='map',config={'displayModeBar': False},style={'width': '60%','text-align':'center','padding':'10px'}),
    
    html.Br([]),
    
    html.Div([
    dcc.RadioItems(options=[
        {'label': i, 'value': i} for i in ['Terms', 'About']
    ], value='Terms',
    id='navigation-links'),
    html.Div(id='body',children='''''')
    ])
    
    ], style={'width':'60%','margin':'auto'})
    
    
app.layout=serve_layout    
@app.callback(dash.dependencies.Output('body', 'children'), [dash.dependencies.Input('navigation-links', 'value')])
def display_children(chapter):
    if chapter == 'Terms':
        return [html.Div(children='''We do not store or share any of your information.'''),html.A('Powered by Dark Sky.',href="https://darksky.net/poweredby/"),]
    elif chapter == 'About':
        return [html.Div(children='''Built by Denis Voytenko for the Fall 2017 Insight Data Science Fellowship project.'''),
	    html.Div(children='''Presentation slides are available here:'''),
            html.A('Google Slides',href="https://docs.google.com/presentation/d/e/2PACX-1vT7lohxWn4W9GUVkMLwr4RYHcpotDEO5AEHQSLwtgeIYjxdBQMPAJKHHl_Z2668W7hEAZPZ___Q92qz/pub?start=false&loop=false&delayms=3000"),
            html.Div(children='''Source code for this project available here:'''),
            html.A('GitHub',href="https://github.com/dvoytenk/FareHarrier"),
            ]
@app.callback(
    dash.dependencies.Output('map', 'figure'),[dash.dependencies.Input('numslider', 'value'),dash.dependencies.Input('dirdropdown', 'value')])
def update_graph(slider_value,dropdown_value):  
    S=np.array(stopnums)
    #clean up terminal stations
    termini=[140.,247.,257.,640.,101.,401.,201.,501.,601,301.,138.]
    termini_idx=[ np.where(S==t)[0][0] for t in termini]
    if dropdown_value=='uptown':
        multiplier=1
        #termini=[101.,401.,201.,501.,601]
        #termini_idx=[ np.where(S==t)[0][0] for t in termini]
    if dropdown_value=='downtown':
        multiplier=-1
        #termini=[140.,247.,257.,640.,101.,401.,201.,501.,601,301.]
        #termini_idx=[ np.where(S==t)[0][0] for t in termini]
    #do some model/darksky stuff here
    params=get_forecast()
    vals=params[slider_value]
    
    inputmatrix=np.ones([len(stopnums),len(keys)])
    inp=np.multiply(inputmatrix,vals)
    inp[:,3]=multiplier*stopnums
    wt=rf.predict(inp)
    wt[termini_idx]=0.0
    
    #make terminal indices to account for long times there in specific directions
    
    wtlabels=['%1.d'%round(w) for w in wt]
    scale=4.*wt  # we want the first value to be no more than 90, so no point in normalizing it any further
    wtc= ['hsl('+str(h)+',50%'+',50%)' for h in scale]
    #wtc= ['hsl('+str(h)+',50%'+',50%)' for h in np.linspace(0, 360, len(wt))]
  
    #timelabels=[labels[i]+', '+wtlabels[i]+' min.' for i in range(len(labels))]
    timelabels=[]
    for i in range(len(labels)):
        if wtlabels[i] == '%1.d' % 0.0:
            if labels[i] == 'Cortlandt St':
                timelabels.append(labels[i] + ', ' + 'closed')
            else:
                timelabels.append(labels[i] + ', ' + 'terminus')
        else:
            timelabels.append(labels[i] + ', ' + wtlabels[i] + ' min.')
    return {
        'data': [{
            'lat': lats,
            'lon': lons,
            'marker': {
                'size': wt,
                'color':wtc,
            },
            'text':  timelabels,
            #'customdata': labels,
            'type': 'scattermapbox'
        }],
        'layout': {
            'mapbox': {
                'accesstoken': 'YOURMAPBOXTOKENGOESHERE',
                
                'center':dict(lat=40.754,lon=-73.977),
                'zoom':10,
            },
            'hovermode': 'closest',
            #'margin': {'l': 100, 'r':100, 't': 100, 'b':100},
            #'margin':'auto',
            'width':'800',
            'height':'800',
            #'padding':'10px',
                
        }
    }
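# Illustrative sketch only (never called by the app): how update_graph builds the
# model input. One hour's forecast feature vector is broadcast across every
# station row, then the stop_id column is overwritten with signed station numbers
# so direction is encoded in the sign. All numbers below are hypothetical.
def _example_feature_matrix():
    example_vals = np.array([2.0, 40.0, 17.0, 0.0, 75.0])  # day_of_week, weeks, hr, stop_id placeholder, heat_index
    example_stops = np.array([101.0, 103.0, 104.0])         # made-up parent_station ids
    matrix = np.multiply(np.ones([len(example_stops), len(example_vals)]), example_vals)
    matrix[:, 3] = -1 * example_stops                        # downtown direction: multiplier = -1
    return matrix                                            # one row per station, ready for rf.predict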
app.css.append_css({
    #'external_url': 'https://rawgit.com/dvoytenk/skipthetrain/master/bWLwgP.css'
     'external_url':'https://cdn.rawgit.com/dvoytenk/FareHarrier/f655b9e9/bWLwgP.css',
})
if __name__ == '__main__':
    #app.run_server(host='0.0.0.0',port=8000,debug=False)
    app.run_server()
 | 
	bsd-3-clause | 
| 
	ryanraaum/african-mtdna | 
	popdata_sources/graven1995/process.py | 
	1 | 
	2204 | 
	from oldowan.mtconvert import seq2sites, sites2seq, str2sites
from string import translate
import pandas as pd
import sys
sys.path.append('../../scripts')
from utils import *
## load metadata
metadata = pd.read_csv('metadata.csv', index_col=0)
region = range2region(metadata.ix[0,'SeqRange'])
region_str = metadata.ix[0,'SeqRange']
region_parts = region_str.split(';')
region1 = range2region(region_parts[0])
region2 = range2region(region_parts[1])
with open('graven1995.csv', 'rU') as f:
	header = f.readline()
	reference = f.readline()
	data = f.readlines()
positions = header.strip().split(',')[2:]
reference = reference.strip().split(',')[2:]
hids = []
freq = []
vals = []
for l in data:
	x = l.strip().split(',')
	hids.append(x[0])
	freq.append(int(x[1]))
	vals.append(x[2:])
sites = []
for i in range(len(vals)):
	x = vals[i]
	y = []
	for j in range(len(x)):
		if x[j] != '.':
			if j == 83:
				y.append('309.1C')
			elif j == 84:
				y.append('309.2C')
			elif j == 85:
				y.append('313.1C')
			elif j == 86:
				y.append('315.1C')
			else:
				y.append('%s%s' % (positions[j], x[j]))
	sites.append(' '.join(y))
## Validate
passed_validation = True
for i in range(len(sites)):
	curr_sites = str2sites(sites[i])
	cseq1 = sites2seq(curr_sites, region1)
	cseq2 = sites2seq(curr_sites, region2)
	mysites1 = seq2sites(cseq1)
	mysites2 = seq2sites(cseq2)
	mysites = mysites1 + mysites2
	if not mysites == curr_sites:
		seq = cseq1 + cseq2
		myseq = translate(sites2seq(mysites, region1), None, '-') + translate(sites2seq(mysites, region2), None, '-')
		if not seq == myseq:
			passed_validation = False
			print i, hids[i]
if passed_validation:
	count = 0
	prefix = metadata.ix[0,'NewPrefix']
	with open('processed.csv', 'w') as f:
		for i in range(len(sites)):
			hid = hids[i]
			curr_sites = str2sites(sites[i])
			cseq1 = sites2seq(curr_sites, region1)
			cseq2 = sites2seq(curr_sites, region2)
			mysites1 = seq2sites(cseq1)
			mysites2 = seq2sites(cseq2)
			mysites = mysites1 + mysites2
			mysites = ' '.join([str(x) for x in mysites])
			for j in range(freq[i]):
				count += 1
				num = str(count).zfill(3)
				newid = prefix + num
				f.write('%s,%s,%s\n' % (newid, hid, mysites)) | 
	cc0-1.0 | 
| 
	CJ-Jewell/ThinkStats2 | 
	code/hinc.py | 
	67 | 
	1494 | 
	"""This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import numpy as np
import pandas
import thinkplot
import thinkstats2
def Clean(s):
    """Converts dollar amounts to integers."""
    try:
        return int(s.lstrip('$').replace(',', ''))
    except ValueError:
        if s == 'Under':
            return 0
        elif s == 'over':
            return np.inf
        return None
def ReadData(filename='hinc06.csv'):
    """Reads filename and returns populations in thousands
    filename: string
    returns: pandas Series of populations in thousands
    """
    data = pandas.read_csv(filename, header=None, skiprows=9)
    cols = data[[0, 1]]
        
    res = []
    for _, row in cols.iterrows():
        label, freq = row.values
        freq = int(freq.replace(',', ''))
        t = label.split()
        low, high = Clean(t[0]), Clean(t[-1])
        res.append((high, freq))
    df = pandas.DataFrame(res)
    # correct the first range
    df[0][0] -= 1
    # compute the cumulative sum of the freqs
    df[2] = df[1].cumsum()
    # normalize the cumulative freqs
    total = df[2][41]  # cumulative sum in the last row (index 41)
    df[3] = df[2] / total
    # add column names
    df.columns = ['income',  'freq', 'cumsum', 'ps']
    return df
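# Illustrative sketch only (not called anywhere): the same cumulative-sum and
# normalization step that ReadData applies, shown on a tiny hypothetical table
# of (income upper bound, frequency) pairs.
def _example_cumulative_ps():
    toy = pandas.DataFrame([(4999, 4), (9999, 6), (np.inf, 10)])
    toy[2] = toy[1].cumsum()           # running total of the counts
    toy[3] = toy[2] / toy[2].iloc[-1]  # normalize so the last entry is 1.0
    toy.columns = ['income', 'freq', 'cumsum', 'ps']
    return toy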
def main():
    df = ReadData()
    print(df)
if __name__ == "__main__":
    main()
 | 
	gpl-3.0 | 
| 
	FRidh/scipy | 
	scipy/stats/_multivariate.py | 
	17 | 
	69089 | 
	#
# Author: Joris Vankerschaver 2013
#
from __future__ import division, print_function, absolute_import
import numpy as np
import scipy.linalg
from scipy.misc import doccer
from scipy.special import gammaln, psi, multigammaln
from scipy._lib._util import check_random_state
__all__ = ['multivariate_normal', 'dirichlet', 'wishart', 'invwishart']
_LOG_2PI = np.log(2 * np.pi)
_LOG_2 = np.log(2)
_LOG_PI = np.log(np.pi)
def _process_parameters(dim, mean, cov):
    """
    Infer dimensionality from mean or covariance matrix, ensure that
    mean and covariance are full vector resp. matrix.
    """
    # Try to infer dimensionality
    if dim is None:
        if mean is None:
            if cov is None:
                dim = 1
            else:
                cov = np.asarray(cov, dtype=float)
                if cov.ndim < 2:
                    dim = 1
                else:
                    dim = cov.shape[0]
        else:
            mean = np.asarray(mean, dtype=float)
            dim = mean.size
    else:
        if not np.isscalar(dim):
            raise ValueError("Dimension of random variable must be a scalar.")
    # Check input sizes and return full arrays for mean and cov if necessary
    if mean is None:
        mean = np.zeros(dim)
    mean = np.asarray(mean, dtype=float)
    if cov is None:
        cov = 1.0
    cov = np.asarray(cov, dtype=float)
    if dim == 1:
        mean.shape = (1,)
        cov.shape = (1, 1)
    if mean.ndim != 1 or mean.shape[0] != dim:
        raise ValueError("Array 'mean' must be a vector of length %d." % dim)
    if cov.ndim == 0:
        cov = cov * np.eye(dim)
    elif cov.ndim == 1:
        cov = np.diag(cov)
    elif cov.ndim == 2 and cov.shape != (dim, dim):
        rows, cols = cov.shape
        if rows != cols:
            msg = ("Array 'cov' must be square if it is two dimensional,"
                   " but cov.shape = %s." % str(cov.shape))
        else:
            msg = ("Dimension mismatch: array 'cov' is of shape %s,"
                   " but 'mean' is a vector of length %d.")
            msg = msg % (str(cov.shape), len(mean))
        raise ValueError(msg)
    elif cov.ndim > 2:
        raise ValueError("Array 'cov' must be at most two-dimensional,"
                         " but cov.ndim = %d" % cov.ndim)
    return dim, mean, cov
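# Illustrative sketch only (not used elsewhere in this module): the three `cov`
# forms accepted above all come back as a full (dim, dim) matrix -- a scalar is
# expanded to cov * eye(dim), a vector becomes a diagonal matrix, and a square
# 2-D array passes through after the shape checks.
def _example_process_parameters():
    _, _, from_scalar = _process_parameters(None, [0.0, 0.0], 2.0)         # 2 * eye(2)
    _, _, from_vector = _process_parameters(None, [0.0, 0.0], [1.0, 3.0])  # diag([1, 3])
    _, _, from_matrix = _process_parameters(None, [0.0, 0.0], np.eye(2))   # unchanged
    return from_scalar, from_vector, from_matrix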
def _process_quantiles(x, dim):
    """
    Adjust quantiles array so that last axis labels the components of
    each data point.
    """
    x = np.asarray(x, dtype=float)
    if x.ndim == 0:
        x = x[np.newaxis]
    elif x.ndim == 1:
        if dim == 1:
            x = x[:, np.newaxis]
        else:
            x = x[np.newaxis, :]
    return x
def _squeeze_output(out):
    """
    Remove single-dimensional entries from array and convert to scalar,
    if necessary.
    """
    out = out.squeeze()
    if out.ndim == 0:
        out = out[()]
    return out
def _eigvalsh_to_eps(spectrum, cond=None, rcond=None):
    """
    Determine which eigenvalues are "small" given the spectrum.
    This is for compatibility across various linear algebra functions
    that should agree about whether or not a Hermitian matrix is numerically
    singular and what its numerical matrix rank is.
    This is designed to be compatible with scipy.linalg.pinvh.
    Parameters
    ----------
    spectrum : 1d ndarray
        Array of eigenvalues of a Hermitian matrix.
    cond, rcond : float, optional
        Cutoff for small eigenvalues.
        Singular values smaller than rcond * largest_eigenvalue are
        considered zero.
        If None or -1, suitable machine precision is used.
    Returns
    -------
    eps : float
        Magnitude cutoff for numerical negligibility.
    """
    if rcond is not None:
        cond = rcond
    if cond in [None, -1]:
        t = spectrum.dtype.char.lower()
        factor = {'f': 1E3, 'd': 1E6}
        cond = factor[t] * np.finfo(t).eps
    eps = cond * np.max(abs(spectrum))
    return eps
def _pinv_1d(v, eps=1e-5):
    """
    A helper function for computing the pseudoinverse.
    Parameters
    ----------
    v : iterable of numbers
        This may be thought of as a vector of eigenvalues or singular values.
    eps : float
        Values with magnitude no greater than eps are considered negligible.
    Returns
    -------
    v_pinv : 1d float ndarray
        A vector of pseudo-inverted numbers.
    """
    return np.array([0 if abs(x) <= eps else 1/x for x in v], dtype=float)
class _PSD(object):
    """
    Compute coordinated functions of a symmetric positive semidefinite matrix.
    This class addresses two issues.  Firstly it allows the pseudoinverse,
    the logarithm of the pseudo-determinant, and the rank of the matrix
    to be computed using one call to eigh instead of three.
    Secondly it allows these functions to be computed in a way
    that gives mutually compatible results.
    All of the functions are computed with a common understanding as to
    which of the eigenvalues are to be considered negligibly small.
    The functions are designed to coordinate with scipy.linalg.pinvh()
    but not necessarily with np.linalg.det() or with np.linalg.matrix_rank().
    Parameters
    ----------
    M : array_like
        Symmetric positive semidefinite matrix (2-D).
    cond, rcond : float, optional
        Cutoff for small eigenvalues.
        Singular values smaller than rcond * largest_eigenvalue are
        considered zero.
        If None or -1, suitable machine precision is used.
    lower : bool, optional
        Whether the pertinent array data is taken from the lower
        or upper triangle of M. (Default: lower)
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite
        numbers. Disabling may give a performance gain, but may result
        in problems (crashes, non-termination) if the inputs do contain
        infinities or NaNs.
    allow_singular : bool, optional
        Whether to allow a singular matrix.  (Default: True)
    Notes
    -----
    The arguments are similar to those of scipy.linalg.pinvh().
    """
    def __init__(self, M, cond=None, rcond=None, lower=True,
                 check_finite=True, allow_singular=True):
        # Compute the symmetric eigendecomposition.
        # Note that eigh takes care of array conversion, chkfinite,
        # and assertion that the matrix is square.
        s, u = scipy.linalg.eigh(M, lower=lower, check_finite=check_finite)
        eps = _eigvalsh_to_eps(s, cond, rcond)
        if np.min(s) < -eps:
            raise ValueError('the input matrix must be positive semidefinite')
        d = s[s > eps]
        if len(d) < len(s) and not allow_singular:
            raise np.linalg.LinAlgError('singular matrix')
        s_pinv = _pinv_1d(s, eps)
        U = np.multiply(u, np.sqrt(s_pinv))
        # Initialize the eagerly precomputed attributes.
        self.rank = len(d)
        self.U = U
        self.log_pdet = np.sum(np.log(d))
        # Initialize an attribute to be lazily computed.
        self._pinv = None
    @property
    def pinv(self):
        if self._pinv is None:
            self._pinv = np.dot(self.U, self.U.T)
        return self._pinv
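# Illustrative sketch only (not used elsewhere in this module): _PSD exposes the
# pseudo-inverse, the log pseudo-determinant and the rank from a single call to
# eigh, so the three quantities are mutually consistent.
def _example_psd_usage():
    cov = np.array([[2.0, 0.0], [0.0, 3.0]])
    psd = _PSD(cov)
    # psd.rank == 2, psd.log_pdet == log(2) + log(3), and psd.pinv equals the
    # ordinary inverse because this matrix has full rank.
    return psd.rank, psd.log_pdet, psd.pinv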
_doc_default_callparams = """\
mean : array_like, optional
    Mean of the distribution (default zero)
cov : array_like, optional
    Covariance matrix of the distribution (default one)
allow_singular : bool, optional
    Whether to allow a singular covariance matrix.  (Default: False)
"""
_doc_callparams_note = \
    """Setting the parameter `mean` to `None` is equivalent to having `mean`
    be the zero-vector. The parameter `cov` can be a scalar, in which case
    the covariance matrix is the identity times that value, a vector of
    diagonal entries for the covariance matrix, or a two-dimensional
    array_like.
    """
_doc_random_state = """\
random_state : None or int or np.random.RandomState instance, optional
    If int or RandomState, use it for drawing the random variates.
    If None (or np.random), the global np.random state is used.
    Default is None.
"""
_doc_frozen_callparams = ""
_doc_frozen_callparams_note = \
    """See class definition for a detailed description of parameters."""
docdict_params = {
    '_doc_default_callparams': _doc_default_callparams,
    '_doc_callparams_note': _doc_callparams_note,
    '_doc_random_state': _doc_random_state
}
docdict_noparams = {
    '_doc_default_callparams': _doc_frozen_callparams,
    '_doc_callparams_note': _doc_frozen_callparams_note,
    '_doc_random_state': _doc_random_state
}
class multi_rv_generic(object):
    """
    Class which encapsulates common functionality between all multivariate
    distributions.
    """
    def __init__(self, seed=None):
        super(multi_rv_generic, self).__init__()
        self._random_state = check_random_state(seed)
    @property
    def random_state(self):
        """ Get or set the RandomState object for generating random variates.
        This can be either None or an existing RandomState object.
        If None (or np.random), use the RandomState singleton used by np.random.
        If already a RandomState instance, use it.
        If an int, use a new RandomState instance seeded with seed.
        """
        return self._random_state
    @random_state.setter
    def random_state(self, seed):
        self._random_state = check_random_state(seed)
    def _get_random_state(self, random_state):
        if random_state is not None:
            return check_random_state(random_state)
        else:
            return self._random_state
class multi_rv_frozen(object):
    """
    Class which encapsulates common functionality between all frozen
    multivariate distributions.
    """
    @property
    def random_state(self):
        return self._dist._random_state
    @random_state.setter
    def random_state(self, seed):
        self._dist._random_state = check_random_state(seed)
class multivariate_normal_gen(multi_rv_generic):
    r"""
    A multivariate normal random variable.
    The `mean` keyword specifies the mean. The `cov` keyword specifies the
    covariance matrix.
    Methods
    -------
    ``pdf(x, mean=None, cov=1, allow_singular=False)``
        Probability density function.
    ``logpdf(x, mean=None, cov=1, allow_singular=False)``
        Log of the probability density function.
    ``rvs(mean=None, cov=1, size=1, random_state=None)``
        Draw random samples from a multivariate normal distribution.
    ``entropy()``
        Compute the differential entropy of the multivariate normal.
    Parameters
    ----------
    x : array_like
        Quantiles, with the last axis of `x` denoting the components.
    %(_doc_default_callparams)s
    %(_doc_random_state)s
    Alternatively, the object may be called (as a function) to fix the mean
    and covariance parameters, returning a "frozen" multivariate normal
    random variable:
    rv = multivariate_normal(mean=None, cov=1, allow_singular=False)
        - Frozen object with the same methods but holding the given
          mean and covariance fixed.
    Notes
    -----
    %(_doc_callparams_note)s
    The covariance matrix `cov` must be a (symmetric) positive
    semi-definite matrix. The determinant and inverse of `cov` are computed
    as the pseudo-determinant and pseudo-inverse, respectively, so
    that `cov` does not need to have full rank.
    The probability density function for `multivariate_normal` is
    .. math::
        f(x) = \frac{1}{\sqrt{(2 \pi)^k \det \Sigma}}
               \exp\left( -\frac{1}{2} (x - \mu)^T \Sigma^{-1} (x - \mu) \right),
    where :math:`\mu` is the mean, :math:`\Sigma` the covariance matrix,
    and :math:`k` is the dimension of the space where :math:`x` takes values.
    .. versionadded:: 0.14.0
    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> from scipy.stats import multivariate_normal
    >>> x = np.linspace(0, 5, 10, endpoint=False)
    >>> y = multivariate_normal.pdf(x, mean=2.5, cov=0.5); y
    array([ 0.00108914,  0.01033349,  0.05946514,  0.20755375,  0.43939129,
            0.56418958,  0.43939129,  0.20755375,  0.05946514,  0.01033349])
    >>> fig1 = plt.figure()
    >>> ax = fig1.add_subplot(111)
    >>> ax.plot(x, y)
    The input quantiles can be any shape of array, as long as the last
    axis labels the components.  This allows us for instance to
    display the frozen pdf for a non-isotropic random variable in 2D as
    follows:
    >>> x, y = np.mgrid[-1:1:.01, -1:1:.01]
    >>> pos = np.empty(x.shape + (2,))
    >>> pos[:, :, 0] = x; pos[:, :, 1] = y
    >>> rv = multivariate_normal([0.5, -0.2], [[2.0, 0.3], [0.3, 0.5]])
    >>> fig2 = plt.figure()
    >>> ax2 = fig2.add_subplot(111)
    >>> ax2.contourf(x, y, rv.pdf(pos))
    """
    def __init__(self, seed=None):
        super(multivariate_normal_gen, self).__init__(seed)
        self.__doc__ = doccer.docformat(self.__doc__, docdict_params)
    def __call__(self, mean=None, cov=1, allow_singular=False, seed=None):
        """
        Create a frozen multivariate normal distribution.
        See `multivariate_normal_frozen` for more information.
        """
        return multivariate_normal_frozen(mean, cov,
                                          allow_singular=allow_singular,
                                          seed=seed)
    def _logpdf(self, x, mean, prec_U, log_det_cov, rank):
        """
        Parameters
        ----------
        x : ndarray
            Points at which to evaluate the log of the probability
            density function
        mean : ndarray
            Mean of the distribution
        prec_U : ndarray
            A decomposition such that np.dot(prec_U, prec_U.T)
            is the precision matrix, i.e. inverse of the covariance matrix.
        log_det_cov : float
            Logarithm of the determinant of the covariance matrix
        rank : int
            Rank of the covariance matrix.
        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'logpdf' instead.
        """
        dev = x - mean
        maha = np.sum(np.square(np.dot(dev, prec_U)), axis=-1)
        return -0.5 * (rank * _LOG_2PI + log_det_cov + maha)
    def logpdf(self, x, mean, cov, allow_singular=False):
        """
        Log of the multivariate normal probability density function.
        Parameters
        ----------
        x : array_like
            Quantiles, with the last axis of `x` denoting the components.
        %(_doc_default_callparams)s
        Returns
        -------
        pdf : ndarray
            Log of the probability density function evaluated at `x`
        Notes
        -----
        %(_doc_callparams_note)s
        """
        dim, mean, cov = _process_parameters(None, mean, cov)
        x = _process_quantiles(x, dim)
        psd = _PSD(cov, allow_singular=allow_singular)
        out = self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank)
        return _squeeze_output(out)
    def pdf(self, x, mean, cov, allow_singular=False):
        """
        Multivariate normal probability density function.
        Parameters
        ----------
        x : array_like
            Quantiles, with the last axis of `x` denoting the components.
        %(_doc_default_callparams)s
        Returns
        -------
        pdf : ndarray
            Probability density function evaluated at `x`
        Notes
        -----
        %(_doc_callparams_note)s
        """
        dim, mean, cov = _process_parameters(None, mean, cov)
        x = _process_quantiles(x, dim)
        psd = _PSD(cov, allow_singular=allow_singular)
        out = np.exp(self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank))
        return _squeeze_output(out)
    def rvs(self, mean=None, cov=1, size=1, random_state=None):
        """
        Draw random samples from a multivariate normal distribution.
        Parameters
        ----------
        %(_doc_default_callparams)s
        size : integer, optional
            Number of samples to draw (default 1).
        %(_doc_random_state)s
        Returns
        -------
        rvs : ndarray or scalar
            Random variates of size (`size`, `N`), where `N` is the
            dimension of the random variable.
        Notes
        -----
        %(_doc_callparams_note)s
        """
        dim, mean, cov = _process_parameters(None, mean, cov)
        random_state = self._get_random_state(random_state)
        out = random_state.multivariate_normal(mean, cov, size)
        return _squeeze_output(out)
    def entropy(self, mean=None, cov=1):
        """
        Compute the differential entropy of the multivariate normal.
        Parameters
        ----------
        %(_doc_default_callparams)s
        Returns
        -------
        h : scalar
            Entropy of the multivariate normal distribution
        Notes
        -----
        %(_doc_callparams_note)s
        """
        dim, mean, cov = _process_parameters(None, mean, cov)
        _, logdet = np.linalg.slogdet(2 * np.pi * np.e * cov)
        return 0.5 * logdet
multivariate_normal = multivariate_normal_gen()
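# Illustrative sketch only (not part of the public API): for a 2-D Gaussian,
# logpdf reduces to the closed-form expression from the class docstring,
# -0.5 * (k*log(2*pi) + log|Sigma| + (x - mu)' Sigma^{-1} (x - mu)).
def _example_mvn_logpdf_check():
    x = np.array([1.0, -1.0])
    mean = np.zeros(2)
    cov = np.diag([2.0, 0.5])
    by_hand = -0.5 * (2 * _LOG_2PI + np.log(np.linalg.det(cov))
                      + (x - mean).dot(np.linalg.inv(cov)).dot(x - mean))
    return multivariate_normal.logpdf(x, mean, cov), by_hand  # equal up to rounding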
class multivariate_normal_frozen(multi_rv_frozen):
    def __init__(self, mean=None, cov=1, allow_singular=False, seed=None):
        """
        Create a frozen multivariate normal distribution.
        Parameters
        ----------
        mean : array_like, optional
            Mean of the distribution (default zero)
        cov : array_like, optional
            Covariance matrix of the distribution (default one)
        allow_singular : bool, optional
            If this flag is True then tolerate a singular
            covariance matrix (default False).
        seed : None or int or np.random.RandomState instance, optional
            This parameter defines the RandomState object to use for drawing
            random variates.
            If None (or np.random), the global np.random state is used.
            If integer, it is used to seed the local RandomState instance
            Default is None.
        Examples
        --------
        When called with the default parameters, this will create a 1D random
        variable with mean 0 and covariance 1:
        >>> from scipy.stats import multivariate_normal
        >>> r = multivariate_normal()
        >>> r.mean
        array([ 0.])
        >>> r.cov
        array([[1.]])
        """
        self.dim, self.mean, self.cov = _process_parameters(None, mean, cov)
        self.cov_info = _PSD(self.cov, allow_singular=allow_singular)
        self._dist = multivariate_normal_gen(seed)
    def logpdf(self, x):
        x = _process_quantiles(x, self.dim)
        out = self._dist._logpdf(x, self.mean, self.cov_info.U,
                                 self.cov_info.log_pdet, self.cov_info.rank)
        return _squeeze_output(out)
    def pdf(self, x):
        return np.exp(self.logpdf(x))
    def rvs(self, size=1, random_state=None):
        return self._dist.rvs(self.mean, self.cov, size, random_state)
    def entropy(self):
        """
        Computes the differential entropy of the multivariate normal.
        Returns
        -------
        h : scalar
            Entropy of the multivariate normal distribution
        """
        log_pdet = self.cov_info.log_pdet
        rank = self.cov_info.rank
        return 0.5 * (rank * (_LOG_2PI + 1) + log_pdet)
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs']:
    method = multivariate_normal_gen.__dict__[name]
    method_frozen = multivariate_normal_frozen.__dict__[name]
    method_frozen.__doc__ = doccer.docformat(method.__doc__, docdict_noparams)
    method.__doc__ = doccer.docformat(method.__doc__, docdict_params)
_dirichlet_doc_default_callparams = """\
alpha : array_like
    The concentration parameters. The number of entries determines the
    dimensionality of the distribution.
"""
_dirichlet_doc_frozen_callparams = ""
_dirichlet_doc_frozen_callparams_note = \
    """See class definition for a detailed description of parameters."""
dirichlet_docdict_params = {
    '_dirichlet_doc_default_callparams': _dirichlet_doc_default_callparams,
    '_doc_random_state': _doc_random_state
}
dirichlet_docdict_noparams = {
    '_dirichlet_doc_default_callparams': _dirichlet_doc_frozen_callparams,
    '_doc_random_state': _doc_random_state
}
def _dirichlet_check_parameters(alpha):
    alpha = np.asarray(alpha)
    if np.min(alpha) <= 0:
        raise ValueError("All parameters must be greater than 0")
    elif alpha.ndim != 1:
        raise ValueError("Parameter vector 'a' must be one dimensional, " +
                         "but a.shape = %s." % str(alpha.shape))
    return alpha
def _dirichlet_check_input(alpha, x):
    x = np.asarray(x)
    if x.shape[0] + 1 != alpha.shape[0] and x.shape[0] != alpha.shape[0]:
        raise ValueError("Vector 'x' must have one entry less then the" +
                         " parameter vector 'a', but alpha.shape = " +
                         "%s and " % alpha.shape +
                         "x.shape = %s." % x.shape)
    if x.shape[0] != alpha.shape[0]:
        xk = np.array([1 - np.sum(x, 0)])
        if xk.ndim == 1:
            x = np.append(x, xk)
        elif xk.ndim == 2:
            x = np.vstack((x, xk))
        else:
            raise ValueError("The input must be one dimensional or a two "
                             "dimensional matrix containing the entries.")
    if np.min(x) < 0:
        raise ValueError("Each entry in 'x' must be greater or equal zero.")
    if np.max(x) > 1:
        raise ValueError("Each entry in 'x' must be smaller or equal one.")
    if (np.abs(np.sum(x, 0) - 1.0) > 10e-10).any():
        raise ValueError("The input vector 'x' must lie within the normal " +
                         "simplex. but sum(x)=%f." % np.sum(x, 0))
    return x
def _lnB(alpha):
    r"""
    Internal helper function to compute the log of the useful quotient
    .. math::
        B(\alpha) = \frac{\prod_{i=1}^{K}\Gamma(\alpha_i)}{\Gamma\left(\sum_{i=1}^{K}\alpha_i\right)}
    Parameters
    ----------
    %(_dirichlet_doc_default_callparams)s
    Returns
    -------
    B : scalar
        Helper quotient, internal use only
    """
    return np.sum(gammaln(alpha)) - gammaln(np.sum(alpha))
class dirichlet_gen(multi_rv_generic):
    r"""
    A Dirichlet random variable.
    The `alpha` keyword specifies the concentration parameters of the
    distribution.
    .. versionadded:: 0.15.0
    Methods
    -------
    ``pdf(x, alpha)``
        Probability density function.
    ``logpdf(x, alpha)``
        Log of the probability density function.
    ``rvs(alpha, size=1, random_state=None)``
        Draw random samples from a Dirichlet distribution.
    ``mean(alpha)``
        The mean of the Dirichlet distribution
    ``var(alpha)``
        The variance of the Dirichlet distribution
    ``entropy(alpha)``
        Compute the differential entropy of the Dirichlet distribution.
    Parameters
    ----------
    x : array_like
        Quantiles, with the last axis of `x` denoting the components.
    %(_dirichlet_doc_default_callparams)s
    %(_doc_random_state)s
    Alternatively, the object may be called (as a function) to fix
    concentration parameters, returning a "frozen" Dirichlet
    random variable:
    rv = dirichlet(alpha)
        - Frozen object with the same methods but holding the given
          concentration parameters fixed.
    Notes
    -----
    Each :math:`\alpha` entry must be positive. The distribution has only
    support on the simplex defined by
    .. math::
        \sum_{i=1}^{K} x_i \le 1
    The probability density function for `dirichlet` is
    .. math::
        f(x) = \frac{1}{\mathrm{B}(\boldsymbol\alpha)} \prod_{i=1}^K x_i^{\alpha_i - 1}
    where
    .. math::
        \mathrm{B}(\boldsymbol\alpha) = \frac{\prod_{i=1}^K \Gamma(\alpha_i)}
                                     {\Gamma\bigl(\sum_{i=1}^K \alpha_i\bigr)}
    and :math:`\boldsymbol\alpha=(\alpha_1,\ldots,\alpha_K)`, the
    concentration parameters and :math:`K` is the dimension of the space
    where :math:`x` takes values.
    """
    def __init__(self, seed=None):
        super(dirichlet_gen, self).__init__(seed)
        self.__doc__ = doccer.docformat(self.__doc__, dirichlet_docdict_params)
    def __call__(self, alpha, seed=None):
        return dirichlet_frozen(alpha, seed=seed)
    def _logpdf(self, x, alpha):
        """
        Parameters
        ----------
        x : ndarray
            Points at which to evaluate the log of the probability
            density function
        %(_dirichlet_doc_default_callparams)s
        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'logpdf' instead.
        """
        lnB = _lnB(alpha)
        return - lnB + np.sum((np.log(x.T) * (alpha - 1)).T, 0)
    def logpdf(self, x, alpha):
        """
        Log of the Dirichlet probability density function.
        Parameters
        ----------
        x : array_like
            Quantiles, with the last axis of `x` denoting the components.
        %(_dirichlet_doc_default_callparams)s
        Returns
        -------
        pdf : ndarray
            Log of the probability density function evaluated at `x`.
        """
        alpha = _dirichlet_check_parameters(alpha)
        x = _dirichlet_check_input(alpha, x)
        out = self._logpdf(x, alpha)
        return _squeeze_output(out)
    def pdf(self, x, alpha):
        """
        The Dirichlet probability density function.
        Parameters
        ----------
        x : array_like
            Quantiles, with the last axis of `x` denoting the components.
        %(_dirichlet_doc_default_callparams)s
        Returns
        -------
        pdf : ndarray
            The probability density function evaluated at `x`.
        """
        alpha = _dirichlet_check_parameters(alpha)
        x = _dirichlet_check_input(alpha, x)
        out = np.exp(self._logpdf(x, alpha))
        return _squeeze_output(out)
    def mean(self, alpha):
        """
        Compute the mean of the Dirichlet distribution.
        Parameters
        ----------
        %(_dirichlet_doc_default_callparams)s
        Returns
        -------
        mu : ndarray or scalar
            Mean of the Dirichlet distribution
        """
        alpha = _dirichlet_check_parameters(alpha)
        out = alpha / (np.sum(alpha))
        return _squeeze_output(out)
    def var(self, alpha):
        """
        Compute the variance of the Dirichlet distribution.
        Parameters
        ----------
        %(_dirichlet_doc_default_callparams)s
        Returns
        -------
        v : ndarray or scalar
            Variance of the Dirichlet distribution
        """
        alpha = _dirichlet_check_parameters(alpha)
        alpha0 = np.sum(alpha)
        out = (alpha * (alpha0 - alpha)) / ((alpha0 * alpha0) * (alpha0 + 1))
        return out
    def entropy(self, alpha):
        """
        Compute the differential entropy of the Dirichlet distribution.
        Parameters
        ----------
        %(_dirichlet_doc_default_callparams)s
        Returns
        -------
        h : scalar
            Entropy of the Dirichlet distribution
        """
        alpha = _dirichlet_check_parameters(alpha)
        alpha0 = np.sum(alpha)
        lnB = _lnB(alpha)
        K = alpha.shape[0]
        out = lnB + (alpha0 - K) * scipy.special.psi(alpha0) - np.sum(
            (alpha - 1) * scipy.special.psi(alpha))
        return _squeeze_output(out)
    def rvs(self, alpha, size=1, random_state=None):
        """
        Draw random samples from a Dirichlet distribution.
        Parameters
        ----------
        %(_dirichlet_doc_default_callparams)s
        size : int, optional
            Number of samples to draw (default 1).
        %(_doc_random_state)s
        Returns
        -------
        rvs : ndarray or scalar
            Random variates of size (`size`, `N`), where `N` is the
            dimension of the random variable.
        """
        alpha = _dirichlet_check_parameters(alpha)
        random_state = self._get_random_state(random_state)
        return random_state.dirichlet(alpha, size=size)
dirichlet = dirichlet_gen()
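# Illustrative sketch only (not part of the public API): with alpha = [1, 1, 1]
# the Dirichlet density is constant on the simplex and equals
# 1/B(alpha) = Gamma(3) / Gamma(1)**3 = 2, which pdf reproduces at any
# interior point of the simplex.
def _example_dirichlet_uniform():
    alpha = np.array([1.0, 1.0, 1.0])
    x = np.array([0.2, 0.3, 0.5])
    return dirichlet.pdf(x, alpha)  # == 2.0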
class dirichlet_frozen(multi_rv_frozen):
    def __init__(self, alpha, seed=None):
        self.alpha = _dirichlet_check_parameters(alpha)
        self._dist = dirichlet_gen(seed)
    def logpdf(self, x):
        return self._dist.logpdf(x, self.alpha)
    def pdf(self, x):
        return self._dist.pdf(x, self.alpha)
    def mean(self):
        return self._dist.mean(self.alpha)
    def var(self):
        return self._dist.var(self.alpha)
    def entropy(self):
        return self._dist.entropy(self.alpha)
    def rvs(self, size=1, random_state=None):
        return self._dist.rvs(self.alpha, size, random_state)
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs', 'mean', 'var', 'entropy']:
    method = dirichlet_gen.__dict__[name]
    method_frozen = dirichlet_frozen.__dict__[name]
    method_frozen.__doc__ = doccer.docformat(
        method.__doc__, dirichlet_docdict_noparams)
    method.__doc__ = doccer.docformat(method.__doc__, dirichlet_docdict_params)
_wishart_doc_default_callparams = """\
df : int
    Degrees of freedom, must be greater than or equal to dimension of the
    scale matrix
scale : array_like
    Symmetric positive definite scale matrix of the distribution
"""
_wishart_doc_callparams_note = ""
_wishart_doc_frozen_callparams = ""
_wishart_doc_frozen_callparams_note = \
    """See class definition for a detailed description of parameters."""
wishart_docdict_params = {
    '_doc_default_callparams': _wishart_doc_default_callparams,
    '_doc_callparams_note': _wishart_doc_callparams_note,
    '_doc_random_state': _doc_random_state
}
wishart_docdict_noparams = {
    '_doc_default_callparams': _wishart_doc_frozen_callparams,
    '_doc_callparams_note': _wishart_doc_frozen_callparams_note,
    '_doc_random_state': _doc_random_state
}
class wishart_gen(multi_rv_generic):
    r"""
    A Wishart random variable.
    The `df` keyword specifies the degrees of freedom. The `scale` keyword
    specifies the scale matrix, which must be symmetric and positive definite.
    In this context, the scale matrix is often interpreted in terms of a
    multivariate normal precision matrix (the inverse of the covariance
    matrix).
    Methods
    -------
    ``pdf(x, df, scale)``
        Probability density function.
    ``logpdf(x, df, scale)``
        Log of the probability density function.
    ``rvs(df, scale, size=1, random_state=None)``
        Draw random samples from a Wishart distribution.
    ``entropy()``
        Compute the differential entropy of the Wishart distribution.
    Parameters
    ----------
    x : array_like
        Quantiles, with the last axis of `x` denoting the components.
    %(_doc_default_callparams)s
    %(_doc_random_state)s
    Alternatively, the object may be called (as a function) to fix the degrees
    of freedom and scale parameters, returning a "frozen" Wishart random
    variable:
    rv = wishart(df=1, scale=1)
        - Frozen object with the same methods but holding the given
          degrees of freedom and scale fixed.
    See Also
    --------
    invwishart, chi2
    Notes
    -----
    %(_doc_callparams_note)s
    The scale matrix `scale` must be a symmetric positive definite
    matrix. Singular matrices, including the symmetric positive semi-definite
    case, are not supported.
    The Wishart distribution is often denoted
    .. math::
        W_p(\nu, \Sigma)
    where :math:`\nu` is the degrees of freedom and :math:`\Sigma` is the
    :math:`p \times p` scale matrix.
    The probability density function for `wishart` has support over positive
    definite matrices :math:`S`; if :math:`S \sim W_p(\nu, \Sigma)`, then
    its PDF is given by:
    .. math::
        f(S) = \frac{|S|^{\frac{\nu - p - 1}{2}}}{2^{ \frac{\nu p}{2} }
               |\Sigma|^\frac{\nu}{2} \Gamma_p \left ( \frac{\nu}{2} \right )}
               \exp\left( -tr(\Sigma^{-1} S) / 2 \right)
    If :math:`S \sim W_p(\nu, \Sigma)` (Wishart) then
    :math:`S^{-1} \sim W_p^{-1}(\nu, \Sigma^{-1})` (inverse Wishart).
    If the scale matrix is 1-dimensional and equal to one, then the Wishart
    distribution :math:`W_1(\nu, 1)` collapses to the :math:`\chi^2(\nu)`
    distribution.
    .. versionadded:: 0.16.0
    References
    ----------
    .. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
           Wiley, 1983.
    .. [2] W.B. Smith and R.R. Hocking, "Algorithm AS 53: Wishart Variate
           Generator", Applied Statistics, vol. 21, pp. 341-345, 1972.
    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> from scipy.stats import wishart, chi2
    >>> x = np.linspace(1e-5, 8, 100)
    >>> w = wishart.pdf(x, df=3, scale=1); w[:5]
    array([ 0.00126156,  0.10892176,  0.14793434,  0.17400548,  0.1929669 ])
    >>> c = chi2.pdf(x, 3); c[:5]
    array([ 0.00126156,  0.10892176,  0.14793434,  0.17400548,  0.1929669 ])
    >>> plt.plot(x, w)
    The input quantiles can be any shape of array, as long as the last
    axis labels the components.
    """
    def __init__(self, seed=None):
        super(wishart_gen, self).__init__(seed)
        self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
    def __call__(self, df=None, scale=None, seed=None):
        """
        Create a frozen Wishart distribution.
        See `wishart_frozen` for more information.
        """
        return wishart_frozen(df, scale, seed)
    def _process_parameters(self, df, scale):
        if scale is None:
            scale = 1.0
        scale = np.asarray(scale, dtype=float)
        if scale.ndim == 0:
            scale = scale[np.newaxis,np.newaxis]
        elif scale.ndim == 1:
            scale = np.diag(scale)
        elif scale.ndim == 2 and not scale.shape[0] == scale.shape[1]:
            raise ValueError("Array 'scale' must be square if it is two"
                             " dimensional, but scale.scale = %s."
                             % str(scale.shape))
        elif scale.ndim > 2:
            raise ValueError("Array 'scale' must be at most two-dimensional,"
                             " but scale.ndim = %d" % scale.ndim)
        dim = scale.shape[0]
        if df is None:
            df = dim
        elif not np.isscalar(df):
            raise ValueError("Degrees of freedom must be a scalar.")
        elif df < dim:
            raise ValueError("Degrees of freedom cannot be less than dimension"
                             " of scale matrix, but df = %d" % df)
        return dim, df, scale
    def _process_quantiles(self, x, dim):
        """
        Adjust quantiles array so that last axis labels the components of
        each data point.
        """
        x = np.asarray(x, dtype=float)
        if x.ndim == 0:
            x = x * np.eye(dim)[:, :, np.newaxis]
        if x.ndim == 1:
            if dim == 1:
                x = x[np.newaxis, np.newaxis, :]
            else:
                x = np.diag(x)[:, :, np.newaxis]
        elif x.ndim == 2:
            if not x.shape[0] == x.shape[1]:
                raise ValueError("Quantiles must be square if they are two"
                                 " dimensional, but x.shape = %s."
                                 % str(x.shape))
            x = x[:, :, np.newaxis]
        elif x.ndim == 3:
            if not x.shape[0] == x.shape[1]:
                raise ValueError("Quantiles must be square in the first two"
                                 " dimensions if they are three dimensional"
                                 ", but x.shape = %s." % str(x.shape))
        elif x.ndim > 3:
            raise ValueError("Quantiles must be at most two-dimensional with"
                             " an additional dimension for multiple"
                             "components, but x.ndim = %d" % x.ndim)
        # Now we have 3-dim array; should have shape [dim, dim, *]
        if not x.shape[0:2] == (dim, dim):
            raise ValueError('Quantiles have incompatible dimensions: should'
                             ' be %s, got %s.' % ((dim, dim), x.shape[0:2]))
        return x
    def _process_size(self, size):
        size = np.asarray(size)
        if size.ndim == 0:
            size = size[np.newaxis]
        elif size.ndim > 1:
            raise ValueError('Size must be an integer or tuple of integers;'
                             ' thus must have dimension <= 1.'
                             ' Got size = %s' % str(tuple(size)))
        n = size.prod()
        shape = tuple(size)
        return n, shape
    def _logpdf(self, x, dim, df, scale, log_det_scale, C):
        """
        Parameters
        ----------
        x : ndarray
            Points at which to evaluate the log of the probability
            density function
        dim : int
            Dimension of the scale matrix
        df : int
            Degrees of freedom
        scale : ndarray
            Scale matrix
        log_det_scale : float
            Logarithm of the determinant of the scale matrix
        C : ndarray
            Cholesky factorization of the scale matrix, lower triangular.
        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'logpdf' instead.
        """
        # log determinant of x
        # Note: x has components along the last axis, so that x.T has
        # components along the 0-th axis. Then since det(A) = det(A'), this
        # gives us a 1-dim vector of determinants
        # Retrieve tr(scale^{-1} x)
        log_det_x = np.zeros(x.shape[-1])
        scale_inv_x = np.zeros(x.shape)
        tr_scale_inv_x = np.zeros(x.shape[-1])
        for i in range(x.shape[-1]):
            _, log_det_x[i] = self._cholesky_logdet(x[:,:,i])
            scale_inv_x[:,:,i] = scipy.linalg.cho_solve((C, True), x[:,:,i])
            tr_scale_inv_x[i] = scale_inv_x[:,:,i].trace()
        # Log PDF
        out = ((0.5 * (df - dim - 1) * log_det_x - 0.5 * tr_scale_inv_x) -
               (0.5 * df * dim * _LOG_2 + 0.5 * df * log_det_scale +
                multigammaln(0.5*df, dim)))
        return out
    def logpdf(self, x, df, scale):
        """
        Log of the Wishart probability density function.
        Parameters
        ----------
        x : array_like
            Quantiles, with the last axis of `x` denoting the components.
            Each quantile must be a symmetric positive definite matrix.
        %(_doc_default_callparams)s
        Returns
        -------
        pdf : ndarray
            Log of the probability density function evaluated at `x`
        Notes
        -----
        %(_doc_callparams_note)s
        """
        dim, df, scale = self._process_parameters(df, scale)
        x = self._process_quantiles(x, dim)
        # Cholesky decomposition of scale, get log(det(scale))
        C, log_det_scale = self._cholesky_logdet(scale)
        out = self._logpdf(x, dim, df, scale, log_det_scale, C)
        return _squeeze_output(out)
    def pdf(self, x, df, scale):
        """
        Wishart probability density function.
        Parameters
        ----------
        x : array_like
            Quantiles, with the last axis of `x` denoting the components.
            Each quantile must be a symmetric positive definite matrix.
        %(_doc_default_callparams)s
        Returns
        -------
        pdf : ndarray
            Probability density function evaluated at `x`
        Notes
        -----
        %(_doc_callparams_note)s
        """
        return np.exp(self.logpdf(x, df, scale))
    def _mean(self, dim, df, scale):
        """
        Parameters
        ----------
        dim : int
            Dimension of the scale matrix
        %(_doc_default_callparams)s
        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'mean' instead.
        """
        return df * scale
    def mean(self, df, scale):
        """
        Mean of the Wishart distribution
        Parameters
        ----------
        %(_doc_default_callparams)s
        Returns
        -------
        mean : float
            The mean of the distribution
        """
        dim, df, scale = self._process_parameters(df, scale)
        out = self._mean(dim, df, scale)
        return _squeeze_output(out)
    def _mode(self, dim, df, scale):
        """
        Parameters
        ----------
        dim : int
            Dimension of the scale matrix
        %(_doc_default_callparams)s
        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'mode' instead.
        """
        if df >= dim + 1:
            out = (df-dim-1) * scale
        else:
            out = None
        return out
    def mode(self, df, scale):
        """
        Mode of the Wishart distribution
        Only valid if the degrees of freedom are greater than the dimension of
        the scale matrix.
        Parameters
        ----------
        %(_doc_default_callparams)s
        Returns
        -------
        mode : float or None
            The Mode of the distribution
        """
        dim, df, scale = self._process_parameters(df, scale)
        out = self._mode(dim, df, scale)
        return _squeeze_output(out) if out is not None else out
    def _var(self, dim, df, scale):
        """
        Parameters
        ----------
        dim : int
            Dimension of the scale matrix
        %(_doc_default_callparams)s
        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'var' instead.
        """
        var = scale**2
        diag = scale.diagonal()  # 1 x dim array
        var += np.outer(diag, diag)
        var *= df
        return var
    def var(self, df, scale):
        """
        Variance of the Wishart distribution
        Parameters
        ----------
        %(_doc_default_callparams)s
        Returns
        -------
        var : float
            The variance of the distribution
        """
        dim, df, scale = self._process_parameters(df, scale)
        out = self._var(dim, df, scale)
        return _squeeze_output(out)
    def _standard_rvs(self, n, shape, dim, df, random_state):
        """
        Parameters
        ----------
        n : integer
            Number of variates to generate
        shape : iterable
            Shape of the variates to generate
        dim : int
            Dimension of the scale matrix
        df : int
            Degrees of freedom
        random_state : np.random.RandomState instance
            RandomState used for drawing the random variates.
        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'rvs' instead.
        """
        # Random normal variates for off-diagonal elements
        n_tril = dim * (dim-1) // 2
        covariances = random_state.normal(
            size=n*n_tril).reshape(shape+(n_tril,))
        # Random chi-square variates for diagonal elements
        variances = np.r_[[random_state.chisquare(df-(i+1)+1, size=n)**0.5
             for i in range(dim)]].reshape((dim,) + shape[::-1]).T
        # Create the A matri(ces) - lower triangular
        A = np.zeros(shape + (dim, dim))
        # Input the covariances
        size_idx = tuple([slice(None,None,None)]*len(shape))
        tril_idx = np.tril_indices(dim, k=-1)
        A[size_idx + tril_idx] = covariances
        # Input the variances
        diag_idx = np.diag_indices(dim)
        A[size_idx + diag_idx] = variances
        return A
    def _rvs(self, n, shape, dim, df, C, random_state):
        """
        Parameters
        ----------
        n : integer
            Number of variates to generate
        shape : iterable
            Shape of the variates to generate
        dim : int
            Dimension of the scale matrix
        df : int
            Degrees of freedom
        scale : ndarray
            Scale matrix
        C : ndarray
            Cholesky factorization of the scale matrix, lower triangular.
        %(_doc_random_state)s
        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'rvs' instead.
        """
        random_state = self._get_random_state(random_state)
        # Calculate the matrices A, which are actually lower triangular
        # Cholesky factorizations of a matrix B such that B ~ W(df, I)
        A = self._standard_rvs(n, shape, dim, df, random_state)
        # Calculate SA = C A A' C', where SA ~ W(df, scale)
        # Note: this is the product of a (lower) (lower) (lower)' (lower)'
        #       or, denoting B = AA', it is C B C' where C is the lower
        #       triangular Cholesky factorization of the scale matrix.
        #       this appears to conflict with the instructions in [1]_, which
        #       suggest that it should be D' B D where D is the lower
        #       triangular factorization of the scale matrix. However, it is
        #       meant to refer to the Bartlett (1933) representation of a
        #       Wishart random variate as L A A' L' where L is lower triangular
        #       so it appears that understanding D' to be upper triangular
        #       is either a typo in or misreading of [1]_.
        for index in np.ndindex(shape):
            CA = np.dot(C, A[index])
            A[index] = np.dot(CA, CA.T)
        return A
    def rvs(self, df, scale, size=1, random_state=None):
        """
        Draw random samples from a Wishart distribution.
        Parameters
        ----------
        %(_doc_default_callparams)s
        size : integer or iterable of integers, optional
            Number of samples to draw (default 1).
        %(_doc_random_state)s
        Returns
        -------
        rvs : ndarray
            Random variates of shape (`size`) + (`dim`, `dim`), where `dim` is
            the dimension of the scale matrix.
        Notes
        -----
        %(_doc_callparams_note)s
        """
        n, shape = self._process_size(size)
        dim, df, scale = self._process_parameters(df, scale)
        # Cholesky decomposition of scale
        C = scipy.linalg.cholesky(scale, lower=True)
        out = self._rvs(n, shape, dim, df, C, random_state)
        return _squeeze_output(out)
    def _entropy(self, dim, df, log_det_scale):
        """
        Parameters
        ----------
        dim : int
            Dimension of the scale matrix
        df : int
            Degrees of freedom
        log_det_scale : float
            Logarithm of the determinant of the scale matrix
        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'entropy' instead.
        """
        return (
            0.5 * (dim+1) * log_det_scale +
            0.5 * dim * (dim+1) * _LOG_2 +
            multigammaln(0.5*df, dim) -
            0.5 * (df - dim - 1) * np.sum(
                [psi(0.5*(df + 1 - (i+1))) for i in range(dim)]
            ) +
            0.5 * df * dim
        )
    def entropy(self, df, scale):
        """
        Compute the differential entropy of the Wishart.
        Parameters
        ----------
        %(_doc_default_callparams)s
        Returns
        -------
        h : scalar
            Entropy of the Wishart distribution
        Notes
        -----
        %(_doc_callparams_note)s
        """
        dim, df, scale = self._process_parameters(df, scale)
        _, log_det_scale = self._cholesky_logdet(scale)
        return self._entropy(dim, df, log_det_scale)
    def _cholesky_logdet(self, scale):
        """
        Compute the Cholesky decomposition and determine log(det(scale)).
        Parameters
        ----------
        scale : ndarray
            Scale matrix.
        Returns
        -------
        c_decomp : ndarray
            The Cholesky decomposition of `scale`.
        logdet : scalar
            The log of the determinant of `scale`.
        Notes
        -----
        This computation of ``logdet`` is equivalent to
        ``np.linalg.slogdet(scale)``.  It is ~2x faster though.
        """
        c_decomp = scipy.linalg.cholesky(scale, lower=True)
        logdet = 2 * np.sum(np.log(c_decomp.diagonal()))
        return c_decomp, logdet
wishart = wishart_gen()
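# Illustrative sketch only (not part of the public API): the Cholesky-based
# log-determinant used internally agrees with np.linalg.slogdet, and the mean
# of W_p(df, scale) is df * scale, as wishart.mean returns.
def _example_wishart_identities():
    scale = np.array([[2.0, 0.3], [0.3, 1.0]])
    _, logdet_chol = wishart._cholesky_logdet(scale)
    _, logdet_np = np.linalg.slogdet(scale)
    return np.allclose(logdet_chol, logdet_np), wishart.mean(df=3, scale=scale)  # True, 3 * scale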
class wishart_frozen(multi_rv_frozen):
    """
    Create a frozen Wishart distribution.
    Parameters
    ----------
    df : array_like
        Degrees of freedom of the distribution
    scale : array_like
        Scale matrix of the distribution
    seed : None or int or np.random.RandomState instance, optional
        This parameter defines the RandomState object to use for drawing
        random variates.
        If None (or np.random), the global np.random state is used.
        If integer, it is used to seed the local RandomState instance
        Default is None.
    """
    def __init__(self, df, scale, seed=None):
        self._dist = wishart_gen(seed)
        self.dim, self.df, self.scale = self._dist._process_parameters(
            df, scale)
        self.C, self.log_det_scale = self._dist._cholesky_logdet(self.scale)
    def logpdf(self, x):
        x = self._dist._process_quantiles(x, self.dim)
        out = self._dist._logpdf(x, self.dim, self.df, self.scale,
                                 self.log_det_scale, self.C)
        return _squeeze_output(out)
    def pdf(self, x):
        return np.exp(self.logpdf(x))
    def mean(self):
        out = self._dist._mean(self.dim, self.df, self.scale)
        return _squeeze_output(out)
    def mode(self):
        out = self._dist._mode(self.dim, self.df, self.scale)
        return _squeeze_output(out) if out is not None else out
    def var(self):
        out = self._dist._var(self.dim, self.df, self.scale)
        return _squeeze_output(out)
    def rvs(self, size=1, random_state=None):
        n, shape = self._dist._process_size(size)
        out = self._dist._rvs(n, shape, self.dim, self.df,
                              self.C, random_state)
        return _squeeze_output(out)
    def entropy(self):
        return self._dist._entropy(self.dim, self.df, self.log_det_scale)
# Set frozen generator docstrings from corresponding docstrings in
# Wishart and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs', 'entropy']:
    method = wishart_gen.__dict__[name]
    method_frozen = wishart_frozen.__dict__[name]
    method_frozen.__doc__ = doccer.docformat(
        method.__doc__, wishart_docdict_noparams)
    method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
from numpy import asarray_chkfinite, asarray
from scipy.linalg.misc import LinAlgError
from scipy.linalg.lapack import get_lapack_funcs
def _cho_inv_batch(a, check_finite=True):
    """
    Invert the matrices a_i, using a Cholesky factorization of A, where
    a_i resides in the last two dimensions of a and the other indices describe
    the index i.
    Overwrites the data in a.
    Parameters
    ----------
    a : array
        Array of matrices to invert, where the matrices themselves are stored
        in the last two dimensions.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
    Returns
    -------
    x : array
        Array of inverses of the matrices ``a_i``.
    See also
    --------
    scipy.linalg.cholesky : Cholesky factorization of a matrix
    """
    if check_finite:
        a1 = asarray_chkfinite(a)
    else:
        a1 = asarray(a)
    if len(a1.shape) < 2 or a1.shape[-2] != a1.shape[-1]:
        raise ValueError('expected square matrix in last two dimensions')
    potrf, potri = get_lapack_funcs(('potrf','potri'), (a1,))
    tril_idx = np.tril_indices(a.shape[-2], k=-1)
    triu_idx = np.triu_indices(a.shape[-2], k=1)
    for index in np.ndindex(a1.shape[:-2]):
        # Cholesky decomposition
        a1[index], info = potrf(a1[index], lower=True, overwrite_a=False,
                                clean=False)
        if info > 0:
            raise LinAlgError("%d-th leading minor not positive definite"
                              % info)
        if info < 0:
            raise ValueError('illegal value in %d-th argument of internal'
                             ' potrf' % -info)
        # Inversion
        a1[index], info = potri(a1[index], lower=True, overwrite_c=False)
        if info > 0:
            raise LinAlgError("the inverse could not be computed")
        if info < 0:
            raise ValueError('illegal value in %d-th argument of internal'
                             ' potri' % -info)
        # Make symmetric (dpotri only fills in the lower triangle)
        a1[index][triu_idx] = a1[index][tril_idx]
    return a1
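# Illustrative sketch, not part of the original SciPy source: a hypothetical
# check that the batched Cholesky inversion above agrees with
# ``np.linalg.inv`` on a stack of small SPD matrices.  A copy is passed
# because ``_cho_inv_batch`` overwrites its input in place.
def _demo_cho_inv_batch():
    a = np.array([[[2.0, 0.3], [0.3, 1.0]],
                  [[4.0, 1.0], [1.0, 3.0]]])
    inv_batch = _cho_inv_batch(a.copy())
    inv_ref = np.array([np.linalg.inv(m) for m in a])
    return np.allclose(inv_batch, inv_ref)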
class invwishart_gen(wishart_gen):
    r"""
    An inverse Wishart random variable.
    The `df` keyword specifies the degrees of freedom. The `scale` keyword
    specifies the scale matrix, which must be symmetric and positive definite.
    In this context, the scale matrix is often interpreted in terms of a
    multivariate normal covariance matrix.
    Methods
    -------
    ``pdf(x, df, scale)``
        Probability density function.
    ``logpdf(x, df, scale)``
        Log of the probability density function.
    ``rvs(df, scale, size=1, random_state=None)``
        Draw random samples from an inverse Wishart distribution.
    Parameters
    ----------
    x : array_like
        Quantiles, with the last axis of `x` denoting the components.
    %(_doc_default_callparams)s
    %(_doc_random_state)s
    Alternatively, the object may be called (as a function) to fix the degrees
    of freedom and scale parameters, returning a "frozen" inverse Wishart
    random variable:
    rv = invwishart(df=1, scale=1)
        - Frozen object with the same methods but holding the given
          degrees of freedom and scale fixed.
    See Also
    --------
    wishart
    Notes
    -----
    %(_doc_callparams_note)s
    The scale matrix `scale` must be a symmetric positive definite
    matrix. Singular matrices, including the symmetric positive semi-definite
    case, are not supported.
    The inverse Wishart distribution is often denoted
    .. math::
        W_p^{-1}(\nu, \Psi)
    where :math:`\nu` is the degrees of freedom and :math:`\Psi` is the
    :math:`p \times p` scale matrix.
    The probability density function for `invwishart` has support over positive
    definite matrices :math:`S`; if :math:`S \sim W^{-1}_p(\nu, \Sigma)`,
    then its PDF is given by:
    .. math::
        f(S) = \frac{|\Sigma|^\frac{\nu}{2}}{2^{ \frac{\nu p}{2} }
               |S|^{\frac{\nu + p + 1}{2}} \Gamma_p \left(\frac{\nu}{2} \right)}
               \exp\left( -tr(\Sigma S^{-1}) / 2 \right)
    If :math:`S \sim W_p^{-1}(\nu, \Psi)` (inverse Wishart) then
    :math:`S^{-1} \sim W_p(\nu, \Psi^{-1})` (Wishart).
    If the scale matrix is 1-dimensional and equal to one, then the inverse
    Wishart distribution :math:`W_1^{-1}(\nu, 1)` collapses to the
    inverse Gamma distribution with parameters shape = :math:`\frac{\nu}{2}`
    and scale = :math:`\frac{1}{2}`.
    .. versionadded:: 0.16.0
    References
    ----------
    .. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
           Wiley, 1983.
    .. [2] M.C. Jones, "Generating Inverse Wishart Matrices", Communications in
           Statistics - Simulation and Computation, vol. 14.2, pp.511-514, 1985.
    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> from scipy.stats import invwishart, invgamma
    >>> x = np.linspace(0.01, 1, 100)
    >>> iw = invwishart.pdf(x, df=6, scale=1)
    >>> iw[:3]
    array([  1.20546865e-15,   5.42497807e-06,   4.45813929e-03])
    >>> ig = invgamma.pdf(x, 6/2., scale=1./2)
    >>> ig[:3]
    array([  1.20546865e-15,   5.42497807e-06,   4.45813929e-03])
    >>> plt.plot(x, iw)
    The input quantiles can be any shape of array, as long as the last
    axis labels the components.
    """
    def __init__(self, seed=None):
        super(invwishart_gen, self).__init__(seed)
        self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
    def __call__(self, df=None, scale=None, seed=None):
        """
        Create a frozen inverse Wishart distribution.
        See `invwishart_frozen` for more information.
        """
        return invwishart_frozen(df, scale, seed)
    def _logpdf(self, x, dim, df, scale, log_det_scale):
        """
        Parameters
        ----------
        x : ndarray
            Points at which to evaluate the log of the probability
            density function.
        dim : int
            Dimension of the scale matrix
        df : int
            Degrees of freedom
        scale : ndarray
            Scale matrix
        log_det_scale : float
            Logarithm of the determinant of the scale matrix
        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'logpdf' instead.
        """
        log_det_x = np.zeros(x.shape[-1])
        #scale_x_inv = np.zeros(x.shape)
        x_inv = np.copy(x).T
        if dim > 1:
            _cho_inv_batch(x_inv)  # works in-place
        else:
            x_inv = 1./x_inv
        tr_scale_x_inv = np.zeros(x.shape[-1])
        for i in range(x.shape[-1]):
            C, lower = scipy.linalg.cho_factor(x[:,:,i], lower=True)
            log_det_x[i] = 2 * np.sum(np.log(C.diagonal()))
            #scale_x_inv[:,:,i] = scipy.linalg.cho_solve((C, True), scale).T
            tr_scale_x_inv[i] = np.dot(scale, x_inv[i]).trace()
        # Log PDF
        out = ((0.5 * df * log_det_scale - 0.5 * tr_scale_x_inv) -
               (0.5 * df * dim * _LOG_2 + 0.5 * (df + dim + 1) * log_det_x) -
               multigammaln(0.5*df, dim))
        return out
    def logpdf(self, x, df, scale):
        """
        Log of the inverse Wishart probability density function.
        Parameters
        ----------
        x : array_like
            Quantiles, with the last axis of `x` denoting the components.
            Each quantile must be a symmetric positive definite matrix.
        %(_doc_default_callparams)s
        Returns
        -------
        pdf : ndarray
            Log of the probability density function evaluated at `x`
        Notes
        -----
        %(_doc_callparams_note)s
        """
        dim, df, scale = self._process_parameters(df, scale)
        x = self._process_quantiles(x, dim)
        _, log_det_scale = self._cholesky_logdet(scale)
        out = self._logpdf(x, dim, df, scale, log_det_scale)
        return _squeeze_output(out)
    def pdf(self, x, df, scale):
        """
        Inverse Wishart probability density function.
        Parameters
        ----------
        x : array_like
            Quantiles, with the last axis of `x` denoting the components.
            Each quantile must be a symmetric positive definite matrix.
        %(_doc_default_callparams)s
        Returns
        -------
        pdf : ndarray
            Probability density function evaluated at `x`
        Notes
        -----
        %(_doc_callparams_note)s
        """
        return np.exp(self.logpdf(x, df, scale))
    def _mean(self, dim, df, scale):
        """
        Parameters
        ----------
        dim : int
            Dimension of the scale matrix
        %(_doc_default_callparams)s
        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'mean' instead.
        """
        if df > dim + 1:
            out = scale / (df - dim - 1)
        else:
            out = None
        return out
    def mean(self, df, scale):
        """
        Mean of the inverse Wishart distribution
        Only valid if the degrees of freedom are greater than the dimension of
        the scale matrix plus one.
        Parameters
        ----------
        %(_doc_default_callparams)s
        Returns
        -------
        mean : float or None
            The mean of the distribution
        """
        dim, df, scale = self._process_parameters(df, scale)
        out = self._mean(dim, df, scale)
        return _squeeze_output(out) if out is not None else out
    def _mode(self, dim, df, scale):
        """
        Parameters
        ----------
        dim : int
            Dimension of the scale matrix
        %(_doc_default_callparams)s
        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'mode' instead.
        """
        return scale / (df + dim + 1)
    def mode(self, df, scale):
        """
        Mode of the inverse Wishart distribution
        Parameters
        ----------
        %(_doc_default_callparams)s
        Returns
        -------
        mode : float
            The Mode of the distribution
        """
        dim, df, scale = self._process_parameters(df, scale)
        out = self._mode(dim, df, scale)
        return _squeeze_output(out)
    def _var(self, dim, df, scale):
        """
        Parameters
        ----------
        dim : int
            Dimension of the scale matrix
        %(_doc_default_callparams)s
        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'var' instead.
        """
        if df > dim + 3:
            var = (df - dim + 1) * scale**2
            diag = scale.diagonal()  # 1 x dim array
            var += (df - dim - 1) * np.outer(diag, diag)
            var /= (df - dim) * (df - dim - 1)**2 * (df - dim - 3)
        else:
            var = None
        return var
    def var(self, df, scale):
        """
        Variance of the inverse Wishart distribution
        Only valid if the degrees of freedom are greater than the dimension of
        the scale matrix plus three.
        Parameters
        ----------
        %(_doc_default_callparams)s
        Returns
        -------
        var : float
            The variance of the distribution
        """
        dim, df, scale = self._process_parameters(df, scale)
        out = self._var(dim, df, scale)
        return _squeeze_output(out) if out is not None else out
    def _rvs(self, n, shape, dim, df, C, random_state):
        """
        Parameters
        ----------
        n : integer
            Number of variates to generate
        shape : iterable
            Shape of the variates to generate
        dim : int
            Dimension of the scale matrix
        df : int
            Degrees of freedom
        C : ndarray
            Cholesky factorization of the scale matrix, lower triangular.
        %(_doc_random_state)s
        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'rvs' instead.
        """
        random_state = self._get_random_state(random_state)
        # Get random draws A such that A ~ W(df, I)
        A = super(invwishart_gen, self)._standard_rvs(n, shape, dim,
                                                      df, random_state)
        # Calculate SA = (CA)'^{-1} (CA)^{-1} ~ iW(df, scale)
        eye = np.eye(dim)
        trtrs = get_lapack_funcs(('trtrs'), (A,))
        for index in np.ndindex(A.shape[:-2]):
            # Calculate CA
            CA = np.dot(C, A[index])
            # Get (C A)^{-1} via triangular solver
            if dim > 1:
                CA, info = trtrs(CA, eye, lower=True)
                if info > 0:
                    raise LinAlgError("Singular matrix.")
                if info < 0:
                    raise ValueError('Illegal value in %d-th argument of'
                                     ' internal trtrs' % -info)
            else:
                CA = 1. / CA
            # Get SA
            A[index] = np.dot(CA.T, CA)
        return A
    def rvs(self, df, scale, size=1, random_state=None):
        """
        Draw random samples from an inverse Wishart distribution.
        Parameters
        ----------
        %(_doc_default_callparams)s
        size : integer or iterable of integers, optional
            Number of samples to draw (default 1).
        %(_doc_random_state)s
        Returns
        -------
        rvs : ndarray
            Random variates of shape (`size`) + (`dim`, `dim`), where `dim` is
            the dimension of the scale matrix.
        Notes
        -----
        %(_doc_callparams_note)s
        """
        n, shape = self._process_size(size)
        dim, df, scale = self._process_parameters(df, scale)
        # Invert the scale
        eye = np.eye(dim)
        L, lower = scipy.linalg.cho_factor(scale, lower=True)
        inv_scale = scipy.linalg.cho_solve((L, lower), eye)
        # Cholesky decomposition of inverted scale
        C = scipy.linalg.cholesky(inv_scale, lower=True)
        out = self._rvs(n, shape, dim, df, C, random_state)
        return _squeeze_output(out)
    def entropy(self):
        # Need to find reference for inverse Wishart entropy
        raise AttributeError
invwishart = invwishart_gen()
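# Illustrative sketch, not part of the original SciPy source: a hypothetical
# Monte Carlo sanity check that inverse Wishart draws have an empirical mean
# close to ``scale / (df - dim - 1)``, the closed form used by ``mean`` above.
def _demo_invwishart_mean():
    df = 10
    scale = np.array([[2.0, 0.3], [0.3, 1.0]])
    draws = invwishart.rvs(df, scale, size=20000, random_state=123)
    return np.allclose(draws.mean(axis=0), invwishart.mean(df, scale),
                       atol=0.05)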
class invwishart_frozen(multi_rv_frozen):
    def __init__(self, df, scale, seed=None):
        """
        Create a frozen inverse Wishart distribution.
        Parameters
        ----------
        df : array_like
            Degrees of freedom of the distribution
        scale : array_like
            Scale matrix of the distribution
        seed : None or int or np.random.RandomState instance, optional
            This parameter defines the RandomState object to use for drawing
            random variates.
            If None (or np.random), the global np.random state is used.
            If integer, it is used to seed the local RandomState instance
            Default is None.
        """
        self._dist = invwishart_gen(seed)
        self.dim, self.df, self.scale = self._dist._process_parameters(
            df, scale
        )
        # Get the determinant via Cholesky factorization
        C, lower = scipy.linalg.cho_factor(self.scale, lower=True)
        self.log_det_scale = 2 * np.sum(np.log(C.diagonal()))
        # Get the inverse using the Cholesky factorization
        eye = np.eye(self.dim)
        self.inv_scale = scipy.linalg.cho_solve((C, lower), eye)
        # Get the Cholesky factorization of the inverse scale
        self.C = scipy.linalg.cholesky(self.inv_scale, lower=True)
    def logpdf(self, x):
        x = self._dist._process_quantiles(x, self.dim)
        out = self._dist._logpdf(x, self.dim, self.df, self.scale,
                                 self.log_det_scale)
        return _squeeze_output(out)
    def pdf(self, x):
        return np.exp(self.logpdf(x))
    def mean(self):
        out = self._dist._mean(self.dim, self.df, self.scale)
        return _squeeze_output(out) if out is not None else out
    def mode(self):
        out = self._dist._mode(self.dim, self.df, self.scale)
        return _squeeze_output(out)
    def var(self):
        out = self._dist._var(self.dim, self.df, self.scale)
        return _squeeze_output(out) if out is not None else out
    def rvs(self, size=1, random_state=None):
        n, shape = self._dist._process_size(size)
        out = self._dist._rvs(n, shape, self.dim, self.df,
                              self.C, random_state)
        return _squeeze_output(out)
    def entropy(self):
        # Need to find reference for inverse Wishart entropy
        raise AttributeError
# Set frozen generator docstrings from corresponding docstrings in
# inverse Wishart and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs']:
    method = invwishart_gen.__dict__[name]
    method_frozen = invwishart_frozen.__dict__[name]
    method_frozen.__doc__ = doccer.docformat(
        method.__doc__, wishart_docdict_noparams)
    method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
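# Illustrative sketch, not part of the original SciPy source: freezing the
# parameters, as described in the class docstrings above, gives the same
# log-density as passing ``df`` and ``scale`` on every call.
def _demo_frozen_matches_generator():
    df = 5
    scale = np.array([[1.0, 0.2], [0.2, 2.0]])
    x = np.array([[1.5, 0.1], [0.1, 1.2]])  # symmetric positive definite
    frozen = invwishart(df=df, scale=scale)
    return np.allclose(frozen.logpdf(x), invwishart.logpdf(x, df, scale))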
 | 
	bsd-3-clause | 
| 
	RobertABT/heightmap | 
	build/matplotlib/examples/animation/old_animation/dynamic_image_wxagg2.py | 
	10 | 
	3037 | 
	#!/usr/bin/env python
"""
Copyright (C) 2003-2005 Jeremy O'Donoghue and others
License: This work is licensed under the PSF. A copy should be included
with this source code, and is also available at
http://www.python.org/psf/license.html
"""
import sys, time, os, gc
import matplotlib
matplotlib.use('WXAgg')
from matplotlib import rcParams
import numpy as npy
import matplotlib.cm as cm
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
from matplotlib.figure import Figure
from wx import *
TIMER_ID = NewId()
class PlotFigure(Frame):
    def __init__(self):
        Frame.__init__(self, None, -1, "Test embedded wxFigure")
        self.fig = Figure((5,4), 75)
        self.canvas = FigureCanvasWxAgg(self, -1, self.fig)
        self.toolbar = NavigationToolbar2Wx(self.canvas)
        self.toolbar.Realize()
        # On Windows, default frame size behaviour is incorrect
        # you don't need this under Linux
        tw, th = self.toolbar.GetSizeTuple()
        fw, fh = self.canvas.GetSizeTuple()
        self.toolbar.SetSize(Size(fw, th))
        # Create a figure manager to manage things
        # Now put all into a sizer
        sizer = BoxSizer(VERTICAL)
        # This way of adding to sizer allows resizing
        sizer.Add(self.canvas, 1, LEFT|TOP|GROW)
        # Best to allow the toolbar to resize!
        sizer.Add(self.toolbar, 0, GROW)
        self.SetSizer(sizer)
        self.Fit()
        EVT_TIMER(self, TIMER_ID, self.onTimer)
    def init_plot_data(self):
        # jdh you can add a subplot directly from the fig rather than
        # the fig manager
        a = self.fig.add_axes([0.075,0.1,0.75,0.85])
        cax = self.fig.add_axes([0.85,0.1,0.075,0.85])
        self.x = npy.empty((120,120))
        self.x.flat = npy.arange(120.0)*2*npy.pi/120.0
        self.y = npy.empty((120,120))
        self.y.flat = npy.arange(120.0)*2*npy.pi/100.0
        self.y = npy.transpose(self.y)
        z = npy.sin(self.x) + npy.cos(self.y)
        self.im = a.imshow( z, cmap=cm.jet)#, interpolation='nearest')
        self.fig.colorbar(self.im,cax=cax,orientation='vertical')
    def GetToolBar(self):
        # You will need to override GetToolBar if you are using an
        # unmanaged toolbar in your frame
        return self.toolbar
    def onTimer(self, evt):
        self.x += npy.pi/15
        self.y += npy.pi/20
        z = npy.sin(self.x) + npy.cos(self.y)
        self.im.set_array(z)
        self.canvas.draw()
        #self.canvas.gui_repaint()  # jdh wxagg_draw calls this already
    def onEraseBackground(self, evt):
        # this is supposed to prevent redraw flicker on some X servers...
        pass
if __name__ == '__main__':
    app = PySimpleApp()
    frame = PlotFigure()
    frame.init_plot_data()
    # Initialise the timer - wxPython requires this to be connected to
    # the receiving event handler
    t = Timer(frame, TIMER_ID)
    t.Start(200)
    frame.Show()
    app.MainLoop()
 | 
	mit | 
| 
	walterreade/scikit-learn | 
	sklearn/utils/tests/test_class_weight.py | 
	50 | 
	13151 | 
	import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_blobs
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
def test_compute_class_weight():
    # Test (and demo) compute_class_weight.
    y = np.asarray([2, 2, 2, 3, 3, 4])
    classes = np.unique(y)
    cw = assert_warns(DeprecationWarning,
                      compute_class_weight, "auto", classes, y)
    assert_almost_equal(cw.sum(), classes.shape)
    assert_true(cw[0] < cw[1] < cw[2])
    cw = compute_class_weight("balanced", classes, y)
    # total effect of samples is preserved
    class_counts = np.bincount(y)[2:]
    assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
    assert_true(cw[0] < cw[1] < cw[2])
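def test_compute_class_weight_balanced_formula():
    # Illustrative sketch, not part of the original test module: the
    # "balanced" heuristic follows n_samples / (n_classes * bincount(y)),
    # which is why the dot product with the class counts above recovers
    # the number of samples.
    y = np.asarray([2, 2, 2, 3, 3, 4])
    classes = np.unique(y)
    cw = compute_class_weight("balanced", classes, y)
    counts = np.bincount(y)[classes].astype(np.float64)
    expected = y.shape[0] / (len(classes) * counts)
    assert_array_almost_equal(cw, expected)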
def test_compute_class_weight_not_present():
    # Raise error when y does not contain all class labels
    classes = np.arange(4)
    y = np.asarray([0, 0, 0, 1, 1, 2])
    assert_raises(ValueError, compute_class_weight, "auto", classes, y)
    assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
    # Raise error when y has items not in classes
    classes = np.arange(2)
    assert_raises(ValueError, compute_class_weight, "auto", classes, y)
    assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
    assert_raises(ValueError, compute_class_weight, {0: 1., 1: 2.}, classes, y)
def test_compute_class_weight_dict():
    classes = np.arange(3)
    class_weights = {0: 1.0, 1: 2.0, 2: 3.0}
    y = np.asarray([0, 0, 1, 2])
    cw = compute_class_weight(class_weights, classes, y)
    # When the user specifies class weights, compute_class_weights should just
    # return them.
    assert_array_almost_equal(np.asarray([1.0, 2.0, 3.0]), cw)
    # When a class weight is specified that isn't in classes, a ValueError
    # should get raised
    msg = 'Class label 4 not present.'
    class_weights = {0: 1.0, 1: 2.0, 2: 3.0, 4: 1.5}
    assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
                         classes, y)
    msg = 'Class label -1 not present.'
    class_weights = {-1: 5.0, 0: 1.0, 1: 2.0, 2: 3.0}
    assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
                         classes, y)
def test_compute_class_weight_invariance():
    # Test that results with class_weight="balanced" is invariant wrt
    # class imbalance if the number of samples is identical.
    # The test uses a balanced two class dataset with 100 datapoints.
    # It creates three versions, one where class 1 is duplicated
    # resulting in 150 points of class 1 and 50 of class 0,
    # one where there are 50 points in class 1 and 150 in class 0,
    # and one where there are 100 points of each class (this one is balanced
    # again).
    # With balancing class weights, all three should give the same model.
    X, y = make_blobs(centers=2, random_state=0)
    # create dataset where class 1 is duplicated twice
    X_1 = np.vstack([X] + [X[y == 1]] * 2)
    y_1 = np.hstack([y] + [y[y == 1]] * 2)
    # create dataset where class 0 is duplicated twice
    X_0 = np.vstack([X] + [X[y == 0]] * 2)
    y_0 = np.hstack([y] + [y[y == 0]] * 2)
    # duplicate everything
    X_ = np.vstack([X] * 2)
    y_ = np.hstack([y] * 2)
    # results should be identical
    logreg1 = LogisticRegression(class_weight="balanced").fit(X_1, y_1)
    logreg0 = LogisticRegression(class_weight="balanced").fit(X_0, y_0)
    logreg = LogisticRegression(class_weight="balanced").fit(X_, y_)
    assert_array_almost_equal(logreg1.coef_, logreg0.coef_)
    assert_array_almost_equal(logreg.coef_, logreg0.coef_)
def test_compute_class_weight_auto_negative():
    # Test compute_class_weight when labels are negative
    # Test with balanced class labels.
    classes = np.array([-2, -1, 0])
    y = np.asarray([-1, -1, 0, 0, -2, -2])
    cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
                      classes, y)
    assert_almost_equal(cw.sum(), classes.shape)
    assert_equal(len(cw), len(classes))
    assert_array_almost_equal(cw, np.array([1., 1., 1.]))
    cw = compute_class_weight("balanced", classes, y)
    assert_equal(len(cw), len(classes))
    assert_array_almost_equal(cw, np.array([1., 1., 1.]))
    # Test with unbalanced class labels.
    y = np.asarray([-1, 0, 0, -2, -2, -2])
    cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
                      classes, y)
    assert_almost_equal(cw.sum(), classes.shape)
    assert_equal(len(cw), len(classes))
    assert_array_almost_equal(cw, np.array([0.545, 1.636, 0.818]), decimal=3)
    cw = compute_class_weight("balanced", classes, y)
    assert_equal(len(cw), len(classes))
    class_counts = np.bincount(y + 2)
    assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
    assert_array_almost_equal(cw, [2. / 3, 2., 1.])
def test_compute_class_weight_auto_unordered():
    # Test compute_class_weight when classes are unordered
    classes = np.array([1, 0, 3])
    y = np.asarray([1, 0, 0, 3, 3, 3])
    cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
                      classes, y)
    assert_almost_equal(cw.sum(), classes.shape)
    assert_equal(len(cw), len(classes))
    assert_array_almost_equal(cw, np.array([1.636, 0.818, 0.545]), decimal=3)
    cw = compute_class_weight("balanced", classes, y)
    class_counts = np.bincount(y)[classes]
    assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
    assert_array_almost_equal(cw, [2., 1., 2. / 3])
def test_compute_sample_weight():
    # Test (and demo) compute_sample_weight.
    # Test with balanced classes
    y = np.asarray([1, 1, 1, 2, 2, 2])
    sample_weight = assert_warns(DeprecationWarning,
                                 compute_sample_weight, "auto", y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
    sample_weight = compute_sample_weight("balanced", y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
    # Test with user-defined weights
    sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
    assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.])
    # Test with column vector of balanced classes
    y = np.asarray([[1], [1], [1], [2], [2], [2]])
    sample_weight = assert_warns(DeprecationWarning,
                                 compute_sample_weight, "auto", y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
    sample_weight = compute_sample_weight("balanced", y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
    # Test with unbalanced classes
    y = np.asarray([1, 1, 1, 2, 2, 2, 3])
    sample_weight = assert_warns(DeprecationWarning,
                                 compute_sample_weight, "auto", y)
    expected_auto = np.asarray([.6, .6, .6, .6, .6, .6, 1.8])
    assert_array_almost_equal(sample_weight, expected_auto)
    sample_weight = compute_sample_weight("balanced", y)
    expected_balanced = np.array([0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 2.3333])
    assert_array_almost_equal(sample_weight, expected_balanced, decimal=4)
    # Test with `None` weights
    sample_weight = compute_sample_weight(None, y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.])
    # Test with multi-output of balanced classes
    y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
    sample_weight = assert_warns(DeprecationWarning,
                                 compute_sample_weight, "auto", y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
    sample_weight = compute_sample_weight("balanced", y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
    # Test with multi-output with user-defined weights
    y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
    sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
    assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.])
    # Test with multi-output of unbalanced classes
    y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
    sample_weight = assert_warns(DeprecationWarning,
                                 compute_sample_weight, "auto", y)
    assert_array_almost_equal(sample_weight, expected_auto ** 2)
    sample_weight = compute_sample_weight("balanced", y)
    assert_array_almost_equal(sample_weight, expected_balanced ** 2, decimal=3)
def test_compute_sample_weight_with_subsample():
    # Test compute_sample_weight with subsamples specified.
    # Test with balanced classes and all samples present
    y = np.asarray([1, 1, 1, 2, 2, 2])
    sample_weight = assert_warns(DeprecationWarning,
                                 compute_sample_weight, "auto", y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
    sample_weight = compute_sample_weight("balanced", y, range(6))
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
    # Test with column vector of balanced classes and all samples present
    y = np.asarray([[1], [1], [1], [2], [2], [2]])
    sample_weight = assert_warns(DeprecationWarning,
                                 compute_sample_weight, "auto", y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
    sample_weight = compute_sample_weight("balanced", y, range(6))
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
    # Test with a subsample
    y = np.asarray([1, 1, 1, 2, 2, 2])
    sample_weight = assert_warns(DeprecationWarning,
                                 compute_sample_weight, "auto", y, range(4))
    assert_array_almost_equal(sample_weight, [.5, .5, .5, 1.5, 1.5, 1.5])
    sample_weight = compute_sample_weight("balanced", y, range(4))
    assert_array_almost_equal(sample_weight, [2. / 3, 2. / 3,
                                              2. / 3, 2., 2., 2.])
    # Test with a bootstrap subsample
    y = np.asarray([1, 1, 1, 2, 2, 2])
    sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
                                 "auto", y, [0, 1, 1, 2, 2, 3])
    expected_auto = np.asarray([1 / 3., 1 / 3., 1 / 3., 5 / 3., 5 / 3., 5 / 3.])
    assert_array_almost_equal(sample_weight, expected_auto)
    sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
    expected_balanced = np.asarray([0.6, 0.6, 0.6, 3., 3., 3.])
    assert_array_almost_equal(sample_weight, expected_balanced)
    # Test with a bootstrap subsample for multi-output
    y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
    sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
                                 "auto", y, [0, 1, 1, 2, 2, 3])
    assert_array_almost_equal(sample_weight, expected_auto ** 2)
    sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
    assert_array_almost_equal(sample_weight, expected_balanced ** 2)
    # Test with a missing class
    y = np.asarray([1, 1, 1, 2, 2, 2, 3])
    sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
                                 "auto", y, range(6))
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
    sample_weight = compute_sample_weight("balanced", y, range(6))
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
    # Test with a missing class for multi-output
    y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
    sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
                                 "auto", y, range(6))
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
    sample_weight = compute_sample_weight("balanced", y, range(6))
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
def test_compute_sample_weight_errors():
    # Test compute_sample_weight raises errors expected.
    # Invalid preset string
    y = np.asarray([1, 1, 1, 2, 2, 2])
    y_ = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
    assert_raises(ValueError, compute_sample_weight, "ni", y)
    assert_raises(ValueError, compute_sample_weight, "ni", y, range(4))
    assert_raises(ValueError, compute_sample_weight, "ni", y_)
    assert_raises(ValueError, compute_sample_weight, "ni", y_, range(4))
    # Not "auto" for subsample
    assert_raises(ValueError,
                  compute_sample_weight, {1: 2, 2: 1}, y, range(4))
    # Not a list or preset for multi-output
    assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y_)
    # Incorrect length list for multi-output
    assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], y_)
 | 
	bsd-3-clause | 
| 
	jjhelmus/artview | 
	docs/sphinxext/numpydoc/tests/test_docscrape.py | 
	3 | 
	17864 | 
	# -*- encoding:utf-8 -*-
from __future__ import division, absolute_import, print_function
import sys, textwrap
from numpydoc.docscrape import NumpyDocString, FunctionDoc, ClassDoc
from numpydoc.docscrape_sphinx import SphinxDocString, SphinxClassDoc
from nose.tools import *
doc_txt = '''\
  numpy.multivariate_normal(mean, cov, shape=None, spam=None)
  Draw values from a multivariate normal distribution with specified
  mean and covariance.
  The multivariate normal or Gaussian distribution is a generalisation
  of the one-dimensional normal distribution to higher dimensions.
  Parameters
  ----------
  mean : (N,) ndarray
      Mean of the N-dimensional distribution.
      .. math::
         (1+2+3)/3
  cov : (N, N) ndarray
      Covariance matrix of the distribution.
  shape : tuple of ints
      Given a shape of, for example, (m,n,k), m*n*k samples are
      generated, and packed in an m-by-n-by-k arrangement.  Because
      each sample is N-dimensional, the output shape is (m,n,k,N).
  Returns
  -------
  out : ndarray
      The drawn samples, arranged according to `shape`.  If the
      shape given is (m,n,...), then the shape of `out` is is
      (m,n,...,N).
      In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
      value drawn from the distribution.
  Other Parameters
  ----------------
  spam : parrot
      A parrot off its mortal coil.
  Raises
  ------
  RuntimeError
      Some error
  Warns
  -----
  RuntimeWarning
      Some warning
  Warnings
  --------
  Certain warnings apply.
  Notes
  -----
  Instead of specifying the full covariance matrix, popular
  approximations include:
    - Spherical covariance (`cov` is a multiple of the identity matrix)
    - Diagonal covariance (`cov` has non-negative elements only on the diagonal)
  This geometrical property can be seen in two dimensions by plotting
  generated data-points:
  >>> mean = [0,0]
  >>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
  >>> x,y = multivariate_normal(mean,cov,5000).T
  >>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
  Note that the covariance matrix must be symmetric and non-negative
  definite.
  References
  ----------
  .. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
         Processes," 3rd ed., McGraw-Hill Companies, 1991
  .. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
         2nd ed., Wiley, 2001.
  See Also
  --------
  some, other, funcs
  otherfunc : relationship
  Examples
  --------
  >>> mean = (1,2)
  >>> cov = [[1,0],[1,0]]
  >>> x = multivariate_normal(mean,cov,(3,3))
  >>> print x.shape
  (3, 3, 2)
  The following is probably true, given that 0.6 is roughly twice the
  standard deviation:
  >>> print list( (x[0,0,:] - mean) < 0.6 )
  [True, True]
  .. index:: random
     :refguide: random;distributions, random;gauss
  '''
doc = NumpyDocString(doc_txt)
def test_signature():
    assert doc['Signature'].startswith('numpy.multivariate_normal(')
    assert doc['Signature'].endswith('spam=None)')
def test_summary():
    assert doc['Summary'][0].startswith('Draw values')
    assert doc['Summary'][-1].endswith('covariance.')
def test_extended_summary():
    assert doc['Extended Summary'][0].startswith('The multivariate normal')
def test_parameters():
    assert_equal(len(doc['Parameters']), 3)
    assert_equal([n for n,_,_ in doc['Parameters']], ['mean','cov','shape'])
    arg, arg_type, desc = doc['Parameters'][1]
    assert_equal(arg_type, '(N,N) ndarray')
    assert desc[0].startswith('Covariance matrix')
    assert doc['Parameters'][0][-1][-2] == '   (1+2+3)/3'
def test_other_parameters():
    assert_equal(len(doc['Other Parameters']), 1)
    assert_equal([n for n,_,_ in doc['Other Parameters']], ['spam'])
    arg, arg_type, desc = doc['Other Parameters'][0]
    assert_equal(arg_type, 'parrot')
    assert desc[0].startswith('A parrot off its mortal coil')
def test_returns():
    assert_equal(len(doc['Returns']), 1)
    arg, arg_type, desc = doc['Returns'][0]
    assert_equal(arg, 'out')
    assert_equal(arg_type, 'ndarray')
    assert desc[0].startswith('The drawn samples')
    assert desc[-1].endswith('distribution.')
def test_notes():
    assert doc['Notes'][0].startswith('Instead')
    assert doc['Notes'][-1].endswith('definite.')
    assert_equal(len(doc['Notes']), 17)
def test_references():
    assert doc['References'][0].startswith('..')
    assert doc['References'][-1].endswith('2001.')
def test_examples():
    assert doc['Examples'][0].startswith('>>>')
    assert doc['Examples'][-1].endswith('True]')
def test_index():
    assert_equal(doc['index']['default'], 'random')
    assert_equal(len(doc['index']), 2)
    assert_equal(len(doc['index']['refguide']), 2)
def non_blank_line_by_line_compare(a,b):
    a = textwrap.dedent(a)
    b = textwrap.dedent(b)
    a = [l for l in a.split('\n') if l.strip()]
    b = [l for l in b.split('\n') if l.strip()]
    for n,line in enumerate(a):
        if not line == b[n]:
            raise AssertionError("Lines %s of a and b differ: "
                                 "\n>>> %s\n<<< %s\n" %
                                 (n,line,b[n]))
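def test_non_blank_line_by_line_compare():
    # Illustrative sketch, not part of the original test suite: the helper
    # above skips blank lines entirely, so only content-bearing lines are
    # compared, which is what the expected-output tests below rely on.
    non_blank_line_by_line_compare("a\n\nb\n", "a\nb")
    assert_raises(AssertionError,
                  non_blank_line_by_line_compare, "a\nb", "a\nc")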
def test_str():
    non_blank_line_by_line_compare(str(doc),
"""numpy.multivariate_normal(mean, cov, shape=None, spam=None)
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
Parameters
----------
mean : (N,) ndarray
    Mean of the N-dimensional distribution.
    .. math::
       (1+2+3)/3
cov : (N, N) ndarray
    Covariance matrix of the distribution.
shape : tuple of ints
    Given a shape of, for example, (m,n,k), m*n*k samples are
    generated, and packed in an m-by-n-by-k arrangement.  Because
    each sample is N-dimensional, the output shape is (m,n,k,N).
Returns
-------
out : ndarray
    The drawn samples, arranged according to `shape`.  If the
    shape given is (m,n,...), then the shape of `out` is is
    (m,n,...,N).
    In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
    value drawn from the distribution.
Other Parameters
----------------
spam : parrot
    A parrot off its mortal coil.
Raises
------
RuntimeError : 
    Some error
Warns
-----
RuntimeWarning : 
    Some warning
Warnings
--------
Certain warnings apply.
See Also
--------
`some`_, `other`_, `funcs`_
`otherfunc`_
    relationship
Notes
-----
Instead of specifying the full covariance matrix, popular
approximations include:
  - Spherical covariance (`cov` is a multiple of the identity matrix)
  - Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
References
----------
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
       Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
       2nd ed., Wiley, 2001.
Examples
--------
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print x.shape
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print list( (x[0,0,:] - mean) < 0.6 )
[True, True]
.. index:: random
   :refguide: random;distributions, random;gauss""")
def test_sphinx_str():
    sphinx_doc = SphinxDocString(doc_txt)
    non_blank_line_by_line_compare(str(sphinx_doc),
"""
.. index:: random
   single: random;distributions, random;gauss
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
:Parameters:
    **mean** : (N,) ndarray
        Mean of the N-dimensional distribution.
        .. math::
           (1+2+3)/3
    **cov** : (N,N) ndarray
        Covariance matrix of the distribution.
    **shape** : tuple of ints
        Given a shape of, for example, (m,n,k), m*n*k samples are
        generated, and packed in an m-by-n-by-k arrangement.  Because
        each sample is N-dimensional, the output shape is (m,n,k,N).
:Returns:
    **out** : ndarray
        The drawn samples, arranged according to `shape`.  If the
        shape given is (m,n,...), then the shape of `out` is is
        (m,n,...,N).
        
        In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
        value drawn from the distribution.
:Other Parameters:
    **spam** : parrot
        A parrot off its mortal coil.
 
:Raises:
    **RuntimeError** : 
        Some error
:Warns:
    **RuntimeWarning** : 
        Some warning
.. warning::
    Certain warnings apply.
.. seealso::
    
    :obj:`some`, :obj:`other`, :obj:`funcs`
    
    :obj:`otherfunc`
        relationship
    
.. rubric:: Notes
Instead of specifying the full covariance matrix, popular
approximations include:
  - Spherical covariance (`cov` is a multiple of the identity matrix)
  - Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
.. rubric:: References
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
       Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
       2nd ed., Wiley, 2001.
.. only:: latex
   [1]_, [2]_
.. rubric:: Examples
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print x.shape
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print list( (x[0,0,:] - mean) < 0.6 )
[True, True]
""")
       
doc2 = NumpyDocString("""
    Returns array of indices of the maximum values of along the given axis.
    Parameters
    ----------
    a : {array_like}
        Array to look in.
    axis : {None, integer}
        If None, the index is into the flattened array, otherwise along
        the specified axis""")
def test_parameters_without_extended_description():
    assert_equal(len(doc2['Parameters']), 2)
doc3 = NumpyDocString("""
    my_signature(*params, **kwds)
    Return this and that.
    """)
def test_escape_stars():
    signature = str(doc3).split('\n')[0]
    assert_equal(signature, 'my_signature(\*params, \*\*kwds)')
doc4 = NumpyDocString(
    """a.conj()
    Return an array with all complex-valued elements conjugated.""")
def test_empty_extended_summary():
    assert_equal(doc4['Extended Summary'], [])
doc5 = NumpyDocString(
    """
    a.something()
    Raises
    ------
    LinAlgException
        If array is singular.
    Warns
    -----
    SomeWarning
        If needed
    """)
def test_raises():
    assert_equal(len(doc5['Raises']), 1)
    name,_,desc = doc5['Raises'][0]
    assert_equal(name,'LinAlgException')
    assert_equal(desc,['If array is singular.'])
def test_warns():
    assert_equal(len(doc5['Warns']), 1)
    name,_,desc = doc5['Warns'][0]
    assert_equal(name,'SomeWarning')
    assert_equal(desc,['If needed'])
def test_see_also():
    doc6 = NumpyDocString(
    """
    z(x,theta)
    See Also
    --------
    func_a, func_b, func_c
    func_d : some equivalent func
    foo.func_e : some other func over
             multiple lines
    func_f, func_g, :meth:`func_h`, func_j,
    func_k
    :obj:`baz.obj_q`
    :class:`class_j`: fubar
        foobar
    """)
    assert len(doc6['See Also']) == 12
    for func, desc, role in doc6['See Also']:
        if func in ('func_a', 'func_b', 'func_c', 'func_f',
                    'func_g', 'func_h', 'func_j', 'func_k', 'baz.obj_q'):
            assert(not desc)
        else:
            assert(desc)
        if func == 'func_h':
            assert role == 'meth'
        elif func == 'baz.obj_q':
            assert role == 'obj'
        elif func == 'class_j':
            assert role == 'class'
        else:
            assert role is None
        if func == 'func_d':
            assert desc == ['some equivalent func']
        elif func == 'foo.func_e':
            assert desc == ['some other func over', 'multiple lines']
        elif func == 'class_j':
            assert desc == ['fubar', 'foobar']
def test_see_also_print():
    class Dummy(object):
        """
        See Also
        --------
        func_a, func_b
        func_c : some relationship
                 goes here
        func_d
        """
        pass
    obj = Dummy()
    s = str(FunctionDoc(obj, role='func'))
    assert(':func:`func_a`, :func:`func_b`' in s)
    assert('    some relationship' in s)
    assert(':func:`func_d`' in s)
doc7 = NumpyDocString("""
        Doc starts on second line.
        """)
def test_empty_first_line():
    assert doc7['Summary'][0].startswith('Doc starts')
def test_no_summary():
    str(SphinxDocString("""
    Parameters
    ----------"""))
def test_unicode():
    doc = SphinxDocString("""
    öäöäöäöäöåååå
    öäöäöäööäååå
    Parameters
    ----------
    ååå : äää
        ööö
    Returns
    -------
    ååå : ööö
        äää
    """)
    assert isinstance(doc['Summary'][0], str)
    if sys.version_info[0] >= 3:
        assert doc['Summary'][0] == u'öäöäöäöäöåååå'
    else:
        assert doc['Summary'][0] == u'öäöäöäöäöåååå'.encode('utf-8')
def test_plot_examples():
    cfg = dict(use_plots=True)
    doc = SphinxDocString("""
    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> plt.plot([1,2,3],[4,5,6])
    >>> plt.show()
    """, config=cfg)
    assert 'plot::' in str(doc), str(doc)
    doc = SphinxDocString("""
    Examples
    --------
    .. plot::
    
       import matplotlib.pyplot as plt
       plt.plot([1,2,3],[4,5,6])
       plt.show()
    """, config=cfg)
    assert str(doc).count('plot::') == 1, str(doc)
def test_class_members():
    class Dummy(object):
        """
        Dummy class.
        """
        def spam(self, a, b):
            """Spam\n\nSpam spam."""
            pass
        def ham(self, c, d):
            """Cheese\n\nNo cheese."""
            pass
        @property
        def spammity(self):
            """Spammity index"""
            return 0.95
        class Ignorable(object):
            """local class, to be ignored"""
            pass
    for cls in (ClassDoc, SphinxClassDoc):
        doc = cls(Dummy, config=dict(show_class_members=False))
        assert 'Methods' not in str(doc), (cls, str(doc))
        assert 'spam' not in str(doc), (cls, str(doc))
        assert 'ham' not in str(doc), (cls, str(doc))
        assert 'spammity' not in str(doc), (cls, str(doc))
        assert 'Spammity index' not in str(doc), (cls, str(doc))
        doc = cls(Dummy, config=dict(show_class_members=True))
        assert 'Methods' in str(doc), (cls, str(doc))
        assert 'spam' in str(doc), (cls, str(doc))
        assert 'ham' in str(doc), (cls, str(doc))
        assert 'spammity' in str(doc), (cls, str(doc))
        if cls is SphinxClassDoc:
            assert '.. autosummary::' in str(doc), str(doc)
        else:
            assert 'Spammity index' in str(doc), str(doc)
def test_duplicate_signature():
    # Duplicate function signatures occur e.g. in ufuncs, when the
    # automatic mechanism adds one, and a more detailed comes from the
    # docstring itself.
    doc = NumpyDocString(
    """
    z(x1, x2)
    z(a, theta)
    """)
    assert doc['Signature'].strip() == 'z(a, theta)'
class_doc_txt = """
    Foo
    Parameters
    ----------
    f : callable ``f(t, y, *f_args)``
        Aaa.
    jac : callable ``jac(t, y, *jac_args)``
        Bbb.
    Attributes
    ----------
    t : float
        Current time.
    y : ndarray
        Current variable values.
    Methods
    -------
    a
    b
    c
    Examples
    --------
    For usage examples, see `ode`.
"""
def test_class_members_doc():
    doc = ClassDoc(None, class_doc_txt)
    non_blank_line_by_line_compare(str(doc),
    """
    Foo
    Parameters
    ----------
    f : callable ``f(t, y, *f_args)``
        Aaa.
    jac : callable ``jac(t, y, *jac_args)``
        Bbb.
    Examples
    --------
    For usage examples, see `ode`.
    Attributes
    ----------
    t : float
        Current time.
    y : ndarray
        Current variable values.
    Methods
    -------
    a : 
    b : 
    c : 
    .. index:: 
    """)
def test_class_members_doc_sphinx():
    doc = SphinxClassDoc(None, class_doc_txt)
    non_blank_line_by_line_compare(str(doc),
    """
    Foo
    :Parameters:
        **f** : callable ``f(t, y, *f_args)``
            Aaa.
        **jac** : callable ``jac(t, y, *jac_args)``
            Bbb.
    .. rubric:: Examples
    For usage examples, see `ode`.
    .. rubric:: Attributes
    ===  ==========
      t  (float) Current time.  
      y  (ndarray) Current variable values.  
    ===  ==========
    .. rubric:: Methods
    ===  ==========
      a    
      b    
      c    
    ===  ==========
    """)
if __name__ == "__main__":
    import nose
    nose.run()
 | 
	bsd-3-clause | 
| 
	priorknowledge/loom | 
	examples/fox/main.py | 
	1 | 
	8590 | 
	# Copyright (c) 2014, Salesforce.com, Inc.  All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
#   notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
#   notice, this list of conditions and the following disclaimer in the
#   documentation and/or other materials provided with the distribution.
# - Neither the name of Salesforce.com nor the names of its contributors
#   may be used to endorse or promote products derived from this
#   software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import csv
import shutil
import random
from StringIO import StringIO
import numpy
import numpy.random
import scipy
import scipy.misc
import scipy.ndimage
from matplotlib import pyplot
from distributions.dbg.random import sample_discrete
from distributions.io.stream import open_compressed
import loom.tasks
import loom.query
import loom.preql
import loom.store
import loom.datasets
from loom.util import csv_reader
import parsable
parsable = parsable.Parsable()
NAME = 'fox'
ROOT = os.path.dirname(os.path.abspath(__file__))
SCHEMA = os.path.join(ROOT, 'schema.json')
DATA = os.path.join(ROOT, 'data')
RESULTS = os.path.join(ROOT, 'results')
SAMPLES = os.path.join(DATA, 'samples.csv.gz')
IMAGE = scipy.misc.imread(os.path.join(ROOT, 'fox.png'))
ROW_COUNT = 10000
PASSES = 10
EMPTY_GROUP_COUNT = 10
SIMILAR = os.path.join(DATA, 'cluster_labels.csv.gz')
X_SCALE = 2.0 / (IMAGE.shape[0] - 1)
Y_SCALE = 2.0 / (IMAGE.shape[1] - 1)
for dirname in [DATA, RESULTS]:
    if not os.path.exists(dirname):
        os.makedirs(dirname)
def to_image_coordinates(loom_x, loom_y):
    x = int(round((loom_x + 1.0) / X_SCALE))
    y = int(round((loom_y + 1.0) / Y_SCALE))
    return x, y
def to_loom_coordinates(image_x, image_y):
    x = image_x * X_SCALE - 1.0
    y = image_y * Y_SCALE - 1.0
    return x, y
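# Illustrative sketch, not part of the original example: image pixel
# coordinates survive a round trip through loom coordinates, because the two
# transforms above are exact inverses up to rounding.  The helper name is
# hypothetical and is never called by the pipeline below.
def check_coordinate_round_trip():
    pixels = [(0, 0), (1, 2), (IMAGE.shape[0] - 1, IMAGE.shape[1] - 1)]
    for image_x, image_y in pixels:
        loom_x, loom_y = to_loom_coordinates(image_x, image_y)
        assert to_image_coordinates(loom_x, loom_y) == (image_x, image_y)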
def sample_from_image(image, row_count):
    image = -1.0 * image
    image -= image.min()
    x_pmf = image.sum(axis=1)
    y_pmfs = image.copy()
    for y_pmf in y_pmfs:
        y_pmf /= (y_pmf.sum() + 1e-8)
    for _ in xrange(row_count):
        x = sample_discrete(x_pmf)
        y = sample_discrete(y_pmfs[x])
        x += numpy.random.random() - 0.5
        y += numpy.random.random() - 0.5
        yield to_loom_coordinates(x, y)
def synthesize_search(name, image_pos):
    shape = IMAGE.shape
    image = IMAGE.reshape(shape[0], shape[1], 1).repeat(3, 2)
    image[image_pos] = [0, 255, 0]
    with csv_reader(SAMPLES) as reader:
        rows = list(reader)[1:]
        rows = [map(float, r) for r in rows]
    root = loom.store.get_paths(name)['root']
    with loom.preql.get_server(root) as server:
        x, y = to_loom_coordinates(*image_pos)
        search = server.search((str(x), str(y)))
    search = csv.reader(StringIO(search))
    search.next()
    for row_id, score in search:
        score = numpy.exp(float(score))
        if score < 1.:
            return image
        row_id = int(row_id.split(':')[1])
        sample_x, sample_y = rows[row_id]
        x, y = to_image_coordinates(sample_x, sample_y)
        image[x, y] = [255 * (1 - 1/score), 0, 0]
    return image
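# Cluster a random subset of the sampled points around randomly chosen seed
# rows via loom's PreQL server, and paint each point with its cluster color.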
def synthesize_clusters(name, sample_count, cluster_count, pixel_count):
    with csv_reader(SAMPLES) as reader:
        reader.next()
        samples = map(tuple, reader)
        pts = random.sample(samples, sample_count)
        samples = random.sample(samples, pixel_count)
    root = loom.store.get_paths(name)['root']
    with loom.preql.get_server(root) as server:
        sample_labels = server.cluster(
            rows_to_cluster=samples,
            seed_rows=pts,
            cluster_count=cluster_count)
    labels = set(zip(*sample_labels)[0])
    label_count = max(labels) + 1
    shape = IMAGE.shape
    image = IMAGE.reshape(shape[0], shape[1], 1).repeat(3, 2)
    colors = pyplot.cm.Set1(numpy.linspace(0, 1, label_count))
    colors = (255 * colors[:, :3]).astype(numpy.uint8)
    for label, sample in sample_labels:
        x, y = to_image_coordinates(float(sample[0]), float(sample[1]))
        image[x, y] = colors[label]
    return image
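# Score every pixel location under the learned model and render the
# exponentiated scores as an 8-bit image (denser regions appear darker).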
def synthesize_image(name):
    print 'synthesizing image'
    width, height = IMAGE.shape
    image = numpy.zeros((width, height))
    root = loom.store.get_paths(name)['root']
    with loom.query.get_server(root) as server:
        for x in xrange(width):
            for y in xrange(height):
                xy = to_loom_coordinates(x, y)
                image[x, y] = server.score(xy)
    numpy.exp(image, out=image)
    image /= image.max()
    image -= 1.0
    image *= -255
    return image.astype(numpy.uint8)
def visualize_dataset(samples):
    width, height = IMAGE.shape
    image = numpy.zeros((width, height))
    for x, y in samples:
        x, y = to_image_coordinates(x, y)
        image[x, y] += 1
    image = scipy.ndimage.gaussian_filter(image, sigma=1)
    image *= -255.0 / image.max()
    image -= image.min()
    return image.astype(numpy.uint8)
@parsable.command
def create_dataset(row_count=ROW_COUNT):
    '''
    Extract dataset from image.
    '''
    scipy.misc.imsave(os.path.join(RESULTS, 'original.png'), IMAGE)
    print 'sampling {} points from image'.format(row_count)
    with open_compressed(SAMPLES, 'w') as f:
        writer = csv.writer(f)
        writer.writerow(['x', 'y'])
        for row in sample_from_image(IMAGE, row_count):
            writer.writerow(row)
    with csv_reader(SAMPLES) as reader:
        reader.next()
        image = visualize_dataset(map(float, row) for row in reader)
    scipy.misc.imsave(os.path.join(RESULTS, 'samples.png'), image)
@parsable.command
def compress(sample_count=1):
    '''
    Compress image using loom.
    '''
    assert os.path.exists(SAMPLES), 'first create dataset'
    print 'inferring'
    loom.tasks.ingest(NAME, SCHEMA, SAMPLES)
    loom.tasks.infer(NAME, sample_count=sample_count)
    image = synthesize_image(NAME)
    scipy.misc.imsave(os.path.join(RESULTS, 'loom.png'), image)
@parsable.command
def search(x=50, y=50):
    '''
    Demonstrate loom's search command.
    Highlight points similar to the point (x, y).
    '''
    assert loom.store.get_paths(NAME)['samples'], 'first compress image'
    x = int(x)
    y = int(y)
    print 'finding points similar to {} {}'.format(x, y)
    image = synthesize_search(NAME, (x, y))
    scipy.misc.imsave(os.path.join(RESULTS, 'search.png'), image)
@parsable.command
def cluster(cluster_count=5, sample_count=1000, pixel_count=None):
    '''
    Draw a fox map
    '''
    cluster_count = int(cluster_count)
    sample_count = int(sample_count)
    if pixel_count is None:
        with csv_reader(SAMPLES) as reader:
            pixel_count = len(list(reader)) - 1
    else:
        pixel_count = int(pixel_count)
    assert loom.store.get_paths(NAME)['samples'], 'first compress image'
    image = synthesize_clusters(NAME, sample_count, cluster_count, pixel_count)
    scipy.misc.imsave(os.path.join(RESULTS, 'cluster.png'), image)
@parsable.command
def clean():
    '''
    Clean out dataset and results.
    '''
    for dirname in [DATA, RESULTS]:
        if os.path.exists(dirname):
            shutil.rmtree(dirname)
    loom.datasets.clean(NAME)
@parsable.command
def run(row_count=ROW_COUNT, sample_count=1):
    '''
    Generate all datasets and run all algorithms.
    See index.html for results.
    '''
    create_dataset(row_count)
    compress(sample_count)
    print 'see file://{} for results'.format(os.path.join(ROOT, 'index.html'))
if __name__ == '__main__':
    parsable.dispatch()
 | 
	bsd-3-clause | 
| 
	oemof/reegis-hp | 
	reegis_hp/berlin_hp/read_data.py | 
	3 | 
	2222 | 
	import pandas as pd
import os
basic_path = '/home/uwe/chiba/RLI/data'
# wohn_gew_schul = pd.read_csv('/home/uwe/blubber.csv', ';')
# wohn_gew_schul.index += 1
# wohn_gew_schul.to_csv(os.path.join(basic_path, 'wohn_gew_schul.csv'))
#
# iwu_typen = pd.read_csv('/home/uwe/heiztyp2iwu.csv')
# iwu_typen.index += 1
# iwu_typen.to_csv(os.path.join(basic_path, 'iwu_typen.csv'))
#
# stadtstrukturtypen = pd.read_csv('/home/uwe/stadtstruk.csv', ';')
# stadtstrukturtypen.drop('heiztyp', 1, inplace=True)
# stadtstrukturtypen.index += 1
# stadtstrukturtypen.to_csv(os.path.join(basic_path, 'stadtstruktur.csv'))
iwu_typen = pd.read_csv(os.path.join(basic_path, 'iwu_typen.csv'), index_col=0)
wohn_gew_schul = pd.read_csv(
    os.path.join(basic_path, 'wohn_gew_schul.csv'), index_col=0)
stadtstrukturtypen = pd.read_csv(
    os.path.join(basic_path, 'stadtnutzung_erweitert.csv'), index_col=0)
# number_floors = pd.read_csv(
#     os.path.join(basic_path, 'number_floors_by_city_structure.csv'),
#     index_col=0)
#
# print(number_floors)
stadtstrukturtypen.fillna(0, inplace=True)
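# Totals: living area (inhabitants * living area per inhabitant) and number
# of inhabitants; "ew" = Einwohner (inhabitants).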
print(sum(stadtstrukturtypen.ew * stadtstrukturtypen.wohnflaeche_pro_ew))
print(sum(stadtstrukturtypen.ew))
# Todo: script to add the city structure type as a number, and list the types
# that end up without a number
# Todo: add number of floors and other missing types (ods file) [RLI/data]
# Todo: consumption per building type from the Wärmetool
# Todo: join the "Flächentyp" (area type) info into the overall map
# Todo: compare the living area with the Wärmetool
# Todo: calculate heat consumption using the Wärmetool method
# Todo: age of building by "Flächentyp" (area type)
# Todo: calculate heat consumption using the Open_eQuarter method
iwu_typen['EFHv84'] *= wohn_gew_schul.Wohnungen
iwu_typen['EFHn84'] *= wohn_gew_schul.Wohnungen
iwu_typen['MFHv84'] *= wohn_gew_schul.Wohnungen
iwu_typen['MFHn84'] *= wohn_gew_schul.Wohnungen
iwu_typen['Platte'] *= wohn_gew_schul.Wohnungen
iwu_typen['Buero'] = wohn_gew_schul.Buero
iwu_typen['Schule'] = wohn_gew_schul.Schule
# heatingtypes = pd.read_csv("/home/uwe/heiztypen.csv", sep=';')
# result = result.merge(heatingtypes, on='gebaeudefu', how='inner')
# result.set_index('gid', drop=True, inplace=True)
 | 
	gpl-3.0 | 
| 
	pratapvardhan/pandas | 
	pandas/tests/io/json/test_pandas.py | 
	2 | 
	50233 | 
	# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import pytest
from pandas.compat import (range, lrange, StringIO,
                           OrderedDict, is_platform_32bit)
import os
import numpy as np
from pandas import (Series, DataFrame, DatetimeIndex, Timestamp,
                    read_json, compat)
from datetime import timedelta
import pandas as pd
import json
from pandas.util.testing import (assert_almost_equal, assert_frame_equal,
                                 assert_series_equal, network,
                                 ensure_clean, assert_index_equal)
import pandas.util.testing as tm
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)
_frame2 = DataFrame(_seriesd, columns=['D', 'C', 'B', 'A'])
_intframe = DataFrame(dict((k, v.astype(np.int64))
                           for k, v in compat.iteritems(_seriesd)))
_tsframe = DataFrame(_tsd)
_cat_frame = _frame.copy()
cat = ['bah'] * 5 + ['bar'] * 5 + ['baz'] * \
    5 + ['foo'] * (len(_cat_frame) - 15)
_cat_frame.index = pd.CategoricalIndex(cat, name='E')
_cat_frame['E'] = list(reversed(cat))
_cat_frame['sort'] = np.arange(len(_cat_frame), dtype='int64')
_mixed_frame = _frame.copy()
class TestPandasContainer(object):
    @pytest.fixture(scope="function", autouse=True)
    def setup(self, datapath):
        self.dirpath = datapath("io", "json", "data")
        self.ts = tm.makeTimeSeries()
        self.ts.name = 'ts'
        self.series = tm.makeStringSeries()
        self.series.name = 'series'
        self.objSeries = tm.makeObjectSeries()
        self.objSeries.name = 'objects'
        self.empty_series = Series([], index=[])
        self.empty_frame = DataFrame({})
        self.frame = _frame.copy()
        self.frame2 = _frame2.copy()
        self.intframe = _intframe.copy()
        self.tsframe = _tsframe.copy()
        self.mixed_frame = _mixed_frame.copy()
        self.categorical = _cat_frame.copy()
        yield
        del self.dirpath
        del self.ts
        del self.series
        del self.objSeries
        del self.empty_series
        del self.empty_frame
        del self.frame
        del self.frame2
        del self.intframe
        del self.tsframe
        del self.mixed_frame
    def test_frame_double_encoded_labels(self):
        df = DataFrame([['a', 'b'], ['c', 'd']],
                       index=['index " 1', 'index / 2'],
                       columns=['a \\ b', 'y / z'])
        assert_frame_equal(df, read_json(df.to_json(orient='split'),
                                         orient='split'))
        assert_frame_equal(df, read_json(df.to_json(orient='columns'),
                                         orient='columns'))
        assert_frame_equal(df, read_json(df.to_json(orient='index'),
                                         orient='index'))
        df_unser = read_json(df.to_json(orient='records'), orient='records')
        assert_index_equal(df.columns, df_unser.columns)
        tm.assert_numpy_array_equal(df.values, df_unser.values)
    def test_frame_non_unique_index(self):
        df = DataFrame([['a', 'b'], ['c', 'd']], index=[1, 1],
                       columns=['x', 'y'])
        pytest.raises(ValueError, df.to_json, orient='index')
        pytest.raises(ValueError, df.to_json, orient='columns')
        assert_frame_equal(df, read_json(df.to_json(orient='split'),
                                         orient='split'))
        unser = read_json(df.to_json(orient='records'), orient='records')
        tm.assert_index_equal(df.columns, unser.columns)
        tm.assert_almost_equal(df.values, unser.values)
        unser = read_json(df.to_json(orient='values'), orient='values')
        tm.assert_numpy_array_equal(df.values, unser.values)
    def test_frame_non_unique_columns(self):
        df = DataFrame([['a', 'b'], ['c', 'd']], index=[1, 2],
                       columns=['x', 'x'])
        pytest.raises(ValueError, df.to_json, orient='index')
        pytest.raises(ValueError, df.to_json, orient='columns')
        pytest.raises(ValueError, df.to_json, orient='records')
        assert_frame_equal(df, read_json(df.to_json(orient='split'),
                                         orient='split', dtype=False))
        unser = read_json(df.to_json(orient='values'), orient='values')
        tm.assert_numpy_array_equal(df.values, unser.values)
        # GH4377; duplicate columns not processing correctly
        df = DataFrame([['a', 'b'], ['c', 'd']], index=[
                       1, 2], columns=['x', 'y'])
        result = read_json(df.to_json(orient='split'), orient='split')
        assert_frame_equal(result, df)
        def _check(df):
            result = read_json(df.to_json(orient='split'), orient='split',
                               convert_dates=['x'])
            assert_frame_equal(result, df)
        for o in [[['a', 'b'], ['c', 'd']],
                  [[1.5, 2.5], [3.5, 4.5]],
                  [[1, 2.5], [3, 4.5]],
                  [[Timestamp('20130101'), 3.5],
                   [Timestamp('20130102'), 4.5]]]:
            _check(DataFrame(o, index=[1, 2], columns=['x', 'x']))
    def test_frame_from_json_to_json(self):
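        # Round-trip df through to_json/read_json for the given orient and
        # compare, allowing for orients that drop index or column labels.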
        def _check_orient(df, orient, dtype=None, numpy=False,
                          convert_axes=True, check_dtype=True, raise_ok=None,
                          sort=None, check_index_type=True,
                          check_column_type=True, check_numpy_dtype=False):
            if sort is not None:
                df = df.sort_values(sort)
            else:
                df = df.sort_index()
            # if we are not unique, then check that we are raising ValueError
            # for the appropriate orients
            if not df.index.is_unique and orient in ['index', 'columns']:
                pytest.raises(
                    ValueError, lambda: df.to_json(orient=orient))
                return
            if (not df.columns.is_unique and
                    orient in ['index', 'columns', 'records']):
                pytest.raises(
                    ValueError, lambda: df.to_json(orient=orient))
                return
            dfjson = df.to_json(orient=orient)
            try:
                unser = read_json(dfjson, orient=orient, dtype=dtype,
                                  numpy=numpy, convert_axes=convert_axes)
            except Exception as detail:
                if raise_ok is not None:
                    if isinstance(detail, raise_ok):
                        return
                raise
            if sort is not None and sort in unser.columns:
                unser = unser.sort_values(sort)
            else:
                unser = unser.sort_index()
            if dtype is False:
                check_dtype = False
            if not convert_axes and df.index.dtype.type == np.datetime64:
                unser.index = DatetimeIndex(
                    unser.index.values.astype('i8') * 1e6)
            if orient == "records":
                # index is not captured in this orientation
                tm.assert_almost_equal(df.values, unser.values,
                                       check_dtype=check_numpy_dtype)
                tm.assert_index_equal(df.columns, unser.columns,
                                      exact=check_column_type)
            elif orient == "values":
                # index and cols are not captured in this orientation
                if numpy is True and df.shape == (0, 0):
                    assert unser.shape[0] == 0
                else:
                    tm.assert_almost_equal(df.values, unser.values,
                                           check_dtype=check_numpy_dtype)
            elif orient == "split":
                # index and col labels might not be strings
                unser.index = [str(i) for i in unser.index]
                unser.columns = [str(i) for i in unser.columns]
                if sort is None:
                    unser = unser.sort_index()
                tm.assert_almost_equal(df.values, unser.values,
                                       check_dtype=check_numpy_dtype)
            else:
                if convert_axes:
                    tm.assert_frame_equal(df, unser, check_dtype=check_dtype,
                                          check_index_type=check_index_type,
                                          check_column_type=check_column_type)
                else:
                    tm.assert_frame_equal(df, unser, check_less_precise=False,
                                          check_dtype=check_dtype)
        def _check_all_orients(df, dtype=None, convert_axes=True,
                               raise_ok=None, sort=None, check_index_type=True,
                               check_column_type=True):
            # numpy=False
            if convert_axes:
                _check_orient(df, "columns", dtype=dtype, sort=sort,
                              check_index_type=False, check_column_type=False)
                _check_orient(df, "records", dtype=dtype, sort=sort,
                              check_index_type=False, check_column_type=False)
                _check_orient(df, "split", dtype=dtype, sort=sort,
                              check_index_type=False, check_column_type=False)
                _check_orient(df, "index", dtype=dtype, sort=sort,
                              check_index_type=False, check_column_type=False)
                _check_orient(df, "values", dtype=dtype, sort=sort,
                              check_index_type=False, check_column_type=False)
            _check_orient(df, "columns", dtype=dtype,
                          convert_axes=False, sort=sort)
            _check_orient(df, "records", dtype=dtype,
                          convert_axes=False, sort=sort)
            _check_orient(df, "split", dtype=dtype,
                          convert_axes=False, sort=sort)
            _check_orient(df, "index", dtype=dtype,
                          convert_axes=False, sort=sort)
            _check_orient(df, "values", dtype=dtype,
                          convert_axes=False, sort=sort)
            # numpy=True and raise_ok might be not None, so ignore the error
            if convert_axes:
                _check_orient(df, "columns", dtype=dtype, numpy=True,
                              raise_ok=raise_ok, sort=sort,
                              check_index_type=False, check_column_type=False)
                _check_orient(df, "records", dtype=dtype, numpy=True,
                              raise_ok=raise_ok, sort=sort,
                              check_index_type=False, check_column_type=False)
                _check_orient(df, "split", dtype=dtype, numpy=True,
                              raise_ok=raise_ok, sort=sort,
                              check_index_type=False, check_column_type=False)
                _check_orient(df, "index", dtype=dtype, numpy=True,
                              raise_ok=raise_ok, sort=sort,
                              check_index_type=False, check_column_type=False)
                _check_orient(df, "values", dtype=dtype, numpy=True,
                              raise_ok=raise_ok, sort=sort,
                              check_index_type=False, check_column_type=False)
            _check_orient(df, "columns", dtype=dtype, numpy=True,
                          convert_axes=False, raise_ok=raise_ok, sort=sort)
            _check_orient(df, "records", dtype=dtype, numpy=True,
                          convert_axes=False, raise_ok=raise_ok, sort=sort)
            _check_orient(df, "split", dtype=dtype, numpy=True,
                          convert_axes=False, raise_ok=raise_ok, sort=sort)
            _check_orient(df, "index", dtype=dtype, numpy=True,
                          convert_axes=False, raise_ok=raise_ok, sort=sort)
            _check_orient(df, "values", dtype=dtype, numpy=True,
                          convert_axes=False, raise_ok=raise_ok, sort=sort)
        # basic
        _check_all_orients(self.frame)
        assert self.frame.to_json() == self.frame.to_json(orient="columns")
        _check_all_orients(self.intframe, dtype=self.intframe.values.dtype)
        _check_all_orients(self.intframe, dtype=False)
        # big one
        # index and columns are strings as all unserialised JSON object keys
        # are assumed to be strings
        biggie = DataFrame(np.zeros((200, 4)),
                           columns=[str(i) for i in range(4)],
                           index=[str(i) for i in range(200)])
        _check_all_orients(biggie, dtype=False, convert_axes=False)
        # dtypes
        _check_all_orients(DataFrame(biggie, dtype=np.float64),
                           dtype=np.float64, convert_axes=False)
        _check_all_orients(DataFrame(biggie, dtype=np.int), dtype=np.int,
                           convert_axes=False)
        _check_all_orients(DataFrame(biggie, dtype='U3'), dtype='U3',
                           convert_axes=False, raise_ok=ValueError)
        # categorical
        _check_all_orients(self.categorical, sort='sort', raise_ok=ValueError)
        # empty
        _check_all_orients(self.empty_frame, check_index_type=False,
                           check_column_type=False)
        # time series data
        _check_all_orients(self.tsframe)
        # mixed data
        index = pd.Index(['a', 'b', 'c', 'd', 'e'])
        data = {'A': [0., 1., 2., 3., 4.],
                'B': [0., 1., 0., 1., 0.],
                'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
                'D': [True, False, True, False, True]}
        df = DataFrame(data=data, index=index)
        _check_orient(df, "split", check_dtype=False)
        _check_orient(df, "records", check_dtype=False)
        _check_orient(df, "values", check_dtype=False)
        _check_orient(df, "columns", check_dtype=False)
        # index oriented is problematic as it is read back in, in a transposed
        # state, so the columns are interpreted as having mixed data and
        # given object dtypes.
        # force everything to have object dtype beforehand
        _check_orient(df.transpose().transpose(), "index", dtype=False)
    def test_frame_from_json_bad_data(self):
        pytest.raises(ValueError, read_json, StringIO('{"key":b:a:d}'))
        # too few indices
        json = StringIO('{"columns":["A","B"],'
                        '"index":["2","3"],'
                        '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}')
        pytest.raises(ValueError, read_json, json,
                      orient="split")
        # too many columns
        json = StringIO('{"columns":["A","B","C"],'
                        '"index":["1","2","3"],'
                        '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}')
        pytest.raises(AssertionError, read_json, json,
                      orient="split")
        # bad key
        json = StringIO('{"badkey":["A","B"],'
                        '"index":["2","3"],'
                        '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}')
        with tm.assert_raises_regex(ValueError,
                                    r"unexpected key\(s\): badkey"):
            read_json(json, orient="split")
    def test_frame_from_json_nones(self):
        df = DataFrame([[1, 2], [4, 5, 6]])
        unser = read_json(df.to_json())
        assert np.isnan(unser[2][0])
        df = DataFrame([['1', '2'], ['4', '5', '6']])
        unser = read_json(df.to_json())
        assert np.isnan(unser[2][0])
        unser = read_json(df.to_json(), dtype=False)
        assert unser[2][0] is None
        unser = read_json(df.to_json(), convert_axes=False, dtype=False)
        assert unser['2']['0'] is None
        unser = read_json(df.to_json(), numpy=False)
        assert np.isnan(unser[2][0])
        unser = read_json(df.to_json(), numpy=False, dtype=False)
        assert unser[2][0] is None
        unser = read_json(df.to_json(), numpy=False,
                          convert_axes=False, dtype=False)
        assert unser['2']['0'] is None
        # infinities get mapped to nulls which get mapped to NaNs during
        # deserialisation
        df = DataFrame([[1, 2], [4, 5, 6]])
        df.loc[0, 2] = np.inf
        unser = read_json(df.to_json())
        assert np.isnan(unser[2][0])
        unser = read_json(df.to_json(), dtype=False)
        assert np.isnan(unser[2][0])
        df.loc[0, 2] = np.NINF
        unser = read_json(df.to_json())
        assert np.isnan(unser[2][0])
        unser = read_json(df.to_json(), dtype=False)
        assert np.isnan(unser[2][0])
    @pytest.mark.skipif(is_platform_32bit(),
                        reason="not compliant on 32-bit, xref #15865")
    def test_frame_to_json_float_precision(self):
        df = pd.DataFrame([dict(a_float=0.95)])
        encoded = df.to_json(double_precision=1)
        assert encoded == '{"a_float":{"0":1.0}}'
        df = pd.DataFrame([dict(a_float=1.95)])
        encoded = df.to_json(double_precision=1)
        assert encoded == '{"a_float":{"0":2.0}}'
        df = pd.DataFrame([dict(a_float=-1.95)])
        encoded = df.to_json(double_precision=1)
        assert encoded == '{"a_float":{"0":-2.0}}'
        df = pd.DataFrame([dict(a_float=0.995)])
        encoded = df.to_json(double_precision=2)
        assert encoded == '{"a_float":{"0":1.0}}'
        df = pd.DataFrame([dict(a_float=0.9995)])
        encoded = df.to_json(double_precision=3)
        assert encoded == '{"a_float":{"0":1.0}}'
        df = pd.DataFrame([dict(a_float=0.99999999999999944)])
        encoded = df.to_json(double_precision=15)
        assert encoded == '{"a_float":{"0":1.0}}'
    def test_frame_to_json_except(self):
        df = DataFrame([1, 2, 3])
        pytest.raises(ValueError, df.to_json, orient="garbage")
    def test_frame_empty(self):
        df = DataFrame(columns=['jim', 'joe'])
        assert not df._is_mixed_type
        assert_frame_equal(read_json(df.to_json(), dtype=dict(df.dtypes)), df,
                           check_index_type=False)
        # GH 7445
        result = pd.DataFrame({'test': []}, index=[]).to_json(orient='columns')
        expected = '{"test":{}}'
        assert result == expected
    def test_frame_empty_mixedtype(self):
        # mixed type
        df = DataFrame(columns=['jim', 'joe'])
        df['joe'] = df['joe'].astype('i8')
        assert df._is_mixed_type
        assert_frame_equal(read_json(df.to_json(), dtype=dict(df.dtypes)), df,
                           check_index_type=False)
    def test_frame_mixedtype_orient(self):  # GH10289
        vals = [[10, 1, 'foo', .1, .01],
                [20, 2, 'bar', .2, .02],
                [30, 3, 'baz', .3, .03],
                [40, 4, 'qux', .4, .04]]
        df = DataFrame(vals, index=list('abcd'),
                       columns=['1st', '2nd', '3rd', '4th', '5th'])
        assert df._is_mixed_type
        right = df.copy()
        for orient in ['split', 'index', 'columns']:
            inp = df.to_json(orient=orient)
            left = read_json(inp, orient=orient, convert_axes=False)
            assert_frame_equal(left, right)
        right.index = np.arange(len(df))
        inp = df.to_json(orient='records')
        left = read_json(inp, orient='records', convert_axes=False)
        assert_frame_equal(left, right)
        right.columns = np.arange(df.shape[1])
        inp = df.to_json(orient='values')
        left = read_json(inp, orient='values', convert_axes=False)
        assert_frame_equal(left, right)
    def test_v12_compat(self):
        df = DataFrame(
            [[1.56808523, 0.65727391, 1.81021139, -0.17251653],
             [-0.2550111, -0.08072427, -0.03202878, -0.17581665],
             [1.51493992, 0.11805825, 1.629455, -1.31506612],
             [-0.02765498, 0.44679743, 0.33192641, -0.27885413],
             [0.05951614, -2.69652057, 1.28163262, 0.34703478]],
            columns=['A', 'B', 'C', 'D'],
            index=pd.date_range('2000-01-03', '2000-01-07'))
        df['date'] = pd.Timestamp('19920106 18:21:32.12')
        df.iloc[3, df.columns.get_loc('date')] = pd.Timestamp('20130101')
        df['modified'] = df['date']
        df.iloc[1, df.columns.get_loc('modified')] = pd.NaT
        v12_json = os.path.join(self.dirpath, 'tsframe_v012.json')
        df_unser = pd.read_json(v12_json)
        assert_frame_equal(df, df_unser)
        df_iso = df.drop(['modified'], axis=1)
        v12_iso_json = os.path.join(self.dirpath, 'tsframe_iso_v012.json')
        df_unser_iso = pd.read_json(v12_iso_json)
        assert_frame_equal(df_iso, df_unser_iso)
    def test_blocks_compat_GH9037(self):
        index = pd.date_range('20000101', periods=10, freq='H')
        df_mixed = DataFrame(OrderedDict(
            float_1=[-0.92077639, 0.77434435, 1.25234727, 0.61485564,
                     -0.60316077, 0.24653374, 0.28668979, -2.51969012,
                     0.95748401, -1.02970536],
            int_1=[19680418, 75337055, 99973684, 65103179, 79373900,
                   40314334, 21290235, 4991321, 41903419, 16008365],
            str_1=['78c608f1', '64a99743', '13d2ff52', 'ca7f4af2', '97236474',
                   'bde7e214', '1a6bde47', 'b1190be5', '7a669144', '8d64d068'],
            float_2=[-0.0428278, -1.80872357, 3.36042349, -0.7573685,
                     -0.48217572, 0.86229683, 1.08935819, 0.93898739,
                     -0.03030452, 1.43366348],
            str_2=['14f04af9', 'd085da90', '4bcfac83', '81504caf', '2ffef4a9',
                   '08e2f5c4', '07e1af03', 'addbd4a7', '1f6a09ba', '4bfc4d87'],
            int_2=[86967717, 98098830, 51927505, 20372254, 12601730, 20884027,
                   34193846, 10561746, 24867120, 76131025]
        ), index=index)
        # JSON deserialisation always creates unicode strings
        df_mixed.columns = df_mixed.columns.astype('unicode')
        df_roundtrip = pd.read_json(df_mixed.to_json(orient='split'),
                                    orient='split')
        assert_frame_equal(df_mixed, df_roundtrip,
                           check_index_type=True,
                           check_column_type=True,
                           check_frame_type=True,
                           by_blocks=True,
                           check_exact=True)
    def test_frame_nonprintable_bytes(self):
        # GH14256: failing column caused segfaults, if it is not the last one
        class BinaryThing(object):
            def __init__(self, hexed):
                self.hexed = hexed
                if compat.PY2:
                    self.binary = hexed.decode('hex')
                else:
                    self.binary = bytes.fromhex(hexed)
            def __str__(self):
                return self.hexed
        hexed = '574b4454ba8c5eb4f98a8f45'
        binthing = BinaryThing(hexed)
        # verify the proper conversion of printable content
        df_printable = DataFrame({'A': [binthing.hexed]})
        assert df_printable.to_json() == \
            '{{"A":{{"0":"{hex}"}}}}'.format(hex=hexed)
        # check if non-printable content throws appropriate Exception
        df_nonprintable = DataFrame({'A': [binthing]})
        with pytest.raises(OverflowError):
            df_nonprintable.to_json()
        # the same with multiple columns threw segfaults
        df_mixed = DataFrame({'A': [binthing], 'B': [1]},
                             columns=['A', 'B'])
        with pytest.raises(OverflowError):
            df_mixed.to_json()
        # default_handler should resolve exceptions for non-string types
        assert df_nonprintable.to_json(default_handler=str) == \
            '{{"A":{{"0":"{hex}"}}}}'.format(hex=hexed)
        assert df_mixed.to_json(default_handler=str) == \
            '{{"A":{{"0":"{hex}"}},"B":{{"0":1}}}}'.format(hex=hexed)
    def test_label_overflow(self):
        # GH14256: buffer length not checked when writing label
        df = pd.DataFrame({'bar' * 100000: [1], 'foo': [1337]})
        assert df.to_json() == \
            '{{"{bar}":{{"0":1}},"foo":{{"0":1337}}}}'.format(
                bar=('bar' * 100000))
    def test_series_non_unique_index(self):
        s = Series(['a', 'b'], index=[1, 1])
        pytest.raises(ValueError, s.to_json, orient='index')
        assert_series_equal(s, read_json(s.to_json(orient='split'),
                                         orient='split', typ='series'))
        unser = read_json(s.to_json(orient='records'),
                          orient='records', typ='series')
        tm.assert_numpy_array_equal(s.values, unser.values)
    def test_series_from_json_to_json(self):
        def _check_orient(series, orient, dtype=None, numpy=False,
                          check_index_type=True):
            series = series.sort_index()
            unser = read_json(series.to_json(orient=orient),
                              typ='series', orient=orient, numpy=numpy,
                              dtype=dtype)
            unser = unser.sort_index()
            if orient == "records" or orient == "values":
                assert_almost_equal(series.values, unser.values)
            else:
                if orient == "split":
                    assert_series_equal(series, unser,
                                        check_index_type=check_index_type)
                else:
                    assert_series_equal(series, unser, check_names=False,
                                        check_index_type=check_index_type)
        def _check_all_orients(series, dtype=None, check_index_type=True):
            _check_orient(series, "columns", dtype=dtype,
                          check_index_type=check_index_type)
            _check_orient(series, "records", dtype=dtype,
                          check_index_type=check_index_type)
            _check_orient(series, "split", dtype=dtype,
                          check_index_type=check_index_type)
            _check_orient(series, "index", dtype=dtype,
                          check_index_type=check_index_type)
            _check_orient(series, "values", dtype=dtype)
            _check_orient(series, "columns", dtype=dtype, numpy=True,
                          check_index_type=check_index_type)
            _check_orient(series, "records", dtype=dtype, numpy=True,
                          check_index_type=check_index_type)
            _check_orient(series, "split", dtype=dtype, numpy=True,
                          check_index_type=check_index_type)
            _check_orient(series, "index", dtype=dtype, numpy=True,
                          check_index_type=check_index_type)
            _check_orient(series, "values", dtype=dtype, numpy=True,
                          check_index_type=check_index_type)
        # basic
        _check_all_orients(self.series)
        assert self.series.to_json() == self.series.to_json(orient="index")
        objSeries = Series([str(d) for d in self.objSeries],
                           index=self.objSeries.index,
                           name=self.objSeries.name)
        _check_all_orients(objSeries, dtype=False)
        # empty_series has empty index with object dtype
        # which cannot be reverted
        assert self.empty_series.index.dtype == np.object_
        _check_all_orients(self.empty_series, check_index_type=False)
        _check_all_orients(self.ts)
        # dtype
        s = Series(lrange(6), index=['a', 'b', 'c', 'd', 'e', 'f'])
        _check_all_orients(Series(s, dtype=np.float64), dtype=np.float64)
        _check_all_orients(Series(s, dtype=np.int), dtype=np.int)
    def test_series_to_json_except(self):
        s = Series([1, 2, 3])
        pytest.raises(ValueError, s.to_json, orient="garbage")
    def test_series_from_json_precise_float(self):
        s = Series([4.56, 4.56, 4.56])
        result = read_json(s.to_json(), typ='series', precise_float=True)
        assert_series_equal(result, s, check_index_type=False)
    def test_frame_from_json_precise_float(self):
        df = DataFrame([[4.56, 4.56, 4.56], [4.56, 4.56, 4.56]])
        result = read_json(df.to_json(), precise_float=True)
        assert_frame_equal(result, df, check_index_type=False,
                           check_column_type=False)
    def test_typ(self):
        s = Series(lrange(6), index=['a', 'b', 'c',
                                     'd', 'e', 'f'], dtype='int64')
        result = read_json(s.to_json(), typ=None)
        assert_series_equal(result, s)
    def test_reconstruction_index(self):
        df = DataFrame([[1, 2, 3], [4, 5, 6]])
        result = read_json(df.to_json())
        assert_frame_equal(result, df)
        df = DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}, index=['A', 'B', 'C'])
        result = read_json(df.to_json())
        assert_frame_equal(result, df)
    def test_path(self):
        with ensure_clean('test.json') as path:
            for df in [self.frame, self.frame2, self.intframe, self.tsframe,
                       self.mixed_frame]:
                df.to_json(path)
                read_json(path)
    def test_axis_dates(self):
        # frame
        json = self.tsframe.to_json()
        result = read_json(json)
        assert_frame_equal(result, self.tsframe)
        # series
        json = self.ts.to_json()
        result = read_json(json, typ='series')
        assert_series_equal(result, self.ts, check_names=False)
        assert result.name is None
    def test_convert_dates(self):
        # frame
        df = self.tsframe.copy()
        df['date'] = Timestamp('20130101')
        json = df.to_json()
        result = read_json(json)
        assert_frame_equal(result, df)
        df['foo'] = 1.
        json = df.to_json(date_unit='ns')
        result = read_json(json, convert_dates=False)
        expected = df.copy()
        expected['date'] = expected['date'].values.view('i8')
        expected['foo'] = expected['foo'].astype('int64')
        assert_frame_equal(result, expected)
        # series
        ts = Series(Timestamp('20130101'), index=self.ts.index)
        json = ts.to_json()
        result = read_json(json, typ='series')
        assert_series_equal(result, ts)
    def test_convert_dates_infer(self):
        # GH10747
        from pandas.io.json import dumps
        infer_words = ['trade_time', 'date', 'datetime', 'sold_at',
                       'modified', 'timestamp', 'timestamps']
        for infer_word in infer_words:
            data = [{'id': 1, infer_word: 1036713600000}, {'id': 2}]
            expected = DataFrame([[1, Timestamp('2002-11-08')], [2, pd.NaT]],
                                 columns=['id', infer_word])
            result = read_json(dumps(data))[['id', infer_word]]
            assert_frame_equal(result, expected)
    def test_date_format_frame(self):
        df = self.tsframe.copy()
        def test_w_date(date, date_unit=None):
            df['date'] = Timestamp(date)
            df.iloc[1, df.columns.get_loc('date')] = pd.NaT
            df.iloc[5, df.columns.get_loc('date')] = pd.NaT
            if date_unit:
                json = df.to_json(date_format='iso', date_unit=date_unit)
            else:
                json = df.to_json(date_format='iso')
            result = read_json(json)
            assert_frame_equal(result, df)
        test_w_date('20130101 20:43:42.123')
        test_w_date('20130101 20:43:42', date_unit='s')
        test_w_date('20130101 20:43:42.123', date_unit='ms')
        test_w_date('20130101 20:43:42.123456', date_unit='us')
        test_w_date('20130101 20:43:42.123456789', date_unit='ns')
        pytest.raises(ValueError, df.to_json, date_format='iso',
                      date_unit='foo')
    def test_date_format_series(self):
        def test_w_date(date, date_unit=None):
            ts = Series(Timestamp(date), index=self.ts.index)
            ts.iloc[1] = pd.NaT
            ts.iloc[5] = pd.NaT
            if date_unit:
                json = ts.to_json(date_format='iso', date_unit=date_unit)
            else:
                json = ts.to_json(date_format='iso')
            result = read_json(json, typ='series')
            assert_series_equal(result, ts)
        test_w_date('20130101 20:43:42.123')
        test_w_date('20130101 20:43:42', date_unit='s')
        test_w_date('20130101 20:43:42.123', date_unit='ms')
        test_w_date('20130101 20:43:42.123456', date_unit='us')
        test_w_date('20130101 20:43:42.123456789', date_unit='ns')
        ts = Series(Timestamp('20130101 20:43:42.123'), index=self.ts.index)
        pytest.raises(ValueError, ts.to_json, date_format='iso',
                      date_unit='foo')
    def test_date_unit(self):
        df = self.tsframe.copy()
        df['date'] = Timestamp('20130101 20:43:42')
        dl = df.columns.get_loc('date')
        df.iloc[1, dl] = Timestamp('19710101 20:43:42')
        df.iloc[2, dl] = Timestamp('21460101 20:43:42')
        df.iloc[4, dl] = pd.NaT
        for unit in ('s', 'ms', 'us', 'ns'):
            json = df.to_json(date_format='epoch', date_unit=unit)
            # force date unit
            result = read_json(json, date_unit=unit)
            assert_frame_equal(result, df)
            # detect date unit
            result = read_json(json, date_unit=None)
            assert_frame_equal(result, df)
    def test_weird_nested_json(self):
        # this used to core dump the parser
        s = r'''{
        "status": "success",
        "data": {
        "posts": [
            {
            "id": 1,
            "title": "A blog post",
            "body": "Some useful content"
            },
            {
            "id": 2,
            "title": "Another blog post",
            "body": "More content"
            }
           ]
          }
        }'''
        read_json(s)
    def test_doc_example(self):
        dfj2 = DataFrame(np.random.randn(5, 2), columns=list('AB'))
        dfj2['date'] = Timestamp('20130101')
        dfj2['ints'] = lrange(5)
        dfj2['bools'] = True
        dfj2.index = pd.date_range('20130101', periods=5)
        json = dfj2.to_json()
        result = read_json(json, dtype={'ints': np.int64, 'bools': np.bool_})
        assert_frame_equal(result, result)
    def test_misc_example(self):
        # parsing unordered input fails
        result = read_json('[{"a": 1, "b": 2}, {"b":2, "a" :1}]', numpy=True)
        expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
        error_msg = """DataFrame\\.index are different
DataFrame\\.index values are different \\(100\\.0 %\\)
\\[left\\]:  Index\\(\\[u?'a', u?'b'\\], dtype='object'\\)
\\[right\\]: RangeIndex\\(start=0, stop=2, step=1\\)"""
        with tm.assert_raises_regex(AssertionError, error_msg):
            assert_frame_equal(result, expected, check_index_type=False)
        result = read_json('[{"a": 1, "b": 2}, {"b":2, "a" :1}]')
        expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
        assert_frame_equal(result, expected)
    @network
    def test_round_trip_exception_(self):
        # GH 3867
        csv = 'https://raw.github.com/hayd/lahman2012/master/csvs/Teams.csv'
        df = pd.read_csv(csv)
        s = df.to_json()
        result = pd.read_json(s)
        assert_frame_equal(result.reindex(
            index=df.index, columns=df.columns), df)
    @network
    def test_url(self):
        url = 'https://api.github.com/repos/pandas-dev/pandas/issues?per_page=5'  # noqa
        result = read_json(url, convert_dates=True)
        for c in ['created_at', 'closed_at', 'updated_at']:
            assert result[c].dtype == 'datetime64[ns]'
    def test_timedelta(self):
        converter = lambda x: pd.to_timedelta(x, unit='ms')
        s = Series([timedelta(23), timedelta(seconds=5)])
        assert s.dtype == 'timedelta64[ns]'
        result = pd.read_json(s.to_json(), typ='series').apply(converter)
        assert_series_equal(result, s)
        s = Series([timedelta(23), timedelta(seconds=5)],
                   index=pd.Index([0, 1]))
        assert s.dtype == 'timedelta64[ns]'
        result = pd.read_json(s.to_json(), typ='series').apply(converter)
        assert_series_equal(result, s)
        frame = DataFrame([timedelta(23), timedelta(seconds=5)])
        assert frame[0].dtype == 'timedelta64[ns]'
        assert_frame_equal(frame, pd.read_json(frame.to_json())
                           .apply(converter))
        frame = DataFrame({'a': [timedelta(days=23), timedelta(seconds=5)],
                           'b': [1, 2],
                           'c': pd.date_range(start='20130101', periods=2)})
        result = pd.read_json(frame.to_json(date_unit='ns'))
        result['a'] = pd.to_timedelta(result.a, unit='ns')
        result['c'] = pd.to_datetime(result.c)
        assert_frame_equal(frame, result)
    def test_mixed_timedelta_datetime(self):
        frame = DataFrame({'a': [timedelta(23), pd.Timestamp('20130101')]},
                          dtype=object)
        expected = DataFrame({'a': [pd.Timedelta(frame.a[0]).value,
                                    pd.Timestamp(frame.a[1]).value]})
        result = pd.read_json(frame.to_json(date_unit='ns'),
                              dtype={'a': 'int64'})
        assert_frame_equal(result, expected, check_index_type=False)
    def test_default_handler(self):
        value = object()
        frame = DataFrame({'a': [7, value]})
        expected = DataFrame({'a': [7, str(value)]})
        result = pd.read_json(frame.to_json(default_handler=str))
        assert_frame_equal(expected, result, check_index_type=False)
    def test_default_handler_indirect(self):
        from pandas.io.json import dumps
        def default(obj):
            if isinstance(obj, complex):
                return [('mathjs', 'Complex'),
                        ('re', obj.real),
                        ('im', obj.imag)]
            return str(obj)
        df_list = [9, DataFrame({'a': [1, 'STR', complex(4, -5)],
                                 'b': [float('nan'), None, 'N/A']},
                                columns=['a', 'b'])]
        expected = ('[9,[[1,null],["STR",null],[[["mathjs","Complex"],'
                    '["re",4.0],["im",-5.0]],"N\\/A"]]]')
        assert dumps(df_list, default_handler=default,
                     orient="values") == expected
    def test_default_handler_numpy_unsupported_dtype(self):
        # GH12554 to_json raises 'Unhandled numpy dtype 15'
        df = DataFrame({'a': [1, 2.3, complex(4, -5)],
                        'b': [float('nan'), None, complex(1.2, 0)]},
                       columns=['a', 'b'])
        expected = ('[["(1+0j)","(nan+0j)"],'
                    '["(2.3+0j)","(nan+0j)"],'
                    '["(4-5j)","(1.2+0j)"]]')
        assert df.to_json(default_handler=str, orient="values") == expected
    def test_default_handler_raises(self):
        def my_handler_raises(obj):
            raise TypeError("raisin")
        pytest.raises(TypeError,
                      DataFrame({'a': [1, 2, object()]}).to_json,
                      default_handler=my_handler_raises)
        pytest.raises(TypeError,
                      DataFrame({'a': [1, 2, complex(4, -5)]}).to_json,
                      default_handler=my_handler_raises)
    def test_categorical(self):
        # GH4377 df.to_json segfaults with non-ndarray blocks
        df = DataFrame({"A": ["a", "b", "c", "a", "b", "b", "a"]})
        df["B"] = df["A"]
        expected = df.to_json()
        df["B"] = df["A"].astype('category')
        assert expected == df.to_json()
        s = df["A"]
        sc = df["B"]
        assert s.to_json() == sc.to_json()
    def test_datetime_tz(self):
        # GH4377 df.to_json segfaults with non-ndarray blocks
        tz_range = pd.date_range('20130101', periods=3, tz='US/Eastern')
        tz_naive = tz_range.tz_convert('utc').tz_localize(None)
        df = DataFrame({
            'A': tz_range,
            'B': pd.date_range('20130101', periods=3)})
        df_naive = df.copy()
        df_naive['A'] = tz_naive
        expected = df_naive.to_json()
        assert expected == df.to_json()
        stz = Series(tz_range)
        s_naive = Series(tz_naive)
        assert stz.to_json() == s_naive.to_json()
    def test_sparse(self):
        # GH4377 df.to_json segfaults with non-ndarray blocks
        df = pd.DataFrame(np.random.randn(10, 4))
        df.loc[:8] = np.nan
        sdf = df.to_sparse()
        expected = df.to_json()
        assert expected == sdf.to_json()
        s = pd.Series(np.random.randn(10))
        s.loc[:8] = np.nan
        ss = s.to_sparse()
        expected = s.to_json()
        assert expected == ss.to_json()
    def test_tz_is_utc(self):
        from pandas.io.json import dumps
        exp = '"2013-01-10T05:00:00.000Z"'
        ts = Timestamp('2013-01-10 05:00:00Z')
        assert dumps(ts, iso_dates=True) == exp
        dt = ts.to_pydatetime()
        assert dumps(dt, iso_dates=True) == exp
        ts = Timestamp('2013-01-10 00:00:00', tz='US/Eastern')
        assert dumps(ts, iso_dates=True) == exp
        dt = ts.to_pydatetime()
        assert dumps(dt, iso_dates=True) == exp
        ts = Timestamp('2013-01-10 00:00:00-0500')
        assert dumps(ts, iso_dates=True) == exp
        dt = ts.to_pydatetime()
        assert dumps(dt, iso_dates=True) == exp
    def test_tz_range_is_utc(self):
        from pandas.io.json import dumps
        exp = '["2013-01-01T05:00:00.000Z","2013-01-02T05:00:00.000Z"]'
        dfexp = ('{"DT":{'
                 '"0":"2013-01-01T05:00:00.000Z",'
                 '"1":"2013-01-02T05:00:00.000Z"}}')
        tz_range = pd.date_range('2013-01-01 05:00:00Z', periods=2)
        assert dumps(tz_range, iso_dates=True) == exp
        dti = pd.DatetimeIndex(tz_range)
        assert dumps(dti, iso_dates=True) == exp
        df = DataFrame({'DT': dti})
        assert dumps(df, iso_dates=True) == dfexp
        tz_range = pd.date_range('2013-01-01 00:00:00', periods=2,
                                 tz='US/Eastern')
        assert dumps(tz_range, iso_dates=True) == exp
        dti = pd.DatetimeIndex(tz_range)
        assert dumps(dti, iso_dates=True) == exp
        df = DataFrame({'DT': dti})
        assert dumps(df, iso_dates=True) == dfexp
        tz_range = pd.date_range('2013-01-01 00:00:00-0500', periods=2)
        assert dumps(tz_range, iso_dates=True) == exp
        dti = pd.DatetimeIndex(tz_range)
        assert dumps(dti, iso_dates=True) == exp
        df = DataFrame({'DT': dti})
        assert dumps(df, iso_dates=True) == dfexp
    def test_read_inline_jsonl(self):
        # GH9180
        result = read_json('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n', lines=True)
        expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
        assert_frame_equal(result, expected)
    def test_read_s3_jsonl(self, s3_resource):
        # GH17200
        result = read_json('s3n://pandas-test/items.jsonl', lines=True)
        expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
        assert_frame_equal(result, expected)
    def test_read_local_jsonl(self):
        # GH17200
        with ensure_clean('tmp_items.json') as path:
            with open(path, 'w') as infile:
                infile.write('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n')
            result = read_json(path, lines=True)
            expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
            assert_frame_equal(result, expected)
    def test_read_jsonl_unicode_chars(self):
        # GH15132: non-ascii unicode characters
        # \u201d == RIGHT DOUBLE QUOTATION MARK
        # simulate file handle
        json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
        json = StringIO(json)
        result = read_json(json, lines=True)
        expected = DataFrame([[u"foo\u201d", "bar"], ["foo", "bar"]],
                             columns=['a', 'b'])
        assert_frame_equal(result, expected)
        # simulate string
        json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
        result = read_json(json, lines=True)
        expected = DataFrame([[u"foo\u201d", "bar"], ["foo", "bar"]],
                             columns=['a', 'b'])
        assert_frame_equal(result, expected)
    def test_read_json_large_numbers(self):
        # GH18842
        json = '{"articleId": "1404366058080022500245"}'
        json = StringIO(json)
        result = read_json(json, typ="series")
        expected = Series(1.404366e+21, index=['articleId'])
        assert_series_equal(result, expected)
        json = '{"0": {"articleId": "1404366058080022500245"}}'
        json = StringIO(json)
        result = read_json(json)
        expected = DataFrame(1.404366e+21, index=['articleId'], columns=[0])
        assert_frame_equal(result, expected)
    def test_to_jsonl(self):
        # GH9180
        df = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
        result = df.to_json(orient="records", lines=True)
        expected = '{"a":1,"b":2}\n{"a":1,"b":2}'
        assert result == expected
        df = DataFrame([["foo}", "bar"], ['foo"', "bar"]], columns=['a', 'b'])
        result = df.to_json(orient="records", lines=True)
        expected = '{"a":"foo}","b":"bar"}\n{"a":"foo\\"","b":"bar"}'
        assert result == expected
        assert_frame_equal(pd.read_json(result, lines=True), df)
        # GH15096: escaped characters in columns and data
        df = DataFrame([["foo\\", "bar"], ['foo"', "bar"]],
                       columns=["a\\", 'b'])
        result = df.to_json(orient="records", lines=True)
        expected = ('{"a\\\\":"foo\\\\","b":"bar"}\n'
                    '{"a\\\\":"foo\\"","b":"bar"}')
        assert result == expected
        assert_frame_equal(pd.read_json(result, lines=True), df)
    def test_latin_encoding(self):
        if compat.PY2:
            tm.assert_raises_regex(
                TypeError, r'\[unicode\] is not implemented as a table column')
            return
        # GH 13774
        pytest.skip("encoding not implemented in .to_json(), "
                    "xref #13774")
        values = [[b'E\xc9, 17', b'', b'a', b'b', b'c'],
                  [b'E\xc9, 17', b'a', b'b', b'c'],
                  [b'EE, 17', b'', b'a', b'b', b'c'],
                  [b'E\xc9, 17', b'\xf8\xfc', b'a', b'b', b'c'],
                  [b'', b'a', b'b', b'c'],
                  [b'\xf8\xfc', b'a', b'b', b'c'],
                  [b'A\xf8\xfc', b'', b'a', b'b', b'c'],
                  [np.nan, b'', b'b', b'c'],
                  [b'A\xf8\xfc', np.nan, b'', b'b', b'c']]
        def _try_decode(x, encoding='latin-1'):
            try:
                return x.decode(encoding)
            except AttributeError:
                return x
        # not sure how to remove latin-1 from code in python 2 and 3
        values = [[_try_decode(x) for x in y] for y in values]
        examples = []
        for dtype in ['category', object]:
            for val in values:
                examples.append(Series(val, dtype=dtype))
        def roundtrip(s, encoding='latin-1'):
            with ensure_clean('test.json') as path:
                s.to_json(path, encoding=encoding)
                retr = read_json(path, encoding=encoding)
                assert_series_equal(s, retr, check_categorical=False)
        for s in examples:
            roundtrip(s)
    def test_data_frame_size_after_to_json(self):
        # GH15344
        df = DataFrame({'a': [str(1)]})
        size_before = df.memory_usage(index=True, deep=True).sum()
        df.to_json()
        size_after = df.memory_usage(index=True, deep=True).sum()
        assert size_before == size_after
    @pytest.mark.parametrize('data, expected', [
        (DataFrame([[1, 2], [4, 5]], columns=['a', 'b']),
            {'columns': ['a', 'b'], 'data': [[1, 2], [4, 5]]}),
        (DataFrame([[1, 2], [4, 5]], columns=['a', 'b']).rename_axis('foo'),
            {'columns': ['a', 'b'], 'data': [[1, 2], [4, 5]]}),
        (DataFrame([[1, 2], [4, 5]], columns=['a', 'b'],
                   index=[['a', 'b'], ['c', 'd']]),
            {'columns': ['a', 'b'], 'data': [[1, 2], [4, 5]]}),
        (Series([1, 2, 3], name='A'),
            {'name': 'A', 'data': [1, 2, 3]}),
        (Series([1, 2, 3], name='A').rename_axis('foo'),
            {'name': 'A', 'data': [1, 2, 3]}),
        (Series([1, 2], name='A', index=[['a', 'b'], ['c', 'd']]),
            {'name': 'A', 'data': [1, 2]}),
    ])
    def test_index_false_to_json_split(self, data, expected):
        # GH 17394
        # Testing index=False in to_json with orient='split'
        result = data.to_json(orient='split', index=False)
        result = json.loads(result)
        assert result == expected
    @pytest.mark.parametrize('data', [
        (DataFrame([[1, 2], [4, 5]], columns=['a', 'b'])),
        (DataFrame([[1, 2], [4, 5]], columns=['a', 'b']).rename_axis('foo')),
        (DataFrame([[1, 2], [4, 5]], columns=['a', 'b'],
                   index=[['a', 'b'], ['c', 'd']])),
        (Series([1, 2, 3], name='A')),
        (Series([1, 2, 3], name='A').rename_axis('foo')),
        (Series([1, 2], name='A', index=[['a', 'b'], ['c', 'd']])),
    ])
    def test_index_false_to_json_table(self, data):
        # GH 17394
        # Testing index=False in to_json with orient='table'
        result = data.to_json(orient='table', index=False)
        result = json.loads(result)
        expected = {
            'schema': pd.io.json.build_table_schema(data, index=False),
            'data': DataFrame(data).to_dict(orient='records')
        }
        assert result == expected
    @pytest.mark.parametrize('orient', [
        'records', 'index', 'columns', 'values'
    ])
    def test_index_false_error_to_json(self, orient):
        # GH 17394
        # Testing error message from to_json with index=False
        df = pd.DataFrame([[1, 2], [4, 5]], columns=['a', 'b'])
        with tm.assert_raises_regex(ValueError, "'index=False' is only "
                                                "valid when 'orient' is "
                                                "'split' or 'table'"):
            df.to_json(orient=orient, index=False)
 | 
	bsd-3-clause | 
| 
	russel1237/scikit-learn | 
	sklearn/linear_model/least_angle.py | 
	61 | 
	54324 | 
	"""
Least Angle Regression algorithm. See the documentation on the
Generalized Linear Model for a complete discussion.
"""
from __future__ import print_function
# Author: Fabian Pedregosa <[email protected]>
#         Alexandre Gramfort <[email protected]>
#         Gael Varoquaux
#
# License: BSD 3 clause
from math import log
import sys
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg, interpolate
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel
from ..base import RegressorMixin
from ..utils import arrayfuncs, as_float_array, check_X_y
from ..cross_validation import check_cv
from ..utils import ConvergenceWarning
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
import scipy
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
    solve_triangular_args = {'check_finite': False}
def lars_path(X, y, Xy=None, Gram=None, max_iter=500,
              alpha_min=0, method='lar', copy_X=True,
              eps=np.finfo(np.float).eps,
              copy_Gram=True, verbose=0, return_path=True,
              return_n_iter=False, positive=False):
    """Compute Least Angle Regression or Lasso path using LARS algorithm [1]
    The optimization objective for the case method='lasso' is::
    (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
    In the case of method='lar', the objective function is only known in
    the form of an implicit equation (see discussion in [1]).
    Read more in the :ref:`User Guide <least_angle_regression>`.
    Parameters
    -----------
    X : array, shape: (n_samples, n_features)
        Input data.
    y : array, shape: (n_samples)
        Input targets.
    positive : boolean (default=False)
        Restrict coefficients to be >= 0.
        When using this option together with method 'lasso' the model
        coefficients will not converge to the ordinary-least-squares solution
        for small values of alpha (nor will they when using method 'lar').
        Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
        0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
        algorithm are typically in congruence with the solution of the
        coordinate descent lasso_path function.
    max_iter : integer, optional (default=500)
        Maximum number of iterations to perform, set to infinity for no limit.
    Gram : None, 'auto', array, shape: (n_features, n_features), optional
        Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
        matrix is precomputed from the given X, if there are more samples
        than features.
    alpha_min : float, optional (default=0)
        Minimum correlation along the path. It corresponds to the
        regularization parameter alpha parameter in the Lasso.
    method : {'lar', 'lasso'}, optional (default='lar')
        Specifies the returned model. Select ``'lar'`` for Least Angle
        Regression, ``'lasso'`` for the Lasso.
    eps : float, optional (default=``np.finfo(np.float).eps``)
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems.
    copy_X : bool, optional (default=True)
        If ``False``, ``X`` is overwritten.
    copy_Gram : bool, optional (default=True)
        If ``False``, ``Gram`` is overwritten.
    verbose : int (default=0)
        Controls output verbosity.
    return_path : bool, optional (default=True)
        If ``return_path==True`` returns the entire path, else returns only the
        last point of the path.
    return_n_iter : bool, optional (default=False)
        Whether to return the number of iterations.
    Returns
    --------
    alphas : array, shape: [n_alphas + 1]
        Maximum of covariances (in absolute value) at each iteration.
        ``n_alphas`` is either ``max_iter``, ``n_features`` or the
        number of nodes in the path with ``alpha >= alpha_min``, whichever
        is smaller.
    active : array, shape [n_alphas]
        Indices of active variables at the end of the path.
    coefs : array, shape (n_features, n_alphas + 1)
        Coefficients along the path
    n_iter : int
        Number of iterations run. Returned only if return_n_iter is set
        to True.
    See also
    --------
    lasso_path
    LassoLars
    Lars
    LassoLarsCV
    LarsCV
    sklearn.decomposition.sparse_encode
    References
    ----------
    .. [1] "Least Angle Regression", Effron et al.
           http://www-stat.stanford.edu/~tibs/ftp/lars.pdf
    .. [2] `Wikipedia entry on the Least-angle regression
           <http://en.wikipedia.org/wiki/Least-angle_regression>`_
    .. [3] `Wikipedia entry on the Lasso
           <http://en.wikipedia.org/wiki/Lasso_(statistics)#Lasso_method>`_
    """
    n_features = X.shape[1]
    n_samples = y.size
    max_features = min(max_iter, n_features)
    if return_path:
        coefs = np.zeros((max_features + 1, n_features))
        alphas = np.zeros(max_features + 1)
    else:
        coef, prev_coef = np.zeros(n_features), np.zeros(n_features)
        alpha, prev_alpha = np.array([0.]), np.array([0.])  # better ideas?
    n_iter, n_active = 0, 0
    active, indices = list(), np.arange(n_features)
    # holds the sign of covariance
    sign_active = np.empty(max_features, dtype=np.int8)
    drop = False
    # will hold the cholesky factorization. Only lower part is
    # referenced.
    # We initialize this to zeros rather than empty, because it is
    # passed to scipy linalg functions; if it contained NaNs, even in
    # the unused upper part, errors would be raised.
    # Once we support only scipy > 0.12 we can use check_finite=False and
    # go back to "empty"
    L = np.zeros((max_features, max_features), dtype=X.dtype)
    swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (X,))
    solve_cholesky, = get_lapack_funcs(('potrs',), (X,))
    if Gram is None:
        if copy_X:
            # force copy. setting the array to be fortran-ordered
            # speeds up the calculation of the (partial) Gram matrix
            # and allows to easily swap columns
            X = X.copy('F')
    elif Gram == 'auto':
        Gram = None
        if X.shape[0] > X.shape[1]:
            Gram = np.dot(X.T, X)
    elif copy_Gram:
        Gram = Gram.copy()
    if Xy is None:
        Cov = np.dot(X.T, y)
    else:
        Cov = Xy.copy()
    if verbose:
        if verbose > 1:
            print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC")
        else:
            sys.stdout.write('.')
            sys.stdout.flush()
    tiny = np.finfo(np.float).tiny  # to avoid division by 0 warning
    tiny32 = np.finfo(np.float32).tiny  # to avoid division by 0 warning
    equality_tolerance = np.finfo(np.float32).eps
    while True:
        if Cov.size:
            if positive:
                C_idx = np.argmax(Cov)
            else:
                C_idx = np.argmax(np.abs(Cov))
            C_ = Cov[C_idx]
            if positive:
                C = C_
            else:
                C = np.fabs(C_)
        else:
            C = 0.
        if return_path:
            alpha = alphas[n_iter, np.newaxis]
            coef = coefs[n_iter]
            prev_alpha = alphas[n_iter - 1, np.newaxis]
            prev_coef = coefs[n_iter - 1]
        alpha[0] = C / n_samples
        if alpha[0] <= alpha_min + equality_tolerance:  # early stopping
            if abs(alpha[0] - alpha_min) > equality_tolerance:
                # interpolation factor 0 <= ss < 1
                if n_iter > 0:
                    # In the first iteration, all alphas are zero, the formula
                    # below would make ss a NaN
                    ss = ((prev_alpha[0] - alpha_min) /
                          (prev_alpha[0] - alpha[0]))
                    coef[:] = prev_coef + ss * (coef - prev_coef)
                alpha[0] = alpha_min
            if return_path:
                coefs[n_iter] = coef
            break
        if n_iter >= max_iter or n_active >= n_features:
            break
        if not drop:
            ##########################################################
            # Append x_j to the Cholesky factorization of (Xa * Xa') #
            #                                                        #
            #            ( L   0 )                                   #
            #     L  ->  (       )  , where L * w = Xa' x_j          #
            #            ( w   z )    and z = ||x_j||                #
            #                                                        #
            ##########################################################
            if positive:
                sign_active[n_active] = np.ones_like(C_)
            else:
                sign_active[n_active] = np.sign(C_)
            m, n = n_active, C_idx + n_active
            Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
            indices[n], indices[m] = indices[m], indices[n]
            Cov_not_shortened = Cov
            Cov = Cov[1:]  # remove Cov[0]
            if Gram is None:
                X.T[n], X.T[m] = swap(X.T[n], X.T[m])
                c = nrm2(X.T[n_active]) ** 2
                L[n_active, :n_active] = \
                    np.dot(X.T[n_active], X.T[:n_active].T)
            else:
                # swap only works in place if the matrix is
                # Fortran-contiguous ...
                Gram[m], Gram[n] = swap(Gram[m], Gram[n])
                Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n])
                c = Gram[n_active, n_active]
                L[n_active, :n_active] = Gram[n_active, :n_active]
            # Update the cholesky decomposition for the Gram matrix
            if n_active:
                linalg.solve_triangular(L[:n_active, :n_active],
                                        L[n_active, :n_active],
                                        trans=0, lower=1,
                                        overwrite_b=True,
                                        **solve_triangular_args)
            v = np.dot(L[n_active, :n_active], L[n_active, :n_active])
            diag = max(np.sqrt(np.abs(c - v)), eps)
            L[n_active, n_active] = diag
            if diag < 1e-7:
                # The system is becoming too ill-conditioned.
                # We have degenerate vectors in our active set.
                # We'll 'drop for good' the last regressor added.
                # Note: this case is very rare. It is no longer triggered by the
                # test suite. The `equality_tolerance` margin added in 0.16.0 to
                # get early stopping to work consistently on all versions of
                # Python including 32 bit Python under Windows seems to make it
                # very difficult to trigger the 'drop for good' strategy.
                warnings.warn('Regressors in active set degenerate. '
                              'Dropping a regressor, after %i iterations, '
                              'i.e. alpha=%.3e, '
                              'with an active set of %i regressors, and '
                              'the smallest cholesky pivot element being %.3e'
                              % (n_iter, alpha, n_active, diag),
                              ConvergenceWarning)
                # XXX: need to figure a 'drop for good' way
                Cov = Cov_not_shortened
                Cov[0] = 0
                Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
                continue
            active.append(indices[n_active])
            n_active += 1
            if verbose > 1:
                print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], '',
                                                      n_active, C))
        if method == 'lasso' and n_iter > 0 and prev_alpha[0] < alpha[0]:
            # alpha is increasing. This is because the updates of Cov are
            # bringing in numerical error that is greater than the
            # remaining correlation with the regressors. Time to bail out.
            warnings.warn('Early stopping the lars path, as the residues '
                          'are small and the current value of alpha is no '
                          'longer well controlled. %i iterations, alpha=%.3e, '
                          'previous alpha=%.3e, with an active set of %i '
                          'regressors.'
                          % (n_iter, alpha, prev_alpha, n_active),
                          ConvergenceWarning)
            break
        # least squares solution
        least_squares, info = solve_cholesky(L[:n_active, :n_active],
                                             sign_active[:n_active],
                                             lower=True)
        if least_squares.size == 1 and least_squares == 0:
            # This happens because sign_active[:n_active] = 0
            least_squares[...] = 1
            AA = 1.
        else:
            # is this really needed ?
            AA = 1. / np.sqrt(np.sum(least_squares * sign_active[:n_active]))
            if not np.isfinite(AA):
                # L is too ill-conditioned
                i = 0
                L_ = L[:n_active, :n_active].copy()
                while not np.isfinite(AA):
                    L_.flat[::n_active + 1] += (2 ** i) * eps
                    least_squares, info = solve_cholesky(
                        L_, sign_active[:n_active], lower=True)
                    tmp = max(np.sum(least_squares * sign_active[:n_active]),
                              eps)
                    AA = 1. / np.sqrt(tmp)
                    i += 1
            least_squares *= AA
        if Gram is None:
            # equiangular direction of variables in the active set
            eq_dir = np.dot(X.T[:n_active].T, least_squares)
            # correlation between each inactive variable and the
            # equiangular vector
            corr_eq_dir = np.dot(X.T[n_active:], eq_dir)
        else:
            # with a huge number of features this takes ~50% of the time;
            # it could probably be avoided by updating it using an
            # orthogonal (QR) decomposition of X
            corr_eq_dir = np.dot(Gram[:n_active, n_active:].T,
                                 least_squares)
        g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny))
        if positive:
            gamma_ = min(g1, C / AA)
        else:
            g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny))
            gamma_ = min(g1, g2, C / AA)
        # TODO: better names for these variables: z
        drop = False
        z = -coef[active] / (least_squares + tiny32)
        z_pos = arrayfuncs.min_pos(z)
        if z_pos < gamma_:
            # some coefficients have changed sign
            idx = np.where(z == z_pos)[0][::-1]
            # update the sign, important for LAR
            sign_active[idx] = -sign_active[idx]
            if method == 'lasso':
                gamma_ = z_pos
            drop = True
        n_iter += 1
        if return_path:
            if n_iter >= coefs.shape[0]:
                del coef, alpha, prev_alpha, prev_coef
                # resize the coefs and alphas array
                add_features = 2 * max(1, (max_features - n_active))
                coefs = np.resize(coefs, (n_iter + add_features, n_features))
                alphas = np.resize(alphas, n_iter + add_features)
            coef = coefs[n_iter]
            prev_coef = coefs[n_iter - 1]
            alpha = alphas[n_iter, np.newaxis]
            prev_alpha = alphas[n_iter - 1, np.newaxis]
        else:
            # mimic the effect of incrementing n_iter on the array references
            prev_coef = coef
            prev_alpha[0] = alpha[0]
            coef = np.zeros_like(coef)
        coef[active] = prev_coef[active] + gamma_ * least_squares
        # update correlations
        Cov -= gamma_ * corr_eq_dir
        # See if any coefficient has changed sign
        if drop and method == 'lasso':
            # handle the case when idx is not length of 1
            [arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii) for ii in
                idx]
            n_active -= 1
            m, n = idx, n_active
            # handle the case when idx is not length of 1
            drop_idx = [active.pop(ii) for ii in idx]
            if Gram is None:
                # propagate dropped variable
                for ii in idx:
                    for i in range(ii, n_active):
                        X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1])
                        # keep `indices` in sync with the column swap above
                        indices[i], indices[i + 1] = indices[i + 1], indices[i]
                # TODO: this could be updated
                residual = y - np.dot(X[:, :n_active], coef[active])
                temp = np.dot(X.T[n_active], residual)
                Cov = np.r_[temp, Cov]
            else:
                for ii in idx:
                    for i in range(ii, n_active):
                        indices[i], indices[i + 1] = indices[i + 1], indices[i]
                        Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i + 1])
                        Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i],
                                                          Gram[:, i + 1])
                # Cov_n = Cov_j + x_j * X + increment(betas) TODO:
                # will this still work with multiple drops ?
                # recompute covariance. Probably could be done better
                # wrong as Xy is not swapped with the rest of variables
                # TODO: this could be updated
                residual = y - np.dot(X, coef)
                temp = np.dot(X.T[drop_idx], residual)
                Cov = np.r_[temp, Cov]
            sign_active = np.delete(sign_active, idx)
            sign_active = np.append(sign_active, 0.)  # just to maintain size
            if verbose > 1:
                print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, '', drop_idx,
                                                      n_active, abs(temp)))
    if return_path:
        # resize coefs in case of early stop
        alphas = alphas[:n_iter + 1]
        coefs = coefs[:n_iter + 1]
        if return_n_iter:
            return alphas, active, coefs.T, n_iter
        else:
            return alphas, active, coefs.T
    else:
        if return_n_iter:
            return alpha, active, coef, n_iter
        else:
            return alpha, active, coef
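# A minimal usage sketch of lars_path (hypothetical helper and data, not part
# of the public API): build a small random regression problem and trace the
# Lasso path with the function defined above.
def _lars_path_usage_sketch():
    rng = np.random.RandomState(0)
    X_demo = rng.randn(50, 10)
    y_demo = X_demo[:, 0] - 2. * X_demo[:, 3] + 0.01 * rng.randn(50)
    alphas, active, coefs = lars_path(X_demo, y_demo, method='lasso')
    # coefs has shape (n_features, n_alphas + 1); each column is the
    # coefficient vector at the corresponding alpha along the path.
    return alphas, active, coefs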
###############################################################################
# Estimator classes
class Lars(LinearModel, RegressorMixin):
    """Least Angle Regression model a.k.a. LAR
    Read more in the :ref:`User Guide <least_angle_regression>`.
    Parameters
    ----------
    n_nonzero_coefs : int, optional
        Target number of non-zero coefficients. Use ``np.inf`` for no limit.
    fit_intercept : boolean
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    positive : boolean (default=False)
        Restrict coefficients to be >= 0. Be aware that you might want to
        remove fit_intercept which is set True by default.
    verbose : boolean or integer, optional
        Sets the verbosity amount
    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Unlike the ``tol`` parameter in some iterative
        optimization-based algorithms, this parameter does not control
        the tolerance of the optimization.
    fit_path : boolean
        If True the full path is stored in the ``coef_path_`` attribute.
        If you compute the solution for a large problem or many targets,
        setting ``fit_path`` to ``False`` will lead to a speedup, especially
        with a small alpha.
    Attributes
    ----------
    alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
        Maximum of covariances (in absolute value) at each iteration. \
        ``n_alphas`` is either ``n_nonzero_coefs`` or ``n_features``, \
        whichever is smaller.
    active_ : list, length = n_alphas | list of n_targets such lists
        Indices of active variables at the end of the path.
    coef_path_ : array, shape (n_features, n_alphas + 1) \
        | list of n_targets such arrays
        The varying values of the coefficients along the path. It is not
        present if the ``fit_path`` parameter is ``False``.
    coef_ : array, shape (n_features,) or (n_targets, n_features)
        Parameter vector (w in the formulation formula).
    intercept_ : float | array, shape (n_targets,)
        Independent term in decision function.
    n_iter_ : array-like or int
        The number of iterations taken by lars_path to find the
        grid of alphas for each target.
    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.Lars(n_nonzero_coefs=1)
    >>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
    ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    Lars(copy_X=True, eps=..., fit_intercept=True, fit_path=True,
       n_nonzero_coefs=1, normalize=True, positive=False, precompute='auto',
       verbose=False)
    >>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    [ 0. -1.11...]
    See also
    --------
    lars_path, LarsCV
    sklearn.decomposition.sparse_encode
    """
    def __init__(self, fit_intercept=True, verbose=False, normalize=True,
                 precompute='auto', n_nonzero_coefs=500,
                 eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,
                 positive=False):
        self.fit_intercept = fit_intercept
        self.verbose = verbose
        self.normalize = normalize
        self.method = 'lar'
        self.precompute = precompute
        self.n_nonzero_coefs = n_nonzero_coefs
        self.positive = positive
        self.eps = eps
        self.copy_X = copy_X
        self.fit_path = fit_path
    def _get_gram(self):
        # precompute if n_samples > n_features
        precompute = self.precompute
        if hasattr(precompute, '__array__'):
            Gram = precompute
        elif precompute == 'auto':
            Gram = 'auto'
        else:
            Gram = None
        return Gram
    def fit(self, X, y, Xy=None):
        """Fit the model using X, y as training data.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.
        y : array-like, shape (n_samples,) or (n_samples, n_targets)
            Target values.
        Xy : array-like, shape (n_samples,) or (n_samples, n_targets), \
                optional
            Xy = np.dot(X.T, y) that can be precomputed. It is useful
            only when the Gram matrix is precomputed.
        Returns
        -------
        self : object
            returns an instance of self.
        """
        X, y = check_X_y(X, y, y_numeric=True, multi_output=True)
        n_features = X.shape[1]
        X, y, X_mean, y_mean, X_std = self._center_data(X, y,
                                                        self.fit_intercept,
                                                        self.normalize,
                                                        self.copy_X)
        if y.ndim == 1:
            y = y[:, np.newaxis]
        n_targets = y.shape[1]
        alpha = getattr(self, 'alpha', 0.)
        if hasattr(self, 'n_nonzero_coefs'):
            alpha = 0.  # n_nonzero_coefs parametrization takes priority
            max_iter = self.n_nonzero_coefs
        else:
            max_iter = self.max_iter
        precompute = self.precompute
        if not hasattr(precompute, '__array__') and (
                precompute is True or
                (precompute == 'auto' and X.shape[0] > X.shape[1]) or
                (precompute == 'auto' and y.shape[1] > 1)):
            Gram = np.dot(X.T, X)
        else:
            Gram = self._get_gram()
        self.alphas_ = []
        self.n_iter_ = []
        if self.fit_path:
            self.coef_ = []
            self.active_ = []
            self.coef_path_ = []
            for k in xrange(n_targets):
                this_Xy = None if Xy is None else Xy[:, k]
                alphas, active, coef_path, n_iter_ = lars_path(
                    X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
                    copy_Gram=True, alpha_min=alpha, method=self.method,
                    verbose=max(0, self.verbose - 1), max_iter=max_iter,
                    eps=self.eps, return_path=True,
                    return_n_iter=True, positive=self.positive)
                self.alphas_.append(alphas)
                self.active_.append(active)
                self.n_iter_.append(n_iter_)
                self.coef_path_.append(coef_path)
                self.coef_.append(coef_path[:, -1])
            if n_targets == 1:
                self.alphas_, self.active_, self.coef_path_, self.coef_ = [
                    a[0] for a in (self.alphas_, self.active_, self.coef_path_,
                                   self.coef_)]
                self.n_iter_ = self.n_iter_[0]
        else:
            self.coef_ = np.empty((n_targets, n_features))
            for k in xrange(n_targets):
                this_Xy = None if Xy is None else Xy[:, k]
                alphas, _, self.coef_[k], n_iter_ = lars_path(
                    X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
                    copy_Gram=True, alpha_min=alpha, method=self.method,
                    verbose=max(0, self.verbose - 1), max_iter=max_iter,
                    eps=self.eps, return_path=False, return_n_iter=True,
                    positive=self.positive)
                self.alphas_.append(alphas)
                self.n_iter_.append(n_iter_)
            if n_targets == 1:
                self.alphas_ = self.alphas_[0]
                self.n_iter_ = self.n_iter_[0]
        self._set_intercept(X_mean, y_mean, X_std)
        return self
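# Illustrative sketch (hypothetical helper, not part of the module API): how
# the precompute='auto' branch in Lars.fit above typically resolves when
# there are more samples than features.
def _demo_precompute_auto():
    rng = np.random.RandomState(0)
    X_demo = rng.randn(100, 5)  # n_samples > n_features, so Gram = np.dot(X.T, X)
    y_demo = rng.randn(100)
    model = Lars(n_nonzero_coefs=3, precompute='auto')
    model.fit(X_demo, y_demo)
    return model.coef_, model.alphas_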
class LassoLars(Lars):
    """Lasso model fit with Least Angle Regression a.k.a. Lars
    It is a Linear Model trained with an L1 prior as regularizer.
    The optimization objective for Lasso is::
    (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
    Read more in the :ref:`User Guide <least_angle_regression>`.
    Parameters
    ----------
    alpha : float
        Constant that multiplies the penalty term. Defaults to 1.0.
        ``alpha = 0`` is equivalent to an ordinary least square, solved
        by :class:`LinearRegression`. For numerical reasons, using
        ``alpha = 0`` with the LassoLars object is not advised and you
        should prefer the LinearRegression object.
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    positive : boolean (default=False)
        Restrict coefficients to be >= 0. Be aware that you might want to
        remove fit_intercept which is set True by default.
        Under the positive restriction the model coefficients will not converge
        to the ordinary-least-squares solution for small values of alpha.
        Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
        0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
        algorithm are typically in congruence with the solution of the
        coordinate descent Lasso estimator.
    verbose : boolean or integer, optional
        Sets the verbosity amount
    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.
    max_iter : integer, optional
        Maximum number of iterations to perform.
    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Unlike the ``tol`` parameter in some iterative
        optimization-based algorithms, this parameter does not control
        the tolerance of the optimization.
    fit_path : boolean
        If ``True`` the full path is stored in the ``coef_path_`` attribute.
        If you compute the solution for a large problem or many targets,
        setting ``fit_path`` to ``False`` will lead to a speedup, especially
        with a small alpha.
    Attributes
    ----------
    alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
        Maximum of covariances (in absolute value) at each iteration. \
        ``n_alphas`` is either ``max_iter``, ``n_features``, or the number of \
        nodes in the path with correlation greater than ``alpha``, whichever \
        is smaller.
    active_ : list, length = n_alphas | list of n_targets such lists
        Indices of active variables at the end of the path.
    coef_path_ : array, shape (n_features, n_alphas + 1) or list
        If a list is passed it's expected to be one of n_targets such arrays.
        The varying values of the coefficients along the path. It is not
        present if the ``fit_path`` parameter is ``False``.
    coef_ : array, shape (n_features,) or (n_targets, n_features)
        Parameter vector (w in the formulation formula).
    intercept_ : float | array, shape (n_targets,)
        Independent term in decision function.
    n_iter_ : array-like or int.
        The number of iterations taken by lars_path to find the
        grid of alphas for each target.
    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.LassoLars(alpha=0.01)
    >>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1])
    ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    LassoLars(alpha=0.01, copy_X=True, eps=..., fit_intercept=True,
         fit_path=True, max_iter=500, normalize=True, positive=False,
         precompute='auto', verbose=False)
    >>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    [ 0.         -0.963257...]
    See also
    --------
    lars_path
    lasso_path
    Lasso
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode
    """
    def __init__(self, alpha=1.0, fit_intercept=True, verbose=False,
                 normalize=True, precompute='auto', max_iter=500,
                 eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,
                 positive=False):
        self.alpha = alpha
        self.fit_intercept = fit_intercept
        self.max_iter = max_iter
        self.verbose = verbose
        self.normalize = normalize
        self.method = 'lasso'
        self.positive = positive
        self.precompute = precompute
        self.copy_X = copy_X
        self.eps = eps
        self.fit_path = fit_path
###############################################################################
# Cross-validated estimator classes
def _check_copy_and_writeable(array, copy=False):
    if copy or not array.flags.writeable:
        return array.copy()
    return array
def _lars_path_residues(X_train, y_train, X_test, y_test, Gram=None,
                        copy=True, method='lars', verbose=False,
                        fit_intercept=True, normalize=True, max_iter=500,
                        eps=np.finfo(np.float).eps, positive=False):
    """Compute the residues on left-out data for a full LARS path
    Parameters
    -----------
    X_train : array, shape (n_samples, n_features)
        The data to fit the LARS on
    y_train : array, shape (n_samples)
        The target variable to fit LARS on
    X_test : array, shape (n_samples, n_features)
        The data to compute the residues on
    y_test : array, shape (n_samples)
        The target variable to compute the residues on
    Gram : None, 'auto', array, shape: (n_features, n_features), optional
        Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
        matrix is precomputed from the given X, if there are more samples
        than features
    copy : boolean, optional
        Whether X_train, X_test, y_train and y_test should be copied;
        if False, they may be overwritten.
    method : 'lar' | 'lasso'
        Specifies the returned model. Select ``'lar'`` for Least Angle
        Regression, ``'lasso'`` for the Lasso.
    verbose : integer, optional
        Sets the amount of verbosity
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    positive : boolean (default=False)
        Restrict coefficients to be >= 0. Be aware that you might want to
        remove fit_intercept which is set True by default.
        See reservations for using this option in combination with method
        'lasso' for expected small values of alpha in the doc of LassoLarsCV
        and LassoLarsIC.
    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.
    max_iter : integer, optional
        Maximum number of iterations to perform.
    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Unlike the ``tol`` parameter in some iterative
        optimization-based algorithms, this parameter does not control
        the tolerance of the optimization.
    Returns
    --------
    alphas : array, shape (n_alphas,)
        Maximum of covariances (in absolute value) at each iteration.
        ``n_alphas`` is either ``max_iter`` or ``n_features``, whichever
        is smaller.
    active : list
        Indices of active variables at the end of the path.
    coefs : array, shape (n_features, n_alphas)
        Coefficients along the path
    residues : array, shape (n_alphas, n_samples)
        Residues of the prediction on the test data
    """
    X_train = _check_copy_and_writeable(X_train, copy)
    y_train = _check_copy_and_writeable(y_train, copy)
    X_test = _check_copy_and_writeable(X_test, copy)
    y_test = _check_copy_and_writeable(y_test, copy)
    if fit_intercept:
        X_mean = X_train.mean(axis=0)
        X_train -= X_mean
        X_test -= X_mean
        y_mean = y_train.mean(axis=0)
        y_train = as_float_array(y_train, copy=False)
        y_train -= y_mean
        y_test = as_float_array(y_test, copy=False)
        y_test -= y_mean
    if normalize:
        norms = np.sqrt(np.sum(X_train ** 2, axis=0))
        nonzeros = np.flatnonzero(norms)
        X_train[:, nonzeros] /= norms[nonzeros]
    alphas, active, coefs = lars_path(
        X_train, y_train, Gram=Gram, copy_X=False, copy_Gram=False,
        method=method, verbose=max(0, verbose - 1), max_iter=max_iter, eps=eps,
        positive=positive)
    if normalize:
        coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
    residues = np.dot(X_test, coefs) - y_test[:, np.newaxis]
    return alphas, active, coefs, residues.T
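# Illustrative sketch (hypothetical data and split): what _lars_path_residues
# returns for a single train/test split; residues carries one row per alpha
# on the path, as described in the docstring above.
def _demo_path_residues():
    rng = np.random.RandomState(0)
    X_demo = rng.randn(40, 6)
    y_demo = X_demo[:, 0] + 0.1 * rng.randn(40)
    alphas, active, coefs, residues = _lars_path_residues(
        X_demo[:30], y_demo[:30], X_demo[30:], y_demo[30:], method='lasso')
    return alphas.shape, coefs.shape, residues.shape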
class LarsCV(Lars):
    """Cross-validated Least Angle Regression model
    Read more in the :ref:`User Guide <least_angle_regression>`.
    Parameters
    ----------
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    positive : boolean (default=False)
        Restrict coefficients to be >= 0. Be aware that you might want to
        remove fit_intercept which is set True by default.
    verbose : boolean or integer, optional
        Sets the verbosity amount
    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.
    max_iter : integer, optional
        Maximum number of iterations to perform.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
          - None, to use the default 3-fold cross-validation,
          - integer, to specify the number of folds.
          - An object to be used as a cross-validation generator.
          - An iterable yielding train/test splits.
        For integer/None inputs, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    max_n_alphas : integer, optional
        The maximum number of points on the path used to compute the
        residuals in the cross-validation
    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs
    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems.
    Attributes
    ----------
    coef_ : array, shape (n_features,)
        parameter vector (w in the formulation formula)
    intercept_ : float
        independent term in decision function
    coef_path_ : array, shape (n_features, n_alphas)
        the varying values of the coefficients along the path
    alpha_ : float
        the estimated regularization parameter alpha
    alphas_ : array, shape (n_alphas,)
        the different values of alpha along the path
    cv_alphas_ : array, shape (n_cv_alphas,)
        all the values of alpha along the path for the different folds
    cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
        the mean square error on left-out for each fold along the path
        (alpha values given by ``cv_alphas``)
    n_iter_ : array-like or int
        the number of iterations run by Lars with the optimal alpha.
    See also
    --------
    lars_path, LassoLars, LassoLarsCV
    """
    method = 'lar'
    def __init__(self, fit_intercept=True, verbose=False, max_iter=500,
                 normalize=True, precompute='auto', cv=None,
                 max_n_alphas=1000, n_jobs=1, eps=np.finfo(np.float).eps,
                 copy_X=True, positive=False):
        self.fit_intercept = fit_intercept
        self.positive = positive
        self.max_iter = max_iter
        self.verbose = verbose
        self.normalize = normalize
        self.precompute = precompute
        self.copy_X = copy_X
        self.cv = cv
        self.max_n_alphas = max_n_alphas
        self.n_jobs = n_jobs
        self.eps = eps
    def fit(self, X, y):
        """Fit the model using X, y as training data.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.
        y : array-like, shape (n_samples,)
            Target values.
        Returns
        -------
        self : object
            returns an instance of self.
        """
        self.fit_path = True
        X, y = check_X_y(X, y, y_numeric=True)
        # init cross-validation generator
        cv = check_cv(self.cv, X, y, classifier=False)
        Gram = 'auto' if self.precompute else None
        cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
            delayed(_lars_path_residues)(
                X[train], y[train], X[test], y[test], Gram=Gram, copy=False,
                method=self.method, verbose=max(0, self.verbose - 1),
                normalize=self.normalize, fit_intercept=self.fit_intercept,
                max_iter=self.max_iter, eps=self.eps, positive=self.positive)
            for train, test in cv)
        all_alphas = np.concatenate(list(zip(*cv_paths))[0])
        # Unique also sorts
        all_alphas = np.unique(all_alphas)
        # Take at most max_n_alphas values
        stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas))))
        all_alphas = all_alphas[::stride]
        mse_path = np.empty((len(all_alphas), len(cv_paths)))
        for index, (alphas, active, coefs, residues) in enumerate(cv_paths):
            alphas = alphas[::-1]
            residues = residues[::-1]
            if alphas[0] != 0:
                alphas = np.r_[0, alphas]
                residues = np.r_[residues[0, np.newaxis], residues]
            if alphas[-1] != all_alphas[-1]:
                alphas = np.r_[alphas, all_alphas[-1]]
                residues = np.r_[residues, residues[-1, np.newaxis]]
            this_residues = interpolate.interp1d(alphas,
                                                 residues,
                                                 axis=0)(all_alphas)
            this_residues **= 2
            mse_path[:, index] = np.mean(this_residues, axis=-1)
        mask = np.all(np.isfinite(mse_path), axis=-1)
        all_alphas = all_alphas[mask]
        mse_path = mse_path[mask]
        # Select the alpha that minimizes left-out error
        i_best_alpha = np.argmin(mse_path.mean(axis=-1))
        best_alpha = all_alphas[i_best_alpha]
        # Store our parameters
        self.alpha_ = best_alpha
        self.cv_alphas_ = all_alphas
        self.cv_mse_path_ = mse_path
        # Now compute the full model
        # it will call a lasso internally when self is LassoLarsCV
        # as self.method == 'lasso'
        Lars.fit(self, X, y)
        return self
    @property
    def alpha(self):
        # impedance matching for the above Lars.fit (should not be documented)
        return self.alpha_
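# Illustrative usage sketch of LarsCV (hypothetical data; cv=3 requests
# 3-fold cross-validation as documented above).
def _demo_lars_cv():
    rng = np.random.RandomState(0)
    X_demo = rng.randn(60, 8)
    y_demo = X_demo[:, 1] + 0.5 * X_demo[:, 4] + 0.01 * rng.randn(60)
    model = LarsCV(cv=3).fit(X_demo, y_demo)
    # alpha_ is the alpha minimising the mean left-out squared error over
    # the cv_alphas_ grid assembled from the per-fold paths.
    return model.alpha_, model.cv_mse_path_.shape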
class LassoLarsCV(LarsCV):
    """Cross-validated Lasso, using the LARS algorithm
    The optimization objective for Lasso is::
    (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
    Read more in the :ref:`User Guide <least_angle_regression>`.
    Parameters
    ----------
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    positive : boolean (default=False)
        Restrict coefficients to be >= 0. Be aware that you might want to
        remove fit_intercept which is set True by default.
        Under the positive restriction the model coefficients do not converge
        to the ordinary-least-squares solution for small values of alpha.
        Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
        0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
        algorithm are typically in congruence with the solution of the
        coordinate descent Lasso estimator.
        As a consequence using LassoLarsCV only makes sense for problems where
        a sparse solution is expected and/or reached.
    verbose : boolean or integer, optional
        Sets the verbosity amount
    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.
    max_iter : integer, optional
        Maximum number of iterations to perform.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
          - None, to use the default 3-fold cross-validation,
          - integer, to specify the number of folds.
          - An object to be used as a cross-validation generator.
          - An iterable yielding train/test splits.
        For integer/None inputs, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    max_n_alphas : integer, optional
        The maximum number of points on the path used to compute the
        residuals in the cross-validation
    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs
    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems.
    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.
    Attributes
    ----------
    coef_ : array, shape (n_features,)
        parameter vector (w in the formulation formula)
    intercept_ : float
        independent term in decision function.
    coef_path_ : array, shape (n_features, n_alphas)
        the varying values of the coefficients along the path
    alpha_ : float
        the estimated regularization parameter alpha
    alphas_ : array, shape (n_alphas,)
        the different values of alpha along the path
    cv_alphas_ : array, shape (n_cv_alphas,)
        all the values of alpha along the path for the different folds
    cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
        the mean square error on left-out for each fold along the path
        (alpha values given by ``cv_alphas``)
    n_iter_ : array-like or int
        the number of iterations run by Lars with the optimal alpha.
    Notes
    -----
    The object solves the same problem as the LassoCV object. However,
    unlike the LassoCV, it finds the relevant alpha values by itself.
    In general, because of this property, it will be more stable.
    However, it is more fragile to heavily multicollinear datasets.
    It is more efficient than the LassoCV if only a small number of
    features are selected compared to the total number, for instance if
    there are very few samples compared to the number of features.
    See also
    --------
    lars_path, LassoLars, LarsCV, LassoCV
    """
    method = 'lasso'
class LassoLarsIC(LassoLars):
    """Lasso model fit with Lars using BIC or AIC for model selection
    The optimization objective for Lasso is::
    (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
    AIC is the Akaike information criterion and BIC is the Bayes
    Information criterion. Such criteria are useful to select the value
    of the regularization parameter by making a trade-off between the
    goodness of fit and the complexity of the model. A good model should
    explain well the data while being simple.
    Read more in the :ref:`User Guide <least_angle_regression>`.
    Parameters
    ----------
    criterion : 'bic' | 'aic'
        The type of criterion to use.
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    positive : boolean (default=False)
        Restrict coefficients to be >= 0. Be aware that you might want to
        remove fit_intercept which is set True by default.
        Under the positive restriction the model coefficients do not converge
        to the ordinary-least-squares solution for small values of alpha.
        Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
        0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
        algorithm are typically in congruence with the solution of the
        coordinate descent Lasso estimator.
        As a consequence using LassoLarsIC only makes sense for problems where
        a sparse solution is expected and/or reached.
    verbose : boolean or integer, optional
        Sets the verbosity amount
    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.
    max_iter : integer, optional
        Maximum number of iterations to perform. Can be used for
        early stopping.
    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Unlike the ``tol`` parameter in some iterative
        optimization-based algorithms, this parameter does not control
        the tolerance of the optimization.
    Attributes
    ----------
    coef_ : array, shape (n_features,)
        parameter vector (w in the formulation formula)
    intercept_ : float
        independent term in decision function.
    alpha_ : float
        the alpha parameter chosen by the information criterion
    n_iter_ : int
        number of iterations run by lars_path to find the grid of
        alphas.
    criterion_ : array, shape (n_alphas,)
        The value of the information criterion ('aic', 'bic') across all
        alphas. The alpha which has the smallest information criterion
        is chosen.
    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.LassoLarsIC(criterion='bic')
    >>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
    ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    LassoLarsIC(copy_X=True, criterion='bic', eps=..., fit_intercept=True,
          max_iter=500, normalize=True, positive=False, precompute='auto',
          verbose=False)
    >>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    [ 0.  -1.11...]
    Notes
    -----
    The estimation of the number of degrees of freedom is given by:
    "On the degrees of freedom of the lasso"
    Hui Zou, Trevor Hastie, and Robert Tibshirani
    Ann. Statist. Volume 35, Number 5 (2007), 2173-2192.
    http://en.wikipedia.org/wiki/Akaike_information_criterion
    http://en.wikipedia.org/wiki/Bayesian_information_criterion
    See also
    --------
    lars_path, LassoLars, LassoLarsCV
    """
    def __init__(self, criterion='aic', fit_intercept=True, verbose=False,
                 normalize=True, precompute='auto', max_iter=500,
                 eps=np.finfo(np.float).eps, copy_X=True, positive=False):
        self.criterion = criterion
        self.fit_intercept = fit_intercept
        self.positive = positive
        self.max_iter = max_iter
        self.verbose = verbose
        self.normalize = normalize
        self.copy_X = copy_X
        self.precompute = precompute
        self.eps = eps
    def fit(self, X, y, copy_X=True):
        """Fit the model using X, y as training data.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            training data.
        y : array-like, shape (n_samples,)
            target values.
        copy_X : boolean, optional, default True
            If ``True``, X will be copied; else, it may be overwritten.
        Returns
        -------
        self : object
            returns an instance of self.
        """
        self.fit_path = True
        X, y = check_X_y(X, y, y_numeric=True)
        X, y, Xmean, ymean, Xstd = LinearModel._center_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X)
        max_iter = self.max_iter
        Gram = self._get_gram()
        alphas_, active_, coef_path_, self.n_iter_ = lars_path(
            X, y, Gram=Gram, copy_X=copy_X, copy_Gram=True, alpha_min=0.0,
            method='lasso', verbose=self.verbose, max_iter=max_iter,
            eps=self.eps, return_n_iter=True, positive=self.positive)
        n_samples = X.shape[0]
        if self.criterion == 'aic':
            K = 2  # AIC
        elif self.criterion == 'bic':
            K = log(n_samples)  # BIC
        else:
            raise ValueError('criterion should be either bic or aic')
        R = y[:, np.newaxis] - np.dot(X, coef_path_)  # residuals
        mean_squared_error = np.mean(R ** 2, axis=0)
        df = np.zeros(coef_path_.shape[1], dtype=np.int)  # Degrees of freedom
        for k, coef in enumerate(coef_path_.T):
            mask = np.abs(coef) > np.finfo(coef.dtype).eps
            if not np.any(mask):
                continue
            # get the number of degrees of freedom equal to:
            # Xc = X[:, mask]
            # Trace(Xc * inv(Xc.T * Xc) * Xc.T), i.e. the number of non-zero coefs
            df[k] = np.sum(mask)
        self.alphas_ = alphas_
        with np.errstate(divide='ignore'):
            self.criterion_ = n_samples * np.log(mean_squared_error) + K * df
        n_best = np.argmin(self.criterion_)
        self.alpha_ = alphas_[n_best]
        self.coef_ = coef_path_[:, n_best]
        self._set_intercept(Xmean, ymean, Xstd)
        return self
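# Illustrative sketch (hypothetical helper): the criterion computed in
# LassoLarsIC.fit above, written out for a single point of the path. K is 2
# for AIC and log(n_samples) for BIC; df is the number of non-zero
# coefficients used as the degrees-of-freedom estimate.
def _demo_information_criterion(n_samples, mean_squared_error, df,
                                criterion='aic'):
    K = 2. if criterion == 'aic' else log(n_samples)
    return n_samples * np.log(mean_squared_error) + K * df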
 | 
	bsd-3-clause | 
| 
	sdiazpier/nest-simulator | 
	pynest/examples/brunel_alpha_evolution_strategies.py | 
	2 | 
	20701 | 
	# -*- coding: utf-8 -*-
#
# brunel_alpha_evolution_strategies.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST.  If not, see <http://www.gnu.org/licenses/>.
"""Use evolution strategies to find parameters for a random balanced network (alpha synapses)
-----------------------------------------------------------------------------------------------------
This script uses an optimization algorithm to find the appropriate
parameter values for the external drive "eta" and the relative ratio
of excitation and inhibition "g" for a balanced random network that
lead to particular population-averaged rates, coefficients of
variation and correlations.
From an initial Gaussian search distribution parameterized with mean
and standard deviation, network parameters are sampled. Network
realizations of these parameters are simulated and evaluated according
to an objective function that measures how close the activity
statistics are to their desired values (~fitness). From these fitness
values the approximate natural gradient of the fitness landscape is
computed and used to update the parameters of the search
distribution. This procedure is repeated until the maximal number of
function evaluations is reached or the width of the search
distribution becomes extremely small.  We use the following fitness
function:
.. math::
    f = -\alpha (r - r^*)^2 - \beta (cv - cv^*)^2 - \gamma (corr - corr^*)^2
where `alpha`, `beta` and `gamma` are weighting factors, and stars indicate
target values.
The network contains an excitatory and an inhibitory population on
the basis of the network used in [1]_.
The optimization algorithm (evolution strategies) is described in
Wierstra et al. [2]_.
References
~~~~~~~~~~~~
.. [1] Brunel N (2000). Dynamics of Sparsely Connected Networks of
       Excitatory and Inhibitory Spiking Neurons. Journal of Computational
       Neuroscience 8, 183-208.
.. [2] Wierstra et al. (2014). Natural evolution strategies. Journal of
       Machine Learning Research, 15(1), 949-980.
See Also
~~~~~~~~~~
:doc:`brunel_alpha_nest`
Authors
~~~~~~~
Jakob Jordan
"""
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import numpy as np
import scipy.special as sp
import nest
###############################################################################
# Analysis
def cut_warmup_time(spikes, warmup_time):
    # Removes initial warmup time from recorded spikes
    spikes['senders'] = spikes['senders'][
        spikes['times'] > warmup_time]
    spikes['times'] = spikes['times'][
        spikes['times'] > warmup_time]
    return spikes
def compute_rate(spikes, N_rec, sim_time):
    # Computes average rate from recorded spikes
    return (1. * len(spikes['times']) / N_rec / sim_time * 1e3)
def sort_spikes(spikes):
    # Sorts recorded spikes by node ID
    unique_node_ids = sorted(np.unique(spikes['senders']))
    spiketrains = []
    for node_id in unique_node_ids:
        spiketrains.append(spikes['times'][spikes['senders'] == node_id])
    return unique_node_ids, spiketrains
def compute_cv(spiketrains):
    # Computes coefficient of variation from sorted spikes
    if spiketrains:
        isis = np.hstack([np.diff(st) for st in spiketrains])
        if len(isis) > 1:
            return np.std(isis) / np.mean(isis)
        else:
            return 0.
    else:
        return 0.
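# Hedged usage sketch of compute_cv (hypothetical spike times, not simulation
# output): a perfectly regular train has identical inter-spike intervals and
# hence CV ~ 0, while an irregular, Poisson-like train approaches CV ~ 1:
#   compute_cv([np.arange(0., 1000., 10.)])                    # -> 0.0
#   compute_cv([np.sort(np.random.uniform(0., 1000., 1000))])  # -> approx. 1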
def bin_spiketrains(spiketrains, t_min, t_max, t_bin):
    # Bins sorted spikes
    bins = np.arange(t_min, t_max, t_bin)
    return bins, [np.histogram(s, bins=bins)[0] for s in spiketrains]
def compute_correlations(binned_spiketrains):
    # Computes correlations from binned spiketrains
    n = len(binned_spiketrains)
    if n > 1:
        cc = np.corrcoef(binned_spiketrains)
        return 1. / (n * (n - 1.)) * (np.sum(cc) - n)
    else:
        return 0.
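# Hedged sketch (hypothetical binned trains): two identical trains give an
# average pairwise correlation of 1, statistically independent ones a value
# close to 0:
#   compute_correlations([np.array([1, 0, 2, 1]), np.array([1, 0, 2, 1])])  # -> 1.0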
def compute_statistics(parameters, espikes, ispikes):
    # Computes population-averaged rates, coefficients of variation and
    # correlations from recorded spikes of excitatory and inhibitory
    # populations
    espikes = cut_warmup_time(espikes, parameters['warmup_time'])
    ispikes = cut_warmup_time(ispikes, parameters['warmup_time'])
    erate = compute_rate(espikes, parameters['N_rec'], parameters['sim_time'])
    irate = compute_rate(ispikes, parameters['N_rec'], parameters['sim_time'])
    enode_ids, espiketrains = sort_spikes(espikes)
    inode_ids, ispiketrains = sort_spikes(ispikes)
    ecv = compute_cv(espiketrains)
    icv = compute_cv(ispiketrains)
    ecorr = compute_correlations(
        bin_spiketrains(espiketrains, 0., parameters['sim_time'], 1.)[1])
    icorr = compute_correlations(
        bin_spiketrains(ispiketrains, 0., parameters['sim_time'], 1.)[1])
    return (np.mean([erate, irate]),
            np.mean([ecv, icv]),
            np.mean([ecorr, icorr]))
###############################################################################
# Network simulation
def simulate(parameters):
    # Simulates the network and returns recorded spikes for excitatory
    # and inhibitory population
    # Code taken from brunel_alpha_nest.py
    def LambertWm1(x):
        # Using scipy to mimic the gsl_sf_lambert_Wm1 function.
        return sp.lambertw(x, k=-1 if x < 0 else 0).real
    def ComputePSPnorm(tauMem, CMem, tauSyn):
        a = (tauMem / tauSyn)
        b = (1.0 / tauSyn - 1.0 / tauMem)
        # time of maximum
        t_max = 1.0 / b * (-LambertWm1(-np.exp(-1.0 / a) / a) - 1.0 / a)
        # maximum of PSP for current of unit amplitude
        return (np.exp(1.0) / (tauSyn * CMem * b) *
                ((np.exp(-t_max / tauMem) - np.exp(-t_max / tauSyn)) / b -
                 t_max * np.exp(-t_max / tauSyn)))
    # number of excitatory neurons
    NE = int(parameters['gamma'] * parameters['N'])
    # number of inhibitory neurons
    NI = parameters['N'] - NE
    # number of excitatory synapses per neuron
    CE = int(parameters['epsilon'] * NE)
    # number of inhibitory synapses per neuron
    CI = int(parameters['epsilon'] * NI)
    tauSyn = 0.5  # synaptic time constant in ms
    tauMem = 20.0  # time constant of membrane potential in ms
    CMem = 250.0  # membrane capacitance in pF
    theta = 20.0  # membrane threshold potential in mV
    neuron_parameters = {
        'C_m': CMem,
        'tau_m': tauMem,
        'tau_syn_ex': tauSyn,
        'tau_syn_in': tauSyn,
        't_ref': 2.0,
        'E_L': 0.0,
        'V_reset': 0.0,
        'V_m': 0.0,
        'V_th': theta
    }
    J = 0.1        # postsynaptic amplitude in mV
    J_unit = ComputePSPnorm(tauMem, CMem, tauSyn)
    J_ex = J / J_unit  # amplitude of excitatory postsynaptic current
    # amplitude of inhibitory postsynaptic current
    J_in = -parameters['g'] * J_ex
    nu_th = (theta * CMem) / (J_ex * CE * np.exp(1) * tauMem * tauSyn)
    nu_ex = parameters['eta'] * nu_th
    p_rate = 1000.0 * nu_ex * CE
    nest.ResetKernel()
    nest.set_verbosity('M_FATAL')
    nest.SetKernelStatus({'rng_seed': parameters['seed'],
                          'resolution': parameters['dt']})
    nest.SetDefaults('iaf_psc_alpha', neuron_parameters)
    nest.SetDefaults('poisson_generator', {'rate': p_rate})
    nodes_ex = nest.Create('iaf_psc_alpha', NE)
    nodes_in = nest.Create('iaf_psc_alpha', NI)
    noise = nest.Create('poisson_generator')
    espikes = nest.Create('spike_recorder', params={'label': 'brunel-py-ex'})
    ispikes = nest.Create('spike_recorder', params={'label': 'brunel-py-in'})
    nest.CopyModel('static_synapse', 'excitatory',
                   {'weight': J_ex, 'delay': parameters['delay']})
    nest.CopyModel('static_synapse', 'inhibitory',
                   {'weight': J_in, 'delay': parameters['delay']})
    nest.Connect(noise, nodes_ex, syn_spec='excitatory')
    nest.Connect(noise, nodes_in, syn_spec='excitatory')
    if parameters['N_rec'] > NE:
        raise ValueError(
            'Requested recording from {} neurons, '
            'but only {} in excitatory population'.format(
                parameters['N_rec'], NE))
    if parameters['N_rec'] > NI:
        raise ValueError(
            'Requested recording from {} neurons, '
            'but only {} in inhibitory population'.format(
                parameters['N_rec'], NI))
    nest.Connect(nodes_ex[:parameters['N_rec']], espikes)
    nest.Connect(nodes_in[:parameters['N_rec']], ispikes)
    conn_parameters_ex = {'rule': 'fixed_indegree', 'indegree': CE}
    nest.Connect(nodes_ex, nodes_ex + nodes_in, conn_parameters_ex, 'excitatory')
    conn_parameters_in = {'rule': 'fixed_indegree', 'indegree': CI}
    nest.Connect(nodes_in, nodes_ex + nodes_in, conn_parameters_in, 'inhibitory')
    nest.Simulate(parameters['sim_time'])
    return (espikes.events,
            ispikes.events)
###############################################################################
# Optimization
def default_population_size(dimensions):
    # Returns a population size suited for the given number of dimensions
    # See Wierstra et al. (2014)
    return 4 + int(np.floor(3 * np.log(dimensions)))
def default_learning_rate_mu():
    # Returns a default learning rate for the mean of the search distribution
    # See Wierstra et al. (2014)
    return 1
def default_learning_rate_sigma(dimensions):
    # Returns a default learning rate for the standard deviation of the
    # search distribution for the given number of dimensions
    # See Wierstra et al. (2014)
    return (3 + np.log(dimensions)) / (12. * np.sqrt(dimensions))
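# Quick numeric check (values rounded) for the two-dimensional search space
# (g, eta) optimized below:
#   default_population_size(2)       # -> 4 + floor(3 * ln 2) = 6
#   default_learning_rate_sigma(2)   # -> (3 + ln 2) / (12 * sqrt(2)) ~ 0.218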
def compute_utility(fitness):
    # Computes utility and order used for fitness shaping
    # See Wierstra et al. (2014)
    n = len(fitness)
    order = np.argsort(fitness)[::-1]
    fitness = fitness[order]
    utility = [
        np.max([0, np.log((n / 2) + 1)]) - np.log(k + 1) for k in range(n)]
    utility = utility / np.sum(utility) - 1. / n
    return order, utility
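# Hedged sketch: the shaped utilities are purely rank based and sum to zero
# (up to floating point error), so the best individuals receive positive
# weight and the worst negative weight regardless of the absolute fitness
# scale:
#   _order, _utility = compute_utility(np.array([0.1, -2.0, 1.5]))
#   _order            # -> array([2, 0, 1]), best to worst
#   np.sum(_utility)  # -> ~0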
def optimize(func, mu, sigma, learning_rate_mu=None, learning_rate_sigma=None,
             population_size=None, fitness_shaping=True,
             mirrored_sampling=True, record_history=False,
             max_generations=2000, min_sigma=1e-8, verbosity=0):
    ###########################################################################
    # Optimizes an objective function via evolution strategies using the
    # natural gradient of multinormal search distributions in natural
    # coordinates. Does not consider covariances between parameters
    # ("separable natural evolution strategies").
    # See Wierstra et al. (2014)
    #
    # Parameters
    # ----------
    # func: function
    #     The function to be maximized.
    # mu: float
    #     Initial mean of the search distribution.
    # sigma: float
    #     Initial standard deviation of the search distribution.
    # learning_rate_mu: float
    #     Learning rate of mu.
    # learning_rate_sigma: float
    #     Learning rate of sigma.
    # population_size: int
    #     Number of individuals sampled in each generation.
    # fitness_shaping: bool
    #     Whether to use fitness shaping, compensating for large
    #     deviations in fitness, see Wierstra et al. (2014).
    # mirrored_sampling: bool
    #     Whether to use mirrored sampling, i.e., evaluating a mirrored
    #     sample for each sample, see Wierstra et al. (2014).
    # record_history: bool
    #     Whether to record history of search distribution parameters,
    #     fitness values and individuals.
    # max_generations: int
    #     Maximal number of generations.
    # min_sigma: float
    #     Minimal value for the standard deviation of the search
    #     distribution. If all dimensions fall below this value,
    #     the search is stopped.
    # verbosity: int
    #     Verbosity level; if larger than zero, progress information is
    #     printed in every generation.
    #
    # Returns
    # -------
    # dict
    #     Dictionary of final parameters of search distribution and
    #     history.
    if not isinstance(mu, np.ndarray):
        raise TypeError('mu needs to be of type np.ndarray')
    if not isinstance(sigma, np.ndarray):
        raise TypeError('sigma needs to be of type np.ndarray')
    if learning_rate_mu is None:
        learning_rate_mu = default_learning_rate_mu()
    if learning_rate_sigma is None:
        learning_rate_sigma = default_learning_rate_sigma(mu.size)
    if population_size is None:
        population_size = default_population_size(mu.size)
    generation = 0
    mu_history = []
    sigma_history = []
    pop_history = []
    fitness_history = []
    while True:
        # create new population using the search distribution
        s = np.random.normal(0, 1, size=(population_size,) + np.shape(mu))
        z = mu + sigma * s
        # add mirrored perturbations if enabled
        if mirrored_sampling:
            z = np.vstack([z, mu - sigma * s])
            s = np.vstack([s, -s])
        # evaluate fitness for every individual in population
        fitness = np.fromiter((func(*zi) for zi in z), float)
        # print status if enabled
        if verbosity > 0:
            print(
                '# Generation {:d} | fitness {:.3f} | mu {} | sigma {}'.format(
                    generation, np.mean(fitness),
                    ', '.join(str(np.round(mu_i, 3)) for mu_i in mu),
                    ', '.join(str(np.round(sigma_i, 3)) for sigma_i in sigma)
                ))
        # apply fitness shaping if enabled
        if fitness_shaping:
            order, utility = compute_utility(fitness)
            s = s[order]
            z = z[order]
        else:
            utility = fitness
        # bookkeeping
        if record_history:
            mu_history.append(mu.copy())
            sigma_history.append(sigma.copy())
            pop_history.append(z.copy())
            fitness_history.append(fitness)
        # exit if max generations reached or search distributions are
        # very narrow
        if generation == max_generations or np.all(sigma < min_sigma):
            break
        # update parameter of search distribution via natural gradient
        # descent in natural coordinates
        mu += learning_rate_mu * sigma * np.dot(utility, s)
        sigma *= np.exp(learning_rate_sigma / 2. * np.dot(utility, s**2 - 1))
        generation += 1
    return {
        'mu': mu,
        'sigma': sigma,
        'fitness_history': np.array(fitness_history),
        'mu_history': np.array(mu_history),
        'sigma_history': np.array(sigma_history),
        'pop_history': np.array(pop_history)
    }
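# Minimal usage sketch of optimize() on a toy problem (not part of the network
# optimization below): maximizing a concave quadratic should drive mu towards
# its optimum at (1, -2):
#   _result = optimize(lambda x, y: -(x - 1.) ** 2 - (y + 2.) ** 2,
#                      mu=np.array([0., 0.]), sigma=np.array([0.5, 0.5]),
#                      max_generations=200)
#   _result['mu']  # -> approximately [1., -2.]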
def optimize_network(optimization_parameters, simulation_parameters):
    # Searches for suitable network parameters to fulfill defined constraints
    np.random.seed(simulation_parameters['seed'])
    def objective_function(g, eta):
        # Returns the fitness of a specific network parametrization
        # create local copy of parameters that uses parameters given
        # by optimization algorithm
        simulation_parameters_local = simulation_parameters.copy()
        simulation_parameters_local['g'] = g
        simulation_parameters_local['eta'] = eta
        # perform the network simulation
        espikes, ispikes = simulate(simulation_parameters_local)
        # analyse the result and compute fitness
        rate, cv, corr = compute_statistics(
            simulation_parameters, espikes, ispikes)
        fitness = \
            - optimization_parameters['fitness_weight_rate'] * (
                rate - optimization_parameters['target_rate']) ** 2 \
            - optimization_parameters['fitness_weight_cv'] * (
                cv - optimization_parameters['target_cv']) ** 2 \
            - optimization_parameters['fitness_weight_corr'] * (
                corr - optimization_parameters['target_corr']) ** 2
        return fitness
    return optimize(
        objective_function,
        np.array(optimization_parameters['mu']),
        np.array(optimization_parameters['sigma']),
        max_generations=optimization_parameters['max_generations'],
        record_history=True,
        verbosity=optimization_parameters['verbosity']
    )
###############################################################################
# Main
if __name__ == '__main__':
    simulation_parameters = {
        'seed': 123,
        'dt': 0.1,            # (ms) simulation resolution
        'sim_time': 1000.,    # (ms) simulation duration
        'warmup_time': 300.,  # (ms) duration ignored during analysis
        'delay': 1.5,         # (ms) synaptic delay
        'g': None,            # relative ratio of excitation and inhibition
        'eta': None,          # relative strength of external drive
        'epsilon': 0.1,       # average connectivity of network
        'N': 400,             # number of neurons in network
        'gamma': 0.8,         # relative size of excitatory and
                              # inhibitory population
        'N_rec': 40,          # number of neurons to record activity from
    }
    optimization_parameters = {
        'verbosity': 1,             # print progress over generations
        'max_generations': 20,      # maximal number of generations
        'target_rate': 1.89,        # (spikes/s) target rate
        'target_corr': 0.0,         # target correlation
        'target_cv': 1.,            # target coefficient of variation
        'mu': [1., 3.],             # initial mean for search distribution
                                    # (mu(g), mu(eta))
        'sigma': [0.15, 0.05],      # initial sigma for search
                                    # distribution (sigma(g), sigma(eta))
        # hyperparameters of the fitness function; these are used to
        # compensate for the different typical scales of the
        # individual measures: rate ~ O(1), cv ~ O(0.1), corr ~ O(0.01)
        'fitness_weight_rate': 1.,    # relative weight of rate deviation
        'fitness_weight_cv': 10.,     # relative weight of cv deviation
        'fitness_weight_corr': 100.,  # relative weight of corr deviation
    }
    # optimize network parameters
    optimization_result = optimize_network(optimization_parameters,
                                           simulation_parameters)
    simulation_parameters['g'] = optimization_result['mu'][0]
    simulation_parameters['eta'] = optimization_result['mu'][1]
    espikes, ispikes = simulate(simulation_parameters)
    rate, cv, corr = compute_statistics(
        simulation_parameters, espikes, ispikes)
    print('Statistics after optimization:', end=' ')
    print('Rate: {:.3f}, cv: {:.3f}, correlation: {:.3f}'.format(
        rate, cv, corr))
    # plot results
    fig = plt.figure(figsize=(10, 4))
    ax1 = fig.add_axes([0.06, 0.12, 0.25, 0.8])
    ax2 = fig.add_axes([0.4, 0.12, 0.25, 0.8])
    ax3 = fig.add_axes([0.74, 0.12, 0.25, 0.8])
    ax1.set_xlabel('Time (ms)')
    ax1.set_ylabel('Neuron id')
    ax2.set_xlabel(r'Relative strength of inhibition $g$')
    ax2.set_ylabel(r'Relative strength of external drive $\eta$')
    ax3.set_xlabel('Generation')
    ax3.set_ylabel('Fitness')
    # raster plot
    ax1.plot(espikes['times'], espikes['senders'], ls='', marker='.')
    # search distributions and individuals
    for mu, sigma in zip(optimization_result['mu_history'],
                         optimization_result['sigma_history']):
        ellipse = Ellipse(
            xy=mu, width=2 * sigma[0], height=2 * sigma[1], alpha=0.5, fc='k')
        ellipse.set_clip_box(ax2.bbox)
        ax2.add_artist(ellipse)
    ax2.plot(optimization_result['mu_history'][:, 0],
             optimization_result['mu_history'][:, 1],
             marker='.', color='k', alpha=0.5)
    for generation in optimization_result['pop_history']:
        ax2.scatter(generation[:, 0], generation[:, 1])
    # fitness over generations
    ax3.errorbar(np.arange(len(optimization_result['fitness_history'])),
                 np.mean(optimization_result['fitness_history'], axis=1),
                 yerr=np.std(optimization_result['fitness_history'], axis=1))
    fig.savefig('brunel_alpha_evolution_strategies.pdf')
 | 
	gpl-2.0 | 
| 
	idlead/scikit-learn | 
	sklearn/grid_search.py | 
	5 | 
	38455 | 
	"""
The :mod:`sklearn.grid_search` includes utilities to fine-tune the parameters
of an estimator.
"""
from __future__ import print_function
# Author: Alexandre Gramfort <[email protected]>,
#         Gael Varoquaux <[email protected]>
#         Andreas Mueller <[email protected]>
#         Olivier Grisel <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from .base import BaseEstimator, is_classifier, clone
from .base import MetaEstimatorMixin
from .cross_validation import check_cv
from .cross_validation import _fit_and_score
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import check_random_state
from .utils.random import sample_without_replacement
from .utils.validation import _num_samples, indexable
from .utils.metaestimators import if_delegate_has_method
from .metrics.scorer import check_scoring
from .exceptions import ChangedBehaviorWarning
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
           'ParameterSampler', 'RandomizedSearchCV']
warnings.warn("This module has been deprecated in favor of the "
              "model_selection module into which all the refactored classes "
              "and functions are moved. This module will be removed in 0.20.",
              DeprecationWarning)
class ParameterGrid(object):
    """Grid of parameters with a discrete number of values for each.
    Can be used to iterate over parameter value combinations with the
    Python built-in function iter.
    Read more in the :ref:`User Guide <grid_search>`.
    Parameters
    ----------
    param_grid : dict of string to sequence, or sequence of such
        The parameter grid to explore, as a dictionary mapping estimator
        parameters to sequences of allowed values.
        An empty dict signifies default parameters.
        A sequence of dicts signifies a sequence of grids to search, and is
        useful to avoid exploring parameter combinations that make no sense
        or have no effect. See the examples below.
    Examples
    --------
    >>> from sklearn.grid_search import ParameterGrid
    >>> param_grid = {'a': [1, 2], 'b': [True, False]}
    >>> list(ParameterGrid(param_grid)) == (
    ...    [{'a': 1, 'b': True}, {'a': 1, 'b': False},
    ...     {'a': 2, 'b': True}, {'a': 2, 'b': False}])
    True
    >>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
    >>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
    ...                               {'kernel': 'rbf', 'gamma': 1},
    ...                               {'kernel': 'rbf', 'gamma': 10}]
    True
    >>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
    True
    See also
    --------
    :class:`GridSearchCV`:
        uses ``ParameterGrid`` to perform a full parallelized parameter search.
    """
    def __init__(self, param_grid):
        if isinstance(param_grid, Mapping):
            # wrap dictionary in a singleton list to support either dict
            # or list of dicts
            param_grid = [param_grid]
        self.param_grid = param_grid
    def __iter__(self):
        """Iterate over the points in the grid.
        Returns
        -------
        params : iterator over dict of string to any
            Yields dictionaries mapping each estimator parameter to one of its
            allowed values.
        """
        for p in self.param_grid:
            # Always sort the keys of a dictionary, for reproducibility
            items = sorted(p.items())
            if not items:
                yield {}
            else:
                keys, values = zip(*items)
                for v in product(*values):
                    params = dict(zip(keys, v))
                    yield params
    def __len__(self):
        """Number of points on the grid."""
        # Product function that can handle iterables (np.product can't).
        product = partial(reduce, operator.mul)
        return sum(product(len(v) for v in p.values()) if p else 1
                   for p in self.param_grid)
    def __getitem__(self, ind):
        """Get the parameters that would be ``ind``th in iteration
        Parameters
        ----------
        ind : int
            The iteration index
        Returns
        -------
        params : dict of string to any
            Equal to list(self)[ind]
        """
        # This is used to make discrete sampling without replacement memory
        # efficient.
        for sub_grid in self.param_grid:
            # XXX: could memoize information used here
            if not sub_grid:
                if ind == 0:
                    return {}
                else:
                    ind -= 1
                    continue
            # Reverse so most frequent cycling parameter comes first
            keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
            sizes = [len(v_list) for v_list in values_lists]
            total = np.product(sizes)
            if ind >= total:
                # Try the next grid
                ind -= total
            else:
                out = {}
                for key, v_list, n in zip(keys, values_lists, sizes):
                    ind, offset = divmod(ind, n)
                    out[key] = v_list[offset]
                return out
        raise IndexError('ParameterGrid index out of range')
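# Hedged sketch of the indexing contract relied upon for memory-efficient
# sampling in ParameterSampler below: integer indexing enumerates the same
# settings, in the same order, as full iteration.
#   grid = ParameterGrid({'a': [1, 2], 'b': [True, False]})
#   [grid[i] for i in range(len(grid))] == list(grid)  # -> True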
class ParameterSampler(object):
    """Generator on parameters sampled from given distributions.
    Non-deterministic iterable over random candidate combinations for hyper-
    parameter search. If all parameters are presented as a list,
    sampling without replacement is performed. If at least one parameter
    is given as a distribution, sampling with replacement is used.
    It is highly recommended to use continuous distributions for continuous
    parameters.
    Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not accept
    a custom RNG instance and always use the singleton RNG from
    ``numpy.random``. Hence setting ``random_state`` will not guarantee a
    deterministic iteration whenever ``scipy.stats`` distributions are used to
    define the parameter search space.
    Read more in the :ref:`User Guide <grid_search>`.
    Parameters
    ----------
    param_distributions : dict
        Dictionary where the keys are parameters and values
        are distributions from which a parameter is to be sampled.
        Distributions either have to provide a ``rvs`` function
        to sample from them, or can be given as a list of values,
        where a uniform distribution is assumed.
    n_iter : integer
        Number of parameter settings that are produced.
    random_state : int or RandomState
        Pseudo random number generator state used for random uniform sampling
        from lists of possible values instead of scipy.stats distributions.
    Returns
    -------
    params : dict of string to any
        **Yields** dictionaries mapping each estimator parameter to
        a sampled value.
    Examples
    --------
    >>> from sklearn.grid_search import ParameterSampler
    >>> from scipy.stats.distributions import expon
    >>> import numpy as np
    >>> np.random.seed(0)
    >>> param_grid = {'a':[1, 2], 'b': expon()}
    >>> param_list = list(ParameterSampler(param_grid, n_iter=4))
    >>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
    ...                 for d in param_list]
    >>> rounded_list == [{'b': 0.89856, 'a': 1},
    ...                  {'b': 0.923223, 'a': 1},
    ...                  {'b': 1.878964, 'a': 2},
    ...                  {'b': 1.038159, 'a': 2}]
    True
    """
    def __init__(self, param_distributions, n_iter, random_state=None):
        self.param_distributions = param_distributions
        self.n_iter = n_iter
        self.random_state = random_state
    def __iter__(self):
        # check if all distributions are given as lists
        # in this case we want to sample without replacement
        all_lists = np.all([not hasattr(v, "rvs")
                            for v in self.param_distributions.values()])
        rnd = check_random_state(self.random_state)
        if all_lists:
            # look up sampled parameter settings in parameter grid
            param_grid = ParameterGrid(self.param_distributions)
            grid_size = len(param_grid)
            if grid_size < self.n_iter:
                raise ValueError(
                    "The total space of parameters %d is smaller "
                    "than n_iter=%d." % (grid_size, self.n_iter)
                    + " For exhaustive searches, use GridSearchCV.")
            for i in sample_without_replacement(grid_size, self.n_iter,
                                                random_state=rnd):
                yield param_grid[i]
        else:
            # Always sort the keys of a dictionary, for reproducibility
            items = sorted(self.param_distributions.items())
            for _ in six.moves.range(self.n_iter):
                params = dict()
                for k, v in items:
                    if hasattr(v, "rvs"):
                        params[k] = v.rvs()
                    else:
                        params[k] = v[rnd.randint(len(v))]
                yield params
    def __len__(self):
        """Number of points that will be sampled."""
        return self.n_iter
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
                   verbose, error_score='raise', **fit_params):
    """Run fit on one set of parameters.
    Parameters
    ----------
    X : array-like, sparse matrix or list
        Input data.
    y : array-like or None
        Targets for input data.
    estimator : estimator object
        An object of that type is instantiated for each grid point.
        This is assumed to implement the scikit-learn estimator interface.
        Either estimator needs to provide a ``score`` function,
        or ``scoring`` must be passed.
    parameters : dict
        Parameters to be set on estimator for this grid point.
    train : ndarray, dtype int or bool
        Boolean mask or indices for training set.
    test : ndarray, dtype int or bool
        Boolean mask or indices for test set.
    scorer : callable or None.
        If provided must be a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
    verbose : int
        Verbosity level.
    **fit_params : kwargs
        Additional parameters passed to the fit function of the estimator.
    error_score : 'raise' (default) or numeric
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised. If a numeric value is given,
        FitFailedWarning is raised. This parameter does not affect the refit
        step, which will always raise the error.
    Returns
    -------
    score : float
        Score of this parameter setting on given training / test split.
    parameters : dict
        The parameters that have been evaluated.
    n_samples_test : int
        Number of test samples in this split.
    """
    score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train,
                                              test, verbose, parameters,
                                              fit_params, error_score)
    return score, parameters, n_samples_test
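# Hedged usage sketch (X, y, train_idx and test_idx are hypothetical data and
# index arrays; the scorer is built with check_scoring as in BaseSearchCV
# below):
#   from sklearn.svm import SVC
#   scorer = check_scoring(SVC())
#   score, params, n_test = fit_grid_point(X, y, SVC(), {'C': 1.0},
#                                          train_idx, test_idx, scorer,
#                                          verbose=0)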
def _check_param_grid(param_grid):
    if hasattr(param_grid, 'items'):
        param_grid = [param_grid]
    for p in param_grid:
        for v in p.values():
            if isinstance(v, np.ndarray) and v.ndim > 1:
                raise ValueError("Parameter array should be one-dimensional.")
            check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
            if True not in check:
                raise ValueError("Parameter values should be a list.")
            if len(v) == 0:
                raise ValueError("Parameter values should be a non-empty "
                                 "list.")
class _CVScoreTuple (namedtuple('_CVScoreTuple',
                                ('parameters',
                                 'mean_validation_score',
                                 'cv_validation_scores'))):
    # A raw namedtuple is very memory efficient as it packs the attributes
    # in a struct to get rid of the __dict__ of attributes in particular it
    # does not copy the string for the keys on each instance.
    # By deriving a namedtuple class just to introduce the __repr__ method we
    # would also reintroduce the __dict__ on the instance; we avoid that by
    # telling the Python interpreter that this subclass uses static __slots__
    # instead of dynamic attributes. Furthermore, we don't need any additional
    # slot in the subclass, so we set __slots__ to the empty tuple.
    __slots__ = ()
    def __repr__(self):
        """Simple custom repr to summarize the main info"""
        return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
            self.mean_validation_score,
            np.std(self.cv_validation_scores),
            self.parameters)
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
                                      MetaEstimatorMixin)):
    """Base class for hyper parameter search with cross-validation."""
    @abstractmethod
    def __init__(self, estimator, scoring=None,
                 fit_params=None, n_jobs=1, iid=True,
                 refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
                 error_score='raise'):
        self.scoring = scoring
        self.estimator = estimator
        self.n_jobs = n_jobs
        self.fit_params = fit_params if fit_params is not None else {}
        self.iid = iid
        self.refit = refit
        self.cv = cv
        self.verbose = verbose
        self.pre_dispatch = pre_dispatch
        self.error_score = error_score
    @property
    def _estimator_type(self):
        return self.estimator._estimator_type
    def score(self, X, y=None):
        """Returns the score on the given data, if the estimator has been refit.
        This uses the score defined by ``scoring`` where provided, and the
        ``best_estimator_.score`` method otherwise.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Input data, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like, shape = [n_samples] or [n_samples, n_output], optional
            Target relative to X for classification or regression;
            None for unsupervised learning.
        Returns
        -------
        score : float
        Notes
        -----
         * The long-standing behavior of this method changed in version 0.16.
         * It no longer uses the metric provided by ``estimator.score`` if the
           ``scoring`` parameter was set when fitting.
        """
        if self.scorer_ is None:
            raise ValueError("No score function explicitly defined, "
                             "and the estimator doesn't provide one %s"
                             % self.best_estimator_)
        if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
            warnings.warn("The long-standing behavior to use the estimator's "
                          "score function in {0}.score has changed. The "
                          "scoring parameter is now used."
                          "".format(self.__class__.__name__),
                          ChangedBehaviorWarning)
        return self.scorer_(self.best_estimator_, X, y)
    @if_delegate_has_method(delegate='estimator')
    def predict(self, X):
        """Call predict on the estimator with the best found parameters.
        Only available if ``refit=True`` and the underlying estimator supports
        ``predict``.
        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.
        """
        return self.best_estimator_.predict(X)
    @if_delegate_has_method(delegate='estimator')
    def predict_proba(self, X):
        """Call predict_proba on the estimator with the best found parameters.
        Only available if ``refit=True`` and the underlying estimator supports
        ``predict_proba``.
        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.
        """
        return self.best_estimator_.predict_proba(X)
    @if_delegate_has_method(delegate='estimator')
    def predict_log_proba(self, X):
        """Call predict_log_proba on the estimator with the best found parameters.
        Only available if ``refit=True`` and the underlying estimator supports
        ``predict_log_proba``.
        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.
        """
        return self.best_estimator_.predict_log_proba(X)
    @if_delegate_has_method(delegate='estimator')
    def decision_function(self, X):
        """Call decision_function on the estimator with the best found parameters.
        Only available if ``refit=True`` and the underlying estimator supports
        ``decision_function``.
        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.
        """
        return self.best_estimator_.decision_function(X)
    @if_delegate_has_method(delegate='estimator')
    def transform(self, X):
        """Call transform on the estimator with the best found parameters.
        Only available if the underlying estimator supports ``transform`` and
        ``refit=True``.
        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.
        """
        return self.best_estimator_.transform(X)
    @if_delegate_has_method(delegate='estimator')
    def inverse_transform(self, Xt):
        """Call inverse_transform on the estimator with the best found parameters.
        Only available if the underlying estimator implements ``inverse_transform`` and
        ``refit=True``.
        Parameters
        -----------
        Xt : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.
        """
        return self.best_estimator_.inverse_transform(Xt)
    def _fit(self, X, y, parameter_iterable):
        """Actual fitting,  performing the search over parameters."""
        estimator = self.estimator
        cv = self.cv
        self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
        n_samples = _num_samples(X)
        X, y = indexable(X, y)
        if y is not None:
            if len(y) != n_samples:
                raise ValueError('Target variable (y) has a different number '
                                 'of samples (%i) than data (X: %i samples)'
                                 % (len(y), n_samples))
        cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
        if self.verbose > 0:
            if isinstance(parameter_iterable, Sized):
                n_candidates = len(parameter_iterable)
                print("Fitting {0} folds for each of {1} candidates, totalling"
                      " {2} fits".format(len(cv), n_candidates,
                                         n_candidates * len(cv)))
        base_estimator = clone(self.estimator)
        pre_dispatch = self.pre_dispatch
        out = Parallel(
            n_jobs=self.n_jobs, verbose=self.verbose,
            pre_dispatch=pre_dispatch
        )(
            delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
                                    train, test, self.verbose, parameters,
                                    self.fit_params, return_parameters=True,
                                    error_score=self.error_score)
                for parameters in parameter_iterable
                for train, test in cv)
        # Out is a list of quadruples: score, n_test_samples, time, parameters
        n_fits = len(out)
        n_folds = len(cv)
        scores = list()
        grid_scores = list()
        for grid_start in range(0, n_fits, n_folds):
            n_test_samples = 0
            score = 0
            all_scores = []
            for this_score, this_n_test_samples, _, parameters in \
                    out[grid_start:grid_start + n_folds]:
                all_scores.append(this_score)
                if self.iid:
                    this_score *= this_n_test_samples
                    n_test_samples += this_n_test_samples
                score += this_score
            if self.iid:
                score /= float(n_test_samples)
            else:
                score /= float(n_folds)
            scores.append((score, parameters))
            # TODO: shall we also store the test_fold_sizes?
            grid_scores.append(_CVScoreTuple(
                parameters,
                score,
                np.array(all_scores)))
        # Store the computed scores
        self.grid_scores_ = grid_scores
        # Find the best parameters by comparing on the mean validation score:
        # note that `sorted` is deterministic in the way it breaks ties
        best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
                      reverse=True)[0]
        self.best_params_ = best.parameters
        self.best_score_ = best.mean_validation_score
        if self.refit:
            # fit the best estimator using the entire dataset
            # clone first to work around broken estimators
            best_estimator = clone(base_estimator).set_params(
                **best.parameters)
            if y is not None:
                best_estimator.fit(X, y, **self.fit_params)
            else:
                best_estimator.fit(X, **self.fit_params)
            self.best_estimator_ = best_estimator
        return self
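# Small numeric sketch of the aggregation above (hypothetical fold results):
# with iid=True the fold scores are weighted by their test-set sizes, e.g.
# scores (0.8, 0.9) on folds with (50, 100) test samples give
# (0.8 * 50 + 0.9 * 100) / 150 = 0.867 (to 3 d.p.), whereas iid=False takes
# the plain mean, 0.85.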
class GridSearchCV(BaseSearchCV):
    """Exhaustive search over specified parameter values for an estimator.
    Important members are fit, predict.
    GridSearchCV implements a "fit" and a "score" method.
    It also implements "predict", "predict_proba", "decision_function",
    "transform" and "inverse_transform" if they are implemented in the
    estimator used.
    The parameters of the estimator used to apply these methods are optimized
    by cross-validated grid-search over a parameter grid.
    Read more in the :ref:`User Guide <grid_search>`.
    Parameters
    ----------
    estimator : estimator object.
        An object of that type is instantiated for each grid point.
        This is assumed to implement the scikit-learn estimator interface.
        Either estimator needs to provide a ``score`` function,
        or ``scoring`` must be passed.
    param_grid : dict or list of dictionaries
        Dictionary with parameters names (string) as keys and lists of
        parameter settings to try as values, or a list of such
        dictionaries, in which case the grids spanned by each dictionary
        in the list are explored. This enables searching over any sequence
        of parameter settings.
    scoring : string, callable or None, default=None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
        If ``None``, the ``score`` method of the estimator is used.
    fit_params : dict, optional
        Parameters to pass to the fit method.
    n_jobs : int, default=1
        Number of jobs to run in parallel.
        .. versionchanged:: 0.17
           Upgraded to joblib 0.9.3.
    pre_dispatch : int, or string, optional
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:
            - None, in which case all the jobs are immediately
              created and spawned. Use this for lightweight and
              fast-running jobs, to avoid delays due to on-demand
              spawning of the jobs
            - An int, giving the exact number of total jobs that are
              spawned
            - A string, giving an expression as a function of n_jobs,
              as in '2*n_jobs'
    iid : boolean, default=True
        If True, the data is assumed to be identically distributed across
        the folds, and the loss minimized is the total loss per sample,
        and not the mean loss across the folds.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
          - None, to use the default 3-fold cross-validation,
          - integer, to specify the number of folds.
          - An object to be used as a cross-validation generator.
          - An iterable yielding train/test splits.
        For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
        other cases, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    refit : boolean, default=True
        Refit the best estimator with the entire dataset.
        If "False", it is impossible to make predictions using
        this GridSearchCV instance after fitting.
    verbose : integer
        Controls the verbosity: the higher, the more messages.
    error_score : 'raise' (default) or numeric
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised. If a numeric value is given,
        FitFailedWarning is raised. This parameter does not affect the refit
        step, which will always raise the error.
    Examples
    --------
    >>> from sklearn import svm, grid_search, datasets
    >>> iris = datasets.load_iris()
    >>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
    >>> svr = svm.SVC()
    >>> clf = grid_search.GridSearchCV(svr, parameters)
    >>> clf.fit(iris.data, iris.target)
    ...                             # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    GridSearchCV(cv=None, error_score=...,
           estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
                         decision_function_shape=None, degree=..., gamma=...,
                         kernel='rbf', max_iter=-1, probability=False,
                         random_state=None, shrinking=True, tol=...,
                         verbose=False),
           fit_params={}, iid=..., n_jobs=1,
           param_grid=..., pre_dispatch=..., refit=...,
           scoring=..., verbose=...)
    Attributes
    ----------
    grid_scores_ : list of named tuples
        Contains scores for all parameter combinations in param_grid.
        Each entry corresponds to one parameter setting.
        Each named tuple has the attributes:
            * ``parameters``, a dict of parameter settings
            * ``mean_validation_score``, the mean score over the
              cross-validation folds
            * ``cv_validation_scores``, the list of scores for each fold
    best_estimator_ : estimator
        Estimator that was chosen by the search, i.e. estimator
        which gave highest score (or smallest loss if specified)
        on the left out data. Not available if refit=False.
    best_score_ : float
        Score of best_estimator on the left out data.
    best_params_ : dict
        Parameter setting that gave the best results on the hold out data.
    scorer_ : function
        Scorer function used on the held out data to choose the best
        parameters for the model.
    Notes
    ------
    The parameters selected are those that maximize the score of the left out
    data, unless an explicit score is passed in which case it is used instead.
    If `n_jobs` was set to a value higher than one, the data is copied for each
    point in the grid (and not `n_jobs` times). This is done for efficiency
    reasons if individual jobs take very little time, but may raise errors if
    the dataset is large and not enough memory is available.  A workaround in
    this case is to set `pre_dispatch`. Then, the memory is copied only
    `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
    n_jobs`.
    See Also
    ---------
    :class:`ParameterGrid`:
        generates all the combinations of a hyperparameter grid.
    :func:`sklearn.cross_validation.train_test_split`:
        utility function to split the data into a development set usable
        for fitting a GridSearchCV instance and an evaluation set for
        its final evaluation.
    :func:`sklearn.metrics.make_scorer`:
        Make a scorer from a performance metric or loss function.
    """
    def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
                 n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
                 pre_dispatch='2*n_jobs', error_score='raise'):
        super(GridSearchCV, self).__init__(
            estimator, scoring, fit_params, n_jobs, iid,
            refit, cv, verbose, pre_dispatch, error_score)
        self.param_grid = param_grid
        _check_param_grid(param_grid)
    def fit(self, X, y=None):
        """Run fit with all sets of parameters.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like, shape = [n_samples] or [n_samples, n_output], optional
            Target relative to X for classification or regression;
            None for unsupervised learning.
        """
        return self._fit(X, y, ParameterGrid(self.param_grid))
class RandomizedSearchCV(BaseSearchCV):
    """Randomized search on hyper parameters.
    RandomizedSearchCV implements a "fit" and a "score" method.
    It also implements "predict", "predict_proba", "decision_function",
    "transform" and "inverse_transform" if they are implemented in the
    estimator used.
    The parameters of the estimator used to apply these methods are optimized
    by cross-validated search over parameter settings.
    In contrast to GridSearchCV, not all parameter values are tried out, but
    rather a fixed number of parameter settings is sampled from the specified
    distributions. The number of parameter settings that are tried is
    given by n_iter.
    If all parameters are presented as a list,
    sampling without replacement is performed. If at least one parameter
    is given as a distribution, sampling with replacement is used.
    It is highly recommended to use continuous distributions for continuous
    parameters.
    Read more in the :ref:`User Guide <randomized_parameter_search>`.
    Parameters
    ----------
    estimator : estimator object.
        An object of that type is instantiated for each grid point.
        This is assumed to implement the scikit-learn estimator interface.
        Either estimator needs to provide a ``score`` function,
        or ``scoring`` must be passed.
    param_distributions : dict
        Dictionary with parameters names (string) as keys and distributions
        or lists of parameters to try. Distributions must provide a ``rvs``
        method for sampling (such as those from scipy.stats.distributions).
        If a list is given, it is sampled uniformly.
    n_iter : int, default=10
        Number of parameter settings that are sampled. n_iter trades
        off runtime vs quality of the solution.
    scoring : string, callable or None, default=None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
        If ``None``, the ``score`` method of the estimator is used.
    fit_params : dict, optional
        Parameters to pass to the fit method.
    n_jobs : int, default=1
        Number of jobs to run in parallel.
    pre_dispatch : int, or string, optional
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:
            - None, in which case all the jobs are immediately
              created and spawned. Use this for lightweight and
              fast-running jobs, to avoid delays due to on-demand
              spawning of the jobs
            - An int, giving the exact number of total jobs that are
              spawned
            - A string, giving an expression as a function of n_jobs,
              as in '2*n_jobs'
    iid : boolean, default=True
        If True, the data is assumed to be identically distributed across
        the folds, and the loss minimized is the total loss per sample,
        and not the mean loss across the folds.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
          - None, to use the default 3-fold cross-validation,
          - integer, to specify the number of folds.
          - An object to be used as a cross-validation generator.
          - An iterable yielding train/test splits.
        For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
        other cases, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    refit : boolean, default=True
        Refit the best estimator with the entire dataset.
        If "False", it is impossible to make predictions using
        this RandomizedSearchCV instance after fitting.
    verbose : integer
        Controls the verbosity: the higher, the more messages.
    random_state : int or RandomState
        Pseudo random number generator state used for random uniform sampling
        from lists of possible values instead of scipy.stats distributions.
    error_score : 'raise' (default) or numeric
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised. If a numeric value is given,
        FitFailedWarning is raised. This parameter does not affect the refit
        step, which will always raise the error.
    Attributes
    ----------
    grid_scores_ : list of named tuples
        Contains scores for all parameter combinations in param_grid.
        Each entry corresponds to one parameter setting.
        Each named tuple has the attributes:
            * ``parameters``, a dict of parameter settings
            * ``mean_validation_score``, the mean score over the
              cross-validation folds
            * ``cv_validation_scores``, the list of scores for each fold
    best_estimator_ : estimator
        Estimator that was chosen by the search, i.e. estimator
        which gave highest score (or smallest loss if specified)
        on the left out data. Not available if refit=False.
    best_score_ : float
        Score of best_estimator on the left out data.
    best_params_ : dict
        Parameter setting that gave the best results on the hold out data.
    Notes
    -----
    The parameters selected are those that maximize the score of the held-out
    data, according to the scoring parameter.
    If `n_jobs` was set to a value higher than one, the data is copied for each
    parameter setting (and not `n_jobs` times). This is done for efficiency
    reasons if individual jobs take very little time, but may raise errors if
    the dataset is large and not enough memory is available.  A workaround in
    this case is to set `pre_dispatch`. Then, the memory is copied only
    `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
    n_jobs`.
    See Also
    --------
    :class:`GridSearchCV`:
        Does exhaustive search over a grid of parameters.
    :class:`ParameterSampler`:
        A generator over parameter settings, constructed from
        param_distributions.
    """
    def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
                 fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
                 verbose=0, pre_dispatch='2*n_jobs', random_state=None,
                 error_score='raise'):
        self.param_distributions = param_distributions
        self.n_iter = n_iter
        self.random_state = random_state
        super(RandomizedSearchCV, self).__init__(
            estimator=estimator, scoring=scoring, fit_params=fit_params,
            n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
            pre_dispatch=pre_dispatch, error_score=error_score)
    def fit(self, X, y=None):
        """Run fit on the estimator with randomly drawn parameters.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like, shape = [n_samples] or [n_samples, n_output], optional
            Target relative to X for classification or regression;
            None for unsupervised learning.
        """
        sampled_params = ParameterSampler(self.param_distributions,
                                          self.n_iter,
                                          random_state=self.random_state)
        return self._fit(X, y, sampled_params)
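# Hedged usage sketch (mirrors the GridSearchCV doctest above, but samples a
# fixed number of settings, drawing C from a continuous distribution):
#   from scipy.stats import expon
#   from sklearn import svm, datasets
#   iris = datasets.load_iris()
#   search = RandomizedSearchCV(svm.SVC(),
#                               {'C': expon(scale=10),
#                                'kernel': ['linear', 'rbf']},
#                               n_iter=4, random_state=0)
#   search.fit(iris.data, iris.target).best_params_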
 | 
	bsd-3-clause | 
| 
	deepesch/scikit-learn | 
	sklearn/neighbors/tests/test_dist_metrics.py | 
	230 | 
	5234 | 
	import itertools
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
import scipy
from scipy.spatial.distance import cdist
from sklearn.neighbors.dist_metrics import DistanceMetric
from nose import SkipTest
def dist_func(x1, x2, p):
    return np.sum((x1 - x2) ** p) ** (1. / p)
def cmp_version(version1, version2):
    version1 = tuple(map(int, version1.split('.')[:2]))
    version2 = tuple(map(int, version2.split('.')[:2]))
    if version1 < version2:
        return -1
    elif version1 > version2:
        return 1
    else:
        return 0
class TestMetrics:
    def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5,
                 rseed=0, dtype=np.float64):
        np.random.seed(rseed)
        self.X1 = np.random.random((n1, d)).astype(dtype)
        self.X2 = np.random.random((n2, d)).astype(dtype)
        # make boolean arrays: ones and zeros
        self.X1_bool = self.X1.round(0)
        self.X2_bool = self.X2.round(0)
        V = np.random.random((d, d))
        VI = np.dot(V, V.T)
        self.metrics = {'euclidean': {},
                        'cityblock': {},
                        'minkowski': dict(p=(1, 1.5, 2, 3)),
                        'chebyshev': {},
                        'seuclidean': dict(V=(np.random.random(d),)),
                        'wminkowski': dict(p=(1, 1.5, 3),
                                           w=(np.random.random(d),)),
                        'mahalanobis': dict(VI=(VI,)),
                        'hamming': {},
                        'canberra': {},
                        'braycurtis': {}}
        self.bool_metrics = ['matching', 'jaccard', 'dice',
                             'kulsinski', 'rogerstanimoto', 'russellrao',
                             'sokalmichener', 'sokalsneath']
    def test_cdist(self):
        for metric, argdict in self.metrics.items():
            keys = argdict.keys()
            for vals in itertools.product(*argdict.values()):
                kwargs = dict(zip(keys, vals))
                D_true = cdist(self.X1, self.X2, metric, **kwargs)
                yield self.check_cdist, metric, kwargs, D_true
        for metric in self.bool_metrics:
            D_true = cdist(self.X1_bool, self.X2_bool, metric)
            yield self.check_cdist_bool, metric, D_true
    def check_cdist(self, metric, kwargs, D_true):
        if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
            raise SkipTest("Canberra distance incorrect in scipy < 0.9")
        dm = DistanceMetric.get_metric(metric, **kwargs)
        D12 = dm.pairwise(self.X1, self.X2)
        assert_array_almost_equal(D12, D_true)
    def check_cdist_bool(self, metric, D_true):
        dm = DistanceMetric.get_metric(metric)
        D12 = dm.pairwise(self.X1_bool, self.X2_bool)
        assert_array_almost_equal(D12, D_true)
    def test_pdist(self):
        for metric, argdict in self.metrics.items():
            keys = argdict.keys()
            for vals in itertools.product(*argdict.values()):
                kwargs = dict(zip(keys, vals))
                D_true = cdist(self.X1, self.X1, metric, **kwargs)
                yield self.check_pdist, metric, kwargs, D_true
        for metric in self.bool_metrics:
            D_true = cdist(self.X1_bool, self.X1_bool, metric)
            yield self.check_pdist_bool, metric, D_true
    def check_pdist(self, metric, kwargs, D_true):
        if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
            raise SkipTest("Canberra distance incorrect in scipy < 0.9")
        dm = DistanceMetric.get_metric(metric, **kwargs)
        D12 = dm.pairwise(self.X1)
        assert_array_almost_equal(D12, D_true)
    def check_pdist_bool(self, metric, D_true):
        dm = DistanceMetric.get_metric(metric)
        D12 = dm.pairwise(self.X1_bool)
        assert_array_almost_equal(D12, D_true)
def test_haversine_metric():
    def haversine_slow(x1, x2):
        return 2 * np.arcsin(np.sqrt(np.sin(0.5 * (x1[0] - x2[0])) ** 2
                                     + np.cos(x1[0]) * np.cos(x2[0]) *
                                     np.sin(0.5 * (x1[1] - x2[1])) ** 2))
    X = np.random.random((10, 2))
    haversine = DistanceMetric.get_metric("haversine")
    D1 = haversine.pairwise(X)
    D2 = np.zeros_like(D1)
    for i, x1 in enumerate(X):
        for j, x2 in enumerate(X):
            D2[i, j] = haversine_slow(x1, x2)
    assert_array_almost_equal(D1, D2)
    assert_array_almost_equal(haversine.dist_to_rdist(D1),
                              np.sin(0.5 * D2) ** 2)
def test_pyfunc_metric():
    X = np.random.random((10, 3))
    euclidean = DistanceMetric.get_metric("euclidean")
    pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
    # Check that DistanceMetric objects initialized from both a callable
    # metric and a predefined metric are picklable
    euclidean_pkl = pickle.loads(pickle.dumps(euclidean))
    pyfunc_pkl = pickle.loads(pickle.dumps(pyfunc))
    D1 = euclidean.pairwise(X)
    D2 = pyfunc.pairwise(X)
    D1_pkl = euclidean_pkl.pairwise(X)
    D2_pkl = pyfunc_pkl.pairwise(X)
    assert_array_almost_equal(D1, D2)
    assert_array_almost_equal(D1_pkl, D2_pkl)
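# --- Added illustrative check (not part of the original test module): on a
# single input array, DistanceMetric.pairwise returns the full symmetric
# n x n distance matrix, which should match scipy's cdist of X with itself.
if __name__ == "__main__":
    X = np.random.random((5, 3))
    dm = DistanceMetric.get_metric("euclidean")
    assert_array_almost_equal(dm.pairwise(X), cdist(X, X, "euclidean"))
    print("pairwise(X) matches cdist(X, X)")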
 | 
	bsd-3-clause | 
| 
	sebalander/sebaPhD | 
	resources/PTZgrid/calcInitialCond.py | 
	1 | 
	1966 | 
	# -*- coding: utf-8 -*-
"""
Created on Wed Jul 20 20:21:33 2016
generate the camera's pose conditions by hand
@author: sebalander
"""
# %%
import cv2
import numpy as np
import numpy.linalg as lin
from scipy.linalg import sqrtm, inv
import matplotlib.pyplot as plt
# %% 
tVecFile = "PTZsheetTvecInitial.npy"
rVecFile = "PTZsheetRvecInitial.npy"
# %% Initial TRANSLATION VECTOR
tVec = np.array([[0], [0], [2.5]]) 
# %% ROTATION MATRIX
# center of image points to grid point:
center = np.array([3*0.21, 5*0.297, 0])
z = center - tVec[:,0]
z /= lin.norm(z)
# the third coordinate is unknown, so leave it at zero
x = np.array([6*21, -1*29.7, 0])
y = np.array([-1*21, -7*29.7, 0])
# make x, y perpendicular to z, then add the third component
x = x - z * np.dot(x,z) # make perpendicular to z
x /= lin.norm(x) 
y = y - z * np.dot(y,z) # make perpendicular to z
y /= lin.norm(y)
# %% test orthogonality
np.dot(x,z)
np.dot(y,z)
np.dot(x,y) # ok if not perfectly 0
# %% make into versor matrix
rMatrix = np.array([x,y,z])
# find nearest orthogonal matrix
# http://stackoverflow.com/questions/13940056/orthogonalize-matrix-numpy
rMatrix = rMatrix.dot(inv(sqrtm(rMatrix.T.dot(rMatrix))))
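# %% optional sanity check (added illustration): after the projection above
# the rows of rMatrix should be orthonormal, i.e. R.dot(R.T) ~ identity
print(np.allclose(rMatrix.dot(rMatrix.T), np.eye(3)))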
# %% SAVE PARAMETERS
# convert to rodrigues vector
rVec, _ = cv2.Rodrigues(rMatrix)
np.save(tVecFile, tVec)
np.save(rVecFile, rVec)
# %% PLOT VECTORS
[x,y,z] = rMatrix # get from orthogonal matrix
tvec = tVec[:,0]
fig = plt.figure()
from mpl_toolkits.mplot3d import Axes3D
ax = fig.gca(projection='3d')
ax.plot([0, tvec[0]], 
        [0, tvec[1]],
        [0, tvec[2]])
        
ax.plot([tvec[0], tvec[0] + x[0]],
        [tvec[1], tvec[1] + x[1]],
        [tvec[2], tvec[2] + x[2]])
ax.plot([tvec[0], tvec[0] + y[0]],
        [tvec[1], tvec[1] + y[1]],
        [tvec[2], tvec[2] + y[2]])
ax.plot([tvec[0], tvec[0] + z[0]],
        [tvec[1], tvec[1] + z[1]],
        [tvec[2], tvec[2] + z[2]])
#ax.legend()
#ax.set_xlim3d(0, 1)
#ax.set_ylim3d(0, 1)
#ax.set_zlim3d(0, 1)
plt.show() | 
	bsd-3-clause | 
| 
	serv-terr/pbl_met | 
	examples/alamo_movie.py | 
	1 | 
	2964 | 
	#!/usr/bin/env python3
import os
import sys
import time
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.animation as anima
import struct
FFMpegWriter = anima.writers['ffmpeg']
metadata = dict(title="Airflow Movie", artist="ALAMO", comment="Movie generated automatically")
writer = FFMpegWriter(fps=30, metadata=metadata)
if __name__ == "__main__":
	# Check input parameters
	if len(sys.argv) != 3:
		print("alamo_movie.py:: error: Invalid argument list")
		print(" ")
		print("Usage:")
		print(" ")
		print("  ./alamo_movie.py <alamo_snaps_file> <alamo_movie_file>")
		print(" ")
		print("by: Mauri Favaron - 2019")
		sys.exit(1)
	snapsFile = sys.argv[1]
	movieFile = sys.argv[2]
	
	# Find snaps in snaps path
	try:
		guideFile = snapsFile + ".guide.txt"
		f = open(guideFile, "r")
		lines = f.readlines()
		f.close()
	except:
		print("alamo_movie.py:: error: No or invalid guide file")
		sys.exit(3)
	if len(lines) <= 0:
		print("alamo_movie.py:: error: No or invalid guide file")
		sys.exit(3)
		
	# Get plotting extrema from the guide file
	data = np.loadtxt(guideFile)
	xmin = data[0]
	xmax = data[1]
	ymin = data[2]
	ymax = data[3]
	zmin = data[4]
	zmax = data[5]
	dx   = data[6]
	dy   = data[7]
	amax = data[8]
	
	# Get number of snapshots
	listFile = snapsFile + ".lst"
	g = open(listFile, "r")
	lines = g.readlines()
	g.close()
	numSnaps = len(lines)
	
	# Process snapshots file
	fig = plt.figure()
	f = open(snapsFile, "rb")
	header = f.read(6*8)
	(xmin, xmax, ymin, ymax, zmin, zmax) = struct.unpack("dddddd", header)
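	# Added note on the snapshot record layout implied by the reads below
	# (inferred from this script, not from a format spec): after the 6-double
	# header, each block is a native-layout double time stamp plus a 4-byte
	# int particle count (12 bytes), followed by 5 float32 values per
	# particle, of which columns 0 and 1 are the x/y positions and column 4
	# is the opacity weight divided by amax.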
	with writer.saving(fig, movieFile, dpi=100):
		while True:
	
			# Get start of block info; f.read() returns a short or empty
			# bytes object at end of file, so check the length to stop cleanly
			blkStart = f.read(12)
			if len(blkStart) < 12:
				break
			(timeStamp, numData) = struct.unpack("di", blkStart)
			timeString = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(timeStamp))
		
			# Get data from snapshot, and retrieve particles coordinates from it
			snapBlock = f.read(5*numData*4)
			fmt       = "@%df" % (5*numData)
			snapData = np.array(struct.unpack(fmt, snapBlock))
			if snapData.size > 0:
				snapMatrix = snapData.reshape((-1,5), order='C')
				xp = snapMatrix[:,0]
				yp = snapMatrix[:,1]
				ap = snapMatrix[:,4] / amax
				xpmin    = np.min(xp)
				ypmin    = np.min(yp)
				xpmax    = np.max(xp)	
				ypmax    = np.max(yp)	
			else:
				xp       = np.array([])
				yp       = np.array([])
				ap       = np.array([])
				xpmin    = 0.0
				ypmin    = 0.0
				xpmax    = 0.0
				ypmax    = 0.0
		
			# Generate frame
			ax = fig.add_subplot(111, aspect='equal')
			levels = np.array([(0,0,0,a) for a in ap])
			ax.scatter(xp, yp, s=0.1, color=levels)
			plt.xlim(xmin,xmax)
			plt.ylim(ymin,ymax)
			plt.title(timeString[0:13])
			writer.grab_frame()
			fig.delaxes(ax)
		
			# Inform users
			print("Processed: %s - Parts: %d - Min: (%f,%f)  Max: (%f,%f)" % (timeString, snapData.size, xpmin, ypmin, xpmax, ypmax))
		
	# Leave
	plt.close()
	f.close()
 | 
	lgpl-3.0 | 
| 
	chris-hld/sfs-python | 
	doc/examples/plot_particle_density.py | 
	1 | 
	1310 | 
	""" Example for particle density visualization of sound sources """
import numpy as np
import matplotlib.pyplot as plt
import sfs
# simulation parameters
pw_angle = 45  # traveling direction of plane wave
xs = [0, 0, 0]  # source position
f = 300  # frequency
# angular frequency
omega = 2 * np.pi * f
# normal vector of plane wave
npw = sfs.util.direction_vector(np.radians(pw_angle))
# random grid for velocity
grid = [np.random.uniform(-3, 3, 40000), np.random.uniform(-3, 3, 40000), 0]
def plot_particle_displacement(title):
    # compute displacement
    X = grid + amplitude * sfs.util.displacement(v, omega)
    # plot displacement
    plt.figure(figsize=(15, 15))
    plt.cla()
    sfs.plot.particles(X, facecolor='black', s=3, trim=[-3, 3, -3, 3])
    plt.axis('off')
    plt.title(title)
    plt.grid()
    plt.savefig(title + '.png')
# point source
v = sfs.mono.source.point_velocity(omega, xs, npw, grid)
amplitude = 1.5e6
plot_particle_displacement('particle_displacement_point_source')
# line source
v = sfs.mono.source.line_velocity(omega, xs, npw, grid)
amplitude = 1.3e6
plot_particle_displacement('particle_displacement_line_source')
# plane wave
v = sfs.mono.source.plane_velocity(omega, xs, npw, grid)
amplitude = 1e5
plot_particle_displacement('particle_displacement_plane_wave')
 | 
	mit | 
| 
	huzq/scikit-learn | 
	sklearn/cross_decomposition/_cca.py | 
	3 | 
	3317 | 
	from ._pls import _PLS
from ..base import _UnstableArchMixin
from ..utils.validation import _deprecate_positional_args
__all__ = ['CCA']
class CCA(_UnstableArchMixin, _PLS):
    """CCA Canonical Correlation Analysis.
    CCA inherits from PLS with mode="B" and deflation_mode="canonical".
    Read more in the :ref:`User Guide <cross_decomposition>`.
    Parameters
    ----------
    n_components : int, default=2
        Number of components to keep.
    scale : boolean, default=True
        Whether to scale the data.
    max_iter : int, default=500
        The maximum number of iterations of the NIPALS inner loop.
    tol : float, default=1e-06
        The tolerance used in the iterative algorithm.
    copy : boolean, default=True
        Whether the deflation should be done on a copy. Leave the default
        value of True unless you don't care about side effects.
    Attributes
    ----------
    x_weights_ : array, [p, n_components]
        X block weights vectors.
    y_weights_ : array, [q, n_components]
        Y block weights vectors.
    x_loadings_ : array, [p, n_components]
        X block loadings vectors.
    y_loadings_ : array, [q, n_components]
        Y block loadings vectors.
    x_scores_ : array, [n_samples, n_components]
        X scores.
    y_scores_ : array, [n_samples, n_components]
        Y scores.
    x_rotations_ : array, [p, n_components]
        X block to latents rotations.
    y_rotations_ : array, [q, n_components]
        Y block to latents rotations.
    coef_ : array of shape (p, q)
        The coefficients of the linear model: ``Y = X coef_ + Err``
    n_iter_ : array-like
        Number of iterations of the NIPALS inner loop for each
        component.
    Notes
    -----
    For each component k, find the weights u, v that maximize
    max corr(Xk u, Yk v), such that ``|u| = |v| = 1``
    Note that it maximizes only the correlations between the scores.
    The residual matrix of X (Xk+1) block is obtained by the deflation on the
    current X score: x_score.
    The residual matrix of Y (Yk+1) block is obtained by deflation on the
    current Y score.
    Examples
    --------
    >>> from sklearn.cross_decomposition import CCA
    >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
    >>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
    >>> cca = CCA(n_components=1)
    >>> cca.fit(X, Y)
    CCA(n_components=1)
    >>> X_c, Y_c = cca.transform(X, Y)
    References
    ----------
    Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
    emphasis on the two-block case. Technical Report 371, Department of
    Statistics, University of Washington, Seattle, 2000.
    In French but still a reference:
    Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
    Editions Technic.
    See also
    --------
    PLSCanonical
    PLSSVD
    """
    @_deprecate_positional_args
    def __init__(self, n_components=2, *, scale=True,
                 max_iter=500, tol=1e-06, copy=True):
        super().__init__(n_components=n_components, scale=scale,
                         deflation_mode="canonical", mode="B",
                         norm_y_weights=True, algorithm="nipals",
                         max_iter=max_iter, tol=tol, copy=copy)
 | 
	bsd-3-clause | 
| 
	KujawinskiLaboratory/NB_Distribution | 
	fxn_gatherDetails.py | 
	1 | 
	17914 | 
	#make one py file with all the plotting functions and information gathering
#KLongnecker, 13 April 2016, updated 4/15/2016, updated 4/18/2016
import pandas as pd
import os
import re
import matplotlib.pyplot as plt
import palettable as pal
import glob
from Bio import SeqIO
from Bio.KEGG.REST import *
from Bio.KEGG.KGML import KGML_parser
from Bio.Graphics.KGML_vis import KGMLCanvas
from IPython.display import Image, HTML
   
def gatherDetails(makeNclusters,trimPath,forRelatedness,folderName,CO_fromMATLAB,KO_Norm2Mean,Insitu_TPM_DIA,Insitu_TPM_DIN,Insitu_TPM_Oth):
    colLabel = ['nCpds','nGenes'] #starting with this is easiest - makes one list, no need to flatten
    for item in range(makeNclusters):
        colLabel.append('Km' + str(item) + '_cpd')
        colLabel.append('Km' + str(item) + '_gene')
    gatherCounts = pd.DataFrame(0, index = trimPath, columns = colLabel)
    #setup the strings to match first
    rnString = re.compile('(?:[rn:R])(\d+)$') #will return R00190
    cpdString = re.compile('(?:[cpd:C])(\d+)$') #will return C00190
    size = 20 #turns out I can increase the size of the compounds in the plots
    for kn in range(makeNclusters):
        fullSet = set(forRelatedness.KEGG)
        oneK = forRelatedness[forRelatedness.kmeans == kn] #get gene & transcript information for one Kmeans group
        getKm = 'Km' + str(kn)
        #check if the directories exist, one for pathway files
        directoryPDF = folderName + str(kn) + '/pathway_files'
        if not os.path.exists(directoryPDF):
            os.makedirs(directoryPDF)
        else:
            raise ValueError('Krista - be careful, this folder already exists')
        #check if the directories exist, one for reaction files
        directoryPNG = folderName + str(kn) + '/reaction_files'
        if not os.path.exists(directoryPNG):
            os.makedirs(directoryPNG) 
        else:
            raise ValueError('Krista - be careful, this folder already exists')
                       
        #check if the directories exist, one for species 
        directorySpecies = folderName + str(kn) + '/species_files'
        if not os.path.exists(directorySpecies):
            os.makedirs(directorySpecies) 
        else:
            raise ValueError('Krista - be careful, this folder already exists')
                    
        for item in trimPath: #searching within one pathway at a time
            plotPathway = [] #gather up yes/no and will only plot if have linked genes/mtabs    
            genes = getKfrom_ko(item)
            compounds = getCfrom_ko(item)
            gatherCounts.loc[item,'nCpds'] = len(compounds)
            gatherCounts.loc[item,'nGenes'] = len(genes)     
            #have to track genes and compounds differently for the biopython plotting later on 
            setG = set(genes)
            setC = set(compounds)
            setB = set(oneK.KEGG)
            intGenes = setG.intersection(setB)
            intCompounds = setC.intersection(setB)
            gatherCounts.loc[item,(getKm + '_gene')] = len(intGenes)
            gatherCounts.loc[item,(getKm + '_cpd')] = len(intCompounds)
            for gen in intGenes: #go through each gene...one at a time
                rnList = kegg_link('reaction',gen).read() #get the list of reactions for that gene
                #can have cases where there is a gene and no reaction (K02906 for example). This returns rnList = '\n'
                #since this is not actually empty...need a way to filter those out
                test = '\n'
                if test != rnList:
                    for line in rnList.rstrip().split('\n'):
                        countCpd = []
                        countGene = []
                        m = rnString.search(line) #get the reaction number
                        cpdList = kegg_link('cpd',m.group(0)).read() #now go get the compounds for that reaction
                        #can have no compounds in a reaction (only glycans, begin with G, nothing I have matched)
                        if len(cpdList) > 1: #will be true if cpdList includes compounds
                            for line2 in cpdList.rstrip().split('\n'):
                                m2 = cpdString.search(line2).group(0)
                                #now that I have a compound, check if it is in intCompounds
                                if m2 in intCompounds:
                                    countCpd.append(m2) 
                                    countGene.append(gen)
                                    plotPathway.append('yes')
                        ##Now, plot the PNG files (one for each reaction within a pathway)
                        if len(countCpd) > 0:
                            dayList = ['S1','S2','S3','S4','S5']
                            kData = pd.DataFrame(columns = dayList)
                            for k in set(countGene):
                                kData = kData.append(oneK.ix[k,dayList])
                            cData = pd.DataFrame(columns = dayList)
                            for co in set(countCpd):
                                #convert CO to RI, can have multiple options
                                j = findRInumber(oneK,co)
                                cData = cData.append(oneK.loc[j,dayList])
                            fig,ax = plt.subplots(1)
                            cData.T.plot(color = 'k',ax=ax)
                            kData.T.plot(color = 'r',ax=ax)
                            handles, labels = ax.get_legend_handles_labels()
                            #convert the RI numbers to COnumbers for the figure
                            for ia, a in enumerate(labels):
                                #add compound/gene name to the legend
                                if a[0]== 'R':
                                    tLabel = convertRItoCO(CO_fromMATLAB,a)
                                    fn = kegg_list(tLabel).read()                          
                                    labels[ia] = fn
                                elif a[0] == 'K':
                                    fn = kegg_list(a).read()
                                    labels[ia] = fn
                            ax.legend(handles, labels, bbox_to_anchor = ([-1, 0.5]))
                            fig.suptitle('pathway ' + item + ', Kmeans grp ' + str(kn))
                            pngName = 'pathway' + item + '_' + m.group(0) + '.png'
                            fig.savefig(directoryPNG + '/' + pngName, bbox_inches = 'tight')
                            pngName = None #empty it in case that is where I am having issues
                            plt.close()
            if len(plotPathway)>0:
                ## plot the pathway map for this pathway, get details from KEGG for plotting
                useColors = pal.colorbrewer.qualitative.Set1_4.hex_colors
                useColors.insert(0,'#f7f7f7') ## insert white at beginning
                # order of colors: white, red, blue,green,purple
                sd = 0 #not in dataset
                sk = 1 #in K means group and pathway
                sa = 2 #in pathway, in any K means (for genes, bc overlap in numbers)
                sn = 3 #in pathway, not in K means group (compounds only)               
                su = 4 #unconnected gene or compound
                line1 = useColors[sd] + ', not in dataset' + '\n'
                line2 = useColors[sk] + ', in K means group and pathway' + '\n'
                line3 = useColors[sa] + ', in pathway, in any K means (for genes, bc overlap in numbers)' + '\n'
                line4 = useColors[sn] + ', in pathway, not in K means group (compounds only)' + '\n'
                line5 = useColors[su] + ', unconnected gene or compound' + '\n'
                file = open("readme_colorsInPathways.txt", "w")
                file.write(line1 + line2 + line3 + line4 + line5)
                file.close()
                
                pathway = KGML_parser.read(kegg_get(item, "kgml"))
                for element in pathway.orthologs:
                    #print element.name
                    for graphic in element.graphics:
                        tg = element.name[3:9] #skip over the 'ko:'
                        if (tg in intGenes):
                            #in the pathway AND in the set for this particular K means group
                            graphic.bgcolor = useColors[sk] #
                            
                            #if this is something in the pathway, plot up the species for the K number
                            if tg in Insitu_TPM_DIA.index.tolist():
                                Dk=Insitu_TPM_DIA.loc[tg]
                            else: 
                                Dk = 0/Insitu_TPM_DIA.iloc[0] #make an empty frame
                            if tg in Insitu_TPM_DIN.index.tolist():
                                Nk=Insitu_TPM_DIN.loc[tg]
                            else:
                                Nk = 0/Insitu_TPM_DIN.iloc[0]
                            if tg in Insitu_TPM_Oth.index.tolist():
                                Ok=Insitu_TPM_Oth.loc[tg]
                            else:
                                Ok = 0/Insitu_TPM_Oth.iloc[0]
                            fig,ax=plt.subplots(1)
                            ax.stackplot(range(5), Dk, Nk, Ok, colors=pal.colorbrewer.qualitative.Set3_6_r.hex_colors, lw=0)
                            ax.set_xticks(range(5))
                            ax.set_xticklabels([1,2,3,4,5])
                            ax.set_ylabel('In situ TPM')
                            plt.title(tg + ', lt orange=diatoms, blue=dinos, dk orange=other')
                            fig.savefig(directorySpecies + '/' + tg + '_species.png',bbox_inches='tight')
                            plt.close()
                        elif (tg in fullSet) and (tg in genes) and (tg not in intGenes):
                            #in the pathway AND in the set of genes from RI, allow any Kmeans group for genes
                            graphic.bgcolor = useColors[sa] #
                        elif (tg not in fullSet) and (tg in genes) and (tg not in KO_Norm2Mean.index.tolist()):
                            #in the pathway, but *not* in anything from the RI samples
                            graphic.bgcolor = useColors[sd] #
                        elif (tg not in fullSet) and (tg in genes) and (tg in KO_Norm2Mean.index.tolist()): 
                            #an unconnected gene in the RI data
                            graphic.bgcolor = useColors[su] #
                # Change the colours of compounds (mostly the same as for genes)
                for element in pathway.compounds:
                    for graphic in element.graphics:
                        tc = element.name[4:10] #skip over the 'cpd:'
                        if (tc in intCompounds):
                            #in the pathway AND in the set for this particular K means group
                            graphic.bgcolor = useColors[sk] #
                            graphic.width = size
                            graphic.height = size
                        elif (tc in fullSet) and (tc in compounds) and (tc not in intCompounds):
                            #in the pathway AND in the set of compounds from RI, but *not* in this Kmeans group
                            graphic.bgcolor = useColors[sn] #
                            graphic.width = size
                            graphic.height = size
                        elif (tc not in fullSet) and (tc in compounds) and (tc not in CO_fromMATLAB.cNumber.values):
                            #in the pathway, but *not* in anything from the RI samples
                            graphic.bgcolor = useColors[sd] #  
                        elif (tc not in fullSet) and (tc in compounds) and (tc in CO_fromMATLAB.cNumber.values): #seems like a hack
                            #unconnected compound in the RI data
                            graphic.bgcolor = useColors[su] #
                            graphic.width = size
                            graphic.height = size
                canvas = KGMLCanvas(pathway, import_imagemap=True)
                pdfName = 'mapWithColors_' + str(item) + '.pdf'
                canvas.draw(directoryPDF + '/' + pdfName)
                pdfName = None #empty it in case that is where I am having issues
    #stick the pathway information into gatherCounts before I export...
    #want to export gatherCounts, with the added pathway name as a new column
    gatherCounts['pathwayInfo'] = ''
    gatherCounts['pathwayGroup_A'] = ''
    gatherCounts['pathwayGroup_B'] = ''
    gatherCounts['pathwayGroup_C'] = ''
    #go read in the file from KEGG
    D = glob.glob('br08901.keg') #from http://www.genome.jp/kegg-bin/get_htext?br08901.keg; 3/15/2016
    allBRITE=[]
    for idx,nof in enumerate(D):
        allBRITE = ReadBRITEfile(nof) 
    #put the pathway name and group into the data frame before exporting it
    for item in gatherCounts.index:
        #if this error appears: IndexError: index 0 is out of bounds for axis 0 with size 0
        #KEGG has updated a pathway, but not the BRITE file (see below for work around)
        pathstr = kegg_list(item).read()
        #this next line splits the string at the '\t', then keeps the piece at index = 1, and strips off the '\n'
        gatherCounts.loc[item,('pathwayInfo')] = pathstr.split('\t')[1].rstrip()
        t = allBRITE.loc[allBRITE['map']==item[2:]]  
        #put in a check to see if t.empty ...will be empty if KEGG updated pathway and not BRITE file
        if t.empty is False: 
            gatherCounts.set_value(item,'pathwayGroup_A',t['A'].values[0])
            gatherCounts.set_value(item,'pathwayGroup_B',t['B'].values[0])
            gatherCounts.set_value(item,'pathwayGroup_C',t['C'].values[0])
    
    return gatherCounts
#set up a function to get the list of K orthologues for a given pathway (must be defined as ko00140 NOT map00140)
def getKfrom_ko(ko_id):
    pathway_file = kegg_get(ko_id).read()  # query and read the pathway
    K_list = []
    current_section = None
    for line in pathway_file.rstrip().split("\n"):
        section = line[:12].strip()  # section names are within 12 columns
        if not section == "":
            current_section = section
        if current_section == "ORTHOLOGY":
            K_identifiers = line[12:].split("; ")
            t = K_identifiers[0]
            K_id = t[0:6]
            if not K_id in K_list:
                K_list.append(K_id)
    return K_list
#set up a function to get the list of compounds for a given pathway (must be defined as ko00140 NOT map00140)
def getCfrom_ko(ko_id):
    pathway_file = kegg_get(ko_id).read()  # query and read the pathway
    compound_list = []
    current_section = None
    for line in pathway_file.rstrip().split("\n"):
        section = line[:12].strip()  # section names are within 12 columns
        if not section == "":
            current_section = section
        if current_section == "COMPOUND":
            compound_identifiers = line[12:].split("; ")
            t = compound_identifiers[0]
            compound_id = t[0:6]
            if not compound_id in compound_list:
                compound_list.append(compound_id)
    return compound_list
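# Added illustration (hypothetical excerpt, not fetched from KEGG): the two
# parsers above rely on the KEGG flat-file layout in which the section name
# occupies the first 12 columns and is blank on continuation lines, e.g.
#
#   ORTHOLOGY   K00059  fabG; 3-oxoacyl-[acyl-carrier-protein] reductase
#               K00208  fabI; enoyl-[acyl-carrier-protein] reductase I
#   COMPOUND    C00083  Malonyl-CoA
#
# so slicing line[:12] yields the section name and line[12:] the identifier.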
def findRInumber(dataIn,KEGGin):
    #find possible RI numbers for a given KEGG number. 
    dataOut = []
    for i,KEGG in enumerate(dataIn['KEGG']):
        if KEGG == KEGGin:
            t = dataIn.index[i]
            dataOut.append(t)
    return dataOut
def convertRItoCO(dataIn,RIin):
    #do the reverse, given an RInumber find the cNumber
    dataOut = dataIn.loc[RIin].loc['cNumber']
    return dataOut
# A bit of code that will help us display the PDF output
def PDF(filename):
    return HTML('<iframe src=%s width=700 height=350></iframe>' % filename)
# A bit of helper code to shorten long text
def head(text, lines=10):
    """ Print the first lines lines of the passed text.
    """
    print '\n'.join(text.split('\n')[:lines] + ['[...]'])
#organize pathways into the groups defined in the BRITE file
def ReadBRITEfile(briteFile):
    forBrite = pd.DataFrame(columns = ['map','A','B','C','wholeThing'])
    # set up the expressions to match each level in the BRITE hierarchy
    
    textA = re.compile(r'(^A<b>)(.+)(</b>)\s*(.*)$')
    textB = re.compile(r'(^B)\s*(.*)$')
    textC = re.compile(r'(\d+)\s*(.*)$')
    #this relies on the fact that the rows are in order: A, with B subheadings, then C subheadings
    setA = []
    idxA = []
    setB = []
    setC = []
    with open(briteFile) as f:
        for idx,line in enumerate(f):
            if line[0] != '#': #skip over the comments
                mA = textA.search(line) 
                mB = textB.search(line) 
                mC = textC.search(line) 
                if mA:
                    setA = mA.group(2)
                    #house cleaning (probably c)
                    idxA = idx
                    forBrite.loc[idx,'A'] = setA
                    forBrite.loc[idx,'wholeThing'] = line #using this as a double check for now
                    #forBrite.loc[idx,'map'] = mC.group(1)
                elif mB:
                    setB = mB.group(2)
                    forBrite.loc[idx,'A'] = setA
                    forBrite.loc[idx,'B'] = setB
                    forBrite.loc[idx,'wholeThing'] = line
                    #forBrite.loc[idx,'map'] = mC.group(1)
                elif mC:
                    #Tracer()()
                    setC = mC.group(2)
                    forBrite.loc[idx,'A'] = setA
                    forBrite.loc[idx,'B'] = setB
                    forBrite.loc[idx,'C'] = setC
                    forBrite.loc[idx,'wholeThing'] = line
                    forBrite.loc[idx,'map'] = mC.group(1)
        return forBrite
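# Added illustration (hypothetical excerpt of a br08901.keg-style file, shown
# only to make the A/B/C regexes above concrete; '#' lines are skipped):
#   A<b>Metabolism</b>
#   B  Carbohydrate metabolism
#   C    00010 Glycolysis / Gluconeogenesis
# For the C line, ReadBRITEfile records map='00010' and
# C='Glycolysis / Gluconeogenesis' under A='Metabolism',
# B='Carbohydrate metabolism'.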
 | 
	mit | 
| 
	Titan-C/learn-dmft | 
	dmft/twosite_func.py | 
	1 | 
	1912 | 
	# -*- coding: utf-8 -*-
"""
Created on Thu Dec  4 00:22:51 2014
@author: oscar
"""
from __future__ import division, absolute_import, print_function
import numpy as np
from scipy.integrate import simps
from slaveparticles.quantum import dos
import matplotlib.pyplot as plt
from scipy.optimize import fsolve, curve_fit
def lattice_gf(sim, x=np.linspace(-4, 4, 600), wide=5e-3):
    """Compute lattice green function
    .. math::
        G(\\omega) = \\int \\frac{\\rho_0(x) dx}{\\omega
        + i\\eta + \\mu - \\Sigma(w) - x }"""
    G = []
    var = sim.omega + sim.mu - sim.GF[r'$\Sigma$'] + 1j*wide
    for w in var:
        integrable = sim.rho_0/(w - x)
        G.append(simps(integrable, x))
    return np.asarray(G)
def two_pole(w, alpha_0, alpha_1, alpha_2, omega_1, omega_2):
    r"""This function evaluates a two pole real function in the shape
    .. math:: \Sigma(\omega)=\alpha_0 + \frac{\alpha_1}{\omega - \omega_1}
        +\frac{\alpha_2}{\omega - \omega_2}"""
    return alpha_0 + alpha_1/(w - omega_1) + alpha_2/(w - omega_2)
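# Worked example (added illustration, not in the original module): with
# alpha_0 = 0, unit residues and poles at +1 and -1,
#   two_pole(0.5, 0., 1., 1., 1., -1.) = 1/(0.5 - 1) + 1/(0.5 + 1) = -4/3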
def fit_sigma(sim):
    """Fits the self-energy into its analytical two pole form"""
    w = sim.omega
    sigma = sim.GF[r'$\Sigma$']
    return curve_fit(two_pole, w, sigma)
def out_plot(sim, spec, label=''):
    w = sim.omega.imag
    stl = '+-'
    if sim.freq_axis == 'real':
        w = sim.omega.real
        stl = '-'
    for gfp in spec.split():
        if 'impG' == gfp:
            key = 'Imp G'
        if 'impG0' in gfp:
            key = 'Imp G$_0$'
        if 'sigma' == gfp:
            key = r'$\Sigma$'
        if 'G' == gfp:
            key = 'Lat G'
        if 'A' == gfp:
            plt.plot(w, sim.interacting_dos(sim.mu), stl, label='A '+label)
            continue
        plt.plot(w, sim.GF[key].real, stl, label='Re {} {}'.format(key, label))
        plt.plot(w, sim.GF[key].imag, stl+'-', label='Im {} {}'.format(key, label))
 | 
	gpl-3.0 | 
| 
	pjryan126/solid-start-careers | 
	store/api/zillow/venv/lib/python2.7/site-packages/pandas/io/tests/test_json/test_ujson.py | 
	1 | 
	55567 | 
	# -*- coding: utf-8 -*-
from unittest import TestCase
try:
    import json
except ImportError:
    import simplejson as json
import math
import nose
import platform
import sys
import time
import datetime
import calendar
import re
import decimal
from functools import partial
from pandas.compat import range, zip, StringIO, u
import pandas.json as ujson
import pandas.compat as compat
import numpy as np
from numpy.testing import (assert_array_almost_equal_nulp,
                           assert_approx_equal)
import pytz
from pandas import DataFrame, Series, Index, NaT, DatetimeIndex
import pandas.util.testing as tm
def _skip_if_python_ver(skip_major, skip_minor=None):
    major, minor = sys.version_info[:2]
    if major == skip_major and (skip_minor is None or minor == skip_minor):
        raise nose.SkipTest("skipping Python version %d.%d" % (major, minor))
json_unicode = (json.dumps if compat.PY3
                else partial(json.dumps, encoding="utf-8"))
class UltraJSONTests(TestCase):
    def test_encodeDecimal(self):
        sut = decimal.Decimal("1337.1337")
        encoded = ujson.encode(sut, double_precision=15)
        decoded = ujson.decode(encoded)
        self.assertEqual(decoded, 1337.1337)
    def test_encodeStringConversion(self):
        input = "A string \\ / \b \f \n \r \t </script> &"
        not_html_encoded = ('"A string \\\\ \\/ \\b \\f \\n '
                            '\\r \\t <\\/script> &"')
        html_encoded = ('"A string \\\\ \\/ \\b \\f \\n \\r \\t '
                        '\\u003c\\/script\\u003e \\u0026"')
        def helper(expected_output, **encode_kwargs):
            output = ujson.encode(input, **encode_kwargs)
            self.assertEqual(input, json.loads(output))
            self.assertEqual(output, expected_output)
            self.assertEqual(input, ujson.decode(output))
        # Default behavior assumes encode_html_chars=False.
        helper(not_html_encoded, ensure_ascii=True)
        helper(not_html_encoded, ensure_ascii=False)
        # Make sure explicit encode_html_chars=False works.
        helper(not_html_encoded, ensure_ascii=True, encode_html_chars=False)
        helper(not_html_encoded, ensure_ascii=False, encode_html_chars=False)
        # Make sure explicit encode_html_chars=True does the encoding.
        helper(html_encoded, ensure_ascii=True, encode_html_chars=True)
        helper(html_encoded, ensure_ascii=False, encode_html_chars=True)
    def test_doubleLongIssue(self):
        sut = {u('a'): -4342969734183514}
        encoded = json.dumps(sut)
        decoded = json.loads(encoded)
        self.assertEqual(sut, decoded)
        encoded = ujson.encode(sut, double_precision=15)
        decoded = ujson.decode(encoded)
        self.assertEqual(sut, decoded)
    def test_doubleLongDecimalIssue(self):
        sut = {u('a'): -12345678901234.56789012}
        encoded = json.dumps(sut)
        decoded = json.loads(encoded)
        self.assertEqual(sut, decoded)
        encoded = ujson.encode(sut, double_precision=15)
        decoded = ujson.decode(encoded)
        self.assertEqual(sut, decoded)
    def test_encodeNonCLocale(self):
        import locale
        savedlocale = locale.getlocale(locale.LC_NUMERIC)
        try:
            locale.setlocale(locale.LC_NUMERIC, 'it_IT.UTF-8')
        except:
            try:
                locale.setlocale(locale.LC_NUMERIC, 'Italian_Italy')
            except:
                raise nose.SkipTest('Could not set locale for testing')
        self.assertEqual(ujson.loads(ujson.dumps(4.78e60)), 4.78e60)
        self.assertEqual(ujson.loads('4.78', precise_float=True), 4.78)
        locale.setlocale(locale.LC_NUMERIC, savedlocale)
    def test_encodeDecodeLongDecimal(self):
        sut = {u('a'): -528656961.4399388}
        encoded = ujson.dumps(sut, double_precision=15)
        ujson.decode(encoded)
    def test_decimalDecodeTestPrecise(self):
        sut = {u('a'): 4.56}
        encoded = ujson.encode(sut)
        decoded = ujson.decode(encoded, precise_float=True)
        self.assertEqual(sut, decoded)
    def test_encodeDoubleTinyExponential(self):
        if compat.is_platform_windows() and not compat.PY3:
            raise nose.SkipTest("buggy on win-64 for py2")
        num = 1e-40
        self.assertEqual(num, ujson.decode(ujson.encode(num)))
        num = 1e-100
        self.assertEqual(num, ujson.decode(ujson.encode(num)))
        num = -1e-45
        self.assertEqual(num, ujson.decode(ujson.encode(num)))
        num = -1e-145
        self.assertTrue(np.allclose(num, ujson.decode(ujson.encode(num))))
    def test_encodeDictWithUnicodeKeys(self):
        input = {u("key1"): u("value1"), u("key1"):
                 u("value1"), u("key1"): u("value1"),
                 u("key1"): u("value1"), u("key1"):
                 u("value1"), u("key1"): u("value1")}
        output = ujson.encode(input)
        input = {u("بن"): u("value1"), u("بن"): u("value1"),
                 u("بن"): u("value1"), u("بن"): u("value1"),
                 u("بن"): u("value1"), u("بن"): u("value1"),
                 u("بن"): u("value1")}
        output = ujson.encode(input)  # noqa
    def test_encodeDoubleConversion(self):
        input = math.pi
        output = ujson.encode(input)
        self.assertEqual(round(input, 5), round(json.loads(output), 5))
        self.assertEqual(round(input, 5), round(ujson.decode(output), 5))
    def test_encodeWithDecimal(self):
        input = 1.0
        output = ujson.encode(input)
        self.assertEqual(output, "1.0")
    def test_encodeDoubleNegConversion(self):
        input = -math.pi
        output = ujson.encode(input)
        self.assertEqual(round(input, 5), round(json.loads(output), 5))
        self.assertEqual(round(input, 5), round(ujson.decode(output), 5))
    def test_encodeArrayOfNestedArrays(self):
        input = [[[[]]]] * 20
        output = ujson.encode(input)
        self.assertEqual(input, json.loads(output))
        # self.assertEqual(output, json.dumps(input))
        self.assertEqual(input, ujson.decode(output))
        input = np.array(input)
        tm.assert_numpy_array_equal(input, ujson.decode(
            output, numpy=True, dtype=input.dtype))
    def test_encodeArrayOfDoubles(self):
        input = [31337.31337, 31337.31337, 31337.31337, 31337.31337] * 10
        output = ujson.encode(input)
        self.assertEqual(input, json.loads(output))
        # self.assertEqual(output, json.dumps(input))
        self.assertEqual(input, ujson.decode(output))
        tm.assert_numpy_array_equal(
            np.array(input), ujson.decode(output, numpy=True))
    def test_doublePrecisionTest(self):
        input = 30.012345678901234
        output = ujson.encode(input, double_precision=15)
        self.assertEqual(input, json.loads(output))
        self.assertEqual(input, ujson.decode(output))
        output = ujson.encode(input, double_precision=9)
        self.assertEqual(round(input, 9), json.loads(output))
        self.assertEqual(round(input, 9), ujson.decode(output))
        output = ujson.encode(input, double_precision=3)
        self.assertEqual(round(input, 3), json.loads(output))
        self.assertEqual(round(input, 3), ujson.decode(output))
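    # Added note (illustration only): double_precision caps the digits kept
    # after the decimal point on encode; the assertions above imply that
    # encoding 30.012345678901234 with double_precision=3 round-trips to
    # round(value, 3) via both json.loads and ujson.decode.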
    def test_invalidDoublePrecision(self):
        input = 30.12345678901234567890
        self.assertRaises(ValueError, ujson.encode, input, double_precision=20)
        self.assertRaises(ValueError, ujson.encode, input, double_precision=-1)
        # will throw TypeError
        self.assertRaises(TypeError, ujson.encode, input, double_precision='9')
        # will throw TypeError
        self.assertRaises(TypeError, ujson.encode,
                          input, double_precision=None)
    def test_encodeStringConversion2(self):
        input = "A string \\ / \b \f \n \r \t"
        output = ujson.encode(input)
        self.assertEqual(input, json.loads(output))
        self.assertEqual(output, '"A string \\\\ \\/ \\b \\f \\n \\r \\t"')
        self.assertEqual(input, ujson.decode(output))
        pass
    def test_decodeUnicodeConversion(self):
        pass
    def test_encodeUnicodeConversion1(self):
        input = "Räksmörgås اسامة بن محمد بن عوض بن لادن"
        enc = ujson.encode(input)
        dec = ujson.decode(enc)
        self.assertEqual(enc, json_unicode(input))
        self.assertEqual(dec, json.loads(enc))
    def test_encodeControlEscaping(self):
        input = "\x19"
        enc = ujson.encode(input)
        dec = ujson.decode(enc)
        self.assertEqual(input, dec)
        self.assertEqual(enc, json_unicode(input))
    def test_encodeUnicodeConversion2(self):
        input = "\xe6\x97\xa5\xd1\x88"
        enc = ujson.encode(input)
        dec = ujson.decode(enc)
        self.assertEqual(enc, json_unicode(input))
        self.assertEqual(dec, json.loads(enc))
    def test_encodeUnicodeSurrogatePair(self):
        _skip_if_python_ver(2, 5)
        _skip_if_python_ver(2, 6)
        input = "\xf0\x90\x8d\x86"
        enc = ujson.encode(input)
        dec = ujson.decode(enc)
        self.assertEqual(enc, json_unicode(input))
        self.assertEqual(dec, json.loads(enc))
    def test_encodeUnicode4BytesUTF8(self):
        _skip_if_python_ver(2, 5)
        _skip_if_python_ver(2, 6)
        input = "\xf0\x91\x80\xb0TRAILINGNORMAL"
        enc = ujson.encode(input)
        dec = ujson.decode(enc)
        self.assertEqual(enc, json_unicode(input))
        self.assertEqual(dec, json.loads(enc))
    def test_encodeUnicode4BytesUTF8Highest(self):
        _skip_if_python_ver(2, 5)
        _skip_if_python_ver(2, 6)
        input = "\xf3\xbf\xbf\xbfTRAILINGNORMAL"
        enc = ujson.encode(input)
        dec = ujson.decode(enc)
        self.assertEqual(enc, json_unicode(input))
        self.assertEqual(dec, json.loads(enc))
    def test_encodeArrayInArray(self):
        input = [[[[]]]]
        output = ujson.encode(input)
        self.assertEqual(input, json.loads(output))
        self.assertEqual(output, json.dumps(input))
        self.assertEqual(input, ujson.decode(output))
        tm.assert_numpy_array_equal(
            np.array(input), ujson.decode(output, numpy=True))
        pass
    def test_encodeIntConversion(self):
        input = 31337
        output = ujson.encode(input)
        self.assertEqual(input, json.loads(output))
        self.assertEqual(output, json.dumps(input))
        self.assertEqual(input, ujson.decode(output))
        pass
    def test_encodeIntNegConversion(self):
        input = -31337
        output = ujson.encode(input)
        self.assertEqual(input, json.loads(output))
        self.assertEqual(output, json.dumps(input))
        self.assertEqual(input, ujson.decode(output))
        pass
    def test_encodeLongNegConversion(self):
        input = -9223372036854775808
        output = ujson.encode(input)
        self.assertEqual(input, json.loads(output))
        self.assertEqual(output, json.dumps(input))
        self.assertEqual(input, ujson.decode(output))
    def test_encodeListConversion(self):
        input = [1, 2, 3, 4]
        output = ujson.encode(input)
        self.assertEqual(input, json.loads(output))
        self.assertEqual(input, ujson.decode(output))
        tm.assert_numpy_array_equal(
            np.array(input), ujson.decode(output, numpy=True))
        pass
    def test_encodeDictConversion(self):
        input = {"k1": 1, "k2": 2, "k3": 3, "k4": 4}
        output = ujson.encode(input)  # noqa
        self.assertEqual(input, json.loads(output))
        self.assertEqual(input, ujson.decode(output))
        self.assertEqual(input, ujson.decode(output))
        pass
    def test_encodeNoneConversion(self):
        input = None
        output = ujson.encode(input)
        self.assertEqual(input, json.loads(output))
        self.assertEqual(output, json.dumps(input))
        self.assertEqual(input, ujson.decode(output))
        pass
    def test_encodeTrueConversion(self):
        input = True
        output = ujson.encode(input)
        self.assertEqual(input, json.loads(output))
        self.assertEqual(output, json.dumps(input))
        self.assertEqual(input, ujson.decode(output))
        pass
    def test_encodeFalseConversion(self):
        input = False
        output = ujson.encode(input)
        self.assertEqual(input, json.loads(output))
        self.assertEqual(output, json.dumps(input))
        self.assertEqual(input, ujson.decode(output))
    def test_encodeDatetimeConversion(self):
        ts = time.time()
        input = datetime.datetime.fromtimestamp(ts)
        output = ujson.encode(input, date_unit='s')
        expected = calendar.timegm(input.utctimetuple())
        self.assertEqual(int(expected), json.loads(output))
        self.assertEqual(int(expected), ujson.decode(output))
    def test_encodeDateConversion(self):
        ts = time.time()
        input = datetime.date.fromtimestamp(ts)
        output = ujson.encode(input, date_unit='s')
        tup = (input.year, input.month, input.day, 0, 0, 0)
        expected = calendar.timegm(tup)
        self.assertEqual(int(expected), json.loads(output))
        self.assertEqual(int(expected), ujson.decode(output))
    def test_encodeTimeConversion(self):
        tests = [
            datetime.time(),
            datetime.time(1, 2, 3),
            datetime.time(10, 12, 15, 343243),
            datetime.time(10, 12, 15, 343243, pytz.utc),
            # datetime.time(10, 12, 15, 343243, dateutil.tz.gettz('UTC')),  #
            # this segfaults! No idea why.
        ]
        for test in tests:
            output = ujson.encode(test)
            expected = '"%s"' % test.isoformat()
            self.assertEqual(expected, output)
    def test_nat(self):
        input = NaT
        assert ujson.encode(input) == 'null', "Expected null"
    def test_npy_nat(self):
        from distutils.version import LooseVersion
        if LooseVersion(np.__version__) < '1.7.0':
            raise nose.SkipTest("numpy version < 1.7.0, is "
                                "{0}".format(np.__version__))
        input = np.datetime64('NaT')
        assert ujson.encode(input) == 'null', "Expected null"
    def test_datetime_units(self):
        from pandas.lib import Timestamp
        val = datetime.datetime(2013, 8, 17, 21, 17, 12, 215504)
        stamp = Timestamp(val)
        roundtrip = ujson.decode(ujson.encode(val, date_unit='s'))
        self.assertEqual(roundtrip, stamp.value // 10**9)
        roundtrip = ujson.decode(ujson.encode(val, date_unit='ms'))
        self.assertEqual(roundtrip, stamp.value // 10**6)
        roundtrip = ujson.decode(ujson.encode(val, date_unit='us'))
        self.assertEqual(roundtrip, stamp.value // 10**3)
        roundtrip = ujson.decode(ujson.encode(val, date_unit='ns'))
        self.assertEqual(roundtrip, stamp.value)
        self.assertRaises(ValueError, ujson.encode, val, date_unit='foo')
    def test_encodeToUTF8(self):
        _skip_if_python_ver(2, 5)
        input = "\xe6\x97\xa5\xd1\x88"
        enc = ujson.encode(input, ensure_ascii=False)
        dec = ujson.decode(enc)
        self.assertEqual(enc, json_unicode(input, ensure_ascii=False))
        self.assertEqual(dec, json.loads(enc))
    def test_decodeFromUnicode(self):
        input = u("{\"obj\": 31337}")
        dec1 = ujson.decode(input)
        dec2 = ujson.decode(str(input))
        self.assertEqual(dec1, dec2)
    def test_encodeRecursionMax(self):
        # 8 is the max recursion depth
        class O2:
            member = 0
            pass
        class O1:
            member = 0
            pass
        input = O1()
        input.member = O2()
        input.member.member = input
        try:
            output = ujson.encode(input)  # noqa
            assert False, "Expected overflow exception"
        except(OverflowError):
            pass
    def test_encodeDoubleNan(self):
        input = np.nan
        assert ujson.encode(input) == 'null', "Expected null"
    def test_encodeDoubleInf(self):
        input = np.inf
        assert ujson.encode(input) == 'null', "Expected null"
    def test_encodeDoubleNegInf(self):
        input = -np.inf
        assert ujson.encode(input) == 'null', "Expected null"
    def test_decodeJibberish(self):
        input = "fdsa sda v9sa fdsa"
        try:
            ujson.decode(input)
            assert False, "Expected exception!"
        except(ValueError):
            return
        assert False, "Wrong exception"
    def test_decodeBrokenArrayStart(self):
        input = "["
        try:
            ujson.decode(input)
            assert False, "Expected exception!"
        except(ValueError):
            return
        assert False, "Wrong exception"
    def test_decodeBrokenObjectStart(self):
        input = "{"
        try:
            ujson.decode(input)
            assert False, "Expected exception!"
        except(ValueError):
            return
        assert False, "Wrong exception"
    def test_decodeBrokenArrayEnd(self):
        input = "]"
        try:
            ujson.decode(input)
            assert False, "Expected exception!"
        except(ValueError):
            return
        assert False, "Wrong exception"
    def test_decodeArrayDepthTooBig(self):
        input = '[' * (1024 * 1024)
        try:
            ujson.decode(input)
            assert False, "Expected exception!"
        except(ValueError):
            return
        assert False, "Wrong exception"
    def test_decodeBrokenObjectEnd(self):
        input = "}"
        try:
            ujson.decode(input)
            assert False, "Expected exception!"
        except(ValueError):
            return
        assert False, "Wrong exception"
    def test_decodeObjectDepthTooBig(self):
        input = '{' * (1024 * 1024)
        try:
            ujson.decode(input)
            assert False, "Expected exception!"
        except(ValueError):
            return
        assert False, "Wrong exception"
    def test_decodeStringUnterminated(self):
        input = "\"TESTING"
        try:
            ujson.decode(input)
            assert False, "Expected exception!"
        except(ValueError):
            return
        assert False, "Wrong exception"
    def test_decodeStringUntermEscapeSequence(self):
        input = "\"TESTING\\\""
        try:
            ujson.decode(input)
            assert False, "Expected exception!"
        except(ValueError):
            return
        assert False, "Wrong exception"
    def test_decodeStringBadEscape(self):
        input = "\"TESTING\\\""
        try:
            ujson.decode(input)
            assert False, "Expected exception!"
        except(ValueError):
            return
        assert False, "Wrong exception"
    def test_decodeTrueBroken(self):
        input = "tru"
        try:
            ujson.decode(input)
            assert False, "Expected exception!"
        except(ValueError):
            return
        assert False, "Wrong exception"
    def test_decodeFalseBroken(self):
        input = "fa"
        try:
            ujson.decode(input)
            assert False, "Expected exception!"
        except(ValueError):
            return
        assert False, "Wrong exception"
    def test_decodeNullBroken(self):
        input = "n"
        try:
            ujson.decode(input)
            assert False, "Expected exception!"
        except(ValueError):
            return
        assert False, "Wrong exception"
    def test_decodeBrokenDictKeyTypeLeakTest(self):
        input = '{{1337:""}}'
        for x in range(1000):
            try:
                ujson.decode(input)
                assert False, "Expected exception!"
            except ValueError:
                continue
            assert False, "Wrong exception"
    def test_decodeBrokenDictLeakTest(self):
        input = '{{"key":"}'
        for x in range(1000):
            try:
                ujson.decode(input)
                assert False, "Expected exception!"
            except(ValueError):
                continue
            assert False, "Wrong exception"
    def test_decodeBrokenListLeakTest(self):
        input = '[[[true'
        for x in range(1000):
            try:
                ujson.decode(input)
                assert False, "Expected exception!"
            except(ValueError):
                continue
            assert False, "Wrong exception"
    def test_decodeDictWithNoKey(self):
        input = "{{{{31337}}}}"
        try:
            ujson.decode(input)
            assert False, "Expected exception!"
        except(ValueError):
            return
        assert False, "Wrong exception"
    def test_decodeDictWithNoColonOrValue(self):
        input = "{{{{\"key\"}}}}"
        try:
            ujson.decode(input)
            assert False, "Expected exception!"
        except(ValueError):
            return
        assert False, "Wrong exception"
    def test_decodeDictWithNoValue(self):
        input = "{{{{\"key\":}}}}"
        try:
            ujson.decode(input)
            assert False, "Expected exception!"
        except(ValueError):
            return
        assert False, "Wrong exception"
    def test_decodeNumericIntPos(self):
        input = "31337"
        self.assertEqual(31337, ujson.decode(input))
    def test_decodeNumericIntNeg(self):
        input = "-31337"
        self.assertEqual(-31337, ujson.decode(input))
    def test_encodeUnicode4BytesUTF8Fail(self):
        _skip_if_python_ver(3)
        input = "\xfd\xbf\xbf\xbf\xbf\xbf"
        try:
            enc = ujson.encode(input)  # noqa
            assert False, "Expected exception"
        except OverflowError:
            pass
    def test_encodeNullCharacter(self):
        input = "31337 \x00 1337"
        output = ujson.encode(input)
        self.assertEqual(input, json.loads(output))
        self.assertEqual(output, json.dumps(input))
        self.assertEqual(input, ujson.decode(output))
        input = "\x00"
        output = ujson.encode(input)
        self.assertEqual(input, json.loads(output))
        self.assertEqual(output, json.dumps(input))
        self.assertEqual(input, ujson.decode(output))
        self.assertEqual('"  \\u0000\\r\\n "', ujson.dumps(u("  \u0000\r\n ")))
        pass
    def test_decodeNullCharacter(self):
        input = "\"31337 \\u0000 31337\""
        self.assertEqual(ujson.decode(input), json.loads(input))
    def test_encodeListLongConversion(self):
        input = [9223372036854775807, 9223372036854775807, 9223372036854775807,
                 9223372036854775807, 9223372036854775807, 9223372036854775807]
        output = ujson.encode(input)
        self.assertEqual(input, json.loads(output))
        self.assertEqual(input, ujson.decode(output))
        tm.assert_numpy_array_equal(np.array(input),
                                    ujson.decode(output, numpy=True,
                                                 dtype=np.int64))
        pass
    def test_encodeLongConversion(self):
        input = 9223372036854775807
        output = ujson.encode(input)
        self.assertEqual(input, json.loads(output))
        self.assertEqual(output, json.dumps(input))
        self.assertEqual(input, ujson.decode(output))
        pass
    def test_numericIntExp(self):
        input = "1337E40"
        output = ujson.decode(input)
        self.assertEqual(output, json.loads(input))
    def test_numericIntFrcExp(self):
        input = "1.337E40"
        output = ujson.decode(input)
        self.assertAlmostEqual(output, json.loads(input))
    def test_decodeNumericIntExpEPLUS(self):
        input = "1337E+9"
        output = ujson.decode(input)
        self.assertAlmostEqual(output, json.loads(input))
    def test_decodeNumericIntExpePLUS(self):
        input = "1.337e+40"
        output = ujson.decode(input)
        self.assertAlmostEqual(output, json.loads(input))
    def test_decodeNumericIntExpE(self):
        input = "1337E40"
        output = ujson.decode(input)
        self.assertAlmostEqual(output, json.loads(input))
    def test_decodeNumericIntExpe(self):
        input = "1337e40"
        output = ujson.decode(input)
        self.assertAlmostEqual(output, json.loads(input))
    def test_decodeNumericIntExpEMinus(self):
        input = "1.337E-4"
        output = ujson.decode(input)
        self.assertAlmostEqual(output, json.loads(input))
    def test_decodeNumericIntExpeMinus(self):
        input = "1.337e-4"
        output = ujson.decode(input)
        self.assertAlmostEqual(output, json.loads(input))
    def test_dumpToFile(self):
        f = StringIO()
        ujson.dump([1, 2, 3], f)
        self.assertEqual("[1,2,3]", f.getvalue())
    def test_dumpToFileLikeObject(self):
        class filelike:
            def __init__(self):
                self.bytes = ''
            def write(self, bytes):
                self.bytes += bytes
        f = filelike()
        ujson.dump([1, 2, 3], f)
        self.assertEqual("[1,2,3]", f.bytes)
    def test_dumpFileArgsError(self):
        try:
            ujson.dump([], '')
        except TypeError:
            pass
        else:
            assert False, 'expected TypeError'
    def test_loadFile(self):
        f = StringIO("[1,2,3,4]")
        self.assertEqual([1, 2, 3, 4], ujson.load(f))
        f = StringIO("[1,2,3,4]")
        tm.assert_numpy_array_equal(
            np.array([1, 2, 3, 4]), ujson.load(f, numpy=True))
    def test_loadFileLikeObject(self):
        class filelike:
            def read(self):
                try:
                    self.end
                except AttributeError:
                    self.end = True
                    return "[1,2,3,4]"
        f = filelike()
        self.assertEqual([1, 2, 3, 4], ujson.load(f))
        f = filelike()
        tm.assert_numpy_array_equal(
            np.array([1, 2, 3, 4]), ujson.load(f, numpy=True))
    def test_loadFileArgsError(self):
        try:
            ujson.load("[]")
        except TypeError:
            pass
        else:
            assert False, "expected TypeError"
    def test_version(self):
        assert re.match(r'^\d+\.\d+(\.\d+)?$', ujson.__version__), \
            "ujson.__version__ must be a string like '1.4.0'"
    def test_encodeNumericOverflow(self):
        try:
            ujson.encode(12839128391289382193812939)
        except OverflowError:
            pass
        else:
            assert False, "expected OverflowError"
    def test_encodeNumericOverflowNested(self):
        for n in range(0, 100):
            class Nested:
                x = 12839128391289382193812939
            nested = Nested()
            try:
                ujson.encode(nested)
            except OverflowError:
                pass
            else:
                assert False, "expected OverflowError"
    def test_decodeNumberWith32bitSignBit(self):
        # Test that numbers that fit within 32 bits but would have the
        # sign bit set (2**31 <= x < 2**32) are decoded properly.
        boundary1 = 2**31  # noqa
        boundary2 = 2**32  # noqa
        docs = (
            '{"id": 3590016419}',
            '{"id": %s}' % 2**31,
            '{"id": %s}' % 2**32,
            '{"id": %s}' % ((2**32) - 1),
        )
        results = (3590016419, 2**31, 2**32, 2**32 - 1)
        for doc, result in zip(docs, results):
            self.assertEqual(ujson.decode(doc)['id'], result)
    def test_encodeBigEscape(self):
        for x in range(10):
            if compat.PY3:
                base = '\u00e5'.encode('utf-8')
            else:
                base = "\xc3\xa5"
            input = base * 1024 * 1024 * 2
            output = ujson.encode(input)  # noqa
    def test_decodeBigEscape(self):
        for x in range(10):
            if compat.PY3:
                base = '\u00e5'.encode('utf-8')
            else:
                base = "\xc3\xa5"
            quote = compat.str_to_bytes("\"")
            input = quote + (base * 1024 * 1024 * 2) + quote
            output = ujson.decode(input)  # noqa
    def test_toDict(self):
        d = {u("key"): 31337}
        class DictTest:
            def toDict(self):
                return d
        o = DictTest()
        output = ujson.encode(o)
        dec = ujson.decode(output)
        self.assertEqual(dec, d)
    def test_defaultHandler(self):
        class _TestObject(object):
            def __init__(self, val):
                self.val = val
            @property
            def recursive_attr(self):
                return _TestObject("recursive_attr")
            def __str__(self):
                return str(self.val)
        self.assertRaises(OverflowError, ujson.encode, _TestObject("foo"))
        self.assertEqual('"foo"', ujson.encode(_TestObject("foo"),
                                               default_handler=str))
        def my_handler(obj):
            return "foobar"
        self.assertEqual('"foobar"', ujson.encode(_TestObject("foo"),
                                                  default_handler=my_handler))
        def my_handler_raises(obj):
            raise TypeError("I raise for anything")
        with tm.assertRaisesRegexp(TypeError, "I raise for anything"):
            ujson.encode(_TestObject("foo"), default_handler=my_handler_raises)
        def my_int_handler(obj):
            return 42
        self.assertEqual(
            42, ujson.decode(ujson.encode(_TestObject("foo"),
                                          default_handler=my_int_handler)))
        def my_obj_handler(obj):
            return datetime.datetime(2013, 2, 3)
        self.assertEqual(
            ujson.decode(ujson.encode(datetime.datetime(2013, 2, 3))),
            ujson.decode(ujson.encode(_TestObject("foo"),
                                      default_handler=my_obj_handler)))
        l = [_TestObject("foo"), _TestObject("bar")]
        self.assertEqual(json.loads(json.dumps(l, default=str)),
                         ujson.decode(ujson.encode(l, default_handler=str)))
class NumpyJSONTests(TestCase):
    def testBool(self):
        b = np.bool(True)
        self.assertEqual(ujson.decode(ujson.encode(b)), b)
    def testBoolArray(self):
        inpt = np.array([True, False, True, True, False, True, False, False],
                        dtype=np.bool)
        outp = np.array(ujson.decode(ujson.encode(inpt)), dtype=np.bool)
        tm.assert_numpy_array_equal(inpt, outp)
    def testInt(self):
        num = np.int(2562010)
        self.assertEqual(np.int(ujson.decode(ujson.encode(num))), num)
        num = np.int8(127)
        self.assertEqual(np.int8(ujson.decode(ujson.encode(num))), num)
        num = np.int16(2562010)
        self.assertEqual(np.int16(ujson.decode(ujson.encode(num))), num)
        num = np.int32(2562010)
        self.assertEqual(np.int32(ujson.decode(ujson.encode(num))), num)
        num = np.int64(2562010)
        self.assertEqual(np.int64(ujson.decode(ujson.encode(num))), num)
        num = np.uint8(255)
        self.assertEqual(np.uint8(ujson.decode(ujson.encode(num))), num)
        num = np.uint16(2562010)
        self.assertEqual(np.uint16(ujson.decode(ujson.encode(num))), num)
        num = np.uint32(2562010)
        self.assertEqual(np.uint32(ujson.decode(ujson.encode(num))), num)
        num = np.uint64(2562010)
        self.assertEqual(np.uint64(ujson.decode(ujson.encode(num))), num)
    def testIntArray(self):
        arr = np.arange(100, dtype=np.int)
        dtypes = (np.int, np.int8, np.int16, np.int32, np.int64,
                  np.uint, np.uint8, np.uint16, np.uint32, np.uint64)
        for dtype in dtypes:
            inpt = arr.astype(dtype)
            outp = np.array(ujson.decode(ujson.encode(inpt)), dtype=dtype)
            tm.assert_numpy_array_equal(inpt, outp)
    def testIntMax(self):
        num = np.int(np.iinfo(np.int).max)
        self.assertEqual(np.int(ujson.decode(ujson.encode(num))), num)
        num = np.int8(np.iinfo(np.int8).max)
        self.assertEqual(np.int8(ujson.decode(ujson.encode(num))), num)
        num = np.int16(np.iinfo(np.int16).max)
        self.assertEqual(np.int16(ujson.decode(ujson.encode(num))), num)
        num = np.int32(np.iinfo(np.int32).max)
        self.assertEqual(np.int32(ujson.decode(ujson.encode(num))), num)
        num = np.uint8(np.iinfo(np.uint8).max)
        self.assertEqual(np.uint8(ujson.decode(ujson.encode(num))), num)
        num = np.uint16(np.iinfo(np.uint16).max)
        self.assertEqual(np.uint16(ujson.decode(ujson.encode(num))), num)
        num = np.uint32(np.iinfo(np.uint32).max)
        self.assertEqual(np.uint32(ujson.decode(ujson.encode(num))), num)
        if platform.architecture()[0] != '32bit':
            num = np.int64(np.iinfo(np.int64).max)
            self.assertEqual(np.int64(ujson.decode(ujson.encode(num))), num)
            # uint64 max will always overflow as it's encoded to signed
            num = np.uint64(np.iinfo(np.int64).max)
            self.assertEqual(np.uint64(ujson.decode(ujson.encode(num))), num)
    def testFloat(self):
        num = np.float(256.2013)
        self.assertEqual(np.float(ujson.decode(ujson.encode(num))), num)
        num = np.float32(256.2013)
        self.assertEqual(np.float32(ujson.decode(ujson.encode(num))), num)
        num = np.float64(256.2013)
        self.assertEqual(np.float64(ujson.decode(ujson.encode(num))), num)
    def testFloatArray(self):
        arr = np.arange(12.5, 185.72, 1.7322, dtype=np.float)
        dtypes = (np.float, np.float32, np.float64)
        for dtype in dtypes:
            inpt = arr.astype(dtype)
            outp = np.array(ujson.decode(ujson.encode(
                inpt, double_precision=15)), dtype=dtype)
            assert_array_almost_equal_nulp(inpt, outp)
    def testFloatMax(self):
        num = np.float(np.finfo(np.float).max / 10)
        assert_approx_equal(np.float(ujson.decode(
            ujson.encode(num, double_precision=15))), num, 15)
        num = np.float32(np.finfo(np.float32).max / 10)
        assert_approx_equal(np.float32(ujson.decode(
            ujson.encode(num, double_precision=15))), num, 15)
        num = np.float64(np.finfo(np.float64).max / 10)
        assert_approx_equal(np.float64(ujson.decode(
            ujson.encode(num, double_precision=15))), num, 15)
    def testArrays(self):
        arr = np.arange(100)
        arr = arr.reshape((10, 10))
        tm.assert_numpy_array_equal(
            np.array(ujson.decode(ujson.encode(arr))), arr)
        tm.assert_numpy_array_equal(ujson.decode(
            ujson.encode(arr), numpy=True), arr)
        arr = arr.reshape((5, 5, 4))
        tm.assert_numpy_array_equal(
            np.array(ujson.decode(ujson.encode(arr))), arr)
        tm.assert_numpy_array_equal(ujson.decode(
            ujson.encode(arr), numpy=True), arr)
        arr = arr.reshape((100, 1))
        tm.assert_numpy_array_equal(
            np.array(ujson.decode(ujson.encode(arr))), arr)
        tm.assert_numpy_array_equal(ujson.decode(
            ujson.encode(arr), numpy=True), arr)
        arr = np.arange(96)
        arr = arr.reshape((2, 2, 2, 2, 3, 2))
        tm.assert_numpy_array_equal(
            np.array(ujson.decode(ujson.encode(arr))), arr)
        tm.assert_numpy_array_equal(ujson.decode(
            ujson.encode(arr), numpy=True), arr)
        l = ['a', list(), dict(), dict(), list(),
             42, 97.8, ['a', 'b'], {'key': 'val'}]
        arr = np.array(l)
        tm.assert_numpy_array_equal(
            np.array(ujson.decode(ujson.encode(arr))), arr)
        arr = np.arange(100.202, 200.202, 1, dtype=np.float32)
        arr = arr.reshape((5, 5, 4))
        outp = np.array(ujson.decode(ujson.encode(arr)), dtype=np.float32)
        assert_array_almost_equal_nulp(arr, outp)
        outp = ujson.decode(ujson.encode(arr), numpy=True, dtype=np.float32)
        assert_array_almost_equal_nulp(arr, outp)
    def testOdArray(self):
        def will_raise():
            ujson.encode(np.array(1))
        self.assertRaises(TypeError, will_raise)
    def testArrayNumpyExcept(self):
        input = ujson.dumps([42, {}, 'a'])
        try:
            ujson.decode(input, numpy=True)
            assert False, "Expected exception!"
        except(TypeError):
            pass
        except:
            assert False, "Wrong exception"
        input = ujson.dumps(['a', 'b', [], 'c'])
        try:
            ujson.decode(input, numpy=True)
            assert False, "Expected exception!"
        except(ValueError):
            pass
        except:
            assert False, "Wrong exception"
        input = ujson.dumps([['a'], 42])
        try:
            ujson.decode(input, numpy=True)
            assert False, "Expected exception!"
        except(ValueError):
            pass
        except:
            assert False, "Wrong exception"
        input = ujson.dumps([42, ['a'], 42])
        try:
            ujson.decode(input, numpy=True)
            assert False, "Expected exception!"
        except(ValueError):
            pass
        except:
            assert False, "Wrong exception"
        input = ujson.dumps([{}, []])
        try:
            ujson.decode(input, numpy=True)
            assert False, "Expected exception!"
        except(ValueError):
            pass
        except:
            assert False, "Wrong exception"
        input = ujson.dumps([42, None])
        try:
            ujson.decode(input, numpy=True)
            assert False, "Expected exception!"
        except(TypeError):
            pass
        except:
            assert False, "Wrong exception"
        input = ujson.dumps([{'a': 'b'}])
        try:
            ujson.decode(input, numpy=True, labelled=True)
            assert False, "Expected exception!"
        except(ValueError):
            pass
        except:
            assert False, "Wrong exception"
        input = ujson.dumps({'a': {'b': {'c': 42}}})
        try:
            ujson.decode(input, numpy=True, labelled=True)
            assert False, "Expected exception!"
        except(ValueError):
            pass
        except:
            assert False, "Wrong exception"
        input = ujson.dumps([{'a': 42, 'b': 23}, {'c': 17}])
        try:
            ujson.decode(input, numpy=True, labelled=True)
            assert False, "Expected exception!"
        except(ValueError):
            pass
        except:
            assert False, "Wrong exception"
    def testArrayNumpyLabelled(self):
        input = {'a': []}
        output = ujson.loads(ujson.dumps(input), numpy=True, labelled=True)
        self.assertTrue((np.empty((1, 0)) == output[0]).all())
        self.assertTrue((np.array(['a']) == output[1]).all())
        self.assertTrue(output[2] is None)
        input = [{'a': 42}]
        output = ujson.loads(ujson.dumps(input), numpy=True, labelled=True)
        self.assertTrue((np.array([42]) == output[0]).all())
        self.assertTrue(output[1] is None)
        self.assertTrue((np.array([u('a')]) == output[2]).all())
        # Write out the dump explicitly so there is no dependency on iteration
        # order GH10837
        input_dumps = ('[{"a": 42, "b":31}, {"a": 24, "c": 99}, '
                       '{"a": 2.4, "b": 78}]')
        output = ujson.loads(input_dumps, numpy=True, labelled=True)
        expectedvals = np.array(
            [42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3, 2))
        self.assertTrue((expectedvals == output[0]).all())
        self.assertTrue(output[1] is None)
        self.assertTrue((np.array([u('a'), 'b']) == output[2]).all())
        input_dumps = ('{"1": {"a": 42, "b":31}, "2": {"a": 24, "c": 99}, '
                       '"3": {"a": 2.4, "b": 78}}')
        output = ujson.loads(input_dumps, numpy=True, labelled=True)
        expectedvals = np.array(
            [42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3, 2))
        self.assertTrue((expectedvals == output[0]).all())
        self.assertTrue((np.array(['1', '2', '3']) == output[1]).all())
        self.assertTrue((np.array(['a', 'b']) == output[2]).all())
class PandasJSONTests(TestCase):
    def testDataFrame(self):
        df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[
                       'a', 'b'], columns=['x', 'y', 'z'])
        # column indexed
        outp = DataFrame(ujson.decode(ujson.encode(df)))
        self.assertTrue((df == outp).values.all())
        tm.assert_numpy_array_equal(df.columns, outp.columns)
        tm.assert_numpy_array_equal(df.index, outp.index)
        dec = _clean_dict(ujson.decode(ujson.encode(df, orient="split")))
        outp = DataFrame(**dec)
        self.assertTrue((df == outp).values.all())
        tm.assert_numpy_array_equal(df.columns, outp.columns)
        tm.assert_numpy_array_equal(df.index, outp.index)
        outp = DataFrame(ujson.decode(ujson.encode(df, orient="records")))
        outp.index = df.index
        self.assertTrue((df == outp).values.all())
        tm.assert_numpy_array_equal(df.columns, outp.columns)
        outp = DataFrame(ujson.decode(ujson.encode(df, orient="values")))
        outp.index = df.index
        self.assertTrue((df.values == outp.values).all())
        outp = DataFrame(ujson.decode(ujson.encode(df, orient="index")))
        self.assertTrue((df.transpose() == outp).values.all())
        tm.assert_numpy_array_equal(df.transpose().columns, outp.columns)
        tm.assert_numpy_array_equal(df.transpose().index, outp.index)
    def testDataFrameNumpy(self):
        df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[
                       'a', 'b'], columns=['x', 'y', 'z'])
        # column indexed
        outp = DataFrame(ujson.decode(ujson.encode(df), numpy=True))
        self.assertTrue((df == outp).values.all())
        tm.assert_numpy_array_equal(df.columns, outp.columns)
        tm.assert_numpy_array_equal(df.index, outp.index)
        dec = _clean_dict(ujson.decode(ujson.encode(df, orient="split"),
                                       numpy=True))
        outp = DataFrame(**dec)
        self.assertTrue((df == outp).values.all())
        tm.assert_numpy_array_equal(df.columns, outp.columns)
        tm.assert_numpy_array_equal(df.index, outp.index)
        outp = DataFrame(ujson.decode(
            ujson.encode(df, orient="index"), numpy=True))
        self.assertTrue((df.transpose() == outp).values.all())
        tm.assert_numpy_array_equal(df.transpose().columns, outp.columns)
        tm.assert_numpy_array_equal(df.transpose().index, outp.index)
    def testDataFrameNested(self):
        df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[
                       'a', 'b'], columns=['x', 'y', 'z'])
        nested = {'df1': df, 'df2': df.copy()}
        exp = {'df1': ujson.decode(ujson.encode(df)),
               'df2': ujson.decode(ujson.encode(df))}
        self.assertTrue(ujson.decode(ujson.encode(nested)) == exp)
        exp = {'df1': ujson.decode(ujson.encode(df, orient="index")),
               'df2': ujson.decode(ujson.encode(df, orient="index"))}
        self.assertTrue(ujson.decode(
            ujson.encode(nested, orient="index")) == exp)
        exp = {'df1': ujson.decode(ujson.encode(df, orient="records")),
               'df2': ujson.decode(ujson.encode(df, orient="records"))}
        self.assertTrue(ujson.decode(
            ujson.encode(nested, orient="records")) == exp)
        exp = {'df1': ujson.decode(ujson.encode(df, orient="values")),
               'df2': ujson.decode(ujson.encode(df, orient="values"))}
        self.assertTrue(ujson.decode(
            ujson.encode(nested, orient="values")) == exp)
        exp = {'df1': ujson.decode(ujson.encode(df, orient="split")),
               'df2': ujson.decode(ujson.encode(df, orient="split"))}
        self.assertTrue(ujson.decode(
            ujson.encode(nested, orient="split")) == exp)
    def testDataFrameNumpyLabelled(self):
        df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[
                       'a', 'b'], columns=['x', 'y', 'z'])
        # column indexed
        outp = DataFrame(*ujson.decode(ujson.encode(df),
                                       numpy=True, labelled=True))
        self.assertTrue((df.T == outp).values.all())
        tm.assert_numpy_array_equal(df.T.columns, outp.columns)
        tm.assert_numpy_array_equal(df.T.index, outp.index)
        outp = DataFrame(*ujson.decode(ujson.encode(df, orient="records"),
                                       numpy=True, labelled=True))
        outp.index = df.index
        self.assertTrue((df == outp).values.all())
        tm.assert_numpy_array_equal(df.columns, outp.columns)
        outp = DataFrame(*ujson.decode(ujson.encode(df, orient="index"),
                                       numpy=True, labelled=True))
        self.assertTrue((df == outp).values.all())
        tm.assert_numpy_array_equal(df.columns, outp.columns)
        tm.assert_numpy_array_equal(df.index, outp.index)
    def testSeries(self):
        s = Series([10, 20, 30, 40, 50, 60], name="series",
                   index=[6, 7, 8, 9, 10, 15]).sort_values()
        # column indexed
        outp = Series(ujson.decode(ujson.encode(s))).sort_values()
        self.assertTrue((s == outp).values.all())
        outp = Series(ujson.decode(ujson.encode(s), numpy=True)).sort_values()
        self.assertTrue((s == outp).values.all())
        dec = _clean_dict(ujson.decode(ujson.encode(s, orient="split")))
        outp = Series(**dec)
        self.assertTrue((s == outp).values.all())
        self.assertTrue(s.name == outp.name)
        dec = _clean_dict(ujson.decode(ujson.encode(s, orient="split"),
                                       numpy=True))
        outp = Series(**dec)
        self.assertTrue((s == outp).values.all())
        self.assertTrue(s.name == outp.name)
        outp = Series(ujson.decode(ujson.encode(
            s, orient="records"), numpy=True))
        self.assertTrue((s == outp).values.all())
        outp = Series(ujson.decode(ujson.encode(s, orient="records")))
        self.assertTrue((s == outp).values.all())
        outp = Series(ujson.decode(
            ujson.encode(s, orient="values"), numpy=True))
        self.assertTrue((s == outp).values.all())
        outp = Series(ujson.decode(ujson.encode(s, orient="values")))
        self.assertTrue((s == outp).values.all())
        outp = Series(ujson.decode(ujson.encode(
            s, orient="index"))).sort_values()
        self.assertTrue((s == outp).values.all())
        outp = Series(ujson.decode(ujson.encode(
            s, orient="index"), numpy=True)).sort_values()
        self.assertTrue((s == outp).values.all())
    def testSeriesNested(self):
        s = Series([10, 20, 30, 40, 50, 60], name="series",
                   index=[6, 7, 8, 9, 10, 15]).sort_values()
        nested = {'s1': s, 's2': s.copy()}
        exp = {'s1': ujson.decode(ujson.encode(s)),
               's2': ujson.decode(ujson.encode(s))}
        self.assertTrue(ujson.decode(ujson.encode(nested)) == exp)
        exp = {'s1': ujson.decode(ujson.encode(s, orient="split")),
               's2': ujson.decode(ujson.encode(s, orient="split"))}
        self.assertTrue(ujson.decode(
            ujson.encode(nested, orient="split")) == exp)
        exp = {'s1': ujson.decode(ujson.encode(s, orient="records")),
               's2': ujson.decode(ujson.encode(s, orient="records"))}
        self.assertTrue(ujson.decode(
            ujson.encode(nested, orient="records")) == exp)
        exp = {'s1': ujson.decode(ujson.encode(s, orient="values")),
               's2': ujson.decode(ujson.encode(s, orient="values"))}
        self.assertTrue(ujson.decode(
            ujson.encode(nested, orient="values")) == exp)
        exp = {'s1': ujson.decode(ujson.encode(s, orient="index")),
               's2': ujson.decode(ujson.encode(s, orient="index"))}
        self.assertTrue(ujson.decode(
            ujson.encode(nested, orient="index")) == exp)
    def testIndex(self):
        i = Index([23, 45, 18, 98, 43, 11], name="index")
        # column indexed
        outp = Index(ujson.decode(ujson.encode(i)))
        self.assertTrue(i.equals(outp))
        outp = Index(ujson.decode(ujson.encode(i), numpy=True))
        self.assertTrue(i.equals(outp))
        dec = _clean_dict(ujson.decode(ujson.encode(i, orient="split")))
        outp = Index(**dec)
        self.assertTrue(i.equals(outp))
        self.assertTrue(i.name == outp.name)
        dec = _clean_dict(ujson.decode(ujson.encode(i, orient="split"),
                                       numpy=True))
        outp = Index(**dec)
        self.assertTrue(i.equals(outp))
        self.assertTrue(i.name == outp.name)
        outp = Index(ujson.decode(ujson.encode(i, orient="values")))
        self.assertTrue(i.equals(outp))
        outp = Index(ujson.decode(ujson.encode(
            i, orient="values"), numpy=True))
        self.assertTrue(i.equals(outp))
        outp = Index(ujson.decode(ujson.encode(i, orient="records")))
        self.assertTrue(i.equals(outp))
        outp = Index(ujson.decode(ujson.encode(
            i, orient="records"), numpy=True))
        self.assertTrue(i.equals(outp))
        outp = Index(ujson.decode(ujson.encode(i, orient="index")))
        self.assertTrue(i.equals(outp))
        outp = Index(ujson.decode(ujson.encode(i, orient="index"), numpy=True))
        self.assertTrue(i.equals(outp))
    def test_datetimeindex(self):
        from pandas.tseries.index import date_range
        rng = date_range('1/1/2000', periods=20)
        encoded = ujson.encode(rng, date_unit='ns')
        decoded = DatetimeIndex(np.array(ujson.decode(encoded)))
        self.assertTrue(rng.equals(decoded))
        ts = Series(np.random.randn(len(rng)), index=rng)
        decoded = Series(ujson.decode(ujson.encode(ts, date_unit='ns')))
        idx_values = decoded.index.values.astype(np.int64)
        decoded.index = DatetimeIndex(idx_values)
        tm.assert_series_equal(ts, decoded)
    def test_decodeArrayTrailingCommaFail(self):
        input = "[31337,]"
        try:
            ujson.decode(input)
        except ValueError:
            pass
        else:
            assert False, "expected ValueError"
    def test_decodeArrayLeadingCommaFail(self):
        input = "[,31337]"
        try:
            ujson.decode(input)
        except ValueError:
            pass
        else:
            assert False, "expected ValueError"
    def test_decodeArrayOnlyCommaFail(self):
        input = "[,]"
        try:
            ujson.decode(input)
        except ValueError:
            pass
        else:
            assert False, "expected ValueError"
    def test_decodeArrayUnmatchedBracketFail(self):
        input = "[]]"
        try:
            ujson.decode(input)
        except ValueError:
            pass
        else:
            assert False, "expected ValueError"
    def test_decodeArrayEmpty(self):
        input = "[]"
        ujson.decode(input)
    def test_decodeArrayOneItem(self):
        input = "[31337]"
        ujson.decode(input)
    def test_decodeBigValue(self):
        input = "9223372036854775807"
        ujson.decode(input)
    def test_decodeSmallValue(self):
        input = "-9223372036854775808"
        ujson.decode(input)
    def test_decodeTooBigValue(self):
        try:
            input = "9223372036854775808"
            ujson.decode(input)
        except ValueError:
            pass
        else:
            assert False, "expected ValueError"
    def test_decodeTooSmallValue(self):
        try:
            input = "-90223372036854775809"
            ujson.decode(input)
        except ValueError:
            pass
        else:
            assert False, "expected ValueError"
    def test_decodeVeryTooBigValue(self):
        try:
            input = "9223372036854775808"
            ujson.decode(input)
        except ValueError:
            pass
        else:
            assert False, "expected ValueError"
    def test_decodeVeryTooSmallValue(self):
        try:
            input = "-90223372036854775809"
            ujson.decode(input)
        except ValueError:
            pass
        else:
            assert False, "expected ValueError"
    def test_decodeWithTrailingWhitespaces(self):
        input = "{}\n\t "
        ujson.decode(input)
    def test_decodeWithTrailingNonWhitespaces(self):
        try:
            input = "{}\n\t a"
            ujson.decode(input)
        except ValueError:
            pass
        else:
            assert False, "expected ValueError"
    def test_decodeArrayWithBigInt(self):
        try:
            ujson.loads('[18446098363113800555]')
        except ValueError:
            pass
        else:
            assert False, "expected ValueError"
    def test_decodeArrayFaultyUnicode(self):
        try:
            ujson.loads('[18446098363113800555]')
        except ValueError:
            pass
        else:
            assert False, "expected ValueError"
    def test_decodeFloatingPointAdditionalTests(self):
        places = 15
        self.assertAlmostEqual(-1.1234567893,
                               ujson.loads("-1.1234567893"), places=places)
        self.assertAlmostEqual(-1.234567893,
                               ujson.loads("-1.234567893"), places=places)
        self.assertAlmostEqual(-1.34567893,
                               ujson.loads("-1.34567893"), places=places)
        self.assertAlmostEqual(-1.4567893,
                               ujson.loads("-1.4567893"), places=places)
        self.assertAlmostEqual(-1.567893,
                               ujson.loads("-1.567893"), places=places)
        self.assertAlmostEqual(-1.67893,
                               ujson.loads("-1.67893"), places=places)
        self.assertAlmostEqual(-1.7893, ujson.loads("-1.7893"), places=places)
        self.assertAlmostEqual(-1.893, ujson.loads("-1.893"), places=places)
        self.assertAlmostEqual(-1.3, ujson.loads("-1.3"), places=places)
        self.assertAlmostEqual(1.1234567893, ujson.loads(
            "1.1234567893"), places=places)
        self.assertAlmostEqual(1.234567893, ujson.loads(
            "1.234567893"), places=places)
        self.assertAlmostEqual(
            1.34567893, ujson.loads("1.34567893"), places=places)
        self.assertAlmostEqual(
            1.4567893, ujson.loads("1.4567893"), places=places)
        self.assertAlmostEqual(
            1.567893, ujson.loads("1.567893"), places=places)
        self.assertAlmostEqual(1.67893, ujson.loads("1.67893"), places=places)
        self.assertAlmostEqual(1.7893, ujson.loads("1.7893"), places=places)
        self.assertAlmostEqual(1.893, ujson.loads("1.893"), places=places)
        self.assertAlmostEqual(1.3, ujson.loads("1.3"), places=places)
    def test_encodeBigSet(self):
        s = set()
        for x in range(0, 100000):
            s.add(x)
        ujson.encode(s)
    def test_encodeEmptySet(self):
        s = set()
        self.assertEqual("[]", ujson.encode(s))
    def test_encodeSet(self):
        s = set([1, 2, 3, 4, 5, 6, 7, 8, 9])
        enc = ujson.encode(s)
        dec = ujson.decode(enc)
        for v in dec:
            self.assertTrue(v in s)
def _clean_dict(d):
    return dict((str(k), v) for k, v in compat.iteritems(d))
if __name__ == '__main__':
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
 | 
	gpl-2.0 | 
| 
	flowersteam/naminggamesal | 
	naminggamesal/ngvoc/matrix.py | 
	1 | 
	11526 | 
	#!/usr/bin/python
import random
import numpy as np
import copy
import matplotlib.pyplot as plt
import scipy
from scipy import sparse
from . import BaseVocabulary
from . import voc_cache, del_cache
class VocMatrix(BaseVocabulary):
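	# The vocabulary is stored as a dense M x W association matrix: rows index
	# meanings, columns index words, and a strictly positive entry at (m, w)
	# means the meaning-word association is known (see exists() below).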
	def __init__(self, M=0, W=0, start='empty',**voc_cfg2):
		self._M = copy.deepcopy(M)
		self._W = copy.deepcopy(W)
		self._size = [self._M,self._W]
		#M = voc_cfg2['M']
		#W = voc_cfg2['W']
		super(VocMatrix,self).__init__(**voc_cfg2)
		self._content=np.matrix(np.zeros((self._M,self._W)))
		if start == 'completed':
			self.complete_empty()
			
	@del_cache
	def fill(self):
		for i in range(0,self._M):
			for j in range(0,self._W):
				self.add(i,j,1)
	@del_cache
	def complete_empty(self):
		assert len(self.get_known_meanings()) == 0
		for i in range(0,self._M):
			j = self.get_new_unknown_w()
			self.add(i,j,1)
	@voc_cache
	def exists(self,m,w):
		if self._content[m,w] > 0:
			return 1
		else:
			return 0
	def get_value(self,m,w):
		return self._content[m,w]
	def get_content(self):
		return self._content
	def get_size(self):
		return self._size
	def get_random_m(self):
		return random.choice(list(range(self._M)))
	@del_cache
	def add(self,m,w,val=1,context=[]):
		self._content[m,w] = val
	@del_cache
	def rm(self,m,w):
		self._content[m,w] = 0
	def rm_syn(self,m,w):
		for i in self.get_known_words(m=m):
			if i!=w:
				self.rm(m,i)
	def rm_hom(self,m,w):
		for i in self.get_known_meanings(w=w):
			if i!=m:
				self.rm(i,w)
	@voc_cache
	def get_row(self, m):
		return self._content[m,:].reshape((1, self._W))
	@voc_cache
	def get_column(self, w):
		return self._content[:,w].reshape((self._M, 1))
	@voc_cache
	def get_known_words(self,m=None,option=None):
		if m is None:
			mat = self._content
		else:
			mat = self.get_row(m)
		coords = self.get_coords(mat, option=option)
		ans = [k[1] for k in coords]
		return sorted(list(set(np.array(ans).reshape(-1,).tolist())))
	@voc_cache
	def get_known_meanings(self,w=None,option=None):
		if w is None:
			mat = self._content
		else:
			mat = self.get_column(w)
		coords = self.get_coords(mat, option=option)
		ans = [k[0] for k in coords]
		return sorted(list(set(np.array(ans).reshape(-1,).tolist())))
	def get_coords(self,mat,option=None):
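		# option selects which nonzero cells are returned: None -> all nonzero
		# coordinates, 'max'/'min' -> cells holding the global max/min value,
		# 'minofmaxw'/'minofmaxm' -> see the dedicated helpers below.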
		nz = mat.nonzero()
		if not nz[0].size:
			return []
		if option is None:
			return self.get_coords_none(mat,nz=nz)
		elif option == 'max':
			return self.get_coords_max(mat,nz=nz)
		elif option == 'min':
			return self.get_coords_min(mat,nz=nz)
		elif option == 'minofmaxw':
			return self.get_coords_minofmaxw(mat,nz=nz)
		elif option == 'minofmaxm':
			return self.get_coords_minofmaxm(mat,nz=nz)
	def get_coords_none(self,mat,nz=None):
		if nz is None:
			nz = mat.nonzero()
		coords = [(nz[0][i],nz[1][i]) for i in range(len(nz[0]))]
		return coords
	def get_coords_max(self,mat,nz=None):
		if nz is None:
			nz = mat.nonzero()
		coords = np.argwhere(mat == np.amax(mat[nz]))
		coords = coords.reshape((-1,2))
		return coords
	def get_coords_min(self,mat,nz=None):
		if nz is None:
			nz = mat.nonzero()
		coords = np.argwhere(mat == np.amin(mat[nz]))
		coords = coords.reshape((-1,2))
		return coords
	def get_coords_minofmaxw(self,mat,nz=None):
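		# For each word (column), take its best score over all meanings, then
		# return the coordinates of the column(s) whose best score is lowest.
		# get_new_unknown_w() relies on this (via option='minofmaxw') to reuse
		# the least-established word once every word is already known.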
		best_scores = mat.max(axis=0)
		val = np.amin(best_scores)
		coords = np.argwhere(best_scores == val)
		coords = coords.reshape((-1,2))
		return coords
	def get_coords_minofmaxm(self,mat,nz=None):
		best_scores = mat.max(axis=1)
		val = np.amin(best_scores)
		coords = np.argwhere(best_scores == val)
		coords = coords.reshape((-1,2))
		return coords
	@voc_cache
	def get_unknown_words(self, m=None, option=None):
		return sorted(list(set(range(self._W)) - set(self.get_known_words(m=m, option=option))))
	@voc_cache
	def get_unknown_meanings(self, w=None, option=None):
		return sorted(list(set(range(self._M)) - set(self.get_known_meanings(w=w, option=option))))
	def diagnostic(self):
		print(self._cache)
		print(self)
	def get_new_unknown_m(self):
		if not len(self.get_known_meanings()) == self._M:
			m = random.choice(self.get_unknown_meanings())
		else:
			#print "tried to get new m but all are known"
			m = self.get_random_known_m(option='minofmaxm')
		return m
	def get_new_unknown_w(self):
		if hasattr(self,'next_word'):
			w = self.next_word
			delattr(self,'next_word')
		elif not len(self.get_known_words()) == self._W:
			w = random.choice(self.get_unknown_words())
		else:
			#print "tried to get new w but all are known"
			w = self.get_random_known_w(option='minofmaxw')
		return w
	def get_random_known_m(self,w=None, option='max'):
		if not len(self.get_known_meanings(w=w)) == 0:
			m = random.choice(self.get_known_meanings(w=w, option=option))
		else:
			#print "tried to get known m but none are known"
			m = self.get_new_unknown_m()
		return m
	def get_random_known_w(self,m=None, option='max'):
		if not len(self.get_known_words(m=m)) == 0:
			w = random.choice(self.get_known_words(m=m, option=option))
		else:
			#print "tried to get known w but none are known"
			w = self.get_new_unknown_w()
		return w
	def visual(self,vtype=None):
		if vtype==None:
			print(self)
		elif vtype=="syn":
			tempmat=np.matrix(np.zeros((self._M,self._W)))
			synvec=[]
			for i in range(0,self._M):
				synvec.append(len(self.get_known_words(i)))
				for j in range(0,self._W):
					tempmat[i,j]=(self._W-synvec[i]+1)*self._content[i,j]
			plt.title("Synonymy")
			plt.xlabel("Words")
			plt.ylabel("Meanings")
			plt.gca().invert_yaxis()
			plt.pcolor(np.array(tempmat),vmin=0,vmax=self._W)
		elif vtype=="hom":
			tempmat=np.matrix(np.zeros((self._M,self._W)))
			homvec=[]
			for j in range(0,self._W):
				homvec.append(len(self.get_known_meanings(j)))
				for i in range(0,self._M):
					tempmat[i,j]=(self._M-homvec[j]+1)*self._content[i,j]
			plt.title("Homonymy")
			plt.xlabel("Words")
			plt.ylabel("Meanings")
			plt.gca().invert_yaxis()
			plt.pcolor(np.array(tempmat),vmin=0,vmax=self._M)
class VocSparseMatrix(VocMatrix):
	voctype="sparse_matrix"
	@voc_cache
	def get_content(self):
		return self._content.todense()
	@voc_cache
	def get_row(self, m):
		return self._content.getrow(m)
	@voc_cache
	def get_column(self, w):
		return self._content.getcol(w)
	def get_coords(self,mat,option=None):
		mat.eliminate_zeros()
		return VocMatrix.get_coords(self,mat,option=option)
class VocLiLMatrix(VocSparseMatrix):
	voctype="lil_matrix"
	def __init__(self,M,W,start='empty',**voc_cfg2):
		super(VocMatrix,self).__init__(**voc_cfg2)
		self._M = M
		self._W = W
		self._content = sparse.lil_matrix((self._M,self._W))
		if start == 'completed':
			self.complete_empty()
	@voc_cache
	def get_column(self, w):
		return self._content.getcol(w).tolil()
	def get_coords_none(self,mat,nz=None):
		if nz is None:
			nz = mat.nonzero()
		coords =[]
		for i in range(len(mat.rows)):
			coords += [(i,mat.rows[i][j]) for j in range(len(mat.rows[i]))]
		return coords
	def get_coords_max(self,mat,nz=None):
		if nz is None:
			nz = mat.nonzero()
		mat_max = np.amax(mat.data.max())
		coords =[]
		for i in range(len(mat.rows)):
			coords += [(i,mat.rows[i][j]) for j in range(len(mat.rows[i])) if mat.data[i][j] == mat_max]
		return coords
	def get_coords_min(self,mat,nz=None):
		if nz is None:
			nz = mat.nonzero()
		mat_min = np.amin(mat.data.min())
		coords =[]
		for i in range(len(mat.rows)):
			coords += [(i,mat.rows[i][j]) for j in range(len(mat.rows[i])) if mat.data[i][j] == mat_min]
		return coords
class VocCSRMatrix(VocSparseMatrix):
	voctype="csr_matrix"
	def __init__(self,M,W,start='empty',**voc_cfg2):
		super(VocMatrix,self).__init__(**voc_cfg2)
		self._M = M
		self._W = W
		self._content = sparse.csr_matrix((self._M,self._W))
		if start == 'completed':
			self.complete_empty()
	def get_coords_none(self,mat,nz=None):
		if nz is None:
			nz = mat.nonzero()
		coords = [(nz[0][i],nz[1][i]) for i in range(len(nz[0]))] #tolist??
		return coords
	def get_coords_max(self,mat,nz=None):
		if nz is None:
			nz = mat.nonzero()
		coords = [(nz[0][i[0]],nz[1][i[0]]) for i in np.argwhere(mat.data == mat.data.max()) if mat.data.any()]
		return coords
	def get_coords_min(self,mat,nz=None):
		if nz is None:
			nz = mat.nonzero()
		coords = [(nz[0][i[0]],nz[1][i[0]]) for i in np.argwhere(mat.data == mat.data.min()) if mat.data.any()]
		return coords
	def get_coords_minofmaxm(self,mat,nz=None):
		if nz is None:
			nz = mat.nonzero()
		meanings = self.get_known_meanings(option=None)
		best_scores = np.zeros(len(meanings))
		for i in range(len(nz[0])):
			m = nz[0][i]
			w = nz[1][i]
			index_m = np.argwhere(meanings == m).reshape((-1))[0]
			best_scores[index_m] = max(best_scores[index_m],mat[m,w])
		val = np.amin(best_scores)
		coords_m = np.argwhere(best_scores == val).reshape((-1))
		coords = []
		for m_i in coords_m:
			coords += [(m_i,w_i) for w_i in self.get_known_words(m=m_i,option='max')]
		return coords
	def get_coords_minofmaxw(self,mat,nz=None):
		if nz is None:
			nz = mat.nonzero()
		words = self.get_known_words(option=None)
		best_scores = np.zeros(len(words))
		for i in range(len(nz[0])):
			m = nz[0][i]
			w = nz[1][i]
			index_w = np.argwhere(words == w).reshape((-1))[0]
			best_scores[index_w] = max(best_scores[index_w],mat[m,w])
		val = np.amin(best_scores)
		coords_w = np.argwhere(best_scores == val).reshape((-1))
		coords = []
		for w_i in coords_w:
			coords += [(m_i,w_i) for m_i in self.get_known_meanings(w=w_i,option='max')]
		return coords
class VocCSRMatrixImproved(VocCSRMatrix):
	voctype="csr_matrix_improved"
	def __init__(self,M,W,**voc_cfg2):
		VocCSRMatrix.__init__(self,M,W,**voc_cfg2)
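		# tocoo on the underlying CSR matrix is monkey-patched so that repeated
		# COO conversions are served from self._cache; __getstate__/__setstate__
		# below drop and restore the patched bound method around pickling so the
		# extra attribute does not end up in the copied/pickled matrix.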
		self._content.tocoo = self.convert_to_coo
	def convert_to_coo(self,copy=True):
		if 'coo' not in list(self._cache.keys()):
			self._cache['coo'] = sparse.csr_matrix.tocoo(self._content)
		return self._cache['coo']
	def __getstate__(self):
		del self._content.tocoo
		out_dict = self.__dict__.copy()
		out_dict['_content'] = copy.deepcopy(self._content)
		self._content.tocoo = self.convert_to_coo
		return out_dict
	def __setstate__(self, in_dict):
		self.__dict__.update(in_dict)
		self._content.tocoo = self.convert_to_coo
class VocCSCMatrix(VocCSRMatrix):
	voctype="csc_matrix"
	def __init__(self,M,W,start='empty',**voc_cfg2):
		super(VocMatrix,self).__init__(**voc_cfg2)
		self._M = M
		self._W = W
		self._content = sparse.csc_matrix((self._M,self._W))
		if start == 'completed':
			self.complete_empty()
class VocDOKMatrix(VocSparseMatrix):
	voctype="dok_matrix"
	def __init__(self,M,W,start='empty',**voc_cfg2):
		super(VocMatrix,self).__init__(**voc_cfg2)
		self._M = M
		self._W = W
		self._content = sparse.dok_matrix((self._M,self._W))
		if start == 'completed':
			self.complete_empty()
	def get_coords(self, mat, option=None):
		if option is None:
			coords = list(mat.keys())
		elif option == 'min':
			mat_min = min(mat.values())
			coords = [k for k,v in mat.items() if v == mat_min]
		elif option == 'max':
			mat_max = max(mat.values())
			coords = [k for k,v in mat.items() if v == mat_max]
		return coords
	def __getstate__(self):
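		# Pickling workaround (as implemented below): the dok_matrix entries are
		# copied into a plain dict and its __dict__ stored separately, then
		# __setstate__ rebuilds a fresh dok_matrix of the same shape from them.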
		out_dict = self.__dict__.copy()
		a = {}
		a.update(out_dict['_content'])
		out_dict['_content_dict'] = out_dict['_content'].__dict__
		out_dict['_content'] = a
		return out_dict
	def __setstate__(self, in_dict):
		mat = sparse.dok_matrix((in_dict['_M'],in_dict['_W']))
		mat.__dict__ = in_dict['_content_dict']
		mat.update(in_dict['_content'])
		del in_dict['_content_dict']
		in_dict['_content'] = mat
		self.__dict__ = in_dict
 | 
	agpl-3.0 | 
| 
	quimbp/cosmo | 
	modules/cosmo/plotxy.py | 
	1 | 
	21085 | 
	''' plotxy.py
    Joaquim Ballabrera, July 2020.
'''
import tkinter as tk
from tkinter import ttk
from tkinter import messagebox
from tkinter import filedialog
from tkinter import font as tkfont
import numpy as np
import datetime
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
from matplotlib.dates import date2num
def rot(phi):
# -----------
  ''' Returns the complex rotation factor: exp(j*phi)'''
  return np.exp(1j*phi)
def plot_ellipse(ax,xo,yo,a,b,mphi,ephi):
# ---------------------------------------
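  # Draws the anomaly ellipse centred at (xo, yo) with semi-axes a and b rotated
  # by the eddy-orientation angle ephi, plus a dashed circle of radius b and
  # dashed lines marking the mean-flow direction (mphi) and the eddy
  # orientation (ephi).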
  phi = ephi
  # Rotation matrix
  R = np.array([[np.cos(phi), -np.sin(phi)],[np.sin(phi), np.cos(phi)]])
  
  # Parametric centered ellipse
  n = 100
  t = np.linspace(0,2*np.pi,n)
  E = np.array([a*np.cos(t), b*np.sin(t)])
  Er = np.zeros((2,n))
  for i in range(n):
    Er[:,i] = np.dot(R,E[:,i])
  ax.plot(xo+Er[0,:], yo+Er[1,:],linestyle='-')
  C = np.array([b*np.cos(t), b*np.sin(t)])
  ax.plot(xo+C[0,:], yo+C[1,:],linestyle='--')
  # - Direction mean flow
  tc = np.arctan2(np.tan(mphi)*a,b)
  xmax = np.abs(a*np.cos(tc))
  xmin = -np.abs(a*np.cos(tc))
  x = np.linspace(xmin,xmax,20)
  y = np.tan(mphi)*x
  ax.plot(xo+x, yo+y,linestyle='--',color='brown',linewidth=1.5,label='Mean flow direction')
  # - Eddy orientation
  tc = np.arctan2(np.tan(ephi)*a,b)
  xmax = np.abs(a*np.cos(tc))
  xmin = -np.abs(a*np.cos(tc))
  x = np.linspace(xmin,xmax,20)
  y = np.tan(ephi)*x
  ax.plot(xo+x, yo+y,linestyle='--',color='red',linewidth=0.8,label='Eddy orientation')
  ax.legend()
# ===========
class PLOTXY:
# ===========
  ''' Launches a widget for time series plotting '''
  def __init__(self,master,t=None,u=None,v=None,plot_type=None,exit_mode=None,**args):
  # ----------------------------------------------------------------------------------
    try:
      wid = args['wid']
    except:
      wid = None
    self.master    = master
    self.exit_mode = exit_mode
    self.t         = t           # Working data
    self.u         = u           # Working data
    self.v         = v           # Working data
    self.u_orig    = u           # Data backup
    self.v_orig    = v           # Data backup
    ucolor = args.pop('ucolor','blue')
    vcolor = args.pop('vcolor','red')
    uthick = args.pop('uthick',1)
    vthick = args.pop('vthick',1)
    ulabel = args.pop('ulabel','u')
    vlabel = args.pop('vlabel','v')
    title  = args.pop('title','')
    scale  = args.pop('scale',10)
    tcolor = args.pop('tcolor','green')
    twidth = args.pop('twidth',0.004)
    if t is None:
      t = np.arange(len(u))
      self.t = t  # store the generated time axis as the working copy
    self.umin   = tk.DoubleVar()
    self.umax   = tk.DoubleVar()
    self.vmin   = tk.DoubleVar()
    self.vmax   = tk.DoubleVar()
    self.ucolor = tk.StringVar()
    self.vcolor = tk.StringVar()
    self.uthick = tk.DoubleVar()
    self.vthick = tk.DoubleVar()
    self.xlabel = tk.StringVar()
    self.ulabel = tk.StringVar()
    self.vlabel = tk.StringVar()
    self.tscale = tk.DoubleVar()
    self.tcolor = tk.StringVar()
    self.twidth = tk.DoubleVar()
    self.title  = tk.StringVar()
    self.Vgrid  = tk.BooleanVar()
    self.Hgrid  = tk.BooleanVar()
    self.type = tk.StringVar()
    self.type_options = ['u plot','uv plot','stick plot','rotated uv','var ellipse']
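    # Each option maps to a branch of select_plot() below, which enables or
    # disables the relevant entry fields and redraws; 'rotated uv' calls
    # rotated_uv() and 'var ellipse' draws the anomaly variance ellipse.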
    if v is None:
      self.type_options = ['u plot']
      self.type.set('u plot')
    self.ucolor.set(ucolor)
    self.vcolor.set(vcolor)
    self.uthick.set(uthick)
    self.vthick.set(vthick)
    self.ulabel.set(ulabel)
    self.vlabel.set(vlabel)
    self.Vgrid.set(True)
    self.Hgrid.set(False)
    self.tscale.set(scale)
    self.tcolor.set(tcolor)
    self.twidth.set(twidth)
    self.title.set(title)
    try:
      value = args['umin']
      self.umin.set(float(value))
    except:
      umin = np.nanmin(u)
      self.umin.set(umin)
    try:
      value = args['umax']
      self.umax.set(float(value))
    except:
      umax = np.nanmax(u)
      self.umax.set(umax)
    if v is None:
      self.type.set('u plot')
    else:
      self.type.set('uv plot')
      # Modify the default values:
      try:
        value = args['vmin']
        self.vmin.set(float(value))
      except:
        vmin = np.nanmin(v)
        self.vmin.set(vmin)
      try:
        value = args['vmax']
        self.vmax.set(float(value))
      except:
        vmax = np.nanmax(v)
        self.vmax.set(vmax)
    # Define widget
    self.main = tk.Frame(master)
    F0 = ttk.Frame(self.main,padding=5)
    ttk.Label(F0,text='Plot type',width=12,padding=3).grid(row=0,column=0,sticky='e')
    self.wtype = ttk.Combobox(F0,textvariable=self.type,values=self.type_options,width=12)
    self.wtype.grid(row=0,column=1,sticky='w')
    self.wtype.bind("<<ComboboxSelected>>", lambda f: self.select_plot())
    FU = ttk.Frame(F0)
    ttk.Label(FU,text='U').grid(row=0,column=0,pady=5,sticky='w')
    ttk.Label(FU,text='Max value',padding=3).grid(row=1,column=0,sticky='e')
    self.wumax = ttk.Entry(FU,textvariable=self.umax,width=12)
    self.wumax.grid(row=1,column=1,sticky='ew')
    self.wumax.bind("<Return>", lambda f: self.make_plot())
    ttk.Label(FU,text='Min value',padding=3).grid(row=2,column=0,sticky='e')
    self.wumin = ttk.Entry(FU,textvariable=self.umin,width=12)
    self.wumin.grid(row=2,column=1,sticky='ew')
    self.wumin.bind("<Return>", lambda f: self.make_plot())
    ttk.Label(FU,text='Line color',padding=3).grid(row=3,column=0,sticky='e')
    self.wucol = ttk.Entry(FU,textvariable=self.ucolor,width=12)
    self.wucol.grid(row=3,column=1,sticky='ew')
    self.wucol.bind("<Return>", lambda f: self.make_plot())
    ttk.Label(FU,text='Line thickness',padding=3).grid(row=4,column=0,sticky='e')
    self.wuthk = ttk.Entry(FU,textvariable=self.uthick,width=12)
    self.wuthk.grid(row=4,column=1,sticky='ew')
    self.wuthk.bind("<Return>", lambda f: self.make_plot())
    ttk.Label(FU,text='Label',padding=3).grid(row=5,column=0,sticky='e')
    self.wulab = ttk.Entry(FU,textvariable=self.ulabel,width=12)
    self.wulab.grid(row=5,column=1,sticky='ew')
    self.wulab.bind("<Return>", lambda f: self.make_plot())
    FU.grid(row=1,column=0,columnspan=2)
    FV = ttk.Frame(F0)
    ttk.Label(FV,text='V').grid(row=0,column=0,pady=5,sticky='w')
    ttk.Label(FV,text='Max value',padding=3).grid(row=1,column=0,sticky='e')
    self.wvmax = ttk.Entry(FV,textvariable=self.vmax,width=12)
    self.wvmax.grid(row=1,column=1,sticky='ew')
    self.wvmax.bind("<Return>", lambda f: self.make_plot())
    ttk.Label(FV,text='Min value',padding=3).grid(row=2,column=0,sticky='e')
    self.wvmin = ttk.Entry(FV,textvariable=self.vmin,width=12)
    self.wvmin.grid(row=2,column=1,sticky='ew')
    self.wvmin.bind("<Return>", lambda f: self.make_plot())
    ttk.Label(FV,text='Line color',padding=3).grid(row=3,column=0,sticky='e')
    self.wvcol = ttk.Entry(FV,textvariable=self.vcolor,width=12)
    self.wvcol.grid(row=3,column=1,sticky='ew')
    self.wvcol.bind("<Return>", lambda f: self.make_plot())
    ttk.Label(FV,text='Line thickness',padding=3).grid(row=4,column=0,sticky='e')
    self.wvthk = ttk.Entry(FV,textvariable=self.vthick,width=12)
    self.wvthk.grid(row=4,column=1,sticky='ew')
    self.wvthk.bind("<Return>", lambda f: self.make_plot())
    ttk.Label(FV,text='Label',padding=3).grid(row=5,column=0,sticky='e')
    self.wvlab = ttk.Entry(FV,textvariable=self.vlabel,width=12)
    self.wvlab.grid(row=5,column=1,sticky='ew')
    self.wvlab.bind("<Return>", lambda f: self.make_plot())
    FV.grid(row=2,column=0,columnspan=2)
    FT = ttk.Frame(F0)
    ttk.Label(FT,text='Stick plot options').grid(row=0,column=0,pady=5,sticky='w')
    ttk.Label(FT,text='Scale',padding=3).grid(row=1,column=0,sticky='e')
    self.wtscl = ttk.Entry(FT,textvariable=self.tscale,width=12)
    self.wtscl.grid(row=1,column=1,sticky='ew')
    self.wtscl.bind("<Return>", lambda f: self.make_plot())
    ttk.Label(FT,text='Color',padding=3).grid(row=2,column=0,sticky='e')
    self.wtcol = ttk.Entry(FT,textvariable=self.tcolor,width=12)
    self.wtcol.grid(row=2,column=1,sticky='ew')
    self.wtcol.bind("<Return>", lambda f: self.make_plot())
    ttk.Label(FT,text='Thickness',padding=3).grid(row=3,column=0,sticky='e')
    self.wtthk = ttk.Entry(FT,textvariable=self.twidth,width=12)
    self.wtthk.grid(row=3,column=1,sticky='ew')
    self.wtthk.bind("<Return>", lambda f: self.make_plot())
    FT.grid(row=3,column=0,columnspan=2)
    FM = ttk.Frame(F0)
    ttk.Label(FM,text='Grid options').grid(row=0,column=0,pady=5,sticky='w')
    ttk.Label(FM,text='Show vertical grid',padding=3).grid(row=1,column=0,sticky='e')
    ttk.Checkbutton(FM,variable=self.Vgrid,command = lambda : self.make_plot(),padding=3).grid(row=1,column=1,sticky='we')
    ttk.Label(FM,text='Show horizontal grid',padding=3).grid(row=2,column=0,sticky='e')
    ttk.Checkbutton(FM,variable=self.Hgrid,command = lambda : self.make_plot(),padding=3).grid(row=2,column=1,sticky='we')
    ttk.Label(FM,text='Title').grid(row=3,column=0,pady=5,sticky='w')
    self.wtitle = ttk.Entry(FM,textvariable=self.title,width=30)
    self.wtitle.grid(row=4,column=0,columnspan=2,sticky='ew')
    self.wtitle.bind("<Return>", lambda f: self.make_plot())
    tk.Button(FM,text='Save',command=self.save_plot).grid(row=5,column=0,padx=5,pady=5)
    tk.Button(FM,text='Quit',command=self.quit_plot).grid(row=5,column=1,padx=5,pady=5)
    FM.grid(row=4,column=0,columnspan=2)
    F0.grid()
    FG = ttk.Frame(self.main)
    self.fig = Figure(dpi=100)
    if self.type.get() == 'uv plot':
      self.ax1 = self.fig.add_subplot(211)
      self.ax2 = self.fig.add_subplot(212)
    else:
      self.ax1 = self.fig.add_subplot(111)
    self.canvas = FigureCanvasTkAgg(self.fig,master=FG)
    self.canvas.draw()
    self.canvas.get_tk_widget().grid(sticky='nsew')
    self.canvas._tkcanvas.grid(sticky='nsew')
    FG.grid(row=0,column=1,rowspan=5,sticky='nsew')
    #FG.grid_columnconfigure(0,weight=1)
    #FG.grid_columnconfigure(1,weight=1)
    self.main.grid()
    self.main.grid_columnconfigure(0,weight=1)
    self.main.grid_columnconfigure(1,weight=1)
    self.main.grid_columnconfigure(2,weight=1)
    self.make_plot()
    if v is None:
      self.wvmax.configure(state='disabled')
      self.wvmin.configure(state='disabled')
      self.wvcol.configure(state='disabled')
      self.wvthk.configure(state='disabled')
      self.wvlab.configure(state='disabled')
    
    if self.type.get() != 'stick plot':
      self.wtscl.configure(state='disabled')
      self.wtcol.configure(state='disabled')
      self.wtthk.configure(state='disabled')
  def save_plot(self):
  # -------------------  
      filetypes = [('PNG file','.png'),('JPG file','.jpg'),('PDF file','.pdf')]
      nn = tk.filedialog.asksaveasfilename(title='Save',
                                           initialdir='./',
                                           filetypes=filetypes,
                                           confirmoverwrite=True)
      if len(nn) > 0:
        filename = '%s' % nn
        self.fig.savefig(filename,
                         dpi=180,
                         bbox_inches='tight')
  def quit_plot(self):
  # -------------------  
    if self.exit_mode == 'quit':
      ''' Closing the main widget '''
      ans = messagebox.askquestion('Close','Are you sure?',icon='warning')
      if ans == 'yes':
        quit()
    else:
      self.master.destroy()
      return
  def select_plot(self):
  # ---------------------------------------
    self.fig.clear()
    if self.type.get() == 'u plot':
      self.u = self.u_orig
      self.wumax.configure(state='normal')
      self.wumin.configure(state='normal')
      self.wucol.configure(state='normal')
      self.wuthk.configure(state='normal')
      self.wulab.configure(state='normal')
      self.wvmax.configure(state='disabled')
      self.wvmin.configure(state='disabled')
      self.wvcol.configure(state='disabled')
      self.wvthk.configure(state='disabled')
      self.wvlab.configure(state='disabled')
      self.wtscl.configure(state='disabled')
      self.wtcol.configure(state='disabled')
      self.wtthk.configure(state='disabled')
      self.ax1 = self.fig.add_subplot(111)
      self.make_plot()
      return
      
    if self.type.get() == 'uv plot':
      self.u = self.u_orig
      self.v = self.v_orig
      self.wumax.configure(state='normal')
      self.wumin.configure(state='normal')
      self.wucol.configure(state='normal')
      self.wuthk.configure(state='normal')
      self.wulab.configure(state='normal')
      self.wvmax.configure(state='normal')
      self.wvmin.configure(state='normal')
      self.wvcol.configure(state='normal')
      self.wvthk.configure(state='normal')
      self.wvlab.configure(state='normal')
      self.wtscl.configure(state='disabled')
      self.wtcol.configure(state='disabled')
      self.wtthk.configure(state='disabled')
      self.ax1 = self.fig.add_subplot(211)
      self.ax2 = self.fig.add_subplot(212)
      self.make_plot()
      return
    if self.type.get() == 'stick plot':
      self.u = self.u_orig
      self.v = self.v_orig
      self.wumax.configure(state='disabled')
      self.wumin.configure(state='disabled')
      self.wucol.configure(state='disabled')
      self.wuthk.configure(state='disabled')
      self.wulab.configure(state='disabled')
      self.wvmax.configure(state='disabled')
      self.wvmin.configure(state='disabled')
      self.wvcol.configure(state='disabled')
      self.wvthk.configure(state='disabled')
      self.wvlab.configure(state='disabled')
      self.wtscl.configure(state='normal')
      self.wtcol.configure(state='normal')
      self.wtthk.configure(state='normal')
      self.ax1 = self.fig.add_subplot(111)
      self.make_plot()
      return
      
    if self.type.get() == 'rotated uv':
      self.ax1 = self.fig.add_subplot(211)
      self.ax2 = self.fig.add_subplot(212)
      self.rotated_uv()
    if self.type.get() == 'var ellipse':
      u = self.u
      v = self.v
      # Calculation of the anomalies
      mu = np.mean(u)
      mv = np.mean(v)
      mphi = np.angle(mu+1j*mv)
      print('Angle mean current = ', mphi, 180*mphi/np.pi)
      u = u - np.mean(u)
      v = v - np.mean(v)
      suu = np.dot(u,u)
      svv = np.dot(v,v)
      suv = np.dot(u,v)
      Tra = suu + svv
      Det = suu*svv - suv*suv
      a2  = 0.5*(Tra + np.sqrt(Tra*Tra - 4*Det))
      b2  = 0.5*(Tra - np.sqrt(Tra*Tra - 4*Det))
      aphi = 0.5*np.arctan2(2*suv,suu-svv)
      print('Test: ',2*suv/(suu-svv), np.tan(2*aphi))
      print('Eddy kinetic energy: ', 0.5*Tra)
      print('Total eddy variance: ', a2 + b2, Tra)
      print('Directional eddy variance: ', a2 - b2)
      print('Isotropic eddy variance: ', 2*b2)
      print('Polarization factor: ', (a2-b2)/(a2+b2))
      print('Variance angle: ', aphi, 180*aphi/np.pi)
      self.ax1 = self.fig.add_subplot(111)
      self.ax1.axis('equal')
      self.fig.subplots_adjust(bottom=0.4)
      self.ax1.set_xlim(-np.sqrt(Tra),np.sqrt(Tra))
      self.ax1.set_ylim(-np.sqrt(Tra),np.sqrt(Tra))
      plot_ellipse(self.ax1,0,0,np.sqrt(a2),np.sqrt(b2),mphi,aphi)
      txt1 = 'Mean velocity components: %.2f, %.2f ' %(mu,mv)
      txt2 = 'Mean velocity angle: %.2f ' %(180*mphi/np.pi)
      txt3 = 'Total anomaly variance: %.2f ' %(Tra)
      txt4 = 'Directional anomaly variance: %.2f ' %(a2-b2)
      txt5 = 'Isotropic anomaly variance: %.2f ' %(2*b2)
      txt6 = 'Polarization factor: %.2f ' %((a2-b2)/Tra)
      txt7 = 'Eddy orientation: %.2f ' %(180*aphi/np.pi)
      print(txt1)
      print(txt2)
      print(txt3)
      print(txt4)
      print(txt5)
      print(txt6)
      print(txt7)
      self.ax1.annotate(txt1,(0.11,0.98),xycoords='figure fraction')
      self.ax1.annotate(txt2,(0.55,0.98),xycoords='figure fraction')
      self.ax1.annotate(txt3,(0.11,0.95),xycoords='figure fraction')
      self.ax1.annotate(txt4,(0.11,0.92),xycoords='figure fraction')
      self.ax1.annotate(txt5,(0.55,0.92),xycoords='figure fraction')
      self.ax1.annotate(txt6,(0.11,0.89),xycoords='figure fraction')
      self.ax1.annotate(txt7,(0.55,0.89),xycoords='figure fraction')
      #self.ax1.text(0.05,0.01,'Mean velocity components: %.2f, %.2f' % (mu,mv), \
      #            ha='right',fontsize=8)
      #plt.figtext(0.05,0.01,'Mean velocity components: '+str(mu)+', '+str(mv), \
      #            horizontalalignment='right',fontsize=8)
      #plt.figtext(0.18,0.92,'Mean velocity angle: '+str(180*mphi/np.pi), \
      #            horizontalalignment='right',fontsize=8)
      #plt.figtext(0.18,0.89,'Total eddy variance: '+str(Tra), \
      #            horizontalalignment='right',fontsize=8)
      self.canvas.draw()
      #self.make_plot()
  def rotated_uv(self):
  # -----------------------------
    c = self.u + 1j*self.v          # Complex velocity
    mc = np.mean(c)
    print('Mean current: ',mc)
    angle_rad = np.angle(mc)
    angle_deg = 180*angle_rad/np.pi
    print('Mean current angle (Rad, Deg): ',angle_rad,angle_deg)
    rc = c*rot(-angle_rad)  # Rotated current
    print('Mean rotated current: ', np.mean(rc))
    #print(np.angle(np.mean(rc)))
    self.u = rc.real
    self.v = rc.imag
    self.make_plot()
      
  def stick_plot(self,time,u,v,**kw):
  # -----------------------------
    width = kw.pop('width', 0.002)
    headwidth = kw.pop('headwidth', 0)
    headlength = kw.pop('headlength', 0)
    headaxislength = kw.pop('headaxislength', 0)
    ax = kw.pop('ax', None)
    time, u, v = map(np.asanyarray, (time, u, v))
    if not ax:
        fig, ax = plt.subplots()
    q = ax.quiver(date2num(time), [[0]*len(time)], u, v,
                  angles='uv', width=width, headwidth=headwidth,
                  headlength=headlength, headaxislength=headaxislength,
                  **kw)
    ax.axes.get_yaxis().set_visible(False)
    ax.xaxis_date()
    return q
  def make_plot(self):
  # ---------------------------------------
    if self.type.get() == 'u plot':
    # - - - - - - - - - - - - - - - -
      self.ax1.clear()
      self.ax1.plot(self.t,self.u,                        \
                   color=self.ucolor.get(),   \
                   linewidth=self.uthick.get())
      self.ax1.set_ylim(self.umin.get(),self.umax.get())
      self.ax1.set_ylabel(self.ulabel.get())
      self.ax1.axhline(color='black')
      if self.Vgrid.get():
        self.ax1.xaxis.grid()
      if self.Hgrid.get():
        self.ax1.yaxis.grid()
      if isinstance(self.t[0],datetime.datetime):
        self.ax1.tick_params(axis='x',labelrotation=35)
    elif self.type.get() == 'uv plot' or self.type.get() == 'rotated uv':
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      self.ax1.clear()
      self.ax1.plot(self.t,self.u,                        \
                   color=self.ucolor.get(),   \
                   linewidth=self.uthick.get())
      self.ax1.set_ylim(self.umin.get(),self.umax.get())
      self.ax1.set_ylabel(self.ulabel.get())
      self.ax1.axhline(color='black')
      if self.Vgrid.get():
        self.ax1.xaxis.grid()
      if self.Hgrid.get():
        self.ax1.yaxis.grid()
      # Hide bottom labels from top plot
      self.ax1.tick_params(labelbottom=False) 
      self.ax2.clear()
      self.ax2.plot(self.t,self.v,                        \
                   color=self.vcolor.get(),   \
                   linewidth=self.vthick.get())
      # Set vertical limits
      self.ax2.set_ylim(self.vmin.get(),self.vmax.get())
      # Print vertical label
      self.ax2.set_ylabel(self.vlabel.get())
      self.ax2.axhline(color='black')
      if isinstance(self.t[0],datetime.datetime):
        self.ax2.tick_params(axis='x',labelrotation=35)
      # Show (or not) Vertical grid
      if self.Vgrid.get():
        self.ax2.xaxis.grid()
      # Show (or not) Horizontal grid
      if self.Hgrid.get():
        self.ax2.yaxis.grid()
    elif self.type.get() == 'stick plot':
    # - - - - - - - - - - - - - - - - - -
      self.ax1.clear()
      q = self.stick_plot(self.t,self.u,self.v,                     \
                          ax=self.ax1,               \
                          width=self.twidth.get(),   \
                          scale=self.tscale.get(),   \
                          color=self.tcolor.get())
      if self.Vgrid.get():
        self.ax1.xaxis.grid()
      if self.Hgrid.get():
        self.ax1.yaxis.grid()
      if isinstance(self.t[0],datetime.datetime):
        self.ax1.tick_params(axis='x',labelrotation=35)
    self.ax1.set_title(self.title.get())
    self.canvas.draw()
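# Illustrative sketch (not part of the original class): the variance-ellipse
# decomposition used by the 'var ellipse' branch of select_plot() above,
# rewritten as a standalone helper. The helper name is hypothetical and the
# function is defined here for reference only; nothing in this file calls it.
def _demo_variance_ellipse(u, v):
  # anomalies with respect to the mean current
  up = u - np.mean(u)
  vp = v - np.mean(v)
  suu, svv, suv = np.dot(up, up), np.dot(vp, vp), np.dot(up, vp)
  Tra = suu + svv                                    # total eddy variance
  Det = suu*svv - suv*suv
  a2 = 0.5*(Tra + np.sqrt(Tra*Tra - 4*Det))          # major-axis variance
  b2 = 0.5*(Tra - np.sqrt(Tra*Tra - 4*Det))          # minor-axis variance
  aphi = 0.5*np.arctan2(2*suv, suu - svv)            # ellipse orientation (rad)
  return np.sqrt(a2), np.sqrt(b2), aphi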
def main():
  from datetime import datetime, timedelta
  def _close():
    quit()
  # Random data to plot
  u = np.random.rand(100)
  v = np.random.rand(100)
  start = datetime.now()
  time = [start + timedelta(days=n) for n in range(len(u))]
  root = tk.Tk()
  root.title('PLOTXY')
  root.resizable(width=True,height=False)
  root.protocol('WM_DELETE_WINDOW',_close)
  app = PLOTXY(root,t=time,u=u,v=v,exit_mode='quit')
  root.mainloop()
if __name__ == '__main__':
  main()
 | 
	mit | 
| 
	jmschrei/scikit-learn | 
	sklearn/feature_extraction/tests/test_dict_vectorizer.py | 
	276 | 
	3790 | 
	# Authors: Lars Buitinck <[email protected]>
#          Dan Blanchard <[email protected]>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from sklearn.utils.testing import (assert_equal, assert_in,
                                   assert_false, assert_true)
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
def test_dictvectorizer():
    D = [{"foo": 1, "bar": 3},
         {"bar": 4, "baz": 2},
         {"bar": 1, "quux": 1, "quuux": 2}]
    for sparse in (True, False):
        for dtype in (int, np.float32, np.int16):
            for sort in (True, False):
                for iterable in (True, False):
                    v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
                    X = v.fit_transform(iter(D) if iterable else D)
                    assert_equal(sp.issparse(X), sparse)
                    assert_equal(X.shape, (3, 5))
                    assert_equal(X.sum(), 14)
                    assert_equal(v.inverse_transform(X), D)
                    if sparse:
                        # CSR matrices can't be compared for equality
                        assert_array_equal(X.A, v.transform(iter(D) if iterable
                                                            else D).A)
                    else:
                        assert_array_equal(X, v.transform(iter(D) if iterable
                                                          else D))
                    if sort:
                        assert_equal(v.feature_names_,
                                     sorted(v.feature_names_))
def test_feature_selection():
    # make two feature dicts with two useful features and a bunch of useless
    # ones, in terms of chi2
    d1 = dict([("useless%d" % i, 10) for i in range(20)],
              useful1=1, useful2=20)
    d2 = dict([("useless%d" % i, 10) for i in range(20)],
              useful1=20, useful2=1)
    for indices in (True, False):
        v = DictVectorizer().fit([d1, d2])
        X = v.transform([d1, d2])
        sel = SelectKBest(chi2, k=2).fit(X, [0, 1])
        v.restrict(sel.get_support(indices=indices), indices=indices)
        assert_equal(v.get_feature_names(), ["useful1", "useful2"])
def test_one_of_k():
    D_in = [{"version": "1", "ham": 2},
            {"version": "2", "spam": .3},
            {"version=3": True, "spam": -1}]
    v = DictVectorizer()
    X = v.fit_transform(D_in)
    assert_equal(X.shape, (3, 5))
    D_out = v.inverse_transform(X)
    assert_equal(D_out[0], {"version=1": 1, "ham": 2})
    names = v.get_feature_names()
    assert_true("version=2" in names)
    assert_false("version" in names)
def test_unseen_or_no_features():
    D = [{"camelot": 0, "spamalot": 1}]
    for sparse in [True, False]:
        v = DictVectorizer(sparse=sparse).fit(D)
        X = v.transform({"push the pram a lot": 2})
        if sparse:
            X = X.toarray()
        assert_array_equal(X, np.zeros((1, 2)))
        X = v.transform({})
        if sparse:
            X = X.toarray()
        assert_array_equal(X, np.zeros((1, 2)))
        try:
            v.transform([])
        except ValueError as e:
            assert_in("empty", str(e))
def test_deterministic_vocabulary():
    # Generate equal dictionaries with different memory layouts
    items = [("%03d" % i, i) for i in range(1000)]
    rng = Random(42)
    d_sorted = dict(items)
    rng.shuffle(items)
    d_shuffled = dict(items)
    # check that the memory layout does not impact the resulting vocabulary
    v_1 = DictVectorizer().fit([d_sorted])
    v_2 = DictVectorizer().fit([d_shuffled])
    assert_equal(v_1.vocabulary_, v_2.vocabulary_)
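# Illustrative sketch (not part of the original test module): minimal
# DictVectorizer usage mirroring test_one_of_k() above. String-valued features
# are expanded one-of-K ("version=1", "version=2") while numeric features are
# kept as-is. Defined for reference only; the tests never call it.
def _demo_dict_vectorizer():
    v = DictVectorizer(sparse=False)
    X = v.fit_transform([{"version": "1", "ham": 2},
                         {"version": "2", "spam": 0.3}])
    # feature names: ['ham', 'spam', 'version=1', 'version=2']; X has shape (2, 4)
    return v.get_feature_names(), X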
 | 
	bsd-3-clause | 
| 
	pratapvardhan/scikit-learn | 
	examples/neural_networks/plot_mlp_alpha.py | 
	19 | 
	4088 | 
	"""
================================================
Varying regularization in Multi-layer Perceptron
================================================
A comparison of different values of the regularization parameter 'alpha' on
synthetic datasets. The plot shows that different alphas yield different
decision functions.
Alpha is a parameter of the regularization (penalty) term, which combats
overfitting by constraining the size of the weights. Increasing alpha may fix
high variance (a sign of overfitting) by encouraging smaller weights, resulting
in a decision boundary with less curvature.
Conversely, decreasing alpha may fix high bias (a sign of underfitting) by
encouraging larger weights, potentially resulting in a more complicated
decision boundary.
"""
print(__doc__)
# Author: Issam H. Laradji
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neural_network import MLPClassifier
h = .02  # step size in the mesh
alphas = np.logspace(-5, 3, 5)
names = []
for i in alphas:
    names.append('alpha ' + str(i))
classifiers = []
for i in alphas:
    classifiers.append(MLPClassifier(alpha=i, random_state=1))
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
                           random_state=0, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=0),
            make_circles(noise=0.2, factor=0.5, random_state=1),
            linearly_separable]
figure = plt.figure(figsize=(17, 9))
i = 1
# iterate over datasets
for X, y in datasets:
    # preprocess dataset, split into training and test part
    X = StandardScaler().fit_transform(X)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)
    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    # just plot the dataset first
    cm = plt.cm.RdBu
    cm_bright = ListedColormap(['#FF0000', '#0000FF'])
    ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
    # Plot the training points
    ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
    # and testing points
    ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())
    ax.set_xticks(())
    ax.set_yticks(())
    i += 1
    # iterate over classifiers
    for name, clf in zip(names, classifiers):
        ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
        clf.fit(X_train, y_train)
        score = clf.score(X_test, y_test)
        # Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, x_max]x[y_min, y_max].
        if hasattr(clf, "decision_function"):
            Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
        else:
            Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
        # Put the result into a color plot
        Z = Z.reshape(xx.shape)
        ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
        # Plot also the training points
        ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
        # and testing points
        ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
                   alpha=0.6)
        ax.set_xlim(xx.min(), xx.max())
        ax.set_ylim(yy.min(), yy.max())
        ax.set_xticks(())
        ax.set_yticks(())
        ax.set_title(name)
        ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
                size=15, horizontalalignment='right')
        i += 1
figure.subplots_adjust(left=.02, right=.98)
plt.show()
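# Illustrative sketch (not part of the original example): alpha is the
# coefficient of the L2 penalty, so larger values should shrink the learned
# weights. The dataset, seeds and max_iter below are made up for demonstration,
# and the function is defined for reference only; this script never calls it.
def _demo_alpha_shrinks_weights():
    X_d, y_d = make_moons(noise=0.3, random_state=0)
    norms = []
    for a in (1e-5, 1e2):
        clf = MLPClassifier(alpha=a, random_state=1, max_iter=2000)
        clf.fit(X_d, y_d)
        # total Frobenius norm of all weight matrices of the fitted network
        norms.append(sum(np.linalg.norm(W) for W in clf.coefs_))
    return norms  # the norm for alpha=1e2 is expected to be the smaller one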
 | 
	bsd-3-clause | 
| 
	deeplook/bokeh | 
	bokeh/cli/core.py | 
	42 | 
	16025 | 
	from __future__ import absolute_import, print_function
import sys, os
from six.moves.urllib import request as urllib2
from six.moves import cStringIO as StringIO
import pandas as pd
try:
    import click
    is_click = True
except ImportError:
    is_click = False
from . import help_messages as hm
from .utils import (get_chart_params, get_charts_mapping,
                    get_data_series, keep_source_input_sync, get_data_from_url)
from .. import charts as bc
from ..charts import utils as bc_utils
from bokeh.models.widgets import Button
# Define a mapping to connect chart types supported arguments and chart classes
CHARTS_MAP = get_charts_mapping()
if is_click:
    @click.command()
    @click.option('--input', 'input_source', default=None,help=hm.HELP_INPUT)
    @click.option('--output', default='file://cli_output.html', help=hm.HELP_OUTPUT)
    @click.option('--title', default='Bokeh CLI')
    @click.option('--chart_type', default='Line')
    @click.option('--index', default='', help=hm.HELP_INDEX)
    @click.option('--series', default='', help=hm.HELP_SERIES)
    @click.option('--palette')
    @click.option('--buffer', default='f', help=hm.HELP_BUFFER)
    @click.option('--sync_with_source', default=False)
    @click.option('--update_ranges', 'update_ranges', flag_value=True,
                  default=False)
    @click.option('--legend', 'show_legend', flag_value=True,
                  default=False)
    @click.option('--window_size', default='0', help=hm.HELP_WIN_SIZE)
    @click.option('--map', 'map_', default=None)
    @click.option('--map_zoom', 'map_zoom', default=12)
    @click.option('--map_layer', 'map_layer', default="hybrid")
    @click.option('--smart_filters', 'smart_filters', flag_value=True,
                  default=False)
    def cli(input_source, output, title, chart_type, series, palette, index,
            buffer, sync_with_source, update_ranges, show_legend, window_size,
            map_, smart_filters, map_zoom, map_layer):
        """Bokeh Command Line Tool is a minimal client to access high level plotting
        functionality provided by bokeh.charts API.
        Examples:
        >> python bokeh-cli.py --title "My Nice Plot" --series "High,Low,Close"
        --chart_type "Line" --palette Reds --input sample_data/stocks_data.csv
        >> cat sample_data/stocks_data.csv | python bokeh-cli.py --buffer t
        >> python bokeh-cli.py --help
        """
        cli = CLI(
            input_source, output, title, chart_type, series, palette, index, buffer,
            sync_with_source, update_ranges, show_legend, window_size, map_,
            smart_filters, map_zoom, map_layer
        )
        cli.run()
else:
    def cli():
        print("The CLI tool requires click to be installed")
class CLI(object):
    """This is the Bokeh Command Line Interface class and it is in
    charge of providing a very high level access to bokeh charts and
    extends it with functionality.
    """
    def __init__(self, input_source, output, title, chart_type, series, palette,
                 index, buffer, sync_with_source, update_ranges, show_legend,
                 window_size, map_, smart_filters, map_zoom, map_layer):
        """Args:
        input_source (str): path to the series data file (i.e.:
            /source/to/my/data.csv)
            NOTE: this can be either a path to a local file or an url
        output (str, optional): Selects the plotting output, which
            could either be sent to an html file or a bokeh server
            instance. Syntax convention for this option is as follows:
            <output_type>://<type_arg>
            where:
              - output_type: 'file' or 'server'
              - 'file' type options: path_to_output_file
              - 'server' type options syntax: docname[@url][@name]
            Defaults to: --output file://cli_output.html
            Examples:
                --output file://cli_output.html
                --output file:///home/someuser/bokeh_rocks/cli_output.html
                --output server://clidemo
            Default: file://cli_output.html.
        title (str, optional): the title of your chart.
            Default: None.
        chart_type (str, optional): charts classes to use to consume and
            render the input data.
            Default: Line.
        series (str, optional): Name of the series from the input source
            to include in the plot. If not specified all source series
            will be included.
            Defaults to None.
        palette (str, optional): name of the colors palette to use.
            Default: None.
        index (str, optional): Name of the data series to be used as the
            index when plotting. By default the first series found on the
            input file is taken
            Default: None
        buffer (str, optional): if is `t` reads data source as string from
            input buffer using StringIO(sys.stdin.read()) instead of
            reading from a file or an url.
            Default: "f"
        sync_with_source (bool, optional): if True keep the charts source
            created on bokeh-server sync'ed with the source acting like
            `tail -f`.
            Default: False
        window_size (int, optional): show up to N values then start dropping
            off older ones
            Default: '0'
        Attributes:
            source (obj): datasource object for the created chart.
            chart (obj): created chart object.
        """
        self.input = input_source
        self.series = series
        self.index = index
        self.last_byte = -1
        self.sync_with_source = sync_with_source
        self.update_ranges = update_ranges
        self.show_legend = show_legend
        self.window_size = int(window_size)
        self.smart_filters = smart_filters
        self.map_options = {}
        self.current_selection = []
        self.source = self.get_input(input_source, buffer)
        # get the charts specified by the user
        self.factories = create_chart_factories(chart_type)
        if palette:
            print ("Sorry, custom palettes not supported yet, coming soon!")
        # define charts init parameters specified from cmd line and create chart
        self.chart_args = get_chart_params(
            title, output, show_legend=self.show_legend
        )
        if self.smart_filters:
            self.chart_args['tools'] = "pan,wheel_zoom,box_zoom,reset,save," \
                                       "box_select,lasso_select"
        if map_:
            self.map_options['lat'], self.map_options['lng'] = \
                [float(x) for x in map_.strip().split(',')]
            self.map_options['zoom'] = int(map_zoom)
            # Yeah, unfortunate namings.. :-)
            self.map_options['map_type'] = map_layer
    def on_selection_changed(self, obj, attrname, old, new):
        self.current_selection = new
    def limit_source(self, source):
        """ Limit source to cli.window_size, if set.
        Args:
            source (mapping): dict-like object
        """
        if self.window_size:
            for key in source.keys():
                source[key] = source[key][-self.window_size:]
    def run(self):
        """ Start the CLI logic creating the input source, data conversions,
        chart instances to show and all other niceties provided by CLI
        """
        try:
            self.limit_source(self.source)
            children = []
            if self.smart_filters:
                copy_selection = Button(label="copy current selection")
                copy_selection.on_click(self.on_copy)
                children.append(copy_selection)
            self.chart = create_chart(
                self.series, self.source, self.index, self.factories,
                self.map_options, children=children, **self.chart_args
            )
            self.chart.show()
            self.has_ranged_x_axis = 'ranged_x_axis' in self.source.columns
            self.columns = [c for c in self.source.columns if c != 'ranged_x_axis']
            if self.smart_filters:
                for chart in self.chart.charts:
                    chart.source.on_change('selected', self, 'on_selection_changed')
                self.chart.session.poll_document(self.chart.doc)
        except TypeError:
            if not self.series:
                series_list = ', '.join(self.chart.values.keys())
                print(hm.ERR_MSG_TEMPL % series_list)
                raise
        if self.sync_with_source:
            keep_source_input_sync(self.input, self.update_source, self.last_byte)
    def on_copy(self, *args, **kws):
        print("COPYING CONTENT!")
        # TODO: EXPERIMENTAL!!! THIS EXPOSE MANY SECURITY ISSUES AND SHOULD
        #       BE REMOVED ASAP!
        txt = ''
        for rowind in self.current_selection:
            row = self.source.iloc[rowind]
            txt += u"%s\n" % (u",".join(str(row[c]) for c in self.columns))
        os.system("echo '%s' | pbcopy" % txt)
    def update_source(self, new_source):
        """ Update self.chart source with the new data retrieved from
         new_source. It is done by parsing the new source line,
         trasforming it to data to be appended to self.chart source
         updating it on chart.session and actually updating chart.session
         objects.
        Args:
            new_source (str): string that contains the new source row to
                read to the current chart source.
        """
        ns = pd.read_csv(StringIO(new_source), names=self.columns)
        len_source = len(self.source)
        if self.has_ranged_x_axis:
            ns['ranged_x_axis'] = [len_source]
            self.index = 'ranged_x_axis'
        ns.index = [len_source]
        self.source = pd.concat([self.source, ns])
        # TODO: This should be replaced with something that just computes
        #       the new data and source
        fig = create_chart(self.series, ns, self.index, self.factories,
                          self.map_options, **self.chart_args)
        for i, _c in enumerate(fig.charts):
            if not isinstance(_c, bc.GMap):
                # TODO: nested charts are getting ridiculous. Need a better
                #       better interface for charts :-)
                scc = self.chart.charts[i]
                for k, v in _c.source.data.items():
                    scc.source.data[k] = list(scc.source.data[k]) + list(v)
                self.limit_source(scc.source.data)
                chart = scc.chart
                chart.session.store_objects(scc.source)
                if self.update_ranges:
                    plot = chart.plot
                    plot.y_range.start = min(
                        plot.y_range.start, _c.chart.plot.y_range.start
                    )
                    plot.y_range.end = max(
                        plot.y_range.end, _c.chart.plot.y_range.end
                    )
                    plot.x_range.start = min(
                        plot.x_range.start, _c.chart.plot.x_range.start
                    )
                    plot.x_range.end = max(
                        plot.x_range.end, _c.chart.plot.x_range.end
                    )
                    chart.session.store_objects(plot)
    def get_input(self, filepath, buffer):
        """Parse received input options. If buffer is not false (=='f') if
        gets input data from input buffer othewise opens file specified in
        sourcefilename,
        Args:
            filepath (str): path to the file to read from to retrieve data
            buffer (str): if == 't' reads data from input buffer
        Returns:
            string read from filepath/buffer
        """
        if buffer != 'f':
            filepath = StringIO(sys.stdin.read())
        elif filepath is None:
            msg = "No Input! Please specify --source_filename or --buffer t"
            raise IOError(msg)
        else:
            if filepath.lower().startswith('http'):
                # Create a request for the given URL.
                request = urllib2.Request(filepath)
                data = get_data_from_url(request)
                self.last_byte = len(data)
            else:
                filepath = open(filepath, 'r').read()
                self.last_byte = len(filepath)
                filepath = StringIO(filepath)
        source = pd.read_csv(filepath)
        return source
def create_chart(series, source, index, factories, map_options=None, children=None, **args):
    """Create charts instances from types specified in factories using
    data series names, source, index and args
    Args:
        series (list(str)): list of strings specifying the names of the
            series to keep from source
        source (DataFrame): pandas DataFrame with the data series to be
            plotted
        index (str): name of the series of source to be used as index.
        factories (list(ChartObject)): list of chart classes to be used
            to create the charts to be plotted
        **args: arguments to pass to the charts when creating them.
    """
    if not index:
        # if no index was specified as for x axis
        # we take a default "range"
        index = 'ranged_x_axis'
        # add the new x range data to the source dataframe
        source[index] = range(len(source[source.columns[0]]))
    indexes = [x for x in index.split(',') if x]
    data_series = get_data_series(series, source, indexes)
    # parse queries to create the charts..
    charts = []
    for chart_type in factories:
        if chart_type == bc.GMap:
            if not map_options or \
                    not all([x in map_options for x in ['lat', 'lng']]):
                raise ValueError("GMap Charts need lat and lon coordinates!")
            all_args = dict(map_options)
            all_args.update(args)
            chart = chart_type(**all_args)
        else:
            if chart_type == bc.TimeSeries:
                # in case the x axis type is datetime that column must be converted to
                # datetime
                data_series[index] = pd.to_datetime(source[index])
            elif chart_type == bc.Scatter:
                if len(indexes) == 1:
                    scatter_ind = [x for x in data_series.pop(indexes[0]).values]
                    scatter_ind = [scatter_ind] * len(data_series)
                else:
                    scatter_ind = []
                    for key in indexes:
                        scatter_ind.append([x for x in data_series.pop(key).values])
                    if len(scatter_ind) != len(data_series):
                        err_msg = "Number of multiple indexes must be equals" \
                                  " to the number of series"
                        raise ValueError(err_msg)
                for ind, key in enumerate(data_series):
                    values = data_series[key].values
                    data_series[key] = zip(scatter_ind[ind], values)
            chart = chart_type(data_series, **args)
            if hasattr(chart, 'index'):
                chart.index = index
        charts.append(chart)
    fig = bc_utils.Figure(*charts, children=children, **args)
    return fig
def create_chart_factories(chart_types):
    """Receive the chart type(s) specified by the user and build a
    list of the their related functions.
    Args:
        series (str): string that contains the name of the
            chart classes to use when creating the chart, separated by `,`
    example:
    >> create_chart_factories('Line,step')
      [Line, Step]
    """
    return [get_chart(name) for name in chart_types.split(',') if name]
def get_chart(class_name):
    """Return the bokeh class specified in class_name.
    Args:
        class_name (str): name of the chart class to return (i.e.: Line|step)
    """
    return CHARTS_MAP[class_name.strip().lower()]
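# Illustrative sketch (not part of the original module): how the two factory
# helpers above are meant to be combined. The function name is hypothetical and
# it is defined for reference only; nothing here calls it. It assumes that
# CHARTS_MAP is keyed by lower-cased chart names, as get_chart() suggests.
def _demo_chart_lookup():
    # 'Line,step' -> [Line, Step] chart classes, resolved case-insensitively
    factories = create_chart_factories('Line,step')
    # a single lookup goes straight through CHARTS_MAP
    line_cls = get_chart('Line')
    return factories, line_cls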
if __name__ == '__main__':
    cli()
 | 
	bsd-3-clause | 
| 
	ZhukovGreen/UMLND | 
	Lectures/precision_recall.py | 
	1 | 
	1601 | 
	# As with the previous exercises, let's look at the performance of a couple of classifiers
# on the familiar Titanic dataset. Add a train/test split, then store the results in the
# dictionary provided.
import pandas as pd
from sklearn import cross_validation
import numpy as np
# Load the dataset
X = pd.read_csv('titanic_data.csv')
X = X._get_numeric_data()
y = X['Survived']
del X['Age'], X['Survived']
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import recall_score as recall
from sklearn.metrics import precision_score as precision
from sklearn.naive_bayes import GaussianNB
# TODO: split the data into training and testing sets,
# using the standard settings for train_test_split.
# Then, train and test the classifiers with your newly split data instead of X and y.
seed = 5
np.random.seed(seed)
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, random_state=seed)
clf1 = DecisionTreeClassifier()
clf1.fit(X_train, y_train)
print "Decision Tree recall: {:.2f} and precision: {:.2f}".format(recall(y_test, clf1.predict(X_test)),
                                                                  precision(y_test, clf1.predict(X_test)))
clf2 = GaussianNB()
clf2.fit(X_train, y_train)
print "GaussianNB recall: {:.2f} and precision: {:.2f}".format(recall(y_test, clf2.predict(X_test)),
                                                                  precision(y_test, clf2.predict(X_test)))
results = {
    "Naive Bayes Recall": 0.41,
    "Naive Bayes Precision": 0.71,
    "Decision Tree Recall": 0.48,
    "Decision Tree Precision": 0.51
}
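# Illustrative sketch (not part of the original exercise): the same split and
# scores using the newer sklearn API, where cross_validation was folded into
# model_selection. Defined for reference only; this script never calls it.
def _demo_modern_split(X, y):
    from sklearn.model_selection import train_test_split
    X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=5)
    clf = DecisionTreeClassifier().fit(X_tr, y_tr)
    return recall(y_te, clf.predict(X_te)), precision(y_te, clf.predict(X_te))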
 | 
	gpl-3.0 | 
| 
	rossant/phy | 
	phy/gui/tests/test_gui.py | 
	2 | 
	4744 | 
	# -*- coding: utf-8 -*-
"""Test gui."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
from pytest import raises
from ..qt import Qt, QApplication, QWidget, QMessageBox
from ..gui import (GUI, GUIState,
                   _try_get_matplotlib_canvas,
                   _try_get_vispy_canvas,
                   )
from phy.utils import Bunch
from phy.utils._color import _random_color
#------------------------------------------------------------------------------
# Utilities and fixtures
#------------------------------------------------------------------------------
def _create_canvas():
    """Create a VisPy canvas with a color background."""
    from vispy import app
    c = app.Canvas()
    c.color = _random_color()
    @c.connect
    def on_draw(e):  # pragma: no cover
        c.context.clear(c.color)
    return c
#------------------------------------------------------------------------------
# Test views
#------------------------------------------------------------------------------
def test_vispy_view():
    from vispy.app import Canvas
    assert isinstance(_try_get_vispy_canvas(Canvas()), QWidget)
def test_matplotlib_view():
    from matplotlib.pyplot import Figure
    assert isinstance(_try_get_matplotlib_canvas(Figure()), QWidget)
#------------------------------------------------------------------------------
# Test GUI
#------------------------------------------------------------------------------
def test_gui_noapp(tempdir):
    if not QApplication.instance():
        with raises(RuntimeError):  # pragma: no cover
            GUI(config_dir=tempdir)
def test_gui_1(tempdir, qtbot):
    gui = GUI(position=(200, 100), size=(100, 100), config_dir=tempdir)
    qtbot.addWidget(gui)
    assert gui.name == 'GUI'
    # Increase coverage.
    @gui.connect_
    def on_show():
        pass
    gui.unconnect_(on_show)
    qtbot.keyPress(gui, Qt.Key_Control)
    qtbot.keyRelease(gui, Qt.Key_Control)
    assert isinstance(gui.dialog("Hello"), QMessageBox)
    view = gui.add_view(_create_canvas(), floating=True, closable=True)
    gui.add_view(_create_canvas())
    view.setFloating(False)
    gui.show()
    assert gui.get_view('Canvas')
    assert len(gui.list_views('Canvas')) == 2
    # Check that the close_widget event is fired when the gui widget is
    # closed.
    _close = []
    @view.connect_
    def on_close_widget():
        _close.append(0)
    @gui.connect_
    def on_close_view(view):
        _close.append(1)
    view.close()
    assert _close == [1, 0]
    gui.close()
    assert gui.state.geometry_state['geometry']
    assert gui.state.geometry_state['state']
    gui.default_actions.exit()
def test_gui_status_message(gui):
    assert gui.status_message == ''
    gui.status_message = ':hello world!'
    assert gui.status_message == ':hello world!'
    gui.lock_status()
    gui.status_message = ''
    assert gui.status_message == ':hello world!'
    gui.unlock_status()
    gui.status_message = ''
    assert gui.status_message == ''
def test_gui_geometry_state(tempdir, qtbot):
    _gs = []
    gui = GUI(size=(100, 100), config_dir=tempdir)
    qtbot.addWidget(gui)
    gui.add_view(_create_canvas(), 'view1')
    gui.add_view(_create_canvas(), 'view2')
    gui.add_view(_create_canvas(), 'view2')
    @gui.connect_
    def on_close():
        _gs.append(gui.save_geometry_state())
    gui.show()
    qtbot.waitForWindowShown(gui)
    assert len(gui.list_views('view')) == 3
    assert gui.view_count() == {
        'view1': 1,
        'view2': 2,
    }
    gui.close()
    # Recreate the GUI with the saved state.
    gui = GUI(config_dir=tempdir)
    gui.add_view(_create_canvas(), 'view1')
    gui.add_view(_create_canvas(), 'view2')
    gui.add_view(_create_canvas(), 'view2')
    @gui.connect_
    def on_show():
        gui.restore_geometry_state(_gs[0])
    assert gui.restore_geometry_state(None) is None
    qtbot.addWidget(gui)
    gui.show()
    assert len(gui.list_views('view')) == 3
    assert gui.view_count() == {
        'view1': 1,
        'view2': 2,
    }
    gui.close()
#------------------------------------------------------------------------------
# Test GUI state
#------------------------------------------------------------------------------
def test_gui_state_view(tempdir):
    view = Bunch(name='MyView0')
    state = GUIState(config_dir=tempdir)
    state.update_view_state(view, dict(hello='world'))
    assert not state.get_view_state(Bunch(name='MyView'))
    assert not state.get_view_state(Bunch(name='MyView1'))
    assert state.get_view_state(view) == Bunch(hello='world')
 | 
	bsd-3-clause | 
| 
	alexvanboxel/airflow | 
	docs/conf.py | 
	33 | 
	8957 | 
	# -*- coding: utf-8 -*-
#
# Airflow documentation build configuration file, created by
# sphinx-quickstart on Thu Oct  9 20:50:01 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import mock
MOCK_MODULES = [
    'apiclient',
    'apiclient.discovery',
    'apiclient.http',
    'mesos',
    'mesos.interface',
    'mesos.native',
    'oauth2client.service_account',
    'pandas.io.gbq',
]
for mod_name in MOCK_MODULES:
    sys.modules[mod_name] = mock.Mock()
# Hack to allow a piece of the code to behave differently while
# the docs are being built. The main objective was to alter the
# behavior of utils.apply_default, which was hiding function headers
os.environ['BUILDING_AIRFLOW_DOCS'] = 'TRUE'
from airflow import settings
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.coverage',
    'sphinx.ext.viewcode',
    'sphinxarg.ext',
]
viewcode_import = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Airflow'
#copyright = u''
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
#version = '1.0.0'
# The full version, including alpha/beta/rc tags.
#release = '1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
import sphinx_rtd_theme
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
html_title = "Airflow Documentation"
# A shorter title for the navigation bar.  Default is the same as html_title.
html_short_title = ""
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Airflowdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
  ('index', 'Airflow.tex', u'Airflow Documentation',
   u'Maxime Beauchemin', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'airflow', u'Airflow Documentation',
     [u'Maxime Beauchemin'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [(
    'index', 'Airflow', u'Airflow Documentation',
    u'Maxime Beauchemin', 'Airflow',
    'Airflow is a system to programmatically author, schedule and monitor data pipelines.',
    'Miscellaneous'
),]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
 | 
	apache-2.0 | 
| 
	tgsmith61591/pyramid | 
	pmdarima/arima/_auto_solvers.py | 
	1 | 
	20432 | 
	# -*- coding: utf-8 -*-
#
# Methods for optimizing auto-arima
from numpy.linalg import LinAlgError
import numpy as np
from datetime import datetime
from joblib import Parallel, delayed
from sklearn.utils import check_random_state
import abc
import functools
import time
import warnings
import traceback
from .arima import ARIMA
from ..warnings import ModelFitWarning
from ._context import ContextType, ContextStore
from . import _validation
from ..compat import statsmodels as sm_compat
def _root_test(model, ic, trace):
    """
    Check the roots of the new model, and set IC to inf if the roots are
    near non-invertible. This is a little bit different than how Rob does it:
    https://github.com/robjhyndman/forecast/blob/master/R/newarima2.R#L780
    In our test, we look directly at the inverse roots to see if they come
    anywhere near the unit circle border
    """
    max_invroot = 0
    p, d, q = model.order
    P, D, Q, m = model.seasonal_order
    if p + P > 0:
        max_invroot = max(0, *np.abs(1 / model.arroots()))
    if q + Q > 0 and np.isfinite(ic):
        max_invroot = max(0, *np.abs(1 / model.maroots()))
    if max_invroot > 1 - 1e-2:
        ic = np.inf
        if trace > 1:
            print(
                "Near non-invertible roots for order "
                "(%i, %i, %i)(%i, %i, %i, %i); setting score to inf (at "
                "least one inverse root too close to the border of the "
                "unit circle: %.3f)"
                % (p, d, q, P, D, Q, m, max_invroot))
    return ic
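# Illustrative sketch (not part of pmdarima): the same unit-circle criterion as
# _root_test() above, applied to a toy AR(2) polynomial 1 - phi1*z - phi2*z**2.
# The coefficients and the helper name are made up for demonstration; the
# function is defined for reference only and is never called.
def _demo_inverse_root_check(phi1=0.95, phi2=0.04, border=1 - 1e-2):
    # np.roots expects the highest-degree coefficient first
    roots = np.roots([-phi2, -phi1, 1.0])
    max_invroot = max(0, *np.abs(1.0 / roots))
    # True means the model would be rejected (ic set to inf) by _root_test
    return max_invroot > border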
class _SolverMixin(metaclass=abc.ABCMeta):
    """The solver interface implemented by wrapper classes"""
    @abc.abstractmethod
    def solve(self):
        """Must be implemented by subclasses"""
class _RandomFitWrapper(_SolverMixin):
    """Searches for the best model using a random search"""
    def __init__(self, y, X, fit_partial, d, D, m, max_order,
                 max_p, max_q, max_P, max_Q, random, random_state,
                 n_fits, n_jobs, seasonal, trace, with_intercept,
                 sarimax_kwargs):
        # NOTE: pre-1.5.2, we started at start_p, start_q, etc. However, when
        # using stepwise=FALSE in R, hyndman starts at 0. He only uses start_*
        # when stepwise=TRUE.
        # generate the set of (p, q, P, Q) FIRST, since it is contingent
        # on whether or not the user is interested in a seasonal ARIMA result.
        # This will reduce the search space for non-seasonal ARIMA models.
        # loop p, q. Make sure to loop at +1 interval,
        # since max_{p|q} is inclusive.
        if seasonal:
            gen = (
                ((p, d, q), (P, D, Q, m))
                for p in range(0, max_p + 1)
                for q in range(0, max_q + 1)
                for P in range(0, max_P + 1)
                for Q in range(0, max_Q + 1)
                if p + q + P + Q <= max_order
            )
        else:
            # otherwise it's not seasonal and we don't need the seasonal pieces
            gen = (
                ((p, d, q), (0, 0, 0, 0))
                for p in range(0, max_p + 1)
                for q in range(0, max_q + 1)
                if p + q <= max_order
            )
        # if we are fitting a random search rather than an exhaustive one, we
        # will scramble up the generator (as a list) and only fit n_iter ARIMAs
        if random:
            random_state = check_random_state(random_state)
            # make a list to scramble...
            gen = random_state.permutation(list(gen))[:n_fits]
        self.gen = gen
        self.n_jobs = n_jobs
        self.trace = trace
        # New partial containing y, X
        self.fit_partial = functools.partial(
            fit_partial,
            y=y,
            X=X,
            with_intercept=with_intercept,
            **sarimax_kwargs,
        )
    def solve(self):
        """Do a random search"""
        fit_partial = self.fit_partial
        n_jobs = self.n_jobs
        gen = self.gen
        # get results in parallel
        all_res = Parallel(n_jobs=n_jobs)(
            delayed(fit_partial)(
                order=order,
                seasonal_order=seasonal_order,
            )
            for order, seasonal_order in gen
        )
        sorted_fits = _sort_and_filter_fits(all_res)
        if self.trace and sorted_fits:
            print(f"\nBest model: {str(sorted_fits[0])}")
        return sorted_fits
class _StepwiseFitWrapper(_SolverMixin):
    """Searches for the best model using the stepwise algorithm.
    
    The stepwise algorithm fluctuates the more sensitive pieces of the ARIMA
    (the seasonal components) first, adjusting towards the direction of the
    smaller {A|B|HQ}IC(c), and continues to step down as long as the error
    shrinks. As long as the error term decreases and the best parameters have
    not shifted to a point where they can no longer change, ``k`` will
    increase, and the models will continue to be fit until the ``max_k`` is
    reached.
    References
    ----------
    .. [1] R's auto-arima stepwise source code: https://github.com/robjhyndman/forecast/blob/30308a4e314ff29338291462e81bf68ff0c5f86d/R/newarima2.R#L366
    .. [2] https://robjhyndman.com/hyndsight/arma-roots/
    """  # noqa
    def __init__(self, y, X, start_params, trend, method, maxiter,
                 fit_params, suppress_warnings, trace, error_action,
                 out_of_sample_size, scoring, scoring_args,
                 p, d, q, P, D, Q, m, max_p, max_q, max_P, max_Q, seasonal,
                 information_criterion, with_intercept, **kwargs):
        self.trace = _validation.check_trace(trace)
        # Create a partial of the fit call so we don't have arg bloat all over
        self._fit_arima = functools.partial(
            _fit_candidate_model,
            y=y,
            X=X,
            start_params=start_params,
            trend=trend,
            method=method,
            maxiter=maxiter,
            fit_params=fit_params,
            suppress_warnings=suppress_warnings,
            trace=self.trace,
            error_action=error_action,
            out_of_sample_size=out_of_sample_size,
            scoring=scoring,
            scoring_args=scoring_args,
            information_criterion=information_criterion,
            **kwargs)
        self.information_criterion = information_criterion
        self.with_intercept = with_intercept
        # order stuff we will be incrementing
        self.p = p
        self.d = d
        self.q = q
        self.P = P
        self.D = D
        self.Q = Q
        self.m = m
        self.max_p = max_p
        self.max_q = max_q
        self.max_P = max_P
        self.max_Q = max_Q
        self.seasonal = seasonal
        # execution context passed through the context manager
        self.exec_context = ContextStore.get_or_empty(ContextType.STEPWISE)
        # other internal start vars
        self.k = self.start_k = 0
        self.max_k = 100 if self.exec_context.max_steps is None \
            else self.exec_context.max_steps
        self.max_dur = self.exec_context.max_dur
        # results list to store intermittent hashes of orders to determine if
        # we've seen this order before...
        self.results_dict = dict()  # dict[tuple -> ARIMA]
        self.ic_dict = dict()  # dict[tuple -> float]
        self.fit_time_dict = dict()  # dict[tuple -> float]
        self.bestfit = None
        self.bestfit_key = None  # (order, seasonal_order, constant)
    def _do_fit(self, order, seasonal_order, constant=None):
        """Do a fit and determine whether the model is better"""
        if not self.seasonal:
            seasonal_order = (0, 0, 0, 0)
        seasonal_order = sm_compat.check_seasonal_order(seasonal_order)
        # we might be fitting without a constant
        if constant is None:
            constant = self.with_intercept
        if (order, seasonal_order, constant) not in self.results_dict:
            # increment the number of fits
            self.k += 1
            fit, fit_time, new_ic = self._fit_arima(
                order=order,
                seasonal_order=seasonal_order,
                with_intercept=constant)
            # use the orders as a key to be hashed for
            # the dictionary (pointing to fit)
            self.results_dict[(order, seasonal_order, constant)] = fit
            # cache this so we can lookup best model IC downstream
            self.ic_dict[(order, seasonal_order, constant)] = new_ic
            self.fit_time_dict[(order, seasonal_order, constant)] = fit_time
            # Determine if the new fit is better than the existing fit
            if fit is None or np.isinf(new_ic):
                return False
            # no benchmark model
            if self.bestfit is None:
                self.bestfit = fit
                self.bestfit_key = (order, seasonal_order, constant)
                if self.trace > 1:
                    print("First viable model found (%.3f)" % new_ic)
                return True
            # otherwise there's a current best
            current_ic = self.ic_dict[self.bestfit_key]
            if new_ic < current_ic:
                if self.trace > 1:
                    print("New best model found (%.3f < %.3f)"
                          % (new_ic, current_ic))
                self.bestfit = fit
                self.bestfit_key = (order, seasonal_order, constant)
                return True
        # we've seen this model before
        return False
    def solve(self):
        start_time = datetime.now()
        p, d, q = self.p, self.d, self.q
        P, D, Q, m = self.P, self.D, self.Q, self.m
        max_p, max_q = self.max_p, self.max_q
        max_P, max_Q = self.max_P, self.max_Q
        if self.trace:
            print("Performing stepwise search to minimize %s"
                  % self.information_criterion)
        # fit a baseline p, d, q model
        self._do_fit((p, d, q), (P, D, Q, m))
        # null model with possible constant
        if self._do_fit((0, d, 0), (0, D, 0, m)):
            p = q = P = Q = 0
        # A basic AR model
        if max_p > 0 or max_P > 0:
            _p = 1 if max_p > 0 else 0
            _P = 1 if (m > 1 and max_P > 0) else 0
            if self._do_fit((_p, d, 0), (_P, D, 0, m)):
                p = _p
                P = _P
                q = Q = 0
        # Basic MA model
        if max_q > 0 or max_Q > 0:
            _q = 1 if max_q > 0 else 0
            _Q = 1 if (m > 1 and max_Q > 0) else 0
            if self._do_fit((0, d, _q), (0, D, _Q, m)):
                p = P = 0
                Q = _Q
                q = _q
        # Null model with NO constant (if we haven't tried it yet)
        if self.with_intercept:
            if self._do_fit((0, d, 0), (0, D, 0, m), constant=False):
                p = q = P = Q = 0
        while self.start_k < self.k < self.max_k:
            self.start_k = self.k
            # break loop if execution time exceeds the timeout threshold. needs
            # to be at front of loop, since a single pass may reach termination
            # criteria by end and we only want to warn and break if the loop is
            # continuing again
            dur = (datetime.now() - start_time).total_seconds()
            if self.max_dur and dur > self.max_dur:
                warnings.warn('early termination of stepwise search due to '
                              'max_dur threshold (%.3f > %.3f)'
                              % (dur, self.max_dur))
                break
            # NOTE: k changes for every fit, so we might need to bail halfway
            # through the loop, hence the multiple checks.
            if P > 0 and \
                    self.k < self.max_k and \
                    self._do_fit((p, d, q), (P - 1, D, Q, m)):
                P -= 1
                continue
            if Q > 0 and \
                    self.k < self.max_k and \
                    self._do_fit((p, d, q), (P, D, Q - 1, m)):
                Q -= 1
                continue
            if P < max_P and \
                    self.k < self.max_k and \
                    self._do_fit((p, d, q), (P + 1, D, Q, m)):
                P += 1
                continue
            if Q < max_Q and \
                    self.k < self.max_k and \
                    self._do_fit((p, d, q), (P, D, Q + 1, m)):
                Q += 1
                continue
            if Q > 0 and P > 0 and \
                    self.k < self.max_k and \
                    self._do_fit((p, d, q), (P - 1, D, Q - 1, m)):
                Q -= 1
                P -= 1
                continue
            if Q < max_Q and P > 0 and \
                    self.k < self.max_k and \
                    self._do_fit((p, d, q), (P - 1, D, Q + 1, m)):
                Q += 1
                P -= 1
                continue
            if Q > 0 and P < max_P and \
                    self.k < self.max_k and \
                    self._do_fit((p, d, q), (P + 1, D, Q - 1, m)):
                Q -= 1
                P += 1
                continue
            if Q < max_Q and P < max_P and \
                    self.k < self.max_k and \
                    self._do_fit((p, d, q), (P + 1, D, Q + 1, m)):
                Q += 1
                P += 1
                continue
            if p > 0 and \
                    self.k < self.max_k and \
                    self._do_fit((p - 1, d, q), (P, D, Q, m)):
                p -= 1
                continue
            if q > 0 and \
                    self.k < self.max_k and \
                    self._do_fit((p, d, q - 1), (P, D, Q, m)):
                q -= 1
                continue
            if p < max_p and \
                    self.k < self.max_k and \
                    self._do_fit((p + 1, d, q), (P, D, Q, m)):
                p += 1
                continue
            if q < max_q and \
                    self.k < self.max_k and \
                    self._do_fit((p, d, q + 1), (P, D, Q, m)):
                q += 1
                continue
            if q > 0 and p > 0 and \
                    self.k < self.max_k and \
                    self._do_fit((p - 1, d, q - 1), (P, D, Q, m)):
                q -= 1
                p -= 1
                continue
            if q < max_q and p > 0 and \
                    self.k < self.max_k and \
                    self._do_fit((p - 1, d, q + 1), (P, D, Q, m)):
                q += 1
                p -= 1
                continue
            if q > 0 and p < max_p and \
                    self.k < self.max_k and \
                    self._do_fit((p + 1, d, q - 1), (P, D, Q, m)):
                q -= 1
                p += 1
                continue
            if q < max_q and p < max_p and \
                    self.k < self.max_k and \
                    self._do_fit((p + 1, d, q + 1), (P, D, Q, m)):
                q += 1
                p += 1
                continue
            # R: if (allowdrift || allowmean)
            # we don't have these args, so we just default this case to true to
            # evaluate all corners
            if self.k < self.max_k and \
                    self._do_fit((p, d, q),
                                 (P, D, Q, m),
                                 constant=not self.with_intercept):
                self.with_intercept = not self.with_intercept
                continue
        # check if the search has been ended after max_steps
        if self.exec_context.max_steps is not None \
                and self.k >= self.exec_context.max_steps:
            warnings.warn('stepwise search has reached the maximum number '
                          'of tries to find the best fit model')
        # TODO: if (approximation && !is.null(bestfit$arma)) - refit best w MLE
        filtered_models_ics = sorted(
            [(v, self.fit_time_dict[k], self.ic_dict[k])
             for k, v in self.results_dict.items()
             if v is not None],
            key=(lambda fit_ic: fit_ic[1]),
        )
        sorted_fits = _sort_and_filter_fits(filtered_models_ics)
        if self.trace and sorted_fits:
            print(f"\nBest model: {str(sorted_fits[0])}")
        return sorted_fits
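# --------------------------------------------------------------------------- #
# Hedged usage sketch (illustrative only, not part of the original module):
# the stepwise solver above is normally reached through pmdarima's public
# ``auto_arima`` entry point with ``stepwise=True``. The toy series and the
# particular keyword values below are assumptions made for demonstration.
def _example_stepwise_auto_arima_usage():  # pragma: no cover
    import numpy as np
    from pmdarima import auto_arima
    rng = np.random.RandomState(42)
    y_demo = rng.normal(size=150).cumsum()  # toy random-walk series
    model = auto_arima(
        y_demo,
        seasonal=False,
        stepwise=True,                     # drives the stepwise search above
        information_criterion='aic',
        trace=True,
        suppress_warnings=True,
    )
    return model.order, model.seasonal_order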
def _fit_candidate_model(y,
                         X,
                         order,
                         seasonal_order,
                         start_params,
                         trend,
                         method,
                         maxiter,
                         fit_params,
                         suppress_warnings,
                         trace,
                         error_action,
                         out_of_sample_size,
                         scoring,
                         scoring_args,
                         with_intercept,
                         information_criterion,
                         **kwargs):
    """Instantiate and fit a candidate model
    1. Initialize a model
    2. Fit model
    3. Perform a root test
    4. Return model, information criterion
    """
    start = time.time()
    fit_time = np.nan
    ic = np.inf
    # Fit outside try block, so if there is a type error in user input we
    # don't mask it with a warning or worse
    fit = ARIMA(order=order, seasonal_order=seasonal_order,
                start_params=start_params, trend=trend, method=method,
                maxiter=maxiter, suppress_warnings=suppress_warnings,
                out_of_sample_size=out_of_sample_size, scoring=scoring,
                scoring_args=scoring_args,
                with_intercept=with_intercept, **kwargs)
    try:
        fit.fit(y, X=X, **fit_params)
    # for non-stationarity errors or singular matrices, return None
    except (LinAlgError, ValueError) as v:
        if error_action == "raise":
            raise v
        elif error_action in ("warn", "trace"):
            warning_str = 'Error fitting %s ' \
                          '(if you do not want to see these warnings, run ' \
                          'with error_action="ignore").' \
                          % str(fit)
            if error_action == 'trace':
                warning_str += "\nTraceback:\n" + traceback.format_exc()
            warnings.warn(warning_str, ModelFitWarning)
    else:
        fit_time = time.time() - start
        ic = getattr(fit, information_criterion)()  # aic, bic, aicc, etc.
        # check the roots of the new model, and set IC to inf if the
        # roots are near non-invertible
        ic = _root_test(fit, ic, trace)
    # log the model fit
    if trace:
        print(
            "{model}   : {ic_name}={ic:.3f}, Time={time:.2f} sec"
            .format(model=str(fit),
                    ic_name=information_criterion.upper(),
                    ic=ic,
                    time=fit_time)
        )
    return fit, fit_time, ic
def _sort_and_filter_fits(models):
    """Sort the results in ascending order, by information criterion
    If there are no suitable models, raise a ValueError.
    Otherwise, filter out the failed fits (``None`` models or non-finite
    information criteria) and return the remaining models sorted in
    ascending order of their information criterion.
    Parameters
    ----------
    models : tuple or list
        The list of (model, fit_time, information_criterion) tuples, or a single such tuple
    """
    # if it's a result of making it to the end, it will be a list of models
    if not isinstance(models, list):
        models = [models]
    # Filter out the Nones or Infs (the failed models)...
    filtered = [(mod, ic) for mod, _, ic in models
                if mod is not None and np.isfinite(ic)]
    # if the list is empty, or if it was an ARIMA and it's None
    if not filtered:
        raise ValueError(
            "Could not successfully fit a viable ARIMA model "
            "to input data.\nSee "
            "http://alkaline-ml.com/pmdarima/no-successful-model.html "
            "for more information on why this can happen."
        )
    # sort by the criteria - lower is better for both AIC and BIC
    # (https://stats.stackexchange.com/questions/81427/aic-guidelines-in-model-selection)  # noqa
    sorted_res = sorted(filtered, key=(lambda mod_ic: mod_ic[1]))
    # TODO: break ties with fit time?
    models, _ = zip(*sorted_res)
    return models
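# --------------------------------------------------------------------------- #
# Minimal sketch of the sorting/filtering contract above (illustrative only;
# plain strings stand in for fitted ARIMA estimators).
def _example_sort_and_filter():  # pragma: no cover
    import numpy as np
    candidates = [
        ("arima_010", 0.12, 205.3),
        (None, 0.05, np.inf),          # failed fit -> dropped
        ("arima_110", 0.20, 198.7),
        ("arima_011", 0.18, np.inf),   # near non-invertible roots -> dropped
    ]
    ranked = _sort_and_filter_fits(candidates)
    assert ranked == ("arima_110", "arima_010")  # ascending information criterion
    return ranked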
 | 
	mit | 
| 
	ScienceStacks/SciSheets | 
	mysite/scisheets/plugins/test_tabularize.py | 
	2 | 
	2226 | 
	""" Tests for tabularize. """
from mysite import settings
from scisheets.core.api import APIFormulas, APIPlugin
from scisheets.core.table import Table
from CommonUtil.is_null import isNan
from tabularize import tabularize, _delElement
import pandas as pd
import os
import unittest
CATEGORY_COLNM = 'category'
VALUES_COLNM = 'values'
SFX_NAMES = ['a', 'b']
OTHER_NAMES = ['x', 'y']
VALUES = range(4)
class TestTabularize(unittest.TestCase):
  def setUp(self):
    cat_values = []
    for o in OTHER_NAMES:
      for s in SFX_NAMES:
        cat_values.append([o, s])
    val_dict = {CATEGORY_COLNM: cat_values,
        VALUES_COLNM:  VALUES,
        }
    df = pd.DataFrame(val_dict)
    self.api = APIFormulas(Table("Dummy"))
    self.api.addColumnsToTableFromDataframe(df)
  def testDelElement(self):
    size = 4
    values = range(size)
    for idx in range(size):
      expected_list = list(values)
      del expected_list[idx]
      self.assertTrue(expected_list == _delElement(values, idx))
  def testSimple(self):
    new_category_colnm = "NewCategory"
    values_colnm_prefix = "Col"
    tabularize(self.api, CATEGORY_COLNM, 1, VALUES_COLNM,
        new_category_colnm=new_category_colnm,
        values_colnm_prefix=values_colnm_prefix)
    table = self.api._table
    self.assertTrue(table.isColumnPresent(new_category_colnm))
    for sfx in SFX_NAMES:
      expected_name = "%s%s" % (values_colnm_prefix, sfx)
      self.assertTrue(table.isColumnPresent(expected_name))
      column = table.columnFromName(expected_name, is_relative=True)
      cells = [x for x in column.getCells() if not isNan(x)]
      size = len(VALUES)/len(SFX_NAMES)
      self.assertEqual(len(cells), size)
  def testFromFile1(self):
    filepath = os.path.join(settings.SCISHEETS_TEST_DIR, 
                            "tabularize_test.pcl")
    api = APIPlugin(filepath)
    api.initialize()
    tabularize(api, 'Groups', 1, 'MeanCt',
        new_category_colnm='BioRuns',
        values_colnm_prefix='Gene_')
    BioRuns = api.getColumnValue('BioRuns')
    Gene_I = api.getColumnValue('Gene_I')
    Gene_R1 = api.getColumnValue('Gene_R1')
    Gene_R2 = api.getColumnValue('Gene_R2')
if __name__ == '__main__':
  unittest.main()
 | 
	apache-2.0 | 
| 
	Kirubaharan/hydrology | 
	scrap.py | 
	1 | 
	1044 | 
	__author__ = 'kiruba'
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import itertools
import operator
import mpl_toolkits.mplot3d.axes3d
from numpy import pi, arange, sin, linspace
from bokeh.models import LinearAxis, Range1d
from bokeh.plotting import figure, show, output_file
# t = np.linspace(0,10,40)
#
# y = np.sin(t)
# z = np.sin(t)
# print t
# print y
# length = np.sqrt(y**2 + z **2)
# print length
# ax1 = plt.subplot(111,projection='3d')
# line, = ax1.plot(t,y,z,color='r',lw=2)
# arrow_1 = ax1.plot(t[0:2]*1.5, length[0:2], z[0:2], lw=3)
#
# plt.show()
x = arange(-2*pi, 2*pi, 0.1)
y = sin(x)
y2 = linspace(0, 100, len(x))
p = figure(x_range=(-6.5, 6.5), y_range=(-1.1, 1.1), min_border=80)
p.circle(x, y, fill_color="red", size=5, line_color="black")
p.extra_y_ranges['foo'] = Range1d(0, 100)
p.circle(x, y2, fill_color="blue", size=5, line_color="black", y_range_name="foo")
p.add_layout(LinearAxis(y_range_name="foo"), 'left')
output_file("twin_axis.html", title="twin_axis.py example")
show(p)
 | 
	gpl-3.0 | 
| 
	altMITgcm/MITgcm66h | 
	utils/python/MITgcmutils/MITgcmutils/cs/pcol.py | 
	1 | 
	7026 | 
	import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
def pcol( x, y, data, projection=None, vmin=None, vmax=None, **kwargs):
    """function h=pcol(x,y,v)
    function h=pcol(x,y,v, projection = mp )
    
    plots 2D scalar fields v on the MITgcm cubed sphere grid with pcolormesh.
    x,y are really 'xg', and 'yg', that is, they should be the coordinates
    of the points one half grid cell to the left and bottom, that is
    vorticity points for tracers, etc. 
    
    If present, 'projection' (a basemap instance) is used to transform 
    coordinates. Unfortunately, cylindrical and conic maps are limited to
    the [-180 180] range. 
    projection = 'sphere' results in a 3D visualization on the sphere
    without any specific projection. Good for debugging.
    Example script to use pcol.py:
    from mpl_toolkits.basemap import Basemap
    import MITgcmutils as mit
    import matplotlib.pyplot as plt
    from sq import sq
    x=mit.rdmds('XG'); y=mit.rdmds('YG'); e=mit.rdmds('Eta',np.Inf)
    fig = plt.figure();
    mp = Basemap(projection='moll',lon_0 = 0.,
                 resolution = 'l', area_thresh = 1000.)
    plt.clf()
    h = mit.cs.pcol(x,y,sq(e), projection = mp)
    mp.fillcontinents(color = 'grey')
    mp.drawmapboundary()
    mp.drawmeridians(np.arange(0, 360, 30))
    mp.drawparallels(np.arange(-90, 90, 30))
    plt.show()
    """
# pcol first divides the 2D cs-field(6*n,n) into six faces. Then for
# each face, an extra row and column is added from the neighboring faces in
# order to fool pcolor into drawing the entire field and not just
# (n-1,m-1) data points. There are two corner points that have no explicit
# coordinates so that they have to be found by
# interpolation/averaging. Then each face is divided into 4 tiles,
# assuming cs-geometry, and each tile is plotted individually in
# order to avoid problems due to ambiguous longitude values (the jump
# between -180 and 180, or 360 and 0 degrees). As long as the poles
# are at the centers of the north and south faces and the first tile is
# symmetric about its center this should work.
    # get the figure handle
    fig=plt.gcf()
    mapit = 0
    if projection is not None:
        mp = projection
        if mp=='sphere': mapit=-1
        else: mapit = 1
    # convert to [-180 180[ representation
    x = np.where(x>180,x-360.,x)
    ny,nx = data.shape
    # determine range for color range
    cax = [data.min(),data.max()]
    if cax[1]-cax[0]==0: cax = [cax[0]-1,cax[1]+1]
    if vmin is not None: cax[0] = vmin
    if vmax is not None: cax[1] = vmax
    if mapit == -1:
        # set up 3D plot
        if len(fig.axes)>0: 
            # if present, remove and replace the last axis of fig
            geom=fig.axes[-1].get_geometry()
            plt.delaxes(fig.axes[-1])
        else:
            # otherwise use full figure
            geom = ((1,1,1))
        ax = fig.add_subplot(geom[0],geom[1],geom[2],projection = '3d',
                             axisbg='None')
        # define color range
        tmp = data - data.min()
        N = tmp/tmp.max()       
        # use this colormap
        colmap = cm.jet
        colmap.set_bad('w',1.0)
        mycolmap = colmap(N) #cm.jet(N)
    ph=np.array([])
    jc=x.shape[0]//2
    xxf=np.empty((jc+1,jc+1,4))
    yyf=xxf
    ffld=np.empty((jc,jc,4))
    xff=[]
    yff=[]
    fldf=[]
    for k in range(0,6):
        ix = np.arange(0,ny) + k*ny
        xff.append(x[0:ny,ix])
        yff.append(y[0:ny,ix])
        fldf.append(data[0:ny,ix])
    # find the missing corners by interpolation (one in the North Atlantic)
    xfodd = (xff[0][-1,0]+xff[2][-1,0]+xff[4][-1,0])/3.
    yfodd = (yff[0][-1,0]+yff[2][-1,0]+yff[4][-1,0])/3.
    # and one south of Australia
    xfeven= (xff[1][0,-1]+xff[3][0,-1]+xff[5][0,-1])/3.
    yfeven= (yff[1][0,-1]+yff[3][0,-1]+yff[5][0,-1])/3.
    # loop over tiles
    for k in range(0,6):
        kodd  = 2*(k//2)
        kodd2 = kodd
        if kodd==4: kodd2=kodd-6
        keven  = 2*(k//2)
        keven2 = keven
        if keven==4: keven2=keven-6
        fld = fldf[k]
        if np.mod(k+1,2):
            xf = np.vstack( [ np.column_stack( [xff[k],xff[1+kodd][:,0]] ),
                              np.flipud(np.append(xff[2+kodd2][:,0],xfodd))] )
            yf = np.vstack( [ np.column_stack( [yff[k],yff[1+kodd][:,0]] ),
                              np.flipud(np.append(yff[2+kodd2][:,0],yfodd))] )
        else:
            xf = np.column_stack( [np.vstack( [xff[k],xff[2+keven2][0,:]] ),
                                   np.flipud(np.append(xff[3+keven2][0,:],
                                                       xfeven))] )
            yf = np.column_stack( [np.vstack( [yff[k],yff[2+keven2][0,:]] ),
                                   np.flipud(np.append(yff[3+keven2][0,:],
                                                       yfeven))] )
        if mapit==-1:
            ix = np.arange(0,ny) + k*ny
            # no projection at all (projection argument is 'sphere'), 
            # just convert to cartesian coordinates and plot a 3D sphere
            deg2rad=np.pi/180.
            xcart,ycart,zcart = sph2cart( xf*deg2rad, yf*deg2rad )
            ax.plot_surface(xcart,ycart,zcart,rstride=1,cstride=1,
                            facecolors=mycolmap[0:ny,ix],
                            linewidth=2,shade=False)
            ph = np.append(ph, ax)
        else:
            # divide all faces into 4 because potential problems arise at
            # the centers 
            for kf in range(0,4):
                if   kf==0: i0,i1,j0,j1 =  0,  jc+1, 0,  jc+1
                elif kf==1: i0,i1,j0,j1 =  0,  jc+1,jc,2*jc+1
                elif kf==2: i0,i1,j0,j1 = jc,2*jc+1, 0,  jc+1
                elif kf==3: i0,i1,j0,j1 = jc,2*jc+1,jc,2*jc+1
                xx = xf[i0:i1,j0:j1]
                yy = yf[i0:i1,j0:j1]
                ff = fld[i0:i1-1,j0:j1-1]
                if np.median(xx) < 0:
                    xx = np.where(xx>=180,xx-360.,xx)
                else:
                    xx = np.where(xx<=-180,xx+360.,xx)
             
                # if provided use projection
                if mapit==1: xx,yy = mp(xx,yy)
            
                # now finally plot 4x6 tiles
                ph = np.append(ph, plt.pcolormesh(xx, yy, ff,
                                                  vmin=cax[0], vmax=cax[1],
                                                  **kwargs))
    if mapit == -1: 
        ax.axis('image')
        ax.set_axis_off()
#        ax.set_visible=False
        # add a reasonable colormap
        m = cm.ScalarMappable(cmap=colmap)
        m.set_array(data)
        plt.colorbar(m)
    elif mapit == 0:
        ax = fig.axes[-1]
        ax.axis('image')
        plt.grid('on')
    return ph
def sph2cart(azim_sph_coord, elev_sph_coord):
    r = np.cos(elev_sph_coord)
    x = -r * np.sin(azim_sph_coord)
    y = r * np.cos(azim_sph_coord)
    z = np.sin(elev_sph_coord)
    return x, y, z
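# --------------------------------------------------------------------------- #
# Hedged sketch (not part of the original module): quick sanity checks of the
# sph2cart helper above on a couple of reference angles.
def _example_sph2cart():
    import numpy as np
    deg2rad = np.pi/180.
    # longitude 0, latitude 0 maps to (0, 1, 0) on the unit sphere
    x, y, z = sph2cart(0.0*deg2rad, 0.0*deg2rad)
    assert np.allclose([x, y, z], [0., 1., 0.])
    # the north pole (latitude 90) maps to z == 1 regardless of longitude
    _, _, z_pole = sph2cart(45.0*deg2rad, 90.0*deg2rad)
    assert np.isclose(z_pole, 1.0)
    return x, y, z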
 | 
	mit | 
| 
	deepesch/scikit-learn | 
	sklearn/datasets/tests/test_svmlight_format.py | 
	228 | 
	11221 | 
	from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
                              dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
def test_load_svmlight_file():
    X, y = load_svmlight_file(datafile)
    # test X's shape
    assert_equal(X.indptr.shape[0], 7)
    assert_equal(X.shape[0], 6)
    assert_equal(X.shape[1], 21)
    assert_equal(y.shape[0], 6)
    # test X's non-zero values
    for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
                     (1, 5, 1.0), (1, 12, -3),
                     (2, 20, 27)):
        assert_equal(X[i, j], val)
    # tests X's zero values
    assert_equal(X[0, 3], 0)
    assert_equal(X[0, 5], 0)
    assert_equal(X[1, 8], 0)
    assert_equal(X[1, 16], 0)
    assert_equal(X[2, 18], 0)
    # test can change X's values
    X[0, 2] *= 2
    assert_equal(X[0, 2], 5)
    # test y
    assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
    # test loading from file descriptor
    X1, y1 = load_svmlight_file(datafile)
    fd = os.open(datafile, os.O_RDONLY)
    try:
        X2, y2 = load_svmlight_file(fd)
        assert_array_equal(X1.data, X2.data)
        assert_array_equal(y1, y2)
    finally:
        os.close(fd)
def test_load_svmlight_file_multilabel():
    X, y = load_svmlight_file(multifile, multilabel=True)
    assert_equal(y, [(0, 1), (2,), (), (1, 2)])
def test_load_svmlight_files():
    X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
                                                           dtype=np.float32)
    assert_array_equal(X_train.toarray(), X_test.toarray())
    assert_array_equal(y_train, y_test)
    assert_equal(X_train.dtype, np.float32)
    assert_equal(X_test.dtype, np.float32)
    X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
                                                 dtype=np.float64)
    assert_equal(X1.dtype, X2.dtype)
    assert_equal(X2.dtype, X3.dtype)
    assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
    X, y = load_svmlight_file(datafile, n_features=22)
    # test X'shape
    assert_equal(X.indptr.shape[0], 7)
    assert_equal(X.shape[0], 6)
    assert_equal(X.shape[1], 22)
    # test X's non-zero values
    for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
                     (1, 5, 1.0), (1, 12, -3)):
        assert_equal(X[i, j], val)
    # 21 features in file
    assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
    X, y = load_svmlight_file(datafile)
    with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
        tmp.close()  # necessary under windows
        with open(datafile, "rb") as f:
            shutil.copyfileobj(f, gzip.open(tmp.name, "wb"))
        Xgz, ygz = load_svmlight_file(tmp.name)
        # because we "close" it manually and write to it,
        # we need to remove it manually.
        os.remove(tmp.name)
    assert_array_equal(X.toarray(), Xgz.toarray())
    assert_array_equal(y, ygz)
    with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
        tmp.close()  # necessary under windows
        with open(datafile, "rb") as f:
            shutil.copyfileobj(f, BZ2File(tmp.name, "wb"))
        Xbz, ybz = load_svmlight_file(tmp.name)
        # because we "close" it manually and write to it,
        # we need to remove it manually.
        os.remove(tmp.name)
    assert_array_equal(X.toarray(), Xbz.toarray())
    assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
    load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
    load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
    f = BytesIO(b("-1 4:1.\n1 0:1\n"))
    load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
    data1 = b("-1 1:1 2:2 3:3\n")
    data2 = b("-1 0:0 1:1\n")
    f1 = BytesIO(data1)
    X, y = load_svmlight_file(f1, zero_based="auto")
    assert_equal(X.shape, (1, 3))
    f1 = BytesIO(data1)
    f2 = BytesIO(data2)
    X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
    assert_equal(X1.shape, (1, 4))
    assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
    # load svmfile with qid attribute
    data = b("""
    3 qid:1 1:0.53 2:0.12
    2 qid:1 1:0.13 2:0.1
    7 qid:2 1:0.87 2:0.12""")
    X, y = load_svmlight_file(BytesIO(data), query_id=False)
    assert_array_equal(y, [3, 2, 7])
    assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
    res1 = load_svmlight_files([BytesIO(data)], query_id=True)
    res2 = load_svmlight_file(BytesIO(data), query_id=True)
    for X, y, qid in (res1, res2):
        assert_array_equal(y, [3, 2, 7])
        assert_array_equal(qid, [1, 1, 2])
        assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
    load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
    # in python 3 integers are valid file opening arguments (taken as unix
    # file descriptors)
    load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
    load_svmlight_file("trou pic nic douille")
def test_dump():
    Xs, y = load_svmlight_file(datafile)
    Xd = Xs.toarray()
    # slicing a csr_matrix can unsort its .indices, so test that we sort
    # those correctly
    Xsliced = Xs[np.arange(Xs.shape[0])]
    for X in (Xs, Xd, Xsliced):
        for zero_based in (True, False):
            for dtype in [np.float32, np.float64, np.int32]:
                f = BytesIO()
                # we need to pass a comment to get the version info in;
                # LibSVM doesn't grok comments so they're not put in by
                # default anymore.
                dump_svmlight_file(X.astype(dtype), y, f, comment="test",
                                   zero_based=zero_based)
                f.seek(0)
                comment = f.readline()
                try:
                    comment = str(comment, "utf-8")
                except TypeError:  # fails in Python 2.x
                    pass
                assert_in("scikit-learn %s" % sklearn.__version__, comment)
                comment = f.readline()
                try:
                    comment = str(comment, "utf-8")
                except TypeError:  # fails in Python 2.x
                    pass
                assert_in(["one", "zero"][zero_based] + "-based", comment)
                X2, y2 = load_svmlight_file(f, dtype=dtype,
                                            zero_based=zero_based)
                assert_equal(X2.dtype, dtype)
                assert_array_equal(X2.sorted_indices().indices, X2.indices)
                if dtype == np.float32:
                    assert_array_almost_equal(
                        # allow a rounding error at the last decimal place
                        Xd.astype(dtype), X2.toarray(), 4)
                else:
                    assert_array_almost_equal(
                        # allow a rounding error at the last decimal place
                        Xd.astype(dtype), X2.toarray(), 15)
                assert_array_equal(y, y2)
def test_dump_multilabel():
    X = [[1, 0, 3, 0, 5],
         [0, 0, 0, 0, 0],
         [0, 5, 0, 1, 0]]
    y = [[0, 1, 0], [1, 0, 1], [1, 1, 0]]
    f = BytesIO()
    dump_svmlight_file(X, y, f, multilabel=True)
    f.seek(0)
    # make sure it dumps multilabel correctly
    assert_equal(f.readline(), b("1 0:1 2:3 4:5\n"))
    assert_equal(f.readline(), b("0,2 \n"))
    assert_equal(f.readline(), b("0,1 1:5 3:1\n"))
def test_dump_concise():
    one = 1
    two = 2.1
    three = 3.01
    exact = 1.000000000000001
    # loses the last decimal place
    almost = 1.0000000000000001
    X = [[one, two, three, exact, almost],
         [1e9, 2e18, 3e27, 0, 0],
         [0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0]]
    y = [one, two, three, exact, almost]
    f = BytesIO()
    dump_svmlight_file(X, y, f)
    f.seek(0)
    # make sure it's using the most concise format possible
    assert_equal(f.readline(),
                 b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
    assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
    assert_equal(f.readline(), b("3.01 \n"))
    assert_equal(f.readline(), b("1.000000000000001 \n"))
    assert_equal(f.readline(), b("1 \n"))
    f.seek(0)
    # make sure it's correct too :)
    X2, y2 = load_svmlight_file(f)
    assert_array_almost_equal(X, X2.toarray())
    assert_array_equal(y, y2)
def test_dump_comment():
    X, y = load_svmlight_file(datafile)
    X = X.toarray()
    f = BytesIO()
    ascii_comment = "This is a comment\nspanning multiple lines."
    dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
    f.seek(0)
    X2, y2 = load_svmlight_file(f, zero_based=False)
    assert_array_almost_equal(X, X2.toarray())
    assert_array_equal(y, y2)
    # XXX we have to update this to support Python 3.x
    utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
    f = BytesIO()
    assert_raises(UnicodeDecodeError,
                  dump_svmlight_file, X, y, f, comment=utf8_comment)
    unicode_comment = utf8_comment.decode("utf-8")
    f = BytesIO()
    dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
    f.seek(0)
    X2, y2 = load_svmlight_file(f, zero_based=False)
    assert_array_almost_equal(X, X2.toarray())
    assert_array_equal(y, y2)
    f = BytesIO()
    assert_raises(ValueError,
                  dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
    X, y = load_svmlight_file(datafile)
    f = BytesIO()
    y2d = [y]
    assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
    f = BytesIO()
    assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
    # test dumping a file with query_id
    X, y = load_svmlight_file(datafile)
    X = X.toarray()
    query_id = np.arange(X.shape[0]) // 2
    f = BytesIO()
    dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
    f.seek(0)
    X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
    assert_array_almost_equal(X, X1.toarray())
    assert_array_almost_equal(y, y1)
    assert_array_almost_equal(query_id, query_id1)
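# Hedged round-trip sketch (illustrative only, not one of the original tests):
# dump a tiny dense matrix in svmlight/libsvm text format and read it back.
def _example_svmlight_roundtrip():
    X = np.array([[1.0, 0.0, 2.5],
                  [0.0, 3.0, 0.0]])
    y = np.array([1, -1])
    f = BytesIO()
    dump_svmlight_file(X, y, f, zero_based=True)
    f.seek(0)
    X2, y2 = load_svmlight_file(f, n_features=3, zero_based=True)
    assert_array_almost_equal(X, X2.toarray())
    assert_array_equal(y, y2)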
 | 
	bsd-3-clause | 
| 
	zbanga/trading-with-python | 
	lib/csvDatabase.py | 
	77 | 
	6045 | 
	# -*- coding: utf-8 -*-
"""
intraday data handlers in csv format.
@author: jev
"""
from __future__ import division
import pandas as pd
import datetime as dt
import os
from extra import ProgressBar
dateFormat = "%Y%m%d" # date format for converting filenames to dates
dateTimeFormat = "%Y%m%d %H:%M:%S"
def fileName2date(fName):
    '''convert filename to date'''
    name = os.path.splitext(fName)[0]
    return dt.datetime.strptime(name.split('_')[1],dateFormat).date() 
    
def parseDateTime(dateTimeStr):
    return dt.datetime.strptime(dateTimeStr,dateTimeFormat)
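def _exampleFilenameConventions():
    ''' hedged sketch (not part of the original module): the filename and
    timestamp conventions assumed by the helpers above; the symbol and date
    used here are purely illustrative '''
    assert fileName2date('SPY_20120831.csv') == dt.date(2012, 8, 31)
    assert parseDateTime('20120831 15:30:00') == dt.datetime(2012, 8, 31, 15, 30)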
    
def loadCsv(fName):
    ''' load DataFrame from csv file '''
    with open(fName,'r') as f:
        lines = f.readlines()
    
    dates= []    
    header = [h.strip() for h in lines[0].strip().split(',')[1:]]
    data = [[] for i in range(len(header))]
   
    
    for line in lines[1:]:
        fields = line.rstrip().split(',')
        dates.append(parseDateTime(fields[0]))
        for i,field in enumerate(fields[1:]):
            data[i].append(float(field))
     
    return pd.DataFrame(data=dict(zip(header,data)),index=pd.Index(dates))    
    
    
class HistDataCsv(object):
    '''class for working with historic database in .csv format'''
    def __init__(self,symbol,dbDir,autoCreateDir=False):
        self.symbol = symbol
        self.dbDir = os.path.normpath(os.path.join(dbDir,symbol))
        
        if not os.path.exists(self.dbDir) and autoCreateDir:
            print 'Creating data directory ', self.dbDir
            os.mkdir(self.dbDir)
        
        self.dates = []        
        
        for fName in os.listdir(self.dbDir):
            self.dates.append(fileName2date(fName))
    
    
    def saveData(self,date, df,lowerCaseColumns=True):
        ''' add data to database'''
        
        if lowerCaseColumns: # this should provide consistency to column names. All lowercase
            df.columns = [ c.lower() for c in df.columns]
        
        s = self.symbol+'_'+date.strftime(dateFormat)+'.csv' # file name
        dest = os.path.join(self.dbDir,s) # full path destination
        print 'Saving data to: ', dest
        df.to_csv(dest)
    
    def loadDate(self,date):  
        ''' load data '''
        s = self.symbol+'_'+date.strftime(dateFormat)+'.csv' # file name
        
        df = pd.DataFrame.from_csv(os.path.join(self.dbDir,s))
        cols = [col.strip() for col in df.columns.tolist()]
        df.columns = cols
        #df = loadCsv(os.path.join(self.dbDir,s))
       
        return df
        
    def loadDates(self,dates):
        ''' load multiple dates, concatenating to one DataFrame '''
        tmp =[]
        print 'Loading multiple dates for ' , self.symbol        
        p = ProgressBar(len(dates))
        
        for i,date in enumerate(dates):
            tmp.append(self.loadDate(date))
            p.animate(i+1)
            
        print ''
        return pd.concat(tmp)
        
        
    def createOHLC(self):
        ''' create ohlc from intraday data'''
        ohlc = pd.DataFrame(index=self.dates, columns=['open','high','low','close'])
        
        for date in self.dates:
            
            print 'Processing', date
            try:
                df = self.loadDate(date)
                
                ohlc.set_value(date,'open',df['open'][0])
                ohlc.set_value(date,'high',df['wap'].max())
                ohlc.set_value(date,'low', df['wap'].min())
                ohlc.set_value(date,'close',df['close'][-1])
        
            except Exception as e:
                print 'Could not convert:', e
                
        return ohlc
            
    def __repr__(self):
        return '{symbol} dataset with {nrDates} days of data'.format(symbol=self.symbol, nrDates=len(self.dates))
        
class HistDatabase(object):
    ''' class working with multiple symbols at once '''
    def __init__(self, dataDir):
        
        # get symbols from directory names
        symbols = []
        for l in os.listdir(dataDir):
            if os.path.isdir(os.path.join(dataDir,l)):
                symbols.append(l)
        
        #build dataset
        self.csv = {} # dict of HistDataCsv handlers
        for symbol in symbols:
            self.csv[symbol] = HistDataCsv(symbol,dataDir)
    
    
    def loadDates(self,dates=None):
        ''' 
        get data for all symbols as wide panel
        provide a dates list. If no dates list is provided, common dates are used.
        '''
        if dates is None: dates=self.commonDates
        
        tmp = {}
        
        
        for k,v in self.csv.iteritems():
            tmp[k] = v.loadDates(dates)
            
        return pd.WidePanel(tmp)
        
    def toHDF(self,dataFile,dates=None):
        ''' write wide panel data to a hdfstore file '''
        
        if dates is None: dates=self.commonDates
        store = pd.HDFStore(dataFile)        
        wp = self.loadDates(dates)
        
        store['data'] = wp
        store.close()
        
        
        
        
    
    @property 
    def commonDates(self):
        ''' return dates common for all symbols '''
        t = [v.dates for v in self.csv.itervalues()] # get all dates in a list
        
        d = list(set(t[0]).intersection(*t[1:]))
        return sorted(d)
        
     
    def __repr__(self):
        s = '-----Hist CSV Database-----\n'
        for k,v in self.csv.iteritems():
            s+= (str(v)+'\n')
        return s
  
          
#--------------------
if __name__=='__main__':
    dbDir =os.path.normpath('D:/data/30sec')
    vxx = HistDataCsv('VXX',dbDir)
    spy = HistDataCsv('SPY',dbDir)
#   
    date = dt.date(2012,8,31)
    print date
#    
    pair = pd.DataFrame({'SPY':spy.loadDate(date)['close'],'VXX':vxx.loadDate(date)['close']})
    
    print pair.tail()
 | 
	bsd-3-clause | 
| 
	chrsrds/scikit-learn | 
	examples/model_selection/plot_cv_indices.py | 
	20 | 
	5644 | 
	"""
Visualizing cross-validation behavior in scikit-learn
=====================================================
Choosing the right cross-validation object is a crucial part of fitting a
model properly. There are many ways to split data into training and test
sets in order to avoid model overfitting, to standardize the number of
groups in test sets, etc.
This example visualizes the behavior of several common scikit-learn objects
for comparison.
"""
from sklearn.model_selection import (TimeSeriesSplit, KFold, ShuffleSplit,
                                     StratifiedKFold, GroupShuffleSplit,
                                     GroupKFold, StratifiedShuffleSplit)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Patch
np.random.seed(1338)
cmap_data = plt.cm.Paired
cmap_cv = plt.cm.coolwarm
n_splits = 4
###############################################################################
# Visualize our data
# ------------------
#
# First, we must understand the structure of our data. It has 100 randomly
# generated input datapoints, 3 classes split unevenly across datapoints,
# and 10 "groups" split evenly across datapoints.
#
# As we'll see, some cross-validation objects do specific things with
# labeled data, others behave differently with grouped data, and others
# do not use this information.
#
# To begin, we'll visualize our data.
# Generate the class/group data
n_points = 100
X = np.random.randn(100, 10)
percentiles_classes = [.1, .3, .6]
y = np.hstack([[ii] * int(100 * perc)
               for ii, perc in enumerate(percentiles_classes)])
# Evenly spaced groups repeated once
groups = np.hstack([[ii] * 10 for ii in range(10)])
def visualize_groups(classes, groups, name):
    # Visualize dataset groups
    fig, ax = plt.subplots()
    ax.scatter(range(len(groups)),  [.5] * len(groups), c=groups, marker='_',
               lw=50, cmap=cmap_data)
    ax.scatter(range(len(groups)),  [3.5] * len(groups), c=classes, marker='_',
               lw=50, cmap=cmap_data)
    ax.set(ylim=[-1, 5], yticks=[.5, 3.5],
           yticklabels=['Data\ngroup', 'Data\nclass'], xlabel="Sample index")
visualize_groups(y, groups, 'no groups')
###############################################################################
# Define a function to visualize cross-validation behavior
# --------------------------------------------------------
#
# We'll define a function that lets us visualize the behavior of each
# cross-validation object. We'll perform 4 splits of the data. On each
# split, we'll visualize the indices chosen for the training set
# (in blue) and the test set (in red).
def plot_cv_indices(cv, X, y, group, ax, n_splits, lw=10):
    """Create a sample plot for indices of a cross-validation object."""
    # Generate the training/testing visualizations for each CV split
    for ii, (tr, tt) in enumerate(cv.split(X=X, y=y, groups=group)):
        # Fill in indices with the training/test groups
        indices = np.array([np.nan] * len(X))
        indices[tt] = 1
        indices[tr] = 0
        # Visualize the results
        ax.scatter(range(len(indices)), [ii + .5] * len(indices),
                   c=indices, marker='_', lw=lw, cmap=cmap_cv,
                   vmin=-.2, vmax=1.2)
    # Plot the data classes and groups at the end
    ax.scatter(range(len(X)), [ii + 1.5] * len(X),
               c=y, marker='_', lw=lw, cmap=cmap_data)
    ax.scatter(range(len(X)), [ii + 2.5] * len(X),
               c=group, marker='_', lw=lw, cmap=cmap_data)
    # Formatting
    yticklabels = list(range(n_splits)) + ['class', 'group']
    ax.set(yticks=np.arange(n_splits+2) + .5, yticklabels=yticklabels,
           xlabel='Sample index', ylabel="CV iteration",
           ylim=[n_splits+2.2, -.2], xlim=[0, 100])
    ax.set_title('{}'.format(type(cv).__name__), fontsize=15)
    return ax
###############################################################################
# Let's see how it looks for the `KFold` cross-validation object:
fig, ax = plt.subplots()
cv = KFold(n_splits)
plot_cv_indices(cv, X, y, groups, ax, n_splits)
###############################################################################
# As you can see, by default the KFold cross-validation iterator does not
# take either datapoint class or group into consideration. We can change this
# by using the ``StratifiedKFold`` like so.
fig, ax = plt.subplots()
cv = StratifiedKFold(n_splits)
plot_cv_indices(cv, X, y, groups, ax, n_splits)
###############################################################################
# In this case, the cross-validation retained the same ratio of classes across
# each CV split. Next we'll visualize this behavior for a number of CV
# iterators.
#
# Visualize cross-validation indices for many CV objects
# ------------------------------------------------------
#
# Let's visually compare the cross validation behavior for many
# scikit-learn cross-validation objects. Below we will loop through several
# common cross-validation objects, visualizing the behavior of each.
#
# Note how some use the group/class information while others do not.
cvs = [KFold, GroupKFold, ShuffleSplit, StratifiedKFold,
       GroupShuffleSplit, StratifiedShuffleSplit, TimeSeriesSplit]
for cv in cvs:
    this_cv = cv(n_splits=n_splits)
    fig, ax = plt.subplots(figsize=(6, 3))
    plot_cv_indices(this_cv, X, y, groups, ax, n_splits)
    ax.legend([Patch(color=cmap_cv(.8)), Patch(color=cmap_cv(.02))],
              ['Testing set', 'Training set'], loc=(1.02, .8))
    # Make the legend fit
    plt.tight_layout()
    fig.subplots_adjust(right=.7)
plt.show()
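###############################################################################
# A minimal, hedged sketch (not part of the original example): once a splitter
# has been chosen, it is typically passed straight to ``cross_val_score``.
# The ``LogisticRegression`` estimator below is an arbitrary illustrative
# choice for this synthetic dataset.
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
scores = cross_val_score(LogisticRegression(), X, y,
                         groups=groups, cv=GroupKFold(n_splits=n_splits))
print("GroupKFold accuracy per split:", scores)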
 | 
	bsd-3-clause | 
| 
	tansey/gfl | 
	pygfl/trendfiltering.py | 
	1 | 
	13140 | 
	'''Copyright (C) 2016 by Wesley Tansey
    This file is part of the GFL library.
    The GFL library is free software: you can redistribute it and/or modify
    it under the terms of the GNU Lesser General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.
    The GFL library is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU Lesser General Public License for more details.
    You should have received a copy of the GNU Lesser General Public License
    along with the GFL library.  If not, see <http://www.gnu.org/licenses/>.
'''
import matplotlib.pylab as plt
import numpy as np
from numpy.ctypeslib import ndpointer
from scipy.sparse import coo_matrix, csr_matrix
from collections import defaultdict
from ctypes import *
from pygfl.utils import *
'''Load the graph trend filtering library'''
graphfl_lib = cdll.LoadLibrary('libgraphfl.so')
weighted_graphtf = graphfl_lib.graph_trend_filtering_weight_warm
weighted_graphtf.restype = c_int
weighted_graphtf.argtypes = [c_int, ndpointer(c_double, flags='C_CONTIGUOUS'), ndpointer(c_double, flags='C_CONTIGUOUS'), c_double,
                    c_int, c_int, c_int,
                    ndpointer(c_int, flags='C_CONTIGUOUS'), ndpointer(c_int, flags='C_CONTIGUOUS'), ndpointer(c_double, flags='C_CONTIGUOUS'),
                    c_int, c_double,
                    ndpointer(c_double, flags='C_CONTIGUOUS'), ndpointer(c_double, flags='C_CONTIGUOUS')]
weighted_graphtf_logit = graphfl_lib.graph_trend_filtering_logit_warm
weighted_graphtf_logit.restype = c_int
weighted_graphtf_logit.argtypes = [c_int, ndpointer(c_int, flags='C_CONTIGUOUS'), ndpointer(c_int, flags='C_CONTIGUOUS'), c_double,
                    c_int, c_int, c_int,
                    ndpointer(c_int, flags='C_CONTIGUOUS'), ndpointer(c_int, flags='C_CONTIGUOUS'), ndpointer(c_double, flags='C_CONTIGUOUS'),
                    c_int, c_double,
                    ndpointer(c_double, flags='C_CONTIGUOUS'), ndpointer(c_double, flags='C_CONTIGUOUS')]
weighted_graphtf_poisson = graphfl_lib.graph_trend_filtering_poisson_warm
weighted_graphtf_poisson.restype = c_int
weighted_graphtf_poisson.argtypes = [c_int, ndpointer(c_int, flags='C_CONTIGUOUS'), c_double,
                    c_int, c_int, c_int,
                    ndpointer(c_int, flags='C_CONTIGUOUS'), ndpointer(c_int, flags='C_CONTIGUOUS'), ndpointer(c_double, flags='C_CONTIGUOUS'),
                    c_int, c_double,
                    ndpointer(c_double, flags='C_CONTIGUOUS'), ndpointer(c_double, flags='C_CONTIGUOUS')]
class TrendFilteringSolver:
    def __init__(self, maxsteps=3000, converge=1e-6):
        self.maxsteps = maxsteps
        self.converge = converge
    def set_data(self, D, k, y, weights=None):
        self.y = y
        self.weights = weights if weights is not None else np.ones(len(self.y), dtype='double')
        self.initialize(D, k)
    def initialize(self, D, k):
        self.nnodes = len(self.y)
        self.D = D
        self.k = k
        self.Dk = get_delta(D, k).tocoo()
        self.Dk_minus_one = get_delta(self.D, self.k-1) if self.k > 0 else None
        self.beta = np.zeros(self.nnodes, dtype='double')
        self.steps = []
        self.u = np.zeros(self.Dk.shape[0], dtype='double')
        self.edges = None
    def solve(self, lam):
        '''Solves the GFL for a fixed value of lambda.'''
        s = weighted_graphtf(self.nnodes, self.y, self.weights, lam,
                             self.Dk.shape[0], self.Dk.shape[1], self.Dk.nnz,
                             self.Dk.row.astype('int32'), self.Dk.col.astype('int32'), self.Dk.data.astype('double'),
                             self.maxsteps, self.converge,
                             self.beta, self.u)
        self.steps.append(s)
        return self.beta
    def solution_path(self, min_lambda, max_lambda, lambda_bins, verbose=0):
        '''Follows the solution path to find the best lambda value.'''
        self.u = np.zeros(self.Dk.shape[0], dtype='double')
        lambda_grid = np.exp(np.linspace(np.log(max_lambda), np.log(min_lambda), lambda_bins))
        aic_trace = np.zeros(lambda_grid.shape) # The AIC score for each lambda value
        aicc_trace = np.zeros(lambda_grid.shape) # The AICc score for each lambda value (correcting for finite sample size)
        bic_trace = np.zeros(lambda_grid.shape) # The BIC score for each lambda value
        dof_trace = np.zeros(lambda_grid.shape) # The degrees of freedom of each final solution
        log_likelihood_trace = np.zeros(lambda_grid.shape)
        beta_trace = []
        best_idx = None
        best_plateaus = None
        if self.edges is None:
            self.edges = defaultdict(list)
            elist = csr_matrix(self.D).indices.reshape((self.D.shape[0], 2))
            for n1, n2 in elist:
                self.edges[n1].append(n2)
                self.edges[n2].append(n1)
        # Solve the series of lambda values with warm starts at each point
        for i, lam in enumerate(lambda_grid):
            if verbose:
                print('#{0} Lambda = {1}'.format(i, lam))
            # Fit to the final values
            beta = self.solve(lam)
            if verbose:
                print('Calculating degrees of freedom')
            # Count the number of free parameters in the grid (dof) -- TODO: the graph trend filtering paper seems to imply we shouldn't multiply by (k+1)?
            dof_vals = self.Dk_minus_one.dot(beta) if self.k > 0 else beta
            plateaus = calc_plateaus(dof_vals, self.edges, rel_tol=0.01) if (self.k % 2) == 0 else nearly_unique(dof_vals, rel_tol=0.03)
            dof_trace[i] = max(1,len(plateaus)) #* (k+1)
            if verbose:
                print('Calculating Information Criteria')
            # Get the negative log-likelihood
            log_likelihood_trace[i] = -0.5 * ((self.y - beta)**2).sum()
            # Calculate AIC = 2k - 2ln(L)
            aic_trace[i] = 2. * dof_trace[i] - 2. * log_likelihood_trace[i]
            
            # Calculate AICc = AIC + 2k * (k+1) / (n - k - 1)
            aicc_trace[i] = aic_trace[i] + 2 * dof_trace[i] * (dof_trace[i]+1) / (len(beta) - dof_trace[i] - 1.)
            # Calculate BIC = -2ln(L) + k * (ln(n) - ln(2pi))
            bic_trace[i] = -2 * log_likelihood_trace[i] + dof_trace[i] * (np.log(len(beta)) - np.log(2 * np.pi))
            # Track the best model thus far
            if best_idx is None or bic_trace[i] < bic_trace[best_idx]:
                best_idx = i
                best_plateaus = plateaus
            # Save the trace of all the resulting parameters
            beta_trace.append(np.array(beta))
            
            if verbose:
                print('DoF: {0} AIC: {1} AICc: {2} BIC: {3}\n'.format(dof_trace[i], aic_trace[i], aicc_trace[i], bic_trace[i]))
        if verbose:
            print('Best setting (by BIC): lambda={0} [DoF: {1}, AIC: {2}, AICc: {3} BIC: {4}]'.format(lambda_grid[best_idx], dof_trace[best_idx], aic_trace[best_idx], aicc_trace[best_idx], bic_trace[best_idx]))
        return {'aic': aic_trace,
                'aicc': aicc_trace,
                'bic': bic_trace,
                'dof': dof_trace,
                'loglikelihood': log_likelihood_trace,
                'beta': np.array(beta_trace),
                'lambda': lambda_grid,
                'best_idx': best_idx,
                'best': beta_trace[best_idx],
                'plateaus': best_plateaus}
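# --------------------------------------------------------------------------- #
# Hedged sketch (not part of the original class): the information criteria
# computed in ``solution_path`` above, written out for a single combination of
# degrees of freedom, log-likelihood and sample size.
def _example_information_criteria(dof, log_likelihood, n):
    import numpy as np
    aic = 2. * dof - 2. * log_likelihood
    aicc = aic + 2. * dof * (dof + 1.) / (n - dof - 1.)
    bic = -2. * log_likelihood + dof * (np.log(n) - np.log(2 * np.pi))
    return aic, aicc, bic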
class LogitTrendFilteringSolver(TrendFilteringSolver):
    def __init__(self, maxsteps=3000, converge=1e-6):
        self.maxsteps = maxsteps
        self.converge = converge
    def set_data(self, D, k, trials, successes):
        self.trials = trials
        self.successes = successes
        self.initialize(D, k)
    def solve(self, lam):
        '''Solves the GFL for a fixed value of lambda.'''
        s = weighted_graphtf_logit(self.nnodes, self.trials, self.successes, lam,
                                 self.Dk.shape[0], self.Dk.shape[1], self.Dk.nnz,
                                 self.Dk.row.astype('int32'), self.Dk.col.astype('int32'), self.Dk.data.astype('double'),
                                 self.maxsteps, self.converge,
                                 self.beta, self.u)
        self.steps.append(s)
        return self.beta
class PoissonTrendFilteringSolver(TrendFilteringSolver):
    def __init__(self, maxsteps=3000, converge=1e-6):
        self.maxsteps = maxsteps
        self.converge = converge
    def set_data(self, D, k, obs):
        self.obs = obs
        self.initialize(D, k)
    def solve(self, lam):
        '''Solves the GFL for a fixed value of lambda.'''
        s = weighted_graphtf_poisson(self.nnodes, self.obs, lam,
                                 self.Dk.shape[0], self.Dk.shape[1], self.Dk.nnz,
                                 self.Dk.row.astype('int32'), self.Dk.col.astype('int32'), self.Dk.data.astype('double'),
                                 self.maxsteps, self.converge,
                                 self.beta, self.u)
        self.steps.append(s)
        return self.beta
def test_solve_gtf():
    # Load the data and create the penalty matrix
    max_k = 3
    y = (np.sin(np.linspace(-np.pi, np.pi, 100)) + 1) * 5
    y[25:75] += np.sin(np.linspace(1.5*-np.pi, np.pi*2, 50))*5 ** (np.abs(np.arange(50) / 25.))
    y += np.random.normal(0,1.0,size=len(y))
    # np.savetxt('/Users/wesley/temp/tfdata.csv', y, delimiter=',')
    # y = np.loadtxt('/Users/wesley/temp/tfdata.csv', delimiter=',')
    mean_offset = y.mean()
    y -= mean_offset
    stdev_offset = y.std()
    y /= stdev_offset
    
    # equally weight each data point
    w = np.ones(len(y))
    lam = 50.
    # try different weights for each data point
    # w = np.ones(len(y))
    # w[0:len(y)/2] = 1.
    # w[len(y)/2:] = 100.
    
    D = coo_matrix(get_1d_penalty_matrix(len(y)))
    z = np.zeros((max_k,len(y)))
    tf = TrendFilteringSolver()
    for k in range(max_k):
        tf.set_data(D, k, y, w)
        #z[k] = tf.solve(lam)
        z[k] = tf.solution_path(0.2, 2000, 100, verbose=True)['best']
    
    y *= stdev_offset
    y += mean_offset
    z *= stdev_offset
    z += mean_offset
    colors = ['orange', 'skyblue', '#009E73', 'purple']
    fig, ax = plt.subplots(max_k)
    x = np.linspace(0,1,len(y))
    for k in range(max_k):
        ax[k].scatter(x, y, alpha=0.5)
        ax[k].plot(x, z[k], lw=2, color=colors[k], label='k={0}'.format(k))
        ax[k].set_xlim([0,1])
        ax[k].set_ylabel('y')
        ax[k].set_title('k={0}'.format(k))
    
    plt.show()
    plt.clf()
def test_solve_gtf_logit():
    max_k = 5
    trials = np.random.randint(5, 30, size=100).astype('int32')
    probs = np.zeros(100)
    probs[:25] = 0.25
    probs[25:50] = 0.75
    probs[50:75] = 0.5
    probs[75:] = 0.1
    successes = np.array([np.random.binomial(t, p) for t,p in zip(trials, probs)]).astype('int32')
    lam = 3.
    D = coo_matrix(get_1d_penalty_matrix(len(trials)))
    z = np.zeros((max_k,len(trials)))
    for k in range(max_k):
        tf = LogitTrendFilteringSolver()
        tf.set_data(D, k, trials, successes)
        z[k] = tf.solve(lam)
    colors = ['orange', 'skyblue', '#009E73', 'purple', 'black']
    fig, ax = plt.subplots(max_k+1)
    x = np.linspace(0,1,len(trials))
    ax[0].bar(x, successes, width=1./len(x), color='darkblue', alpha=0.3)
    ax[0].bar(x, trials-successes, width=1./len(x), color='skyblue', alpha=0.3, bottom=successes)
    ax[0].set_ylim([0,30])
    ax[0].set_xlim([0,1])
    ax[0].set_ylabel('Trials and successes')
    for k in range(max_k):
        ax[k+1].scatter(x, probs, alpha=0.5)
        ax[k+1].plot(x, z[k], lw=2, color=colors[k], label='k={0}'.format(k))
        ax[k+1].set_ylim([0,1])
        ax[k+1].set_xlim([0,1])
        ax[k+1].set_ylabel('Probability of success')
    
    plt.show()
    plt.clf()
def test_solve_gtf_poisson():
    max_k = 5
    probs = np.zeros(100)
    probs[:25] = 5.
    probs[25:50] = 9.
    probs[50:75] = 3.
    probs[75:] = 6.
    obs = np.array([np.random.poisson(p) for p in probs]).astype('int32')
    lam = 5.
    D = coo_matrix(get_1d_penalty_matrix(len(obs)))
    z = np.zeros((max_k,len(obs)))
    for k in range(max_k):
        tf = PoissonTrendFilteringSolver()
        tf.set_data(D, k, obs)
        z[k] = tf.solve(lam)
    colors = ['orange', 'skyblue', '#009E73', 'purple', 'black']
    fig, ax = plt.subplots(max_k+1)
    x = np.linspace(0,1,len(obs))
    ax[0].bar(x, obs, width=1./len(x), color='darkblue', alpha=0.3)
    ax[0].set_xlim([0,1])
    ax[0].set_ylabel('Observations')
    for k in range(max_k):
        ax[k+1].scatter(x, probs, alpha=0.5)
        ax[k+1].plot(x, z[k], lw=2, color=colors[k], label='k={0}'.format(k))
        ax[k+1].set_xlim([0,1])
        ax[k+1].set_ylabel('Beta (k={0})'.format(k))
    
    plt.show()
    plt.clf()
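# Illustrative sketch (an assumption, not the module's actual helper): the
# get_1d_penalty_matrix calls in the tests above are expected to produce a
# first-difference matrix D with (D @ y)[i] = y[i+1] - y[i]. A minimal dense
# construction of such a matrix could look like this:
def _example_first_difference_matrix(n):
    D = np.zeros((n - 1, n))
    for i in range(n - 1):
        D[i, i] = -1.
        D[i, i + 1] = 1.
    return D  # wrap in scipy.sparse.coo_matrix(...) before handing it to a solver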
if __name__ == '__main__':
    test_solve_gtf()
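    # The other demos can be run the same way if desired (illustrative, not in the
    # original file):
    # test_solve_gtf_logit()
    # test_solve_gtf_poisson()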
 | 
	lgpl-3.0 | 
| 
	borisz264/mod_seq | 
	uniform_colormaps.py | 
	28 | 
	50518 | 
	# New matplotlib colormaps by Nathaniel J. Smith, Stefan van der Walt,
# and (in the case of viridis) Eric Firing.
#
# This file and the colormaps in it are released under the CC0 license /
# public domain dedication. We would appreciate credit if you use or
# redistribute these colormaps, but do not impose any legal restrictions.
#
# To the extent possible under law, the persons who associated CC0 with
# mpl-colormaps have waived all copyright and related or neighboring rights
# to mpl-colormaps.
#
# You should have received a copy of the CC0 legalcode along with this
# work.  If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
__all__ = ['magma', 'inferno', 'plasma', 'viridis']
_magma_data = [[0.001462, 0.000466, 0.013866],
               [0.002258, 0.001295, 0.018331],
               [0.003279, 0.002305, 0.023708],
               [0.004512, 0.003490, 0.029965],
               [0.005950, 0.004843, 0.037130],
               [0.007588, 0.006356, 0.044973],
               [0.009426, 0.008022, 0.052844],
               [0.011465, 0.009828, 0.060750],
               [0.013708, 0.011771, 0.068667],
               [0.016156, 0.013840, 0.076603],
               [0.018815, 0.016026, 0.084584],
               [0.021692, 0.018320, 0.092610],
               [0.024792, 0.020715, 0.100676],
               [0.028123, 0.023201, 0.108787],
               [0.031696, 0.025765, 0.116965],
               [0.035520, 0.028397, 0.125209],
               [0.039608, 0.031090, 0.133515],
               [0.043830, 0.033830, 0.141886],
               [0.048062, 0.036607, 0.150327],
               [0.052320, 0.039407, 0.158841],
               [0.056615, 0.042160, 0.167446],
               [0.060949, 0.044794, 0.176129],
               [0.065330, 0.047318, 0.184892],
               [0.069764, 0.049726, 0.193735],
               [0.074257, 0.052017, 0.202660],
               [0.078815, 0.054184, 0.211667],
               [0.083446, 0.056225, 0.220755],
               [0.088155, 0.058133, 0.229922],
               [0.092949, 0.059904, 0.239164],
               [0.097833, 0.061531, 0.248477],
               [0.102815, 0.063010, 0.257854],
               [0.107899, 0.064335, 0.267289],
               [0.113094, 0.065492, 0.276784],
               [0.118405, 0.066479, 0.286321],
               [0.123833, 0.067295, 0.295879],
               [0.129380, 0.067935, 0.305443],
               [0.135053, 0.068391, 0.315000],
               [0.140858, 0.068654, 0.324538],
               [0.146785, 0.068738, 0.334011],
               [0.152839, 0.068637, 0.343404],
               [0.159018, 0.068354, 0.352688],
               [0.165308, 0.067911, 0.361816],
               [0.171713, 0.067305, 0.370771],
               [0.178212, 0.066576, 0.379497],
               [0.184801, 0.065732, 0.387973],
               [0.191460, 0.064818, 0.396152],
               [0.198177, 0.063862, 0.404009],
               [0.204935, 0.062907, 0.411514],
               [0.211718, 0.061992, 0.418647],
               [0.218512, 0.061158, 0.425392],
               [0.225302, 0.060445, 0.431742],
               [0.232077, 0.059889, 0.437695],
               [0.238826, 0.059517, 0.443256],
               [0.245543, 0.059352, 0.448436],
               [0.252220, 0.059415, 0.453248],
               [0.258857, 0.059706, 0.457710],
               [0.265447, 0.060237, 0.461840],
               [0.271994, 0.060994, 0.465660],
               [0.278493, 0.061978, 0.469190],
               [0.284951, 0.063168, 0.472451],
               [0.291366, 0.064553, 0.475462],
               [0.297740, 0.066117, 0.478243],
               [0.304081, 0.067835, 0.480812],
               [0.310382, 0.069702, 0.483186],
               [0.316654, 0.071690, 0.485380],
               [0.322899, 0.073782, 0.487408],
               [0.329114, 0.075972, 0.489287],
               [0.335308, 0.078236, 0.491024],
               [0.341482, 0.080564, 0.492631],
               [0.347636, 0.082946, 0.494121],
               [0.353773, 0.085373, 0.495501],
               [0.359898, 0.087831, 0.496778],
               [0.366012, 0.090314, 0.497960],
               [0.372116, 0.092816, 0.499053],
               [0.378211, 0.095332, 0.500067],
               [0.384299, 0.097855, 0.501002],
               [0.390384, 0.100379, 0.501864],
               [0.396467, 0.102902, 0.502658],
               [0.402548, 0.105420, 0.503386],
               [0.408629, 0.107930, 0.504052],
               [0.414709, 0.110431, 0.504662],
               [0.420791, 0.112920, 0.505215],
               [0.426877, 0.115395, 0.505714],
               [0.432967, 0.117855, 0.506160],
               [0.439062, 0.120298, 0.506555],
               [0.445163, 0.122724, 0.506901],
               [0.451271, 0.125132, 0.507198],
               [0.457386, 0.127522, 0.507448],
               [0.463508, 0.129893, 0.507652],
               [0.469640, 0.132245, 0.507809],
               [0.475780, 0.134577, 0.507921],
               [0.481929, 0.136891, 0.507989],
               [0.488088, 0.139186, 0.508011],
               [0.494258, 0.141462, 0.507988],
               [0.500438, 0.143719, 0.507920],
               [0.506629, 0.145958, 0.507806],
               [0.512831, 0.148179, 0.507648],
               [0.519045, 0.150383, 0.507443],
               [0.525270, 0.152569, 0.507192],
               [0.531507, 0.154739, 0.506895],
               [0.537755, 0.156894, 0.506551],
               [0.544015, 0.159033, 0.506159],
               [0.550287, 0.161158, 0.505719],
               [0.556571, 0.163269, 0.505230],
               [0.562866, 0.165368, 0.504692],
               [0.569172, 0.167454, 0.504105],
               [0.575490, 0.169530, 0.503466],
               [0.581819, 0.171596, 0.502777],
               [0.588158, 0.173652, 0.502035],
               [0.594508, 0.175701, 0.501241],
               [0.600868, 0.177743, 0.500394],
               [0.607238, 0.179779, 0.499492],
               [0.613617, 0.181811, 0.498536],
               [0.620005, 0.183840, 0.497524],
               [0.626401, 0.185867, 0.496456],
               [0.632805, 0.187893, 0.495332],
               [0.639216, 0.189921, 0.494150],
               [0.645633, 0.191952, 0.492910],
               [0.652056, 0.193986, 0.491611],
               [0.658483, 0.196027, 0.490253],
               [0.664915, 0.198075, 0.488836],
               [0.671349, 0.200133, 0.487358],
               [0.677786, 0.202203, 0.485819],
               [0.684224, 0.204286, 0.484219],
               [0.690661, 0.206384, 0.482558],
               [0.697098, 0.208501, 0.480835],
               [0.703532, 0.210638, 0.479049],
               [0.709962, 0.212797, 0.477201],
               [0.716387, 0.214982, 0.475290],
               [0.722805, 0.217194, 0.473316],
               [0.729216, 0.219437, 0.471279],
               [0.735616, 0.221713, 0.469180],
               [0.742004, 0.224025, 0.467018],
               [0.748378, 0.226377, 0.464794],
               [0.754737, 0.228772, 0.462509],
               [0.761077, 0.231214, 0.460162],
               [0.767398, 0.233705, 0.457755],
               [0.773695, 0.236249, 0.455289],
               [0.779968, 0.238851, 0.452765],
               [0.786212, 0.241514, 0.450184],
               [0.792427, 0.244242, 0.447543],
               [0.798608, 0.247040, 0.444848],
               [0.804752, 0.249911, 0.442102],
               [0.810855, 0.252861, 0.439305],
               [0.816914, 0.255895, 0.436461],
               [0.822926, 0.259016, 0.433573],
               [0.828886, 0.262229, 0.430644],
               [0.834791, 0.265540, 0.427671],
               [0.840636, 0.268953, 0.424666],
               [0.846416, 0.272473, 0.421631],
               [0.852126, 0.276106, 0.418573],
               [0.857763, 0.279857, 0.415496],
               [0.863320, 0.283729, 0.412403],
               [0.868793, 0.287728, 0.409303],
               [0.874176, 0.291859, 0.406205],
               [0.879464, 0.296125, 0.403118],
               [0.884651, 0.300530, 0.400047],
               [0.889731, 0.305079, 0.397002],
               [0.894700, 0.309773, 0.393995],
               [0.899552, 0.314616, 0.391037],
               [0.904281, 0.319610, 0.388137],
               [0.908884, 0.324755, 0.385308],
               [0.913354, 0.330052, 0.382563],
               [0.917689, 0.335500, 0.379915],
               [0.921884, 0.341098, 0.377376],
               [0.925937, 0.346844, 0.374959],
               [0.929845, 0.352734, 0.372677],
               [0.933606, 0.358764, 0.370541],
               [0.937221, 0.364929, 0.368567],
               [0.940687, 0.371224, 0.366762],
               [0.944006, 0.377643, 0.365136],
               [0.947180, 0.384178, 0.363701],
               [0.950210, 0.390820, 0.362468],
               [0.953099, 0.397563, 0.361438],
               [0.955849, 0.404400, 0.360619],
               [0.958464, 0.411324, 0.360014],
               [0.960949, 0.418323, 0.359630],
               [0.963310, 0.425390, 0.359469],
               [0.965549, 0.432519, 0.359529],
               [0.967671, 0.439703, 0.359810],
               [0.969680, 0.446936, 0.360311],
               [0.971582, 0.454210, 0.361030],
               [0.973381, 0.461520, 0.361965],
               [0.975082, 0.468861, 0.363111],
               [0.976690, 0.476226, 0.364466],
               [0.978210, 0.483612, 0.366025],
               [0.979645, 0.491014, 0.367783],
               [0.981000, 0.498428, 0.369734],
               [0.982279, 0.505851, 0.371874],
               [0.983485, 0.513280, 0.374198],
               [0.984622, 0.520713, 0.376698],
               [0.985693, 0.528148, 0.379371],
               [0.986700, 0.535582, 0.382210],
               [0.987646, 0.543015, 0.385210],
               [0.988533, 0.550446, 0.388365],
               [0.989363, 0.557873, 0.391671],
               [0.990138, 0.565296, 0.395122],
               [0.990871, 0.572706, 0.398714],
               [0.991558, 0.580107, 0.402441],
               [0.992196, 0.587502, 0.406299],
               [0.992785, 0.594891, 0.410283],
               [0.993326, 0.602275, 0.414390],
               [0.993834, 0.609644, 0.418613],
               [0.994309, 0.616999, 0.422950],
               [0.994738, 0.624350, 0.427397],
               [0.995122, 0.631696, 0.431951],
               [0.995480, 0.639027, 0.436607],
               [0.995810, 0.646344, 0.441361],
               [0.996096, 0.653659, 0.446213],
               [0.996341, 0.660969, 0.451160],
               [0.996580, 0.668256, 0.456192],
               [0.996775, 0.675541, 0.461314],
               [0.996925, 0.682828, 0.466526],
               [0.997077, 0.690088, 0.471811],
               [0.997186, 0.697349, 0.477182],
               [0.997254, 0.704611, 0.482635],
               [0.997325, 0.711848, 0.488154],
               [0.997351, 0.719089, 0.493755],
               [0.997351, 0.726324, 0.499428],
               [0.997341, 0.733545, 0.505167],
               [0.997285, 0.740772, 0.510983],
               [0.997228, 0.747981, 0.516859],
               [0.997138, 0.755190, 0.522806],
               [0.997019, 0.762398, 0.528821],
               [0.996898, 0.769591, 0.534892],
               [0.996727, 0.776795, 0.541039],
               [0.996571, 0.783977, 0.547233],
               [0.996369, 0.791167, 0.553499],
               [0.996162, 0.798348, 0.559820],
               [0.995932, 0.805527, 0.566202],
               [0.995680, 0.812706, 0.572645],
               [0.995424, 0.819875, 0.579140],
               [0.995131, 0.827052, 0.585701],
               [0.994851, 0.834213, 0.592307],
               [0.994524, 0.841387, 0.598983],
               [0.994222, 0.848540, 0.605696],
               [0.993866, 0.855711, 0.612482],
               [0.993545, 0.862859, 0.619299],
               [0.993170, 0.870024, 0.626189],
               [0.992831, 0.877168, 0.633109],
               [0.992440, 0.884330, 0.640099],
               [0.992089, 0.891470, 0.647116],
               [0.991688, 0.898627, 0.654202],
               [0.991332, 0.905763, 0.661309],
               [0.990930, 0.912915, 0.668481],
               [0.990570, 0.920049, 0.675675],
               [0.990175, 0.927196, 0.682926],
               [0.989815, 0.934329, 0.690198],
               [0.989434, 0.941470, 0.697519],
               [0.989077, 0.948604, 0.704863],
               [0.988717, 0.955742, 0.712242],
               [0.988367, 0.962878, 0.719649],
               [0.988033, 0.970012, 0.727077],
               [0.987691, 0.977154, 0.734536],
               [0.987387, 0.984288, 0.742002],
               [0.987053, 0.991438, 0.749504]]
_inferno_data = [[0.001462, 0.000466, 0.013866],
                 [0.002267, 0.001270, 0.018570],
                 [0.003299, 0.002249, 0.024239],
                 [0.004547, 0.003392, 0.030909],
                 [0.006006, 0.004692, 0.038558],
                 [0.007676, 0.006136, 0.046836],
                 [0.009561, 0.007713, 0.055143],
                 [0.011663, 0.009417, 0.063460],
                 [0.013995, 0.011225, 0.071862],
                 [0.016561, 0.013136, 0.080282],
                 [0.019373, 0.015133, 0.088767],
                 [0.022447, 0.017199, 0.097327],
                 [0.025793, 0.019331, 0.105930],
                 [0.029432, 0.021503, 0.114621],
                 [0.033385, 0.023702, 0.123397],
                 [0.037668, 0.025921, 0.132232],
                 [0.042253, 0.028139, 0.141141],
                 [0.046915, 0.030324, 0.150164],
                 [0.051644, 0.032474, 0.159254],
                 [0.056449, 0.034569, 0.168414],
                 [0.061340, 0.036590, 0.177642],
                 [0.066331, 0.038504, 0.186962],
                 [0.071429, 0.040294, 0.196354],
                 [0.076637, 0.041905, 0.205799],
                 [0.081962, 0.043328, 0.215289],
                 [0.087411, 0.044556, 0.224813],
                 [0.092990, 0.045583, 0.234358],
                 [0.098702, 0.046402, 0.243904],
                 [0.104551, 0.047008, 0.253430],
                 [0.110536, 0.047399, 0.262912],
                 [0.116656, 0.047574, 0.272321],
                 [0.122908, 0.047536, 0.281624],
                 [0.129285, 0.047293, 0.290788],
                 [0.135778, 0.046856, 0.299776],
                 [0.142378, 0.046242, 0.308553],
                 [0.149073, 0.045468, 0.317085],
                 [0.155850, 0.044559, 0.325338],
                 [0.162689, 0.043554, 0.333277],
                 [0.169575, 0.042489, 0.340874],
                 [0.176493, 0.041402, 0.348111],
                 [0.183429, 0.040329, 0.354971],
                 [0.190367, 0.039309, 0.361447],
                 [0.197297, 0.038400, 0.367535],
                 [0.204209, 0.037632, 0.373238],
                 [0.211095, 0.037030, 0.378563],
                 [0.217949, 0.036615, 0.383522],
                 [0.224763, 0.036405, 0.388129],
                 [0.231538, 0.036405, 0.392400],
                 [0.238273, 0.036621, 0.396353],
                 [0.244967, 0.037055, 0.400007],
                 [0.251620, 0.037705, 0.403378],
                 [0.258234, 0.038571, 0.406485],
                 [0.264810, 0.039647, 0.409345],
                 [0.271347, 0.040922, 0.411976],
                 [0.277850, 0.042353, 0.414392],
                 [0.284321, 0.043933, 0.416608],
                 [0.290763, 0.045644, 0.418637],
                 [0.297178, 0.047470, 0.420491],
                 [0.303568, 0.049396, 0.422182],
                 [0.309935, 0.051407, 0.423721],
                 [0.316282, 0.053490, 0.425116],
                 [0.322610, 0.055634, 0.426377],
                 [0.328921, 0.057827, 0.427511],
                 [0.335217, 0.060060, 0.428524],
                 [0.341500, 0.062325, 0.429425],
                 [0.347771, 0.064616, 0.430217],
                 [0.354032, 0.066925, 0.430906],
                 [0.360284, 0.069247, 0.431497],
                 [0.366529, 0.071579, 0.431994],
                 [0.372768, 0.073915, 0.432400],
                 [0.379001, 0.076253, 0.432719],
                 [0.385228, 0.078591, 0.432955],
                 [0.391453, 0.080927, 0.433109],
                 [0.397674, 0.083257, 0.433183],
                 [0.403894, 0.085580, 0.433179],
                 [0.410113, 0.087896, 0.433098],
                 [0.416331, 0.090203, 0.432943],
                 [0.422549, 0.092501, 0.432714],
                 [0.428768, 0.094790, 0.432412],
                 [0.434987, 0.097069, 0.432039],
                 [0.441207, 0.099338, 0.431594],
                 [0.447428, 0.101597, 0.431080],
                 [0.453651, 0.103848, 0.430498],
                 [0.459875, 0.106089, 0.429846],
                 [0.466100, 0.108322, 0.429125],
                 [0.472328, 0.110547, 0.428334],
                 [0.478558, 0.112764, 0.427475],
                 [0.484789, 0.114974, 0.426548],
                 [0.491022, 0.117179, 0.425552],
                 [0.497257, 0.119379, 0.424488],
                 [0.503493, 0.121575, 0.423356],
                 [0.509730, 0.123769, 0.422156],
                 [0.515967, 0.125960, 0.420887],
                 [0.522206, 0.128150, 0.419549],
                 [0.528444, 0.130341, 0.418142],
                 [0.534683, 0.132534, 0.416667],
                 [0.540920, 0.134729, 0.415123],
                 [0.547157, 0.136929, 0.413511],
                 [0.553392, 0.139134, 0.411829],
                 [0.559624, 0.141346, 0.410078],
                 [0.565854, 0.143567, 0.408258],
                 [0.572081, 0.145797, 0.406369],
                 [0.578304, 0.148039, 0.404411],
                 [0.584521, 0.150294, 0.402385],
                 [0.590734, 0.152563, 0.400290],
                 [0.596940, 0.154848, 0.398125],
                 [0.603139, 0.157151, 0.395891],
                 [0.609330, 0.159474, 0.393589],
                 [0.615513, 0.161817, 0.391219],
                 [0.621685, 0.164184, 0.388781],
                 [0.627847, 0.166575, 0.386276],
                 [0.633998, 0.168992, 0.383704],
                 [0.640135, 0.171438, 0.381065],
                 [0.646260, 0.173914, 0.378359],
                 [0.652369, 0.176421, 0.375586],
                 [0.658463, 0.178962, 0.372748],
                 [0.664540, 0.181539, 0.369846],
                 [0.670599, 0.184153, 0.366879],
                 [0.676638, 0.186807, 0.363849],
                 [0.682656, 0.189501, 0.360757],
                 [0.688653, 0.192239, 0.357603],
                 [0.694627, 0.195021, 0.354388],
                 [0.700576, 0.197851, 0.351113],
                 [0.706500, 0.200728, 0.347777],
                 [0.712396, 0.203656, 0.344383],
                 [0.718264, 0.206636, 0.340931],
                 [0.724103, 0.209670, 0.337424],
                 [0.729909, 0.212759, 0.333861],
                 [0.735683, 0.215906, 0.330245],
                 [0.741423, 0.219112, 0.326576],
                 [0.747127, 0.222378, 0.322856],
                 [0.752794, 0.225706, 0.319085],
                 [0.758422, 0.229097, 0.315266],
                 [0.764010, 0.232554, 0.311399],
                 [0.769556, 0.236077, 0.307485],
                 [0.775059, 0.239667, 0.303526],
                 [0.780517, 0.243327, 0.299523],
                 [0.785929, 0.247056, 0.295477],
                 [0.791293, 0.250856, 0.291390],
                 [0.796607, 0.254728, 0.287264],
                 [0.801871, 0.258674, 0.283099],
                 [0.807082, 0.262692, 0.278898],
                 [0.812239, 0.266786, 0.274661],
                 [0.817341, 0.270954, 0.270390],
                 [0.822386, 0.275197, 0.266085],
                 [0.827372, 0.279517, 0.261750],
                 [0.832299, 0.283913, 0.257383],
                 [0.837165, 0.288385, 0.252988],
                 [0.841969, 0.292933, 0.248564],
                 [0.846709, 0.297559, 0.244113],
                 [0.851384, 0.302260, 0.239636],
                 [0.855992, 0.307038, 0.235133],
                 [0.860533, 0.311892, 0.230606],
                 [0.865006, 0.316822, 0.226055],
                 [0.869409, 0.321827, 0.221482],
                 [0.873741, 0.326906, 0.216886],
                 [0.878001, 0.332060, 0.212268],
                 [0.882188, 0.337287, 0.207628],
                 [0.886302, 0.342586, 0.202968],
                 [0.890341, 0.347957, 0.198286],
                 [0.894305, 0.353399, 0.193584],
                 [0.898192, 0.358911, 0.188860],
                 [0.902003, 0.364492, 0.184116],
                 [0.905735, 0.370140, 0.179350],
                 [0.909390, 0.375856, 0.174563],
                 [0.912966, 0.381636, 0.169755],
                 [0.916462, 0.387481, 0.164924],
                 [0.919879, 0.393389, 0.160070],
                 [0.923215, 0.399359, 0.155193],
                 [0.926470, 0.405389, 0.150292],
                 [0.929644, 0.411479, 0.145367],
                 [0.932737, 0.417627, 0.140417],
                 [0.935747, 0.423831, 0.135440],
                 [0.938675, 0.430091, 0.130438],
                 [0.941521, 0.436405, 0.125409],
                 [0.944285, 0.442772, 0.120354],
                 [0.946965, 0.449191, 0.115272],
                 [0.949562, 0.455660, 0.110164],
                 [0.952075, 0.462178, 0.105031],
                 [0.954506, 0.468744, 0.099874],
                 [0.956852, 0.475356, 0.094695],
                 [0.959114, 0.482014, 0.089499],
                 [0.961293, 0.488716, 0.084289],
                 [0.963387, 0.495462, 0.079073],
                 [0.965397, 0.502249, 0.073859],
                 [0.967322, 0.509078, 0.068659],
                 [0.969163, 0.515946, 0.063488],
                 [0.970919, 0.522853, 0.058367],
                 [0.972590, 0.529798, 0.053324],
                 [0.974176, 0.536780, 0.048392],
                 [0.975677, 0.543798, 0.043618],
                 [0.977092, 0.550850, 0.039050],
                 [0.978422, 0.557937, 0.034931],
                 [0.979666, 0.565057, 0.031409],
                 [0.980824, 0.572209, 0.028508],
                 [0.981895, 0.579392, 0.026250],
                 [0.982881, 0.586606, 0.024661],
                 [0.983779, 0.593849, 0.023770],
                 [0.984591, 0.601122, 0.023606],
                 [0.985315, 0.608422, 0.024202],
                 [0.985952, 0.615750, 0.025592],
                 [0.986502, 0.623105, 0.027814],
                 [0.986964, 0.630485, 0.030908],
                 [0.987337, 0.637890, 0.034916],
                 [0.987622, 0.645320, 0.039886],
                 [0.987819, 0.652773, 0.045581],
                 [0.987926, 0.660250, 0.051750],
                 [0.987945, 0.667748, 0.058329],
                 [0.987874, 0.675267, 0.065257],
                 [0.987714, 0.682807, 0.072489],
                 [0.987464, 0.690366, 0.079990],
                 [0.987124, 0.697944, 0.087731],
                 [0.986694, 0.705540, 0.095694],
                 [0.986175, 0.713153, 0.103863],
                 [0.985566, 0.720782, 0.112229],
                 [0.984865, 0.728427, 0.120785],
                 [0.984075, 0.736087, 0.129527],
                 [0.983196, 0.743758, 0.138453],
                 [0.982228, 0.751442, 0.147565],
                 [0.981173, 0.759135, 0.156863],
                 [0.980032, 0.766837, 0.166353],
                 [0.978806, 0.774545, 0.176037],
                 [0.977497, 0.782258, 0.185923],
                 [0.976108, 0.789974, 0.196018],
                 [0.974638, 0.797692, 0.206332],
                 [0.973088, 0.805409, 0.216877],
                 [0.971468, 0.813122, 0.227658],
                 [0.969783, 0.820825, 0.238686],
                 [0.968041, 0.828515, 0.249972],
                 [0.966243, 0.836191, 0.261534],
                 [0.964394, 0.843848, 0.273391],
                 [0.962517, 0.851476, 0.285546],
                 [0.960626, 0.859069, 0.298010],
                 [0.958720, 0.866624, 0.310820],
                 [0.956834, 0.874129, 0.323974],
                 [0.954997, 0.881569, 0.337475],
                 [0.953215, 0.888942, 0.351369],
                 [0.951546, 0.896226, 0.365627],
                 [0.950018, 0.903409, 0.380271],
                 [0.948683, 0.910473, 0.395289],
                 [0.947594, 0.917399, 0.410665],
                 [0.946809, 0.924168, 0.426373],
                 [0.946392, 0.930761, 0.442367],
                 [0.946403, 0.937159, 0.458592],
                 [0.946903, 0.943348, 0.474970],
                 [0.947937, 0.949318, 0.491426],
                 [0.949545, 0.955063, 0.507860],
                 [0.951740, 0.960587, 0.524203],
                 [0.954529, 0.965896, 0.540361],
                 [0.957896, 0.971003, 0.556275],
                 [0.961812, 0.975924, 0.571925],
                 [0.966249, 0.980678, 0.587206],
                 [0.971162, 0.985282, 0.602154],
                 [0.976511, 0.989753, 0.616760],
                 [0.982257, 0.994109, 0.631017],
                 [0.988362, 0.998364, 0.644924]]
_plasma_data = [[0.050383, 0.029803, 0.527975],
                [0.063536, 0.028426, 0.533124],
                [0.075353, 0.027206, 0.538007],
                [0.086222, 0.026125, 0.542658],
                [0.096379, 0.025165, 0.547103],
                [0.105980, 0.024309, 0.551368],
                [0.115124, 0.023556, 0.555468],
                [0.123903, 0.022878, 0.559423],
                [0.132381, 0.022258, 0.563250],
                [0.140603, 0.021687, 0.566959],
                [0.148607, 0.021154, 0.570562],
                [0.156421, 0.020651, 0.574065],
                [0.164070, 0.020171, 0.577478],
                [0.171574, 0.019706, 0.580806],
                [0.178950, 0.019252, 0.584054],
                [0.186213, 0.018803, 0.587228],
                [0.193374, 0.018354, 0.590330],
                [0.200445, 0.017902, 0.593364],
                [0.207435, 0.017442, 0.596333],
                [0.214350, 0.016973, 0.599239],
                [0.221197, 0.016497, 0.602083],
                [0.227983, 0.016007, 0.604867],
                [0.234715, 0.015502, 0.607592],
                [0.241396, 0.014979, 0.610259],
                [0.248032, 0.014439, 0.612868],
                [0.254627, 0.013882, 0.615419],
                [0.261183, 0.013308, 0.617911],
                [0.267703, 0.012716, 0.620346],
                [0.274191, 0.012109, 0.622722],
                [0.280648, 0.011488, 0.625038],
                [0.287076, 0.010855, 0.627295],
                [0.293478, 0.010213, 0.629490],
                [0.299855, 0.009561, 0.631624],
                [0.306210, 0.008902, 0.633694],
                [0.312543, 0.008239, 0.635700],
                [0.318856, 0.007576, 0.637640],
                [0.325150, 0.006915, 0.639512],
                [0.331426, 0.006261, 0.641316],
                [0.337683, 0.005618, 0.643049],
                [0.343925, 0.004991, 0.644710],
                [0.350150, 0.004382, 0.646298],
                [0.356359, 0.003798, 0.647810],
                [0.362553, 0.003243, 0.649245],
                [0.368733, 0.002724, 0.650601],
                [0.374897, 0.002245, 0.651876],
                [0.381047, 0.001814, 0.653068],
                [0.387183, 0.001434, 0.654177],
                [0.393304, 0.001114, 0.655199],
                [0.399411, 0.000859, 0.656133],
                [0.405503, 0.000678, 0.656977],
                [0.411580, 0.000577, 0.657730],
                [0.417642, 0.000564, 0.658390],
                [0.423689, 0.000646, 0.658956],
                [0.429719, 0.000831, 0.659425],
                [0.435734, 0.001127, 0.659797],
                [0.441732, 0.001540, 0.660069],
                [0.447714, 0.002080, 0.660240],
                [0.453677, 0.002755, 0.660310],
                [0.459623, 0.003574, 0.660277],
                [0.465550, 0.004545, 0.660139],
                [0.471457, 0.005678, 0.659897],
                [0.477344, 0.006980, 0.659549],
                [0.483210, 0.008460, 0.659095],
                [0.489055, 0.010127, 0.658534],
                [0.494877, 0.011990, 0.657865],
                [0.500678, 0.014055, 0.657088],
                [0.506454, 0.016333, 0.656202],
                [0.512206, 0.018833, 0.655209],
                [0.517933, 0.021563, 0.654109],
                [0.523633, 0.024532, 0.652901],
                [0.529306, 0.027747, 0.651586],
                [0.534952, 0.031217, 0.650165],
                [0.540570, 0.034950, 0.648640],
                [0.546157, 0.038954, 0.647010],
                [0.551715, 0.043136, 0.645277],
                [0.557243, 0.047331, 0.643443],
                [0.562738, 0.051545, 0.641509],
                [0.568201, 0.055778, 0.639477],
                [0.573632, 0.060028, 0.637349],
                [0.579029, 0.064296, 0.635126],
                [0.584391, 0.068579, 0.632812],
                [0.589719, 0.072878, 0.630408],
                [0.595011, 0.077190, 0.627917],
                [0.600266, 0.081516, 0.625342],
                [0.605485, 0.085854, 0.622686],
                [0.610667, 0.090204, 0.619951],
                [0.615812, 0.094564, 0.617140],
                [0.620919, 0.098934, 0.614257],
                [0.625987, 0.103312, 0.611305],
                [0.631017, 0.107699, 0.608287],
                [0.636008, 0.112092, 0.605205],
                [0.640959, 0.116492, 0.602065],
                [0.645872, 0.120898, 0.598867],
                [0.650746, 0.125309, 0.595617],
                [0.655580, 0.129725, 0.592317],
                [0.660374, 0.134144, 0.588971],
                [0.665129, 0.138566, 0.585582],
                [0.669845, 0.142992, 0.582154],
                [0.674522, 0.147419, 0.578688],
                [0.679160, 0.151848, 0.575189],
                [0.683758, 0.156278, 0.571660],
                [0.688318, 0.160709, 0.568103],
                [0.692840, 0.165141, 0.564522],
                [0.697324, 0.169573, 0.560919],
                [0.701769, 0.174005, 0.557296],
                [0.706178, 0.178437, 0.553657],
                [0.710549, 0.182868, 0.550004],
                [0.714883, 0.187299, 0.546338],
                [0.719181, 0.191729, 0.542663],
                [0.723444, 0.196158, 0.538981],
                [0.727670, 0.200586, 0.535293],
                [0.731862, 0.205013, 0.531601],
                [0.736019, 0.209439, 0.527908],
                [0.740143, 0.213864, 0.524216],
                [0.744232, 0.218288, 0.520524],
                [0.748289, 0.222711, 0.516834],
                [0.752312, 0.227133, 0.513149],
                [0.756304, 0.231555, 0.509468],
                [0.760264, 0.235976, 0.505794],
                [0.764193, 0.240396, 0.502126],
                [0.768090, 0.244817, 0.498465],
                [0.771958, 0.249237, 0.494813],
                [0.775796, 0.253658, 0.491171],
                [0.779604, 0.258078, 0.487539],
                [0.783383, 0.262500, 0.483918],
                [0.787133, 0.266922, 0.480307],
                [0.790855, 0.271345, 0.476706],
                [0.794549, 0.275770, 0.473117],
                [0.798216, 0.280197, 0.469538],
                [0.801855, 0.284626, 0.465971],
                [0.805467, 0.289057, 0.462415],
                [0.809052, 0.293491, 0.458870],
                [0.812612, 0.297928, 0.455338],
                [0.816144, 0.302368, 0.451816],
                [0.819651, 0.306812, 0.448306],
                [0.823132, 0.311261, 0.444806],
                [0.826588, 0.315714, 0.441316],
                [0.830018, 0.320172, 0.437836],
                [0.833422, 0.324635, 0.434366],
                [0.836801, 0.329105, 0.430905],
                [0.840155, 0.333580, 0.427455],
                [0.843484, 0.338062, 0.424013],
                [0.846788, 0.342551, 0.420579],
                [0.850066, 0.347048, 0.417153],
                [0.853319, 0.351553, 0.413734],
                [0.856547, 0.356066, 0.410322],
                [0.859750, 0.360588, 0.406917],
                [0.862927, 0.365119, 0.403519],
                [0.866078, 0.369660, 0.400126],
                [0.869203, 0.374212, 0.396738],
                [0.872303, 0.378774, 0.393355],
                [0.875376, 0.383347, 0.389976],
                [0.878423, 0.387932, 0.386600],
                [0.881443, 0.392529, 0.383229],
                [0.884436, 0.397139, 0.379860],
                [0.887402, 0.401762, 0.376494],
                [0.890340, 0.406398, 0.373130],
                [0.893250, 0.411048, 0.369768],
                [0.896131, 0.415712, 0.366407],
                [0.898984, 0.420392, 0.363047],
                [0.901807, 0.425087, 0.359688],
                [0.904601, 0.429797, 0.356329],
                [0.907365, 0.434524, 0.352970],
                [0.910098, 0.439268, 0.349610],
                [0.912800, 0.444029, 0.346251],
                [0.915471, 0.448807, 0.342890],
                [0.918109, 0.453603, 0.339529],
                [0.920714, 0.458417, 0.336166],
                [0.923287, 0.463251, 0.332801],
                [0.925825, 0.468103, 0.329435],
                [0.928329, 0.472975, 0.326067],
                [0.930798, 0.477867, 0.322697],
                [0.933232, 0.482780, 0.319325],
                [0.935630, 0.487712, 0.315952],
                [0.937990, 0.492667, 0.312575],
                [0.940313, 0.497642, 0.309197],
                [0.942598, 0.502639, 0.305816],
                [0.944844, 0.507658, 0.302433],
                [0.947051, 0.512699, 0.299049],
                [0.949217, 0.517763, 0.295662],
                [0.951344, 0.522850, 0.292275],
                [0.953428, 0.527960, 0.288883],
                [0.955470, 0.533093, 0.285490],
                [0.957469, 0.538250, 0.282096],
                [0.959424, 0.543431, 0.278701],
                [0.961336, 0.548636, 0.275305],
                [0.963203, 0.553865, 0.271909],
                [0.965024, 0.559118, 0.268513],
                [0.966798, 0.564396, 0.265118],
                [0.968526, 0.569700, 0.261721],
                [0.970205, 0.575028, 0.258325],
                [0.971835, 0.580382, 0.254931],
                [0.973416, 0.585761, 0.251540],
                [0.974947, 0.591165, 0.248151],
                [0.976428, 0.596595, 0.244767],
                [0.977856, 0.602051, 0.241387],
                [0.979233, 0.607532, 0.238013],
                [0.980556, 0.613039, 0.234646],
                [0.981826, 0.618572, 0.231287],
                [0.983041, 0.624131, 0.227937],
                [0.984199, 0.629718, 0.224595],
                [0.985301, 0.635330, 0.221265],
                [0.986345, 0.640969, 0.217948],
                [0.987332, 0.646633, 0.214648],
                [0.988260, 0.652325, 0.211364],
                [0.989128, 0.658043, 0.208100],
                [0.989935, 0.663787, 0.204859],
                [0.990681, 0.669558, 0.201642],
                [0.991365, 0.675355, 0.198453],
                [0.991985, 0.681179, 0.195295],
                [0.992541, 0.687030, 0.192170],
                [0.993032, 0.692907, 0.189084],
                [0.993456, 0.698810, 0.186041],
                [0.993814, 0.704741, 0.183043],
                [0.994103, 0.710698, 0.180097],
                [0.994324, 0.716681, 0.177208],
                [0.994474, 0.722691, 0.174381],
                [0.994553, 0.728728, 0.171622],
                [0.994561, 0.734791, 0.168938],
                [0.994495, 0.740880, 0.166335],
                [0.994355, 0.746995, 0.163821],
                [0.994141, 0.753137, 0.161404],
                [0.993851, 0.759304, 0.159092],
                [0.993482, 0.765499, 0.156891],
                [0.993033, 0.771720, 0.154808],
                [0.992505, 0.777967, 0.152855],
                [0.991897, 0.784239, 0.151042],
                [0.991209, 0.790537, 0.149377],
                [0.990439, 0.796859, 0.147870],
                [0.989587, 0.803205, 0.146529],
                [0.988648, 0.809579, 0.145357],
                [0.987621, 0.815978, 0.144363],
                [0.986509, 0.822401, 0.143557],
                [0.985314, 0.828846, 0.142945],
                [0.984031, 0.835315, 0.142528],
                [0.982653, 0.841812, 0.142303],
                [0.981190, 0.848329, 0.142279],
                [0.979644, 0.854866, 0.142453],
                [0.977995, 0.861432, 0.142808],
                [0.976265, 0.868016, 0.143351],
                [0.974443, 0.874622, 0.144061],
                [0.972530, 0.881250, 0.144923],
                [0.970533, 0.887896, 0.145919],
                [0.968443, 0.894564, 0.147014],
                [0.966271, 0.901249, 0.148180],
                [0.964021, 0.907950, 0.149370],
                [0.961681, 0.914672, 0.150520],
                [0.959276, 0.921407, 0.151566],
                [0.956808, 0.928152, 0.152409],
                [0.954287, 0.934908, 0.152921],
                [0.951726, 0.941671, 0.152925],
                [0.949151, 0.948435, 0.152178],
                [0.946602, 0.955190, 0.150328],
                [0.944152, 0.961916, 0.146861],
                [0.941896, 0.968590, 0.140956],
                [0.940015, 0.975158, 0.131326]]
_viridis_data = [[0.267004, 0.004874, 0.329415],
                 [0.268510, 0.009605, 0.335427],
                 [0.269944, 0.014625, 0.341379],
                 [0.271305, 0.019942, 0.347269],
                 [0.272594, 0.025563, 0.353093],
                 [0.273809, 0.031497, 0.358853],
                 [0.274952, 0.037752, 0.364543],
                 [0.276022, 0.044167, 0.370164],
                 [0.277018, 0.050344, 0.375715],
                 [0.277941, 0.056324, 0.381191],
                 [0.278791, 0.062145, 0.386592],
                 [0.279566, 0.067836, 0.391917],
                 [0.280267, 0.073417, 0.397163],
                 [0.280894, 0.078907, 0.402329],
                 [0.281446, 0.084320, 0.407414],
                 [0.281924, 0.089666, 0.412415],
                 [0.282327, 0.094955, 0.417331],
                 [0.282656, 0.100196, 0.422160],
                 [0.282910, 0.105393, 0.426902],
                 [0.283091, 0.110553, 0.431554],
                 [0.283197, 0.115680, 0.436115],
                 [0.283229, 0.120777, 0.440584],
                 [0.283187, 0.125848, 0.444960],
                 [0.283072, 0.130895, 0.449241],
                 [0.282884, 0.135920, 0.453427],
                 [0.282623, 0.140926, 0.457517],
                 [0.282290, 0.145912, 0.461510],
                 [0.281887, 0.150881, 0.465405],
                 [0.281412, 0.155834, 0.469201],
                 [0.280868, 0.160771, 0.472899],
                 [0.280255, 0.165693, 0.476498],
                 [0.279574, 0.170599, 0.479997],
                 [0.278826, 0.175490, 0.483397],
                 [0.278012, 0.180367, 0.486697],
                 [0.277134, 0.185228, 0.489898],
                 [0.276194, 0.190074, 0.493001],
                 [0.275191, 0.194905, 0.496005],
                 [0.274128, 0.199721, 0.498911],
                 [0.273006, 0.204520, 0.501721],
                 [0.271828, 0.209303, 0.504434],
                 [0.270595, 0.214069, 0.507052],
                 [0.269308, 0.218818, 0.509577],
                 [0.267968, 0.223549, 0.512008],
                 [0.266580, 0.228262, 0.514349],
                 [0.265145, 0.232956, 0.516599],
                 [0.263663, 0.237631, 0.518762],
                 [0.262138, 0.242286, 0.520837],
                 [0.260571, 0.246922, 0.522828],
                 [0.258965, 0.251537, 0.524736],
                 [0.257322, 0.256130, 0.526563],
                 [0.255645, 0.260703, 0.528312],
                 [0.253935, 0.265254, 0.529983],
                 [0.252194, 0.269783, 0.531579],
                 [0.250425, 0.274290, 0.533103],
                 [0.248629, 0.278775, 0.534556],
                 [0.246811, 0.283237, 0.535941],
                 [0.244972, 0.287675, 0.537260],
                 [0.243113, 0.292092, 0.538516],
                 [0.241237, 0.296485, 0.539709],
                 [0.239346, 0.300855, 0.540844],
                 [0.237441, 0.305202, 0.541921],
                 [0.235526, 0.309527, 0.542944],
                 [0.233603, 0.313828, 0.543914],
                 [0.231674, 0.318106, 0.544834],
                 [0.229739, 0.322361, 0.545706],
                 [0.227802, 0.326594, 0.546532],
                 [0.225863, 0.330805, 0.547314],
                 [0.223925, 0.334994, 0.548053],
                 [0.221989, 0.339161, 0.548752],
                 [0.220057, 0.343307, 0.549413],
                 [0.218130, 0.347432, 0.550038],
                 [0.216210, 0.351535, 0.550627],
                 [0.214298, 0.355619, 0.551184],
                 [0.212395, 0.359683, 0.551710],
                 [0.210503, 0.363727, 0.552206],
                 [0.208623, 0.367752, 0.552675],
                 [0.206756, 0.371758, 0.553117],
                 [0.204903, 0.375746, 0.553533],
                 [0.203063, 0.379716, 0.553925],
                 [0.201239, 0.383670, 0.554294],
                 [0.199430, 0.387607, 0.554642],
                 [0.197636, 0.391528, 0.554969],
                 [0.195860, 0.395433, 0.555276],
                 [0.194100, 0.399323, 0.555565],
                 [0.192357, 0.403199, 0.555836],
                 [0.190631, 0.407061, 0.556089],
                 [0.188923, 0.410910, 0.556326],
                 [0.187231, 0.414746, 0.556547],
                 [0.185556, 0.418570, 0.556753],
                 [0.183898, 0.422383, 0.556944],
                 [0.182256, 0.426184, 0.557120],
                 [0.180629, 0.429975, 0.557282],
                 [0.179019, 0.433756, 0.557430],
                 [0.177423, 0.437527, 0.557565],
                 [0.175841, 0.441290, 0.557685],
                 [0.174274, 0.445044, 0.557792],
                 [0.172719, 0.448791, 0.557885],
                 [0.171176, 0.452530, 0.557965],
                 [0.169646, 0.456262, 0.558030],
                 [0.168126, 0.459988, 0.558082],
                 [0.166617, 0.463708, 0.558119],
                 [0.165117, 0.467423, 0.558141],
                 [0.163625, 0.471133, 0.558148],
                 [0.162142, 0.474838, 0.558140],
                 [0.160665, 0.478540, 0.558115],
                 [0.159194, 0.482237, 0.558073],
                 [0.157729, 0.485932, 0.558013],
                 [0.156270, 0.489624, 0.557936],
                 [0.154815, 0.493313, 0.557840],
                 [0.153364, 0.497000, 0.557724],
                 [0.151918, 0.500685, 0.557587],
                 [0.150476, 0.504369, 0.557430],
                 [0.149039, 0.508051, 0.557250],
                 [0.147607, 0.511733, 0.557049],
                 [0.146180, 0.515413, 0.556823],
                 [0.144759, 0.519093, 0.556572],
                 [0.143343, 0.522773, 0.556295],
                 [0.141935, 0.526453, 0.555991],
                 [0.140536, 0.530132, 0.555659],
                 [0.139147, 0.533812, 0.555298],
                 [0.137770, 0.537492, 0.554906],
                 [0.136408, 0.541173, 0.554483],
                 [0.135066, 0.544853, 0.554029],
                 [0.133743, 0.548535, 0.553541],
                 [0.132444, 0.552216, 0.553018],
                 [0.131172, 0.555899, 0.552459],
                 [0.129933, 0.559582, 0.551864],
                 [0.128729, 0.563265, 0.551229],
                 [0.127568, 0.566949, 0.550556],
                 [0.126453, 0.570633, 0.549841],
                 [0.125394, 0.574318, 0.549086],
                 [0.124395, 0.578002, 0.548287],
                 [0.123463, 0.581687, 0.547445],
                 [0.122606, 0.585371, 0.546557],
                 [0.121831, 0.589055, 0.545623],
                 [0.121148, 0.592739, 0.544641],
                 [0.120565, 0.596422, 0.543611],
                 [0.120092, 0.600104, 0.542530],
                 [0.119738, 0.603785, 0.541400],
                 [0.119512, 0.607464, 0.540218],
                 [0.119423, 0.611141, 0.538982],
                 [0.119483, 0.614817, 0.537692],
                 [0.119699, 0.618490, 0.536347],
                 [0.120081, 0.622161, 0.534946],
                 [0.120638, 0.625828, 0.533488],
                 [0.121380, 0.629492, 0.531973],
                 [0.122312, 0.633153, 0.530398],
                 [0.123444, 0.636809, 0.528763],
                 [0.124780, 0.640461, 0.527068],
                 [0.126326, 0.644107, 0.525311],
                 [0.128087, 0.647749, 0.523491],
                 [0.130067, 0.651384, 0.521608],
                 [0.132268, 0.655014, 0.519661],
                 [0.134692, 0.658636, 0.517649],
                 [0.137339, 0.662252, 0.515571],
                 [0.140210, 0.665859, 0.513427],
                 [0.143303, 0.669459, 0.511215],
                 [0.146616, 0.673050, 0.508936],
                 [0.150148, 0.676631, 0.506589],
                 [0.153894, 0.680203, 0.504172],
                 [0.157851, 0.683765, 0.501686],
                 [0.162016, 0.687316, 0.499129],
                 [0.166383, 0.690856, 0.496502],
                 [0.170948, 0.694384, 0.493803],
                 [0.175707, 0.697900, 0.491033],
                 [0.180653, 0.701402, 0.488189],
                 [0.185783, 0.704891, 0.485273],
                 [0.191090, 0.708366, 0.482284],
                 [0.196571, 0.711827, 0.479221],
                 [0.202219, 0.715272, 0.476084],
                 [0.208030, 0.718701, 0.472873],
                 [0.214000, 0.722114, 0.469588],
                 [0.220124, 0.725509, 0.466226],
                 [0.226397, 0.728888, 0.462789],
                 [0.232815, 0.732247, 0.459277],
                 [0.239374, 0.735588, 0.455688],
                 [0.246070, 0.738910, 0.452024],
                 [0.252899, 0.742211, 0.448284],
                 [0.259857, 0.745492, 0.444467],
                 [0.266941, 0.748751, 0.440573],
                 [0.274149, 0.751988, 0.436601],
                 [0.281477, 0.755203, 0.432552],
                 [0.288921, 0.758394, 0.428426],
                 [0.296479, 0.761561, 0.424223],
                 [0.304148, 0.764704, 0.419943],
                 [0.311925, 0.767822, 0.415586],
                 [0.319809, 0.770914, 0.411152],
                 [0.327796, 0.773980, 0.406640],
                 [0.335885, 0.777018, 0.402049],
                 [0.344074, 0.780029, 0.397381],
                 [0.352360, 0.783011, 0.392636],
                 [0.360741, 0.785964, 0.387814],
                 [0.369214, 0.788888, 0.382914],
                 [0.377779, 0.791781, 0.377939],
                 [0.386433, 0.794644, 0.372886],
                 [0.395174, 0.797475, 0.367757],
                 [0.404001, 0.800275, 0.362552],
                 [0.412913, 0.803041, 0.357269],
                 [0.421908, 0.805774, 0.351910],
                 [0.430983, 0.808473, 0.346476],
                 [0.440137, 0.811138, 0.340967],
                 [0.449368, 0.813768, 0.335384],
                 [0.458674, 0.816363, 0.329727],
                 [0.468053, 0.818921, 0.323998],
                 [0.477504, 0.821444, 0.318195],
                 [0.487026, 0.823929, 0.312321],
                 [0.496615, 0.826376, 0.306377],
                 [0.506271, 0.828786, 0.300362],
                 [0.515992, 0.831158, 0.294279],
                 [0.525776, 0.833491, 0.288127],
                 [0.535621, 0.835785, 0.281908],
                 [0.545524, 0.838039, 0.275626],
                 [0.555484, 0.840254, 0.269281],
                 [0.565498, 0.842430, 0.262877],
                 [0.575563, 0.844566, 0.256415],
                 [0.585678, 0.846661, 0.249897],
                 [0.595839, 0.848717, 0.243329],
                 [0.606045, 0.850733, 0.236712],
                 [0.616293, 0.852709, 0.230052],
                 [0.626579, 0.854645, 0.223353],
                 [0.636902, 0.856542, 0.216620],
                 [0.647257, 0.858400, 0.209861],
                 [0.657642, 0.860219, 0.203082],
                 [0.668054, 0.861999, 0.196293],
                 [0.678489, 0.863742, 0.189503],
                 [0.688944, 0.865448, 0.182725],
                 [0.699415, 0.867117, 0.175971],
                 [0.709898, 0.868751, 0.169257],
                 [0.720391, 0.870350, 0.162603],
                 [0.730889, 0.871916, 0.156029],
                 [0.741388, 0.873449, 0.149561],
                 [0.751884, 0.874951, 0.143228],
                 [0.762373, 0.876424, 0.137064],
                 [0.772852, 0.877868, 0.131109],
                 [0.783315, 0.879285, 0.125405],
                 [0.793760, 0.880678, 0.120005],
                 [0.804182, 0.882046, 0.114965],
                 [0.814576, 0.883393, 0.110347],
                 [0.824940, 0.884720, 0.106217],
                 [0.835270, 0.886029, 0.102646],
                 [0.845561, 0.887322, 0.099702],
                 [0.855810, 0.888601, 0.097452],
                 [0.866013, 0.889868, 0.095953],
                 [0.876168, 0.891125, 0.095250],
                 [0.886271, 0.892374, 0.095374],
                 [0.896320, 0.893616, 0.096335],
                 [0.906311, 0.894855, 0.098125],
                 [0.916242, 0.896091, 0.100717],
                 [0.926106, 0.897330, 0.104071],
                 [0.935904, 0.898570, 0.108131],
                 [0.945636, 0.899815, 0.112838],
                 [0.955300, 0.901065, 0.118128],
                 [0.964894, 0.902323, 0.123941],
                 [0.974417, 0.903590, 0.130215],
                 [0.983868, 0.904867, 0.136897],
                 [0.993248, 0.906157, 0.143936]]
from matplotlib.colors import ListedColormap
cmaps = {}
for (name, data) in (('magma', _magma_data),
                     ('inferno', _inferno_data),
                     ('plasma', _plasma_data),
                     ('viridis', _viridis_data)):
    cmaps[name] = ListedColormap(data, name=name)
magma = cmaps['magma']
inferno = cmaps['inferno']
plasma = cmaps['plasma']
viridis = cmaps['viridis']
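# Illustrative usage sketch (not part of the original file): the ListedColormap
# objects built above plug into standard matplotlib calls via the cmap argument.
def _example_plot_with_viridis():
    import numpy as np
    import matplotlib.pyplot as plt
    data = np.random.rand(32, 32)    # any 2-D array of values
    plt.imshow(data, cmap=viridis)   # magma / inferno / plasma work the same way
    plt.colorbar()
    plt.show()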
 | 
	mit | 
| 
	Tarskin/HappyTools | 
	HappyTools.py | 
	1 | 
	16117 | 
	#! /usr/bin/env python
#
# Copyright 2017-2019 Bas C. Jansen
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# You should have received a copy of the Apache 2.0 license along
# with this program; if not, see
# http://www.apache.org/licenses/LICENSE-2.0
# Compatibility check
import HappyTools.util.requirement_checker as req_check
req_check.check_requirements()
# General imports
from matplotlib.backends.backend_tkagg import (
    FigureCanvasTkAgg, NavigationToolbar2Tk
)
import importlib
import logging
import os
import tkinter as tk
import tkinter.filedialog as filedialog
import tkinter.messagebox as messagebox
from matplotlib import image, figure
from pathlib import Path, PurePath
# Platform specific bits
if os.name == 'posix':
    import matplotlib
    matplotlib.use('TkAgg')
# Custom libraries
from HappyTools.util.peak_detection import PeakDetection
from HappyTools.util.functions import (check_disk_access,
                                       save_calibrants, read_peak_list)
from HappyTools.util.output import Output
# Gui elements
from HappyTools.gui.custom_toolbar import CustomToolbar
from HappyTools.gui.about_window import AboutWindow
from HappyTools.gui.batch_window import batchWindow
from HappyTools.gui.settings_window import SettingsWindow
from HappyTools.gui.output_window import OutputWindow
from HappyTools.gui.progress_bar import ProgressBar
import HappyTools.gui.version as version
# Class imports
from HappyTools.bin.chromatogram import Chromatogram, finalize_plot
from HappyTools.bin.process_parameters import ProcessParameters
from HappyTools.bin.output_parameters import OutputParameters
from HappyTools.bin.settings import Settings
# Directories
directories = [
    Path.cwd() / 'HappyTools' / 'plugins',
    Path.cwd() / 'HappyTools' / 'gui',
    Path.cwd() / 'HappyTools' / 'bin',
    Path.cwd() / 'HappyTools' / 'util'
]
# Function overwrites
def dynamic_update(foo):
    pass
NavigationToolbar2Tk.dynamic_update = dynamic_update
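# (Presumed rationale, not stated in the original: newer matplotlib releases removed
# NavigationToolbar2Tk.dynamic_update, so assigning a no-op keeps any legacy call
# sites from raising AttributeError; canvas.draw_idle() is the modern replacement.)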
# Application
class HappyToolsGui(object):
    @classmethod
    def run(cls):
        root = tk.Tk()
        HappyToolsGui(root)
        root.mainloop()
    def __init__(self, master):
        # Inherit Tk() root object
        self.master = master
        # Define task_label for progress bar functionality
        task_label = tk.StringVar()
        task_label.set('Idle')
        # LOGGING
        logging.basicConfig(filename='HappyTools.log',
                            format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
                            datefmt='%Y-%m-%d %H:%M', filemode='a',
                            level=logging.WARNING)
        # ACCESS CHECK
        self.directories = directories
        if not check_disk_access(self):
            messagebox.showinfo(
                'Access Error', 'HappyTools does ' +
                'not have sufficient disk access rights. Please close ' +
                'HappyTools and check if the current user has read/' +
                'write access to all folders in the HappyTools folder.')
        # CANVAS
        fig = figure.Figure(figsize=(12,6))
        axes = fig.add_subplot(111)
        axes.axis('off')
        canvas = FigureCanvasTkAgg(fig, master=master)
        CustomToolbar(canvas, master)
        canvas.get_tk_widget().pack(fill=tk.BOTH, expand=tk.YES)
        canvas.draw()
        # FRAME
        tk.Frame(master)
        master.title('HappyTools '+str(version.version) +
                     ' (Build '+str(version.build)+')')
        iconbitmap = Path.cwd() / 'HappyTools' / 'gui' / 'assets' / 'Icon.ico'
        backgroundimage = Path.cwd() / 'HappyTools' / 'gui' / 'assets' / 'UI.png'
        try:
            master.iconbitmap(default=iconbitmap)
        except tk.TclError as e:
            logging.getLogger(__name__).warning(e)
        if backgroundimage.is_file():
            img = image.imread(str(backgroundimage))
            axes.imshow(img)
            axes.set_aspect('auto')
        task = tk.Label(master, textvariable=task_label, width=20)
        task.pack()
        progress = ProgressBar(self.master)
        progress.bar.pack(fill=tk.X)
        # QUIT
        master.protocol('WM_DELETE_WINDOW', self.close)
        # MENU
        menu = tk.Menu(master)
        master.config(menu=menu)
        filemenu = tk.Menu(menu, tearoff=0)
        menu.add_cascade(label='File', menu=filemenu)
        filemenu.add_command(label='Open Chromatogram',
                             command=self.open_chromatogram_window)
        filemenu.add_command(label='Smooth Chromatogram',
                             command=self.smooth_chromatogram)
        filemenu.add_command(label='Baseline Correction',
                             command=self.baseline_correction)
        filemenu.add_command(label='Normalize chromatogram',
                             command=self.normalize_chromatogram)
        filemenu.add_command(label='Save Chromatogram',
                             command=self.save_chromatogram)
        processmenu = tk.Menu(menu, tearoff=0)
        menu.add_cascade(label='Process', menu=processmenu)
        processmenu.add_command(label='Calibrate Chromatogram',
                                command=self.calibrate_chromatogram)
        processmenu.add_command(label='Quantify Chromatogram',
                                command=self.quantify_chromatogram)
        processmenu.add_command(label='Select Outputs',
                                command=self.open_output_window)
        advancedmenu = tk.Menu(menu, tearoff=0)
        menu.add_cascade(label='Advanced', menu=advancedmenu)
        advancedmenu.add_command(label='Peak Detection',
                                 command=self.peak_detection)
        advancedmenu.add_command(label='Save Calibrants',
                                 command=self.save_calibrants)
        advancedmenu.add_command(label='Save Annotation',
                                 command=self.save_annotation)
        batchmenu = tk.Menu(menu, tearoff=0)
        menu.add_cascade(label='Batch', menu=batchmenu)
        batchmenu.add_command(label='Batch Process',
                              command=self.open_batch_window)
        settingsmenu = tk.Menu(menu, tearoff=0)
        menu.add_cascade(label='Settings', menu=settingsmenu)
        settingsmenu.add_command(label='Settings',
                                 command=self.settings_window)
        aboutmenu = tk.Menu(menu, tearoff=0)
        menu.add_cascade(label='About', menu=aboutmenu)
        aboutmenu.add_command(label='About HappyTools',
                              command=self.open_about_window)
        if any((Path.cwd() / 'HappyTools' / 'plugins').glob('*.py')):
            pluginsmenu = tk.Menu(menu,tearoff=0)
            menu.add_cascade(label="Plugins", menu=pluginsmenu)
            for file in(Path.cwd() / 'HappyTools' / 'plugins').glob('*.py'):
                if '__' in str(file):
                    continue
                module_name = PurePath(file).stem
                module = "HappyTools.plugins."+str(module_name)
                module = importlib.import_module(module)
                try:
                    module_name = module.name
                except Exception as e:
                    logging.getLogger(__name__).error(e)
                pluginsmenu.add_command(label=module_name,
                                        command=self.make_function(module))
        # INHERITANCE
        self.logger = logging.getLogger(__name__)
        self.settings = Settings(self)
        self.output_parameters = OutputParameters(self)
        self.process_parameters = ProcessParameters(self)
        self.axes = axes
        self.canvas = canvas
        self.progress = progress
        self.task_label = task_label
    def make_function(self, module):
        try:
            def x():
                return module.start(self)
        except AttributeError as e:
            self.logger.error('Problem with the plugin: '+str(e))
        return x
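    # Plugin sketch (illustrative, not shipped with HappyTools): judging from
    # the Plugins menu construction above, a file dropped into
    # HappyTools/plugins only needs a module-level `name` string and a
    # `start(app)` callable; make_function wires `start` to the menu entry and
    # passes this HappyToolsGui instance as `app`, e.g.
    #
    #     name = 'My Plugin'
    #
    #     def start(app):
    #         app.logger.info('plugin invoked')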
    @classmethod
    def open_about_window(cls):
        AboutWindow()
    def open_chromatogram_window(self):
        files = filedialog.askopenfilenames(title='Open Chromatogram File(s)')
        data = []
        if files:
            self.task_label.set('Opening Chromatograms')
            self.progress.reset_bar()
            for index, filename in enumerate(files):
                self.progress.counter.set((float(index) /
                        len(files))*100)
                self.progress.update_progress_bar()
                self.filename = Path(filename)
                chromatogram = Chromatogram(self)
                chromatogram.open_chromatogram()
                data.append(chromatogram)
            self.data = data
            self.task_label.set('Idle')
            self.progress.fill_bar()
        self.axes.clear()
        for chrom in self.data:
            chrom.plot_chromatogram()
        finalize_plot(self)
    def open_output_window(self):
        OutputWindow(self)
    def open_settings_window(self):
        self.settings.settings_popup(self.settings)
    def calibrate_chromatogram(self):
        try:
            self.process_parameters.calibration = True
            self.process_parameters.calibration_file = filedialog.askopenfilename(
                title='Select Calibration File')
            if not self.process_parameters.calibration_file:
                self.process_parameters.quantitation = False
                return
            self.reference = read_peak_list(
                    self.process_parameters.calibration_file)
            self.progress.reset_bar()
            self.task_label.set('Calibrating Chromatograms')
            for index, self.chrom in enumerate(self.data):
                self.progress.counter.set((float(index) /
                        len(self.data))*100)
                self.progress.update_progress_bar()
                self.chrom.determine_calibration_timepairs()
                # Still need to include a check against number of calibrants
                self.chrom.determine_calibration_function()
                self.chrom.calibrate_chromatogram()
            self.task_label.set('Idle')
            self.progress.fill_bar()
            self.process_parameters.quantitation = False
        except Exception as e:
            self.logger.error(e)
        self.progress.fill_bar()
        self.axes.clear()
        self.progress.reset_bar()
        self.task_label.set('Plotting Chromatograms')
        for index, chrom in enumerate(self.data):
            self.progress.counter.set((float(index) /
                    len(self.data))*100)
            self.progress.update_progress_bar()
            chrom.plot_chromatogram()
        finalize_plot(self)
        self.task_label.set('Idle')
        self.progress.fill_bar()
    def close(self):
        self.master.destroy()
        self.master.quit()
    def generate_pdf_reports(self):
        self.progress.reset_bar()
        self.task_label.set('Generating PDF reports')
        for index, chrom in enumerate(self.data):
            self.progress.counter.set((float(index) /
                    len(self.data))*100)
            self.progress.update_progress_bar()
            chrom.generate_pdf_report()
        self.task_label.set('Idle')
        self.progress.fill_bar()
    def open_batch_window(self):
        batchWindow(self)
    def normalize_chromatogram(self):
        try:
            self.task_label.set('Normalizing Chromatograms')
            self.progress.reset_bar()
            self.axes.clear()
            for index, chrom in enumerate(self.data):
                self.progress.counter.set((float(index) /
                        len(self.data))*100)
                self.progress.update_progress_bar()
                chrom.normalize_chromatogram()
                chrom.plot_chromatogram()
            finalize_plot(self)
            self.task_label.set('Idle')
            self.progress.fill_bar()
        except Exception as e:
            self.logger.error(e)
    def quantify_chromatogram(self):
        try:
            self.process_parameters.quantitation = True
            self.process_parameters.quanititation_file = filedialog.askopenfilename(
                title='Select Quantitation File')
            if not self.process_parameters.quanititation_file:
                self.process_parameters.quantitation = False
                return
            self.reference = read_peak_list(
                    self.process_parameters.quanititation_file)
            self.progress.reset_bar()
            self.task_label.set('Quantifying Chromatograms')
            for index, chrom in enumerate(self.data):
                self.progress.counter.set((float(index) /
                        len(self.data))*100)
                self.progress.update_progress_bar()
                chrom.quantify_chromatogram()
            self.task_label.set('Idle')
            self.progress.fill_bar()
            if self.output_parameters.pdf_report.get() == True:
                self.generate_pdf_reports()
            self.output = Output(self)
            self.output.init_output_file()
            self.output.build_output_file()
            self.process_parameters.quantitation = False
        except Exception as e:
            self.logger.error(e)
    def peak_detection(self):
        try:
            self.axes.clear()
            for self.chrom in self.data:
                self.detected_peaks = PeakDetection(self)
                self.detected_peaks.detect_peaks()
                self.detected_peaks.plot_peaks()
                self.chrom.plot_chromatogram()
            finalize_plot(self)
        except Exception as e:
            self.logger.error(e)
    def save_annotation(self):
        try:
            for self.chrom in self.data:
                self.detected_peaks.write_peaks()
        except Exception as e:
            self.logger.error(e)
    def save_calibrants(self):
        save_calibrants(self)
    def smooth_chromatogram(self):
        try:
            self.task_label.set('Smoothing Chromatograms')
            self.progress.reset_bar()
            self.axes.clear()
            for index, chrom in enumerate(self.data):
                self.progress.counter.set((float(index) /
                        len(self.data))*100)
                self.progress.update_progress_bar()
                chrom.smooth_chromatogram()
                chrom.plot_chromatogram()
            finalize_plot(self)
            self.task_label.set('Idle')
            self.progress.fill_bar()
        except Exception as e:
            self.logger.error(e)
    def save_chromatogram(self):
        try:
            for chrom in self.data:
                chrom.save_chromatogram()
        except Exception as e:
            self.logger.error(e)
    def settings_window(self):
        try:
            SettingsWindow(self)
        except Exception as e:
            self.logger.error(e)
    def baseline_correction(self):
        try:
            self.task_label.set('Baseline Correcting')
            self.progress.reset_bar()
            self.axes.clear()
            for index, chrom in enumerate(self.data):
                self.progress.counter.set((float(index) /
                        len(self.data))*100)
                self.progress.update_progress_bar()
                chrom.baseline_correction()
                chrom.plot_chromatogram()
            finalize_plot(self)
            self.task_label.set('Idle')
            self.progress.fill_bar()
        except Exception as e:
            self.logger.error(e)
# Call the main app
if __name__ == '__main__':
    HappyToolsGui.run()
 | 
	apache-2.0 | 
| 
	cjekel/piecewise_linear_fit_py | 
	examples/weighted_least_squares_ex.py | 
	1 | 
	1245 | 
	from time import time
import os
os.environ['OMP_NUM_THREADS'] = '1'
import numpy as np
import matplotlib.pyplot as plt
import pwlf
t0 = time()
np.random.seed(123)
n = 100
n_data_sets = 100
n_segments = 6
# generate sine data
x = np.linspace(0, 10, n)
y = np.zeros((n_data_sets, n))
sigma_change = np.linspace(0.001, 0.05, 100)
for i in range(n_data_sets):
    y[i] = np.sin(x * np.pi / 2)
    # add noise to the data
    y[i] = np.random.normal(0, sigma_change, 100) + y[i]
X = np.tile(x, n_data_sets)
# perform an ordinary pwlf fit to the entire data
my_pwlf = pwlf.PiecewiseLinFit(X.flatten(), y.flatten())
my_pwlf.fit(n_segments)
# compute the standard deviation in y
y_std = np.std(y, axis=0)
# set the weights to be one over the standard deviation
weights = 1.0 / y_std
# perform a weighted least squares to the data
my_pwlf_w = pwlf.PiecewiseLinFit(x, y.mean(axis=0), weights=weights)
my_pwlf_w.fit(n_segments)
# compare the fits
xhat = np.linspace(0, 10, 1000)
yhat = my_pwlf.predict(xhat)
yhat_w = my_pwlf_w.predict(xhat)
t1 = time()
print('Runtime:', t1-t0)
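# Optional sanity check (added sketch, not part of the original example):
# compare both fits against the noise-free sine curve the data was generated
# from; the weighted fit is expected to track it at least as well where the
# noise level sigma_change is largest.
y_true = np.sin(xhat * np.pi / 2)
print('Ordinary LS, SSE vs noise-free curve:', np.sum((yhat - y_true) ** 2))
print('Weighted LS, SSE vs noise-free curve:', np.sum((yhat_w - y_true) ** 2))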
plt.figure()
plt.plot(X.flatten(), y.flatten(), '.')
plt.plot(xhat, yhat, '-', label='Ordinary LS')
plt.plot(xhat, yhat_w, '-', label='Weighted LS')
plt.legend()
plt.show()
 | 
	mit | 
| 
	wabu/zeroflo | 
	zeroflo/flows/read/http.py | 
	1 | 
	5289 | 
	from .ressource import Ressource, Directory, Stats
from ...ext.params import param
from pyadds.annotate import cached, delayed
from pyadds.logging import log
import pandas as pd
import asyncio
import aiohttp
coroutine = asyncio.coroutine
from functools import wraps
@log
class HTTPConnection:
    """ aiohttp based http connection to server """
    def __init__(self, base, connect={}, loop=None, **rqs):
        self.base = base
        self.connect = connect
        if 'auth' in rqs:
            rqs['auth'] = aiohttp.helpers.BasicAuth(*rqs['auth'])
        self.rqs = rqs
    @delayed
    def connector(self):
        return (self.rqs.pop('connector', None)
                or aiohttp.TCPConnector(loop=asyncio.get_event_loop(), **self.connect))
    @cached
    def methods(self):
        return {}
    @coroutine
    def request(self, method, path, **kws):
        kws.update(self.rqs)
        url = '/'.join([self.base, path])
        if 'connector' not in kws:
            kws['connector'] = self.connector
        return (yield from aiohttp.request(method, url, **kws))
    def __getattr__(self, name):
        if name.startswith('_'):
            raise AttributeError
        try:
            return self.methods[name]
        except KeyError:
            request = self.request
            method = name.upper()
            @coroutine
            @wraps(request)
            def rq(path, **kws):
                return (yield from request(method, path, **kws))
            self.methods[name] = rq
            return rq
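# Minimal usage sketch (added for illustration; the host below is a made-up
# placeholder). __getattr__ above turns attribute access such as conn.get or
# conn.head into a request with the matching HTTP method, so a caller can
# write:
@coroutine
def _example_fetch_index(base='http://example.invalid'):
    conn = HTTPConnection(base)
    r = yield from conn.get('index.html')
    try:
        return (yield from r.text())
    finally:
        yield from r.release()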
@log
class HTTPRessource(Ressource):
    """ ressource access via http """
    def __init__(self, path, conn):
        super().__init__(path)
        self.conn = conn
    @property
    @coroutine
    def stat(self):
        r = yield from self.conn.head(self.path)
        if r.status >= 300:
            return None
        h = r.headers
        return Stats(self.path, self.path.endswith('/'),
                pd.Timestamp(h.get('last-modified', pd.NaT)),
                int(h.get('content-length', -1)))
    @coroutine
    def text(self, encoding=None):
        r = yield from self.conn.get(self.path)
        try:
            return (yield from r.text(encoding=encoding))
        finally:
            yield from r.release()
    @coroutine
    def bytes(self):
        r = yield from self.conn.get(self.path)
        try:
            return (yield from r.read())
        finally:
            yield from r.release()
    @coroutine
    def reader(self, offset=None):
        opts = {}
        if offset:
            opts['headers'] = {'Range': 'bytes=%d-' % offset}
        r = yield from self.conn.get(self.path, **opts)
        self.raise_from_status(r, exspect=206 if offset else 200)
        reader = r.content
        if offset:
            if r.status != 206:
                self.__log.warning('read %s with offset=%d, but no partial response', self.path, offset)
                skip = offset
            else:
                result = int(r.headers['content-range'].split('-',1)[0].rsplit(' ')[-1])
                if result != offset:
                    self.__log.warning('read %s with offset=%d, but content-range starts at %d', self.path, offset, result)
                    skip = offset-result
                else:
                    skip = 0
            if skip:
                try:
                    yield from reader.readexactly(skip)
                except asyncio.IncompleteReadError as e:
                    raise OSError from e
        return reader, r
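    # Worked example (illustrative): for offset == 1024 the request carries
    # the header 'Range: bytes=1024-'. If the 206 reply's Content-Range says
    # the body actually starts at byte 512, skip becomes 1024 - 512 = 512 and
    # exactly that many bytes are read and discarded before the reader is
    # returned, so the caller always resumes at the requested offset.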
    def raise_from_status(self, r, exspect=200):
        error = None
        if 400 <= r.status < 500:
            error = 'Client'
        elif 500 <= r.status < 600:
            error = 'Server'
        elif r.status >= 600 or r.status < 100:
            error = 'Unexpected'
        elif r.status != exspect:
            self.__log.debug('returning normally with status %d %s (%d expected)', r.status, r.reason, exspect)
        if error:
            raise OSError(r.status,
                          '{} {} Error: {}'.format(r.status, error, r.reason),
                          self.path)
@log
class HTTPDirectory(HTTPRessource, Directory):
    """ directory access via http """
    def __init__(self, path, conn, columns=None, **rqs):
        super().__init__(path, conn=conn)
        self.columns = columns
        self.rqs = rqs
    def extract_stat(self, stat):
        return Stats(stat['name'], stat['dir'], pd.Timestamp(stat['modified']), stat['size'])
    @coroutine
    def stats(self, glob=None):
        html = yield from self.text()
        tbl, = pd.read_html(html, **self.rqs)
        if self.columns:
            if isinstance(self.columns, dict):
                tbl = tbl.rename(columns=self.columns)
            else:
                tbl.columns=self.columns
        if 'dir' not in tbl:
            tbl['dir'] = tbl['name'].str.endswith('/')
        if tbl['size'].dtype == object:
            tbl['size'] = tbl['size'].apply(param.sizeof)
        return list(tbl.apply(self.extract_stat, axis=1).values)
    def open(self, name: str):
        return HTTPRessource('/'.join([self.path, name]), conn=self.conn)
    def go(self, name: str):
        return HTTPDirectory('/'.join([self.path, name]), conn=self.conn, columns=self.columns, **self.rqs)
 | 
	mit | 
| 
	drankye/arrow | 
	python/pyarrow/tests/test_column.py | 
	1 | 
	1595 | 
	# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
from pyarrow.compat import unittest
import pyarrow as arrow
A = arrow
import pandas as pd
class TestColumn(unittest.TestCase):
    def test_basics(self):
        data = [
            A.from_pylist([-10, -5, 0, 5, 10])
        ]
        table = A.Table.from_arrays(('a'), data, 'table_name')
        column = table.column(0)
        assert column.name == 'a'
        assert column.length() == 5
        assert len(column) == 5
        assert column.shape == (5,)
    def test_pandas(self):
        data = [
            A.from_pylist([-10, -5, 0, 5, 10])
        ]
        table = A.Table.from_arrays(('a'), data, 'table_name')
        column = table.column(0)
        series = column.to_pandas()
        assert series.name == 'a'
        assert series.shape == (5,)
        assert series.iloc[0] == -10
 | 
	apache-2.0 | 
| 
	btjhjeon/ConversationalQA | 
	skipthoughts/eval_trec.py | 
	2 | 
	3280 | 
	'''
Evaluation code for the TREC dataset
'''
import numpy as np
import skipthoughts
from sklearn.linear_model import LogisticRegression
from sklearn.cross_validation import KFold
from sklearn.utils import shuffle
def evaluate(model, k=10, seed=1234, evalcv=True, evaltest=False):
    """
    Run experiment
    k: number of CV folds
    test: whether to evaluate on test set
    """
    print 'Preparing data...'
    traintext, testtext = load_data()
    train, train_labels = prepare_data(traintext)
    test, test_labels = prepare_data(testtext)
    train_labels = prepare_labels(train_labels)
    test_labels = prepare_labels(test_labels)
    train, train_labels = shuffle(train, train_labels, random_state=seed)
    print 'Computing training skipthoughts...'
    trainF = skipthoughts.encode(model, train, verbose=False, use_eos=False)
    
    if evalcv:
        print 'Running cross-validation...'
        interval = [2**t for t in range(0,9,1)]     # coarse-grained
        C = eval_kfold(trainF, train_labels, k=k, scan=interval, seed=seed)
    if evaltest:
        if not evalcv:
            C = 128     # Best parameter found from CV
        print 'Computing testing skipthoughts...'
        testF = skipthoughts.encode(model, test, verbose=False, use_eos=False)
        print 'Evaluating...'
        clf = LogisticRegression(C=C)
        clf.fit(trainF, train_labels)
        yhat = clf.predict(testF)
        print 'Test accuracy: ' + str(clf.score(testF, test_labels))
def load_data(loc='./data/'):
    """
    Load the TREC question-type dataset
    """
    train, test = [], []
    with open(loc + 'train_5500.label', 'rb') as f:
        for line in f:
            train.append(line.strip())
    with open(loc + 'TREC_10.label', 'rb') as f:
        for line in f:
            test.append(line.strip())
    return train, test
def prepare_data(text):
    """
    Prepare data
    """
    labels = [t.split()[0] for t in text]
    labels = [l.split(':')[0] for l in labels]
    X = [t.split()[1:] for t in text]
    X = [' '.join(t) for t in X]
    return X, labels
def prepare_labels(labels):
    """
    Process labels to numerical values
    """
    d = {}
    count = 0
    setlabels = set(labels)
    for w in setlabels:
        d[w] = count
        count += 1
    idxlabels = np.array([d[w] for w in labels])
    return idxlabels
def eval_kfold(features, labels, k=10, scan=[2**t for t in range(0,9,1)], seed=1234):
    """
    Perform k-fold cross validation
    """
    npts = len(features)
    kf = KFold(npts, n_folds=k, random_state=seed)
    scores = []
    for s in scan:
        scanscores = []
        for train, test in kf:
            # Split data
            X_train = features[train]
            y_train = labels[train]
            X_test = features[test]
            y_test = labels[test]
            # Train classifier
            clf = LogisticRegression(C=s)
            clf.fit(X_train, y_train)
            score = clf.score(X_test, y_test)
            scanscores.append(score)
            print (s, score)
        # Append mean score
        scores.append(np.mean(scanscores))
        print scores
    # Get the index of the best score
    s_ind = np.argmax(scores)
    s = scan[s_ind]
    print (s_ind, s)
    return s
 | 
	mit | 
| 
	gfrd/egfrd | 
	samples/mapk/rebind_ratio/plot_hist.py | 
	6 | 
	6368 | 
	#!/usr/bin/env python
# D=1
# python plot_hist.py "." mapk3_1e-15_1_fixed_1e-1_normal_ALL_reactions.rebind mapk3_1e-15_1_fixed_1e-2_normal_ALL_reactions.rebind mapk3_1e-15_1_fixed_1e-3_normal_ALL_reactions.rebind mapk3_1e-15_1_fixed_1e-4_normal_ALL_reactions.rebind mapk3_1e-15_1_fixed_1e-5_normal_ALL_reactions.rebind mapk3_1e-15_1_fixed_1e-6_normal_ALL_reactions.rebind mapk3_1e-15_1_fixed_0_normal_ALL_reactions.rebind
# t_half = 1e-6
rfiles = ['mapk3_1e-15_0.03125_fixed_1e-6_normal_ALL_reactions.rebind',
          'mapk3_1e-15_0.0625_fixed_1e-6_normal_ALL_reactions.rebind',
          'mapk3_1e-15_0.25_fixed_1e-6_normal_ALL_reactions.rebind',
          'mapk3_1e-15_1_fixed_1e-6_normal_ALL_reactions.rebind',
          'mapk3_1e-15_4_fixed_1e-6_normal_ALL_reactions.rebind'] 
sfiles = []
# t_half = 1e-2
rfiles = ['mapk3_1e-15_0.03125_fixed_1e-2_normal_ALL_reactions.rebind',
          'mapk3_1e-15_0.0625_fixed_1e-2_normal_ALL_reactions.rebind',
          'mapk3_1e-15_0.25_fixed_1e-2_normal_ALL_reactions.rebind',
          'mapk3_1e-15_1_fixed_1e-2_normal_ALL_reactions.rebind',
          'mapk3_1e-15_4_fixed_1e-2_normal_ALL_reactions.rebind']
sfiles=[]
sdir = 's02/data/'
sfiles = ['model3-smallt_0.03125_1e-2_ALL_t.dat',
          'model3-smallt_0.0625_1e-2_ALL_t.dat',
          'model3-smallt_0.25_1e-2_ALL_t.dat',
          'model3-smallt_1_1e-2_ALL_t.dat',
          'model3-smallt_4_1e-2_ALL_t.dat']
from matplotlib.pylab import *
import math
import numpy
import sys
import re
import glob
def load_sfile(sfile):
    sfile = sfile.replace('ALL', '*')
    filelist = glob.glob(sdir + sfile)
    print filelist
    N = 0
    data = []
    for fname in filelist:
        f = open(fname)
        firstline = f.readline()
        n = int(firstline)
        #print 'N', n
        d = [float(line) for line in f.readlines()]
        f.close()
        N += n
        data.extend(d)
    print 'supplementary data:', N, '(', len(data), ')'
    return data, N
        
def plot_hist(filename, xmin, xmax, BINS, pattern=None, factor=1.0, 
              sfile=None):
    if sfile != None:
        thr = 1e-5
    else:
        thr = 1e-20
    file = open(filename)
    data=[]
    for line in file.readlines():
        line = line.split()
        t = float(line[0])
        event_type = line[1]
        if t == 0:
            print 'skip zero'
            continue 
        if pattern == None or pattern.match(event_type):
            data.append(t)
    file.close()
    data = numpy.array(data)
    N = len(data)
    data = data.compress(data != numpy.inf)
    n, bins = numpy.histogram(numpy.log10(data), 
                              range=(numpy.log10(thr),numpy.log10(data.max())),
                              bins=BINS/2, new=True)
    n = n.astype(numpy.floating)
    n /= float(N)
    n *= factor
    #x = 10**bins[:-1]
    x = (10**bins[1:] + 10**bins[:-1]) / 2
    dx = (10**bins[1:]- 10**bins[:-1])
    y = n / dx    #  n+1e-10
    print x, y
    if sfile != None:
        print sfile
        sdata, sN = load_sfile(sfile)
        sdata = numpy.array(sdata)
        #sdata = numpy.compress(sdata <= thr,sdata)
        sn, sbins = numpy.histogram(numpy.log10(sdata), 
                                    range=(numpy.log10(sdata.min()),
                                           numpy.log10(thr)),
                                    bins=BINS/3, new=True)
        sn = sn.astype(numpy.floating)
        sn /= float(sN)
        sn *= factor
        sx = (10**sbins[1:] + 10**sbins[:-1]) / 2
        sdx = (10**sbins[1:]- 10**sbins[:-1])
        sy = sn / sdx    #  n+1e-10
        x = numpy.concatenate((sx, x))
        y = numpy.concatenate((sy, y))
        print N, sN, len(sdata)
    return loglog(x, y)#, label=filename )
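# Note on the density estimate in plot_hist (added): the histogram is taken in
# log10(t), so each relative frequency is divided by the linear bin width
# dx = 10**bins[i+1] - 10**bins[i]. For example, a bin spanning 1e-6 s to
# 1e-5 s has dx = 9e-6 s, turning a relative frequency of 0.09 into a plotted
# density of 1e4 s^-1.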
def plot_hist2(filename, xmin, xmax, N, pattern=None, factor=1.0):
    file = open(filename)
    data=[]
    for line in file.readlines():
        line = line.split()
        t = float(line[0])
        event_type = line[1]
        if t == 0:
            print 'skip zero'
            continue 
        if pattern == None or pattern.match(event_type):
            data.append(t)
    data = numpy.array(data)
    data.sort()
    i = 0
    p = 5
    x = []
    y = []
    ld = len(data)
    while i+p < ld:
        slice = data[i:i+p]
        min, max = slice.min(), slice.max()
        x.append((min + max) / 2)
        y.append(1.0 / (max - min))
        i += p
    y = numpy.array(y,numpy.floating)
    y /= float(len(data))
    y *= factor
    return loglog(x, y)#, label=filename )
if __name__ == '__main__':
    import numpy
    BINS=50
    #pattern = re.compile(sys.argv[1])
    
    #xmin = 1e-12
    xmin = 1e-8
    xmax = 100
    
    axes([.16,.16,.8,.8])
    Dlist = [0.03e-12,0.06e-12,0.25e-12,1e-12, 4e-12]
    lines=[]
    for n, filename in enumerate(rfiles):
        D = Dlist[n]
        if len(sfiles) >= 1:
            sfile = sfiles[n]
        else:
            sfile = None
        sigma = 5e-9
        kD = 4 * numpy.pi * sigma * D
        k_a = 9.2e-20#1.6e9 / (1000*6e23)
        #factor = D * (1 + (k_a / kD))
        factor = 1
        print 'factor', factor
        line = plot_hist(filename, xmin, xmax, BINS, None, factor, sfile = sfile)
        lines.append(line)
    xlabel('Second association times', size=26)
    ylabel('Relative frequency', size=26)
    #ylabel(r'$p(t) \cdot D (1 + (k_a / kD))$', size=26)
    xticks([1e-12, 1e-9, 1e-6, 1e-3, 1], 
           [r'${\rm 1 ps}$',
            r'${\rm 1 ns}$',
            r'${\rm 1 \mu s}$',
            r'${\rm 1 ms}$',
            r'${\rm 1 s}$'],
           size=24)
    yticks(size=18)
    
    xlim(xmin, xmax)
    ylim(5e-5, 5e5)
    leg = legend( lines, (r'$D=0.03 \ \ {\rm \mu m^2 / s}$',
                         r'$D=0.06 \ \  {\rm \mu m^2 / s}$',
#                          #              r'$D=0.13 \ \  {\rm \mu m^2 / s}$',
                          r'$D=0.25 \ \  {\rm \mu m^2 / s}$',
                          r'$D=1.0 \ \  {\rm \mu m^2 / s}$',
                          r'$D=4.0 \ \  {\rm \mu m^2 / s}$',
                         ),
                  loc=3,
                  shadow=True,
                  pad=0.05,
                  labelsep=0
                  )
    for l in leg.get_lines():
        l.set_linewidth(1.5)  # the legend line width
    show()
 | 
	gpl-2.0 | 
| 
	DiCarloLab-Delft/PycQED_py3 | 
	pycqed/tests/test_butterfly_analysis.py | 
	1 | 
	1317 | 
	import pycqed as pq
import matplotlib.pyplot as plt
import os
from pycqed.analysis import measurement_analysis as ma
from numpy.testing import assert_almost_equal
class TestSSRODiscriminationAnalysis:
    @classmethod
    def setup_class(cls):
        cls.datadir = os.path.join(pq.__path__[0], 'tests', 'test_data')
        ma.a_tools.datadir = cls.datadir
    def test_butterfly_postselected(self):
        # Test the correct file is loaded
        a = ma.butterfly_analysis(timestamp='20170710_180002',
                                  close_main_fig=False, initialize=True,
                                  threshold=0.5,
                                  digitize=False, case=True)
        assert_almost_equal(a.butterfly_coeffs['F_a_butterfly'], 0.7998,
                            decimal=3)
    def test_butterfly_simple(self):
        # Test the correct file is loaded
        a = ma.butterfly_analysis(timestamp='20170710_182949',
                                  close_main_fig=False, initialize=False,
                                  threshold=0.5,
                                  digitize=False, case=True)
        assert_almost_equal(a.butterfly_coeffs['F_a_butterfly'], 0.819,
                            decimal=3)
    @classmethod
    def teardown_class(cls):
        plt.close('all')
 | 
	mit | 
| 
	Tjorriemorrie/trading | 
	17_rl_ma_bl_ts_sl_tp/beta/train.py | 
	1 | 
	6854 | 
	import logging
import numpy as np
import argparse
import pandas as pd
from time import time
from pprint import pprint
from random import random, choice, shuffle, randint
from main import loadData, loadQ, saveQ, getBackgroundKnowledge, summarizeActions
from world import DATA, PERIODS, ACTIONS, getState, getReward
def main(debug):
    minutes = 0
    while True:
        minutes += 1
        seconds_to_run = 60 * minutes
        seconds_info_intervals = seconds_to_run / 5
        logging.error('Training each currency for {0} minutes'.format(minutes))
        shuffle(DATA)
        for info in DATA:
            logging.debug('Currency info: {0}'.format(info))
            currency = info['currency']
            interval = info['intervals'][0]
            pip_mul = info['pip_mul']
            df = loadData(currency, interval, 'train')
            df = getBackgroundKnowledge(df, PERIODS)
            alpha = 0.
            epsilon = alpha / 2.
            q = loadQ(currency, interval)
            time_start = time()
            time_interval = seconds_info_intervals
            epoch = 0
            rewards = []
            errors = []
            ticks = []
            logging.warn('Training {0} on {1} with {2} ticks [m:{3}]'.format(
                currency,
                interval,
                len(df),
                minutes,
            ))
            while True:
                epoch += 1
                logging.info(' ')
                logging.info('{0}'.format('=' * 20))
                logging.info('EPOCH {0}'.format(epoch))
                index_start = randint(0, len(df)-20)
                df_inner = df.iloc[index_start:]
                logging.info('Epoch: at {0} with {1} ticks'.format(index_start, len(df_inner)))
                q, r, error, tick = train(df_inner, q, alpha, epsilon, PERIODS, ACTIONS, pip_mul, info['std'])
                # results
                error *= pip_mul
                rewards.append(r)
                errors.append(error)
                ticks.append(tick)
                # win ratio
                wins = [1. if r > 0. else 0. for r in rewards]
                win_ratio = np.mean(wins)
                # logging.error('wr {0}'.format(win_ratio))
                # adjust values
                alpha = 1.0102052281586786e+000 + (-2.0307383627607809e+000 * win_ratio) + (1.0215546892913909e+000 * win_ratio**2)
                epsilon = 3.9851080604500078e-001 + (2.1874724815820201e-002 * win_ratio) + (-4.1444101741886652e-001 * win_ratio**2)
                # logging.error('new alpha = {0}'.format(alpha))
                # only do updates at interval
                if time() - time_start > time_interval or debug:
                    # prune lengths
                    while len(rewards) > 1000 + minutes * 1000:
                        rewards.pop(0)
                        errors.pop(0)
                        ticks.pop(0)
                    # RMSD
                    rmsd = np.sqrt(np.mean([e**2 for e in errors]))
                    # logging.error('RMSD: {0} from new error {1}'.format(rmsd, error))
                    logging.warn('{0} [{1:05d}] RMSD {2:03d} PPT {3:03d} WR {5:.0f}% [ticks:{6:.1f} sum:{4:.1f}, a:{7:.2f}, e:{8:.2f}]'.format(
                        currency,
                        epoch,
                        int(rmsd),
                        int(np.mean(rewards) * pip_mul),
                        sum(rewards),
                        np.mean(wins) * 100,
                        np.mean(ticks),
                        alpha * 100,
                        epsilon * 100,
                    ))
                    # exit
                    if time_interval >= seconds_to_run or debug:
                        break
                    # continue
                    time_interval += seconds_info_intervals
                    saveQ(currency, interval, q)
            saveQ(currency, interval, q)
            summarizeActions(q)
            if debug:
                break  # currencies
        if debug:
            break  # forever
########################################################################################################
# SARSA
########################################################################################################
def train(df, q, alpha, epsilon, periods, actions, pip_mul, std):
    logging.info('Training: started...')
    d = None
    # initial state
    s = getState(df, periods)
    # initial action
    a = getAction(q, s, epsilon, actions)
    # get reward
    r, ticks = getReward(df, a, pip_mul, std)
    # get delta
    d = getDelta(q, s, a, r)
    # update Q
    q = updateQ(q, s, a, d, r, alpha)
    return q, r, d, ticks
def getAction(q, s, epsilon, actions):
    logging.info('Action: finding...')
    # exploration
    if random() < epsilon:
        logging.debug('Action: explore (<{0:.2f})'.format(epsilon))
        a = choice(actions)
    # exploitation
    else:
        logging.debug('Action: exploit (>{0:.2f})'.format(epsilon))
        q_max = None
        for action in actions:
            q_sa = q.get('|'.join([s, action]), random() * 10.)
            logging.debug('Qsa action {0} is {1:.4f}'.format(action, q_sa))
            if q_max is None or q_sa > q_max:
                q_max = q_sa
                a = action
    logging.info('Action: found {0}'.format(a))
    return a
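# Worked example for getAction (illustrative): with epsilon == 0.4 roughly
# 40% of calls pick a random action; otherwise the action with the highest
# stored Q value for state s wins, and unseen state-action pairs fall back to
# an optimistic random default in [0, 10) so they still get tried early on.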
def getDelta(q, s, a, r):
    logging.info('Delta: calculating...')
    q_sa = q.get('|'.join([s, a]), 0.)
    d = r - q_sa
    # logging.error('Delta: {2:.4f} <= r [{0:.4f}] - Qsa [{1:0.4f}]'.format(r, q_sa, d))
    logging.info('Delta: {0:.4f}'.format(d))
    return d
def updateQ(q, s, a, d, r, alpha):
    logging.info('Q: updating learning at {0:.2f}...'.format(alpha))
    # update q
    sa = '|'.join([s, a])
    q_sa = q.get(sa, 0)
    logging.debug('Q: before {0:.4f}'.format(q_sa))
    q_sa_updated = q_sa + (alpha * d)
    # logging.error('Q: {3:.4f} <= qsa [{0:.4f}] + (alpha [{1:0.3f}] * d [{2:.4f}])'.format(q_sa, alpha, d, q_sa_updated))
    q[sa] = q_sa_updated
    logging.debug('Q: after {0:.4f}'.format(q_sa_updated))
    return q
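# Worked example for getDelta/updateQ (illustrative): with q['s|a'] == 0.5,
# reward r == 1.0 and alpha == 0.1, getDelta gives d = 1.0 - 0.5 = 0.5 and
# updateQ moves the stored value to 0.5 + 0.1 * 0.5 = 0.55, i.e. one
# alpha-sized step from the old estimate towards the observed reward.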
########################################################################################################
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-v', '--verbose', action='store_true')
    parser.add_argument('-vv', '--very_verbose', action='store_true')
    args = parser.parse_args()
    verbose = args.verbose
    very_verbose = args.very_verbose
    lvl = logging.DEBUG if very_verbose else (logging.INFO if verbose else logging.WARN)
    logging.basicConfig(
        level=lvl,
        format='%(asctime)s %(name)-8s %(levelname)-8s %(message)s',
        # datefmt='%Y-%m-%d %H:%M:',
    )
    debug = verbose or very_verbose
    main(debug) | 
	mit | 
| 
	mkeyran/EmotionRecognizer | 
	decision_trees_ensemble.py | 
	1 | 
	7293 | 
	
from sklearn import tree
from sklearn.ensemble import RandomForestClassifier
from random import randint
import numpy as np
import pickle
import base_model
class DecisionForest(base_model.ClassificationModel):
    """ Класс, предоставляющий модель решающего леса (ансамбля решающих деревьев)
        Ансамбль генерирует заданное количество деревьев случайной глубины.
    """
    def __init__(self, name="forest", trees=80, min_tree_depth=5, max_tree_depth=25):
        """
        Parameters
        ----------
        name Метка модели
        trees 
            Количество деревьев в лесу
        min_tree_depth 
            Минимальная глубина дерева
        max_tree_depth 
            Максимальная глубина дерева
        """
        self.name = name
        self.trees = trees
        self.min_tree_depth = min_tree_depth
        self.max_tree_depth = max_tree_depth
        self.ensemble = []
    def train(self, training_data, training_labels):
        """
        Обучение леса
        Parameters
        ----------
        training_data 
            Данные для обучения
        training_labels
            Метки
        """
        self.ensemble = [tree.DecisionTreeClassifier(max_depth=randint(self.min_tree_depth, self.max_tree_depth))
                         for _ in range(self.trees)]
        for t in self.ensemble:
            t.fit(training_data, training_labels)
    def save(self):
        pickle.dump(self, open("data/Models/" + self.name, "wb"))
    def load(self):
        return pickle.load(open("data/Models/" + self.name, "rb"))
    def predict(self, data):
        """
        Классификация данных. 
        Parameters
        ----------
        data 
            Данные для классификации
            
        Returns
        -------
            Двумерный массив np.ndarray, в котором по строкам записаны элементы данных, по столбцам --- вероятность 
            принадлежности этого элемента к n-тому классу
        """
        prediction = list(map(lambda x: x.predict(data), self.ensemble))
        def max_p(d):
            """
                Функция классификации для одного элемента данных
            """
            #  Group the predictions and count the occurrences of each class
            pred, count = np.unique(d, return_counts=True)
            z = np.zeros(d.shape)
            # Class probability = number of trees predicting this class / number of trees in the forest
            z[pred] = count / self.trees
            return z
        # Compute the class probabilities for every data element
        return np.apply_along_axis(lambda x: max_p(x), 1, np.dstack(prediction)[0])
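# Worked example for DecisionForest.predict (illustrative): with trees == 4
# and one sample for which the four trees predicted the classes [2, 0, 2, 2],
# np.unique(..., return_counts=True) gives pred == [0, 2], count == [1, 3],
# so max_p assigns probability 1/4 to class 0 and 3/4 to class 2 (all other
# entries of z stay 0).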
class DecisionForestSkLearn:
    """ Класс, предоставляющий модель решающего леса (ансамбля решающих деревьев)
        Ансамбль генерирует заданное количество деревьев случайной глубины.
    """
    def __init__(self, name="forest", trees=80, min_tree_depth=5, max_tree_depth=25):
        """
        Parameters
        ----------
        name Метка модели
        trees 
            Количество деревьев в лесу
        min_tree_depth 
            Минимальная глубина дерева
        max_tree_depth 
            Максимальная глубина дерева
        """
        self.name = name
        self.trees = trees
        self.min_tree_depth = min_tree_depth
        self.max_tree_depth = max_tree_depth
        self.forest = RandomForestClassifier(n_estimators=trees, max_depth=max_tree_depth)
    def train(self, training_data, training_labels):
        """
        Обучение леса
        Parameters
        ----------
        training_data 
            Данные для обучения
        training_labels
            Метки
        """
        self.forest.fit(training_data, training_labels)
    def save(self):
        pickle.dump(self, open("data/Models/" + self.name, "wb"))
    @staticmethod
    def load(name):
        return pickle.load(open("data/Models/" + name, "rb"))
    def predict(self, data):
        """
        Классификация данных. 
        Parameters
        ----------
        data 
            Данные для классификации
        Returns
        -------
            Двумерный массив np.ndarray, в котором по строкам записаны элементы данных, по столбцам --- вероятность 
            принадлежности этого элемента к n-тому классу
        """
        predicted_probabilities = self.forest.predict_proba(data)
        rearranged_probabilities = np.zeros((predicted_probabilities.shape[0], np.max(self.forest.classes_) + 1))
        rearranged_probabilities[:, self.forest.classes_] = predicted_probabilities
        return rearranged_probabilities
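# Worked example for DecisionForestSkLearn.predict (illustrative): if the
# forest was trained on the labels {0, 2, 3}, predict_proba returns three
# columns ordered as classes_ == [0, 2, 3]; the scatter assignment above puts
# them into a 4-column array so that column i always corresponds to label i,
# and the unseen label 1 keeps probability 0.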
if __name__ == '__main__':
    dat = pickle.load(open("data/TrainingData/pickled_generated_sets", 'rb'))
    pca_dat = pickle.load(open("data/TrainingData/pickled_generated_sets_pca", 'rb'))
    dec_forest_pca = DecisionForestSkLearn("forest_pca")
    dec_forest_pca.train(pca_dat["train_data"], pca_dat["train_labels"])
    dec_forest_pca.save()
    print("Training Accuracy: {}".format(base_model.accuracy(dec_forest_pca, pca_dat["train_data"], pca_dat["train_labels"])))
    print("Accuracy:", base_model.accuracy(dec_forest_pca, pca_dat["valid_data"], pca_dat["valid_labels"]))
    print("Precision:", base_model.precision(dec_forest_pca, pca_dat["valid_data"], pca_dat["valid_labels"]))
    print("Recall:", base_model.recall(dec_forest_pca, pca_dat["valid_data"], pca_dat["valid_labels"]))
    print("Confusion Matrix:", base_model.confusion_matrix(dec_forest_pca, pca_dat["valid_data"], pca_dat["valid_labels"]))
    with open("data/Models/" + dec_forest_pca.name + "_props.txt", "w") as f:
        f.write("Training Accuracy: {}\n".format(
            base_model.accuracy(dec_forest_pca, pca_dat["train_data"], pca_dat["train_labels"])))
        f.write("Accuracy: {}\n".format(base_model.accuracy(dec_forest_pca, pca_dat["valid_data"], pca_dat["valid_labels"])))
        f.write("Precision: {}\n".format(base_model.precision(dec_forest_pca, pca_dat["valid_data"], pca_dat["valid_labels"])))
        f.write("Recall: {}\n".format(base_model.recall(dec_forest_pca, pca_dat["valid_data"], pca_dat["valid_labels"])))
        f.write("Confusion Matrix:\n {}\n".format(
            base_model.confusion_matrix(dec_forest_pca, pca_dat["valid_data"], pca_dat["valid_labels"]))) | 
	mit | 
| 
	rvraghav93/scikit-learn | 
	sklearn/ensemble/tests/test_weight_boosting.py | 
	28 | 
	18031 | 
	"""Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_array_equal, assert_array_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal, assert_true, assert_greater
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.base import BaseEstimator
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import weight_boosting
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1]    # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
                                     random_state=rng)
def test_samme_proba():
    # Test the `_samme_proba` helper function.
    # Define some example (bad) `predict_proba` output.
    probs = np.array([[1, 1e-6, 0],
                      [0.19, 0.6, 0.2],
                      [-999, 0.51, 0.5],
                      [1e-6, 1, 1e-9]])
    probs /= np.abs(probs.sum(axis=1))[:, np.newaxis]
    # _samme_proba calls estimator.predict_proba.
    # Make a mock object so I can control what gets returned.
    class MockEstimator(object):
        def predict_proba(self, X):
            assert_array_equal(X.shape, probs.shape)
            return probs
    mock = MockEstimator()
    samme_proba = weight_boosting._samme_proba(mock, 3, np.ones_like(probs))
    assert_array_equal(samme_proba.shape, probs.shape)
    assert_true(np.isfinite(samme_proba).all())
    # Make sure that the correct elements come out as smallest --
    # `_samme_proba` should preserve the ordering in each example.
    assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2])
    assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1])
def test_oneclass_adaboost_proba():
    # Test predict_proba robustness for one class label input.
    # In response to issue #7501
    # https://github.com/scikit-learn/scikit-learn/issues/7501
    y_t = np.ones(len(X))
    clf = AdaBoostClassifier().fit(X, y_t)
    assert_array_equal(clf.predict_proba(X), np.ones((len(X), 1)))
def test_classification_toy():
    # Check classification on a toy dataset.
    for alg in ['SAMME', 'SAMME.R']:
        clf = AdaBoostClassifier(algorithm=alg, random_state=0)
        clf.fit(X, y_class)
        assert_array_equal(clf.predict(T), y_t_class)
        assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
        assert_equal(clf.predict_proba(T).shape, (len(T), 2))
        assert_equal(clf.decision_function(T).shape, (len(T),))
def test_regression_toy():
    # Check classification on a toy dataset.
    clf = AdaBoostRegressor(random_state=0)
    clf.fit(X, y_regr)
    assert_array_equal(clf.predict(T), y_t_regr)
def test_iris():
    # Check consistency on dataset iris.
    classes = np.unique(iris.target)
    clf_samme = prob_samme = None
    for alg in ['SAMME', 'SAMME.R']:
        clf = AdaBoostClassifier(algorithm=alg)
        clf.fit(iris.data, iris.target)
        assert_array_equal(classes, clf.classes_)
        proba = clf.predict_proba(iris.data)
        if alg == "SAMME":
            clf_samme = clf
            prob_samme = proba
        assert_equal(proba.shape[1], len(classes))
        assert_equal(clf.decision_function(iris.data).shape[1], len(classes))
        score = clf.score(iris.data, iris.target)
        assert score > 0.9, "Failed with algorithm %s and score = %f" % \
            (alg, score)
        # Check we used multiple estimators
        assert_greater(len(clf.estimators_), 1)
        # Check for distinct random states (see issue #7408)
        assert_equal(len(set(est.random_state for est in clf.estimators_)),
                     len(clf.estimators_))
    # Somewhat hacky regression test: prior to
    # ae7adc880d624615a34bafdb1d75ef67051b8200,
    # predict_proba returned SAMME.R values for SAMME.
    clf_samme.algorithm = "SAMME.R"
    assert_array_less(0,
                      np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
    # Check consistency on dataset boston house prices.
    reg = AdaBoostRegressor(random_state=0)
    reg.fit(boston.data, boston.target)
    score = reg.score(boston.data, boston.target)
    assert score > 0.85
    # Check we used multiple estimators
    assert_true(len(reg.estimators_) > 1)
    # Check for distinct random states (see issue #7408)
    assert_equal(len(set(est.random_state for est in reg.estimators_)),
                 len(reg.estimators_))
def test_staged_predict():
    # Check staged predictions.
    rng = np.random.RandomState(0)
    iris_weights = rng.randint(10, size=iris.target.shape)
    boston_weights = rng.randint(10, size=boston.target.shape)
    # AdaBoost classification
    for alg in ['SAMME', 'SAMME.R']:
        clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
        clf.fit(iris.data, iris.target, sample_weight=iris_weights)
        predictions = clf.predict(iris.data)
        staged_predictions = [p for p in clf.staged_predict(iris.data)]
        proba = clf.predict_proba(iris.data)
        staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
        score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
        staged_scores = [
            s for s in clf.staged_score(
                iris.data, iris.target, sample_weight=iris_weights)]
        assert_equal(len(staged_predictions), 10)
        assert_array_almost_equal(predictions, staged_predictions[-1])
        assert_equal(len(staged_probas), 10)
        assert_array_almost_equal(proba, staged_probas[-1])
        assert_equal(len(staged_scores), 10)
        assert_array_almost_equal(score, staged_scores[-1])
    # AdaBoost regression
    clf = AdaBoostRegressor(n_estimators=10, random_state=0)
    clf.fit(boston.data, boston.target, sample_weight=boston_weights)
    predictions = clf.predict(boston.data)
    staged_predictions = [p for p in clf.staged_predict(boston.data)]
    score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
    staged_scores = [
        s for s in clf.staged_score(
            boston.data, boston.target, sample_weight=boston_weights)]
    assert_equal(len(staged_predictions), 10)
    assert_array_almost_equal(predictions, staged_predictions[-1])
    assert_equal(len(staged_scores), 10)
    assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
    # Check that base trees can be grid-searched.
    # AdaBoost classification
    boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
    parameters = {'n_estimators': (1, 2),
                  'base_estimator__max_depth': (1, 2),
                  'algorithm': ('SAMME', 'SAMME.R')}
    clf = GridSearchCV(boost, parameters)
    clf.fit(iris.data, iris.target)
    # AdaBoost regression
    boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
                              random_state=0)
    parameters = {'n_estimators': (1, 2),
                  'base_estimator__max_depth': (1, 2)}
    clf = GridSearchCV(boost, parameters)
    clf.fit(boston.data, boston.target)
def test_pickle():
    # Check pickability.
    import pickle
    # Adaboost classifier
    for alg in ['SAMME', 'SAMME.R']:
        obj = AdaBoostClassifier(algorithm=alg)
        obj.fit(iris.data, iris.target)
        score = obj.score(iris.data, iris.target)
        s = pickle.dumps(obj)
        obj2 = pickle.loads(s)
        assert_equal(type(obj2), obj.__class__)
        score2 = obj2.score(iris.data, iris.target)
        assert_equal(score, score2)
    # Adaboost regressor
    obj = AdaBoostRegressor(random_state=0)
    obj.fit(boston.data, boston.target)
    score = obj.score(boston.data, boston.target)
    s = pickle.dumps(obj)
    obj2 = pickle.loads(s)
    assert_equal(type(obj2), obj.__class__)
    score2 = obj2.score(boston.data, boston.target)
    assert_equal(score, score2)
def test_importances():
    # Check variable importances.
    X, y = datasets.make_classification(n_samples=2000,
                                        n_features=10,
                                        n_informative=3,
                                        n_redundant=0,
                                        n_repeated=0,
                                        shuffle=False,
                                        random_state=1)
    for alg in ['SAMME', 'SAMME.R']:
        clf = AdaBoostClassifier(algorithm=alg)
        clf.fit(X, y)
        importances = clf.feature_importances_
        assert_equal(importances.shape[0], 10)
        assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
                     True)
def test_error():
    # Test that it gives proper exception on deficient input.
    assert_raises(ValueError,
                  AdaBoostClassifier(learning_rate=-1).fit,
                  X, y_class)
    assert_raises(ValueError,
                  AdaBoostClassifier(algorithm="foo").fit,
                  X, y_class)
    assert_raises(ValueError,
                  AdaBoostClassifier().fit,
                  X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
    # Test different base estimators.
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.svm import SVC
    # XXX doesn't work with y_class because RF doesn't support classes_
    # Shouldn't AdaBoost run a LabelBinarizer?
    clf = AdaBoostClassifier(RandomForestClassifier())
    clf.fit(X, y_regr)
    clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
    clf.fit(X, y_class)
    from sklearn.ensemble import RandomForestRegressor
    from sklearn.svm import SVR
    clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
    clf.fit(X, y_regr)
    clf = AdaBoostRegressor(SVR(), random_state=0)
    clf.fit(X, y_regr)
    # Check that an empty discrete ensemble fails in fit, not predict.
    X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
    y_fail = ["foo", "bar", 1, 2]
    clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
    assert_raises_regexp(ValueError, "worse than random",
                         clf.fit, X_fail, y_fail)
def test_sample_weight_missing():
    from sklearn.linear_model import LogisticRegression
    from sklearn.cluster import KMeans
    clf = AdaBoostClassifier(KMeans(), algorithm="SAMME")
    assert_raises(ValueError, clf.fit, X, y_regr)
    clf = AdaBoostRegressor(KMeans())
    assert_raises(ValueError, clf.fit, X, y_regr)
def test_sparse_classification():
    # Check classification with sparse input.
    class CustomSVC(SVC):
        """SVC variant that records the nature of the training set."""
        def fit(self, X, y, sample_weight=None):
            """Modification on fit caries data type for later verification."""
            super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
            self.data_type_ = type(X)
            return self
    X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
                                                   n_features=5,
                                                   random_state=42)
    # Flatten y to a 1d array
    y = np.ravel(y)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
                          dok_matrix]:
        X_train_sparse = sparse_format(X_train)
        X_test_sparse = sparse_format(X_test)
        # Trained on sparse format
        sparse_classifier = AdaBoostClassifier(
            base_estimator=CustomSVC(probability=True),
            random_state=1,
            algorithm="SAMME"
        ).fit(X_train_sparse, y_train)
        # Trained on dense format
        dense_classifier = AdaBoostClassifier(
            base_estimator=CustomSVC(probability=True),
            random_state=1,
            algorithm="SAMME"
        ).fit(X_train, y_train)
        # predict
        sparse_results = sparse_classifier.predict(X_test_sparse)
        dense_results = dense_classifier.predict(X_test)
        assert_array_equal(sparse_results, dense_results)
        # decision_function
        sparse_results = sparse_classifier.decision_function(X_test_sparse)
        dense_results = dense_classifier.decision_function(X_test)
        assert_array_equal(sparse_results, dense_results)
        # predict_log_proba
        sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
        dense_results = dense_classifier.predict_log_proba(X_test)
        assert_array_equal(sparse_results, dense_results)
        # predict_proba
        sparse_results = sparse_classifier.predict_proba(X_test_sparse)
        dense_results = dense_classifier.predict_proba(X_test)
        assert_array_equal(sparse_results, dense_results)
        # score
        sparse_results = sparse_classifier.score(X_test_sparse, y_test)
        dense_results = dense_classifier.score(X_test, y_test)
        assert_array_equal(sparse_results, dense_results)
        # staged_decision_function
        sparse_results = sparse_classifier.staged_decision_function(
            X_test_sparse)
        dense_results = dense_classifier.staged_decision_function(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
        # staged_predict
        sparse_results = sparse_classifier.staged_predict(X_test_sparse)
        dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
        # staged_predict_proba
        sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
        dense_results = dense_classifier.staged_predict_proba(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
        # staged_score
        sparse_results = sparse_classifier.staged_score(X_test_sparse,
                                                        y_test)
        dense_results = dense_classifier.staged_score(X_test, y_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
        # Verify sparsity of data is maintained during training
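        # (Comment added here, not in the original test: the assertion below
        #  checks that even COO/LIL/DOK inputs reach the base estimators as CSR
        #  or CSC, i.e. sparsity is preserved but the format may be normalised.)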
        types = [i.data_type_ for i in sparse_classifier.estimators_]
        assert all([(t == csc_matrix or t == csr_matrix)
                   for t in types])
def test_sparse_regression():
    # Check regression with sparse input.
    class CustomSVR(SVR):
        """SVR variant that records the nature of the training set."""
        def fit(self, X, y, sample_weight=None):
            """Modification on fit caries data type for later verification."""
            super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)
            self.data_type_ = type(X)
            return self
    X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
                                    random_state=42)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
                          dok_matrix]:
        X_train_sparse = sparse_format(X_train)
        X_test_sparse = sparse_format(X_test)
        # Trained on sparse format
        sparse_classifier = AdaBoostRegressor(
            base_estimator=CustomSVR(),
            random_state=1
        ).fit(X_train_sparse, y_train)
        # Trained on dense format
        dense_classifier = AdaBoostRegressor(
            base_estimator=CustomSVR(),
            random_state=1
        ).fit(X_train, y_train)
        # predict
        sparse_results = sparse_classifier.predict(X_test_sparse)
        dense_results = dense_classifier.predict(X_test)
        assert_array_equal(sparse_results, dense_results)
        # staged_predict
        sparse_results = sparse_classifier.staged_predict(X_test_sparse)
        dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
        types = [i.data_type_ for i in sparse_classifier.estimators_]
        assert all([(t == csc_matrix or t == csr_matrix)
                   for t in types])
def test_sample_weight_adaboost_regressor():
    """
    AdaBoostRegressor should work without sample_weights in the base estimator
    The random weighted sampling is done internally in the _boost method in
    AdaBoostRegressor.
    """
    class DummyEstimator(BaseEstimator):
        def fit(self, X, y):
            pass
        def predict(self, X):
            return np.zeros(X.shape[0])
    boost = AdaBoostRegressor(DummyEstimator(), n_estimators=3)
    boost.fit(X, y_regr)
    assert_equal(len(boost.estimator_weights_), len(boost.estimator_errors_))
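# Hedged sketch (not part of the original test suite): the docstring above notes
# that AdaBoostRegressor resamples the training set internally using the boosting
# weights, so the base estimator never needs a `sample_weight` argument. The
# helper below mimics that idea with a plain weighted bootstrap; the name
# `_weighted_bootstrap` is ours and not a scikit-learn API.
def _weighted_bootstrap(X, y, sample_weight, random_state=0):
    """Draw a bootstrap sample of (X, y) with probabilities ~ sample_weight."""
    rng = np.random.RandomState(random_state)
    sample_weight = np.asarray(sample_weight, dtype=float)
    proba = sample_weight / sample_weight.sum()
    # Indices are drawn with replacement, proportionally to the boosting weights.
    idx = rng.choice(len(y), size=len(y), replace=True, p=proba)
    return np.asarray(X)[idx], np.asarray(y)[idx]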
 | 
	bsd-3-clause | 
| 
	xyficu/rts2 | 
	scripts/shiftstore.py | 
	1 | 
	9390 | 
	#!/usr/bin/python
#
# Shift-store focusing.
#
# You will need: scipy matplotlib sextractor
# This should work on Debian/ubuntu:
# sudo apt-get install python-matplotlib python-scipy python-pyfits sextractor
#
# If you would like to see sextractor results, get DS9 and pyds9:
#
# http://hea-www.harvard.edu/saord/ds9/
#
# Please be aware that the current sextractor Ubuntu packages do not work
# properly. The best workaround is to install the package and then overwrite
# the sextractor binary with one compiled from source (so you will have access
# to the sextractor configuration files which the program assumes).
#
# (C) 2011  Petr Kubanek, Institute of Physics <[email protected]>
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59 Temple
# Place - Suite 330, Boston, MA  02111-1307, USA.
import sextractor
import sys
import focusing
import numpy
from scipy import array
class ShiftStore:
	"""
	Shift-store focusing. Works in the following steps
	    - extract sources (with sextractor)
	    - order sources by magnitude/flux
	    - try to find row for the brightests source
	      - filter all sources too far away in X
	      - try to find sequence, assuming selected source can be any in
		the sequence
	    - run standard fit on images (from focusing.py) Parameters
	      governing the algorithm are specified in ShiftStore constructor.
	Please beware - when using paritial match, you need to make sure the shifts will allow
	unique identification of the sources. It is your responsibility to make sure they allow so,
	the algorithm does not check for this.
	"""
	def __init__(self,shifts=[100,50,50,50,50,50,50,50],horizontal=True):
		"""
		@param shifts      shifts performed between exposures, in pixels. Length of this array is equal to ((number of sources in a row) - 1).
		@param horizontal  search for horizontal trails (parallel to Y axis). If set to False, search for vertical trails (along X axis).
		"""
		self.horizontal = horizontal
		self.objects = None
		self.sequences = []
		self.xsep = self.ysep = 5
		self.shifts = shifts
		self.focpos = range(0,len(self.shifts) + 1)
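	# Hedged illustration (ours, not part of the original script): source
	# number i of a trail is expected at the cumulative offset sum(shifts[:i])
	# from the first exposure, e.g.
	#
	#	>>> s = ShiftStore(shifts=[100, 50, 50])
	#	>>> [sum(s.shifts[:i]) for i in range(len(s.shifts) + 1)]
	#	[0, 100, 150, 200]
	#
	# testObjects() below walks this ladder and accepts a candidate star when
	# its coordinate falls within self.ysep pixels of the expected position.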
	def testObjects(self,x,can,i,otherc,partial_len=None):
		"""
		Test if there is sequence among candidates matching expected
		shifts.  Candidates are stars not far in X coordinate from
		source (here we assume we are looking for vertical lines on
		image; horizontal lines are searched just by changing otherc parameter
		to the other axis index). Those are ordered by y and searched for stars at
		expected positions. If multiple stars fall inside this box,
		then the one closest in magnitude/brightness estimate is
		selected.
		@param x       star sextractor row; ID, X, Y and a brightness estimate are expected (in that order) and used
		@param can     catalogue of candidate stars
		@param i       expected star index in shift pattern
		@param otherc  index of other axis. Either 2 or 1 (for vertical or horizontal shifts)
		@param partial_len allow partial matches (e.g. where stars are missing)
		"""
		ret = []
		# here we assume y is otherc (either the second or third member), and some brightness estimate is the fourth member of x
		yi = x[otherc]  # expected other axis position
		xb = x[3]  # expected brightness
		# calculate first expected shift..
		for j in range(0,i):
			yi -= self.shifts[j]
		# go through list, check for candidate stars..
		cs = None
		sh = 0
		pl = 0  # number of partial matches..
		while sh <= len(self.shifts):
			# now iterate through candidates
			for j in range(0,len(can)):
			  	# if the current shift index is equal to expected source position...
				if sh == i:
					# append x to sequence, and increase sh (and expected Y position)
					try:
						yi += self.shifts[sh]
					except IndexError,ie:
						break
					sh += 1
					ret.append(x)
			  	# get close enough..
				if abs(can[j][otherc] - yi) < self.ysep:
					# find all other sources in vicinity..
			  		k = None
					cs = can[j] # _c_andidate _s_tar
					for k in range(j+1,len(can)):
					  	# something close enough..
						if abs(can[k][otherc] - yi) < self.ysep:
							if abs(can[k][3] - xb) < abs (cs[3] - xb):
								cs = can[k]
						else:
							continue
					# append candidate star
					ret.append(cs)
					if k is not None:
						j = k
					# don't exit if the algorithm makes it to the end of shifts
					try:
						yi += self.shifts[sh]
					except IndexError,ie:
						break
					sh += 1
			# no candidate exists
			if partial_len is None:
				if len(ret) == len(self.shifts) + 1:
					return ret
				return None
			else:
				if len(ret) == len(self.shifts) + 1:
					return ret
			# insert dummy position..
			if otherc == 2:
				ret.append([None,x[1],yi,None])
			else:
				ret.append([None,yi,x[2],None])
			pl += 1
			try:
				yi += self.shifts[sh]
			except IndexError,ie:
				break
			sh += 1
		# partial_len is not None
		if (len(self.shifts) + 1 - pl) >= partial_len:
			return ret
		return None
	def findRowObjects(self,x,partial_len=None):
		"""
		Find objects in a row. Search for a sequence of stars where x fits
		as one member. Return the sequence, or None if the sequence
		cannot be found."""
		xid = x[0]   # running number
		searchc = 1
		otherc = 2
		if not(self.horizontal):
			searchc = 2
			otherc = 1
		xcor = x[searchc]
		can = []     # candidate stars
		for y in self.objects:
			if xid != y[0] and abs(xcor - y[searchc]) < self.xsep:
				can.append(y)
		# sort by Y axis..
		can.sort(cmp=lambda x,y: cmp(x[otherc],y[otherc]))
		# assume selected object is one in shift sequence
		# place it at any possible position in shift sequence, and test if the sequence can be found
		max_ret = []
		for i in range(0,len(self.shifts) + 1):
			# test if sequence can be found..
			ret = self.testObjects(x,can,i,otherc,partial_len)
			# and if it is found, return it
			if ret is not None:
				if partial_len is None:
					return ret
				elif len(ret) > len(max_ret):
					max_ret = ret
		if partial_len is not None and len(max_ret) >= partial_len:
			return max_ret
		# cannot find the sequence, so return None
		return None
	def runOnImage(self,fn,partial_len=None,interactive=False,sequences_num=15,mag_limit_num=7):
		"""
		Run the algorithm on an image. Extract sources with sextractor,
		pass them through the sequence-finding algorithm, and fit the focusing position.
		"""
		c = sextractor.Sextractor(['NUMBER','X_IMAGE','Y_IMAGE','MAG_BEST','FLAGS','CLASS_STAR','FWHM_IMAGE','A_IMAGE','B_IMAGE'],sexpath='/usr/bin/sextractor',sexconfig='/usr/share/sextractor/default.sex',starnnw='/usr/share/sextractor/default.nnw')
		c.runSExtractor(fn)
		self.objects = c.objects
		# sort by flux/brightness
		self.objects.sort(cmp=lambda x,y:cmp(x[3],y[3]))
		print 'from {0} extracted {1} sources'.format(fn,len(c.objects))
		d = None
		if interactive:
	  		d = ds9()
			# display in ds9
			d.set('file {0}'.format(fn))
			for x in self.objects:
				d.set('regions','image; point {0} {1} # point=x 5 color=red'.format(x[1],x[2]))
		sequences = []
		usednum = []
		for x in self.objects:
			# do not examine already used objects..
			if x[0] in usednum:
				continue
		  	# find object in a row..
			b = self.findRowObjects(x,partial_len)
			if b is None:
				continue
			sequences.append(b)
			if d:
				d.set('regions select none')
				d.set('regions','image; circle {0} {1} 20 # color=yellow tag = sel'.format(x[1],x[2]))
			for obj in b:
				usednum.append(obj[0])
			if d:
				print 'best mag: ',x[3]
				d.set('regions select group sel')
				d.set('regions delete select')
				for obj in b:
					if obj[0] is None:
						d.set('regions','image; point {0} {1} # point=boxcircle 15 color = red'.format(obj[1],obj[2]))
					else:
						d.set('regions','image; circle {0} {1} 10 # color = green'.format(obj[1],obj[2]))
			if len(sequences) > sequences_num:
				break
		# if enough sequences were found, process them and try to fit results
		if len(sequences) > sequences_num:
			# get median of FWHM from each sequence
			fwhm=[]
			for x in range(0,len(self.shifts) + 1):
				fa = []
				for o in sequences:
					if o[x][0] is not None:
						fa.append(o[x][6])
				# if the number of FWHM estimates is greater than the limit..
				if len(fa) >= mag_limit_num:
					m = numpy.median(fa)
					fwhm.append(m)
				else:
					if interactive:
						print 'removing focuser position, because not enough matches were found',self.focpos[x]
					self.focpos.remove(self.focpos[x])
			# fit it
			foc = focusing.Focusing()
			res,ftype = foc.doFitOnArrays(fwhm,self.focpos,focusing.H2)
			if interactive:
				print res,ftype
				foc.plotFit(res,ftype)
if __name__ == "__main__":
  	# test method
	from ds9 import *
	# full match
	sc = ShiftStore()
  	for fn in sys.argv[1:]:
		sc.runOnImage(fn,None,True)
	# partial match
	#sc = ShiftStore([25,75,50,50])
  	#for fn in sys.argv[1:]:
	#	sc.runOnImage(fn,3,True)
 | 
	gpl-2.0 | 
| 
	astromme/classify-handwritten-characters | 
	points_shape_plotter.py | 
	1 | 
	1566 | 
	#!/usr/bin/env python3
'''
This python program reads in all the source character data, determining the
height-to-width ratio of each character and plots the ratios in a histogram.
This is used to help determine the bucket sizes to be used in the main program.
'''
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import numpy as np
import sys
from utils import read_pot_in_directory
num_bins = 50
def main():
    h_w_ratios = []
    count = 0
    for strokes, tagcode in read_pot_in_directory('OLHWDB1.1tst_pot/'):
        xmin = sys.maxsize
        ymin = sys.maxsize
        xmax = -sys.maxsize
        ymax = -sys.maxsize
        for stroke in strokes:
            for point in stroke:
                if len(point) < 2:
                    continue
                x, y = point
                xmin = min(xmin, x)
                ymin = min(ymin, y)
                xmax = max(xmax, x)
                ymax = max(ymax, y)
        h_w_ratios.append((ymax-ymin) / (xmax-xmin))
        count += 1
        if count % 1000 == 0:
            print("processed {} samples".format(count))
        if count > 50000:
            break
    mu = np.mean(h_w_ratios)
    sigma = np.std(h_w_ratios)
    x = np.array(h_w_ratios)
    n, bins, patches = plt.hist(x,  num_bins, facecolor='green', alpha=0.5)
    y = mlab.normpdf(bins, mu, sigma)
    plt.plot(bins, y, 'r--')
    plt.title("Height to Width Ratios of Sequences")
    plt.xlabel("Height to Width Ratio")
    plt.ylabel("Number of Sequences")
    plt.xlim(0,5)
    plt.show()
if __name__=="__main__":
    main()
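# Hedged follow-up (ours, not part of the original script): once the ratios are
# collected, one simple way to turn the histogram into bucket boundaries for the
# main program is to cut the distribution at equal-count quantiles, e.g.
#
#     edges = np.quantile(h_w_ratios, np.linspace(0.0, 1.0, num_buckets + 1))
#
# where `num_buckets` is whatever bucket count the main program expects.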
 | 
	mit | 
| 
	kratzert/RRMPG | 
	test/test_models.py | 
	1 | 
	10887 | 
	# -*- coding: utf-8 -*-
# This file is part of RRMPG.
#
# RRMPG is free software with the aim to provide a playground for experiments
# with hydrological rainfall-runoff-models while achieving competitive
# performance results.
#
# You should have received a copy of the MIT License along with RRMPG. If not,
# see <https://opensource.org/licenses/MIT>
import unittest
import os
import numpy as np
import pandas as pd
from rrmpg.models import ABCModel, HBVEdu, GR4J, Cemaneige, CemaneigeGR4J
from rrmpg.models.basemodel import BaseModel
class TestBaseModelFunctions(unittest.TestCase):
    """Test all functions implemented in the BaseModel.
    
    These are not tested for every model; one model serves as an example for
    all of them.
    """
    
    def setUp(self):
        self.model = ABCModel()
        self.param_names = ['a', 'b', 'c']
        self.default_bounds =  {'a': (0, 1),
                                'b': (0, 1),
                                'c': (0, 1)}
        self.dtype = np.dtype([('a', np.float64),
                               ('b', np.float64),
                               ('c', np.float64)])
        unittest.TestCase.setUp(self)
        
    def test_get_parameter_names(self):
        self.assertEqual(self.model.get_parameter_names(), self.param_names)
        
    def test_get_params(self):
        params = self.model.get_params()
        for param in self.param_names:
            msg = "Failed, because '{}' not in '{}'".format(param, params.keys)
            self.assertIn(param, params, msg)
            
    def test_get_default_bounds(self):
        bounds = self.model.get_default_bounds()
        self.assertDictEqual(bounds, self.default_bounds)
        
    def test_get_dtype(self):
        self.assertEqual(self.dtype, self.model.get_dtype())
        
    def test_random_params_in_default_bounds(self):
        params = self.model.get_random_params()
        bnds = self.default_bounds
        
        for p in self.param_names:
            msg = ["Failed for param: '{}', which has a ".format(p),
                   "a value of {}, but lower bounds ".format(params[p][0]),
                   "is {} and upper bound {}.".format(bnds[p][0], bnds[p][1])]
            self.assertTrue(bnds[p][0] <= params[p][0] <= bnds[p][1], 
                            "".join(msg))
    
    def test_get_multiple_random_param_sets(self):
        num = 24
        params = self.model.get_random_params(num=num)
        self.assertEqual(num, params.size)
              
    def test_set_params(self):
        rand_params = self.model.get_random_params()
        # convert rand_params array to dict:
        params = {}
        for p in self.param_names:
            params[p] = rand_params[p][0]
        self.model.set_params(params)
        self.assertDictEqual(params, self.model.get_params()) 
        
        
class TestABCModel(unittest.TestCase):
    """Test ABC-Model specific functions."""
    
    def setUp(self):
        self.model = ABCModel()
        unittest.TestCase.setUp(self)
        
    def test_model_subclass_of_basemodel(self):
        self.assertTrue(issubclass(self.model.__class__, BaseModel))
        
    def test_simulate_zero_rain(self):
        qsim = self.model.simulate(np.zeros(100))
        self.assertEqual(np.sum(qsim), 0)
        
    def test_simulate_negative_rain(self):
        with self.assertRaises(ValueError) as context:
            self.model.simulate([-1,1,1])
        expr = ("In the precipitation array are negative values." in
                str(context.exception)) 
        self.assertTrue(expr)
class TestHBVEdu(unittest.TestCase):
    """Test HBVEdu specific functions."""
    
    def setUp(self):
        # parameter set see https://github.com/kratzert/RRMPG/issues/10
        params = {'T_t': 0,
                  'DD': 4.25,
                  'FC': 177.1,
                  'Beta': 2.35,
                  'C': 0.02,
                  'PWP': 105.89,
                  'K_0': 0.05,
                  'K_1': 0.03,
                  'K_2': 0.02,
                  'K_p': 0.05,
                  'L': 4.87}
        self.model = HBVEdu(params=params)
        
    def test_model_subclass_of_basemodel(self):
        self.assertTrue(issubclass(self.model.__class__, BaseModel))    
        
    def test_simulate_zero_rain(self):
        qsim = self.model.simulate(temp=np.random.uniform(-15,25,100),
                                   prec=np.zeros(100),
                                   month=np.random.randint(1,12,100),
                                   PE_m=np.random.uniform(0,4,12),
                                   T_m=np.random.uniform(-5,15,12))
        self.assertEqual(np.sum(qsim), 0)
        
    def test_simulate_negative_rain(self):
        with self.assertRaises(ValueError) as context:
            self.model.simulate(temp=np.random.uniform(-15,25,100),
                                prec=np.arange(-1,99),
                                month=np.random.randint(1,12,100),
                                PE_m=np.random.uniform(0,4,12),
                                T_m=np.random.uniform(-5,15,12))
        expr = ("In the precipitation array are negative values." in
                str(context.exception)) 
        self.assertTrue(expr)
        
    def test_simulated_against_validation_data(self):
        test_dir = os.path.dirname(__file__)
        daily_file = os.path.join(test_dir, 'data', 'hbv_daily_inputs.txt')
        daily_inputs = pd.read_csv(daily_file, sep='\t',
                                   names=['date', 'month', 'temp', 'prec'])
        monthly_file = os.path.join(test_dir, 'data', 'hbv_monthly_inputs.txt')
        monthly_inputs = pd.read_csv(monthly_file, sep=' ', 
                                     names=['temp', 'not_needed', 'evap'])
        
        qsim_matlab_file = os.path.join(test_dir, 'data', 'hbv_qsim.csv')
        qsim_matlab = pd.read_csv(qsim_matlab_file, header=None, 
                                  names=['qsim'])
        # fix parameters from provided MATLAB code from HBV paper
        area = 410
        soil_init = 100
        s1_init = 3
        s2_init = 10
        
        qsim = self.model.simulate(temp=daily_inputs.temp, 
                                   prec=daily_inputs.prec, 
                                   month=daily_inputs.month, 
                                   PE_m=monthly_inputs.evap, 
                                   T_m=monthly_inputs.temp, 
                                   snow_init=0, 
                                   soil_init=soil_init, 
                                   s1_init=s1_init, 
                                   s2_init=s2_init, 
                                   return_storage=False)
        
        # rescale qsim from mm/d to m³/s
        qsim = (qsim * area * 1000) / (24*60*60)
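        # (Comment added here: 1 mm of runoff over 1 km² equals 1000 m³, and
        #  24*60*60 converts the per-day volume to a per-second flow, so `area`
        #  is assumed to be the catchment area in km².)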
       
        self.assertTrue(np.allclose(qsim.flatten(), qsim_matlab.qsim))
        
    
class TestGR4J(unittest.TestCase):
    """Test the GR4J Model.
    
    This model is validated against the Excel implementation provided by the 
    model authors.
    """
    
    def setUp(self):
        # parameters are taken from the excel sheet
        params = {'x1': np.exp(5.76865628090826), 
                  'x2': np.sinh(1.61742503661094), 
                  'x3': np.exp(4.24316129943456), 
                  'x4': np.exp(-0.117506799276908)+0.5}
        self.model = GR4J(params=params)
        
    def test_model_subclass_of_basemodel(self):
        self.assertTrue(issubclass(self.model.__class__, BaseModel))  
        
    def test_simulate_zero_rain(self):
        qsim = self.model.simulate(prec=np.zeros(100),
                                   etp=np.random.uniform(0,3,100),
                                   s_init=0, r_init=0)
        self.assertEqual(np.sum(qsim), 0)
        
    def test_simulate_compare_against_excel(self):
        # intial states are taken from excel
        s_init = 0.6
        r_init = 0.7
        test_dir = os.path.dirname(__file__)
        data_file = os.path.join(test_dir, 'data', 'gr4j_example_data.csv')
        data = pd.read_csv(data_file, sep=',')
        qsim = self.model.simulate(data.prec, data.etp, s_init=s_init, 
                                   r_init=r_init, return_storage=False)
        self.assertTrue(np.allclose(qsim.flatten(), data.qsim_excel))
        
class TestCemaneige(unittest.TestCase):
    """Test the Cemaneige snow routine.
    
    This model is validated against the Excel implementation provided by the 
    model authors.
    """
    
    def setUp(self):
        # parameters are taken from the excel sheet
        params = {'CTG': 0.25, 'Kf': 3.74}
        self.model = Cemaneige(params=params)
        
    def test_model_subclass_of_basemodel(self):
        self.assertTrue(issubclass(self.model.__class__, BaseModel))  
        
    def test_simulate_compare_against_excel(self):
        test_dir = os.path.dirname(__file__)
        data_file = os.path.join(test_dir, 'data', 
                                 'cemaneige_validation_data.csv')
        df = pd.read_csv(data_file, sep=';')
        qsim = self.model.simulate(df.precipitation, df.mean_temp, df.min_temp, 
                                   df.max_temp, met_station_height=495, 
                                   altitudes=[550, 620, 700, 785, 920])
        self.assertTrue(np.allclose(qsim.flatten(), 
                                    df.liquid_outflow.to_numpy()))
        
class TestCemaneigeGR4J(unittest.TestCase):
    """Test the Cemaneige + GR4J couple model.
    
    This model is validated against the Excel implementation provided by the 
    model authors.
    """
    
    def setUp(self):
        # parameters are taken from the excel sheet
        params = {'CTG': 0.25, 
                  'Kf': 3.74,
                  'x1': np.exp(5.25483021675164),
                  'x2': np.sinh(1.58209470624126),
                  'x3': np.exp(4.3853181982412),
                  'x4': np.exp(0.954786342674327)+0.5}
        self.model = CemaneigeGR4J(params=params)
        
    def test_model_subclass_of_basemodel(self):
        self.assertTrue(issubclass(self.model.__class__, BaseModel))  
        
    def test_simulate_compare_against_excel(self):
        test_dir = os.path.dirname(__file__)
        data_file = os.path.join(test_dir, 'data', 
                                 'cemaneigegr4j_validation_data.csv')
        df = pd.read_csv(data_file, sep=';', index_col=0)
        qsim = self.model.simulate(df.precipitation, df.mean_temp, df.min_temp, 
                                   df.max_temp, df.pe, met_station_height=495, 
                                   altitudes=[550, 620, 700, 785, 920],
                                   s_init=0.6, r_init=0.7)
        self.assertTrue(np.allclose(qsim.flatten(), 
                                    df.qsim.to_numpy())) | 
	mit | 
| 
	vdt/SimpleCV | 
	SimpleCV/Features/Features.py | 
	1 | 
	68872 | 
	# SimpleCV Feature library
#
# Tools return basic features in feature sets
#load system libraries
from SimpleCV.base import *
from SimpleCV.Color import *
import copy
class FeatureSet(list):
    """
    **SUMMARY**
    FeatureSet is a class extended from Python's list which has special functions so that it is useful for handling feature metadata on an image.
    In general, functions dealing with attributes will return numpy arrays, and functions dealing with sorting or filtering will return new FeatureSets.
    **EXAMPLE**
    >>> image = Image("/path/to/image.png")
    >>> lines = image.findLines()  #lines are the feature set
    >>> lines.draw()
    >>> lines.x()
    >>> lines.crop()
    """
    def __getitem__(self,key):
        """
        **SUMMARY**
        Returns a FeatureSet when sliced. Previously used to
        return list. Now it is possible to use FeatureSet member
        functions on sub-lists
        """
        if type(key) is types.SliceType: #Or can use 'try:' for speed
            return FeatureSet(list.__getitem__(self, key))
        else:
            return list.__getitem__(self,key)
    def __getslice__(self, i, j):
        """
        Deprecated since python 2.0, now using __getitem__
        """
        return self.__getitem__(slice(i,j))
    def count(self):
        '''
        This function returns the length / count of the all the items in the FeatureSet
        '''
        return len(self)
    def draw(self, color = Color.GREEN,width=1, autocolor = False):
        """
        **SUMMARY**
        Call the draw() method on each feature in the FeatureSet.
        **PARAMETERS**
        
        * *color* - The color to draw the object. Either an BGR tuple or a member of the :py:class:`Color` class.
        * *width* - The width to draw the feature in pixels. A value of -1 usually indicates a filled region.
        * *autocolor* - If true a color is randomly selected for each feature.
        
        **RETURNS**
        Nada. Nothing. Zilch.
        **EXAMPLE**
        >>> img = Image("lenna")
        >>> feats = img.findBlobs()
        >>> feats.draw(color=Color.PUCE, width=3)
        >>> img.show()
        """
        for f in self:
            if(autocolor):
                color = Color().getRandom()
            f.draw(color=color,width=width)
    def show(self, color = Color.GREEN, autocolor = False,width=1):
        """
        **SUMMARY**
        This function will automatically draw the features on the image and show it.
        It is basically a shortcut function for development and is equivalent to the example below.
        **PARAMETERS**
        * *color* - The color to draw the object. Either an BGR tuple or a member of the :py:class:`Color` class.
        * *width* - The width to draw the feature in pixels. A value of -1 usually indicates a filled region.
        * *autocolor* - If true a color is randomly selected for each feature.
        **RETURNS**
        Nada. Nothing. Zilch.
        **EXAMPLE**
        >>> img = Image("logo")
        >>> feat = img.findBlobs()
        >>> if feat: feat.draw()
        >>> img.show()
        """
        self.draw(color, width, autocolor)
        self[-1].image.show()
    def reassignImage(self, newImg):
        """
        **SUMMARY**
        Return a new featureset where the features are assigned to a new image.
        **PARAMETERS**
        * *img* - the new image to which to assign the feature.
        .. Warning::
          THIS DOES NOT PERFORM A SIZE CHECK. IF YOUR NEW IMAGE IS NOT THE EXACT SAME SIZE YOU WILL CERTAINLY CAUSE ERRORS.
        **EXAMPLE**
        >>> img = Image("lenna")
        >>> img2 = img.invert()
        >>> l = img.findLines()
        >>> l2 = l.reassignImage(img2)
        >>> l2.show()
        """
        retVal = FeatureSet()
        for i in self:
            retVal.append(i.reassign(newImg))
        return retVal
    def x(self):
        """
        **SUMMARY**
        Returns a numpy array of the x (horizontal) coordinate of each feature.
        **RETURNS**
        A numpy array.
        **EXAMPLE**
        >>> img = Image("lenna")
        >>> feats = img.findBlobs()
        >>> xs = feats.x()
        >>> print xs
        """
        return np.array([f.x for f in self])
    def y(self):
        """
        **SUMMARY**
        Returns a numpy array of the y (vertical) coordinate of each feature.
        **RETURNS**
        A numpy array.
        **EXAMPLE**
        >>> img = Image("lenna")
        >>> feats = img.findBlobs()
        >>> xs = feats.y()
        >>> print xs
        """
        return np.array([f.y for f in self])
    def coordinates(self):
        """
        **SUMMARY**
        Returns a 2d numpy array of the x,y coordinates of each feature.  This
        is particularly useful if you want to use Scipy's Spatial Distance module
        **RETURNS**
        A numpy array of all the positions in the featureset.
        **EXAMPLE**
        >>> img = Image("lenna")
        >>> feats = img.findBlobs()
        >>> xs = feats.coordinates()
        >>> print xs
        """
        return np.array([[f.x, f.y] for f in self])
    def center(self):
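        # Alias for coordinates(): the per-feature (x, y) positions returned
        # there already act as the feature centers.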
        return self.coordinates()
    def area(self):
        """
        **SUMMARY**
        Returns a numpy array of the area of each feature in pixels.
        **RETURNS**
        A numpy array of all the positions in the featureset.
        **EXAMPLE**
        >>> img = Image("lenna")
        >>> feats = img.findBlobs()
        >>> xs = feats.area()
        >>> print xs
        """
        return np.array([f.area() for f in self])
    def sortArea(self):
        """
        **SUMMARY**
        Returns a new FeatureSet, with the largest area features first.
        **RETURNS**
        A featureset sorted based on area.
        **EXAMPLE**
        >>> img = Image("lenna")
        >>> feats = img.findBlobs()
        >>> feats = feats.sortArea()
        >>> print feats[-1] # biggest blob
        >>> print feats[0] # smallest blob
        """
        return FeatureSet(sorted(self, key = lambda f: f.area()))
    def sortX(self):
        """
        **SUMMARY**
        Returns a new FeatureSet, with the features sorted by ascending x coordinate.
        **RETURNS**
        A featureset sorted based on the x coordinate.
        **EXAMPLE**
        >>> img = Image("lenna")
        >>> feats = img.findBlobs()
        >>> feats = feats.sortX()
        >>> print feats[-1] # rightmost blob
        >>> print feats[0] # leftmost blob
        """
        return FeatureSet(sorted(self, key = lambda f: f.x))
    def sortY(self):
        """
        **SUMMARY**
        Returns a new FeatureSet, with the features sorted by ascending y coordinate.
        **RETURNS**
        A featureset sorted based on the y coordinate.
        **EXAMPLE**
        >>> img = Image("lenna")
        >>> feats = img.findBlobs()
        >>> feats = feats.sortY()
        >>> print feats[-1] # bottom-most blob
        >>> print feats[0] # top-most blob
        """
        return FeatureSet(sorted(self, key = lambda f: f.y)) 
    def distanceFrom(self, point = (-1, -1)):
        """
        **SUMMARY**
        Returns a numpy array of the distance each Feature is from a given coordinate.
        Default is the center of the image.
        **PARAMETERS**
        * *point* - A point on the image from which we will calculate distance.
        **RETURNS**
        A numpy array of distance values.
        **EXAMPLE**
        >>> img = Image("lenna")
        >>> feats = img.findBlobs()
        >>> d = feats.distanceFrom()
        >>> d[0]  #show the 0th blobs distance to the center.
        **TO DO**
        Make this accept other features to measure from.
        """
        if (point[0] == -1 or point[1] == -1) and len(self):
            point = np.array(self[0].image.size()) / 2.0
        return spsd.cdist(self.coordinates(), [point])[:,0]
    def sortDistance(self, point = (-1, -1)):
        """
        **SUMMARY**
        Returns a sorted FeatureSet with the features closest to a given coordinate first.
        Default is from the center of the image.
        **PARAMETERS**
        * *point* - A point on the image from which we will calculate distance.
        **RETURNS**
        A numpy array of distance values.
        **EXAMPLE**
        >>> img = Image("lenna")
        >>> feats = img.findBlobs()
        >>> d = feats.sortDistance()
        >>> d[-1].show()  # show the feature farthest from the center.
        """
        return FeatureSet(sorted(self, key = lambda f: f.distanceFrom(point)))
    def distancePairs(self):
        """
        **SUMMARY**
        Returns the square-form of pairwise distances for the featureset.
        The resulting N x N array can be used to quickly look up distances
        between features.
        **RETURNS**
        A NxN np matrix of distance values.
        **EXAMPLE**
        >>> img = Image("lenna")
        >>> feats = img.findBlobs()
        >>> d = feats.distancePairs()
        >>> print d
        """
        return spsd.squareform(spsd.pdist(self.coordinates()))
    def angle(self):
        """
        **SUMMARY**
        Return a numpy array of the angles (theta) of each feature.
        Note that theta is given in degrees, with 0 being horizontal.
        **RETURNS**
        An array of angle values corresponding to the features.
        **EXAMPLE**
        >>> img = Image("lenna")
        >>> l = img.findLines()
        >>> angs = l.angle()
        >>> print angs
        """
        return np.array([f.angle() for f in self])
    def sortAngle(self, theta = 0):
        """
        Return a sorted FeatureSet with the features closest to a given angle first.
        Note that theta is given in radians, with 0 being horizontal.
        **RETURNS**
        An array of angle values corresponding to the features.
        **EXAMPLE**
        >>> img = Image("lenna")
        >>> l = img.findLines()
        >>> l = l.sortAngle()
        >>> print l
        """
        return FeatureSet(sorted(self, key = lambda f: abs(f.angle() - theta)))
    def length(self):
        """
        **SUMMARY**
        Return a numpy array of the length (longest dimension) of each feature.
        **RETURNS**
        A numpy array of the length, in pixels, of each feature object.
        **EXAMPLE**
        >>> img = Image("Lenna")
        >>> l = img.findLines()
        >>> lengt = l.length()
        >>> lengt[0] # length of the 0th element.
        """
        return np.array([f.length() for f in self])
    def sortLength(self):
        """
        **SUMMARY**
        Return a sorted FeatureSet with the longest features first.
        **RETURNS**
        A sorted FeatureSet.
        **EXAMPLE**
        >>> img = Image("Lenna")
        >>> l = img.findLines().sortLength()
        >>> l[-1] # the longest line.
        """
        return FeatureSet(sorted(self, key = lambda f: f.length()))
    def meanColor(self):
        """
        **SUMMARY**
        Return a numpy array of the average color of the area covered by each Feature.
        **RETURNS**
        Returns an array of RGB triplets that correspond to the mean color of the feature.
        **EXAMPLE**
        >>> img = Image("lenna")
        >>> kp = img.findKeypoints()
        >>> c = kp.meanColor()
        """
        return np.array([f.meanColor() for f in self])
    def colorDistance(self, color = (0, 0, 0)):
        """
        **SUMMARY**
        Return a numpy array of the distance each features average color is from
        a given color tuple (default black, so colorDistance() returns intensity)
        **PARAMETERS**
        * *color* - The color to calculate the distance from.
        **RETURNS**
        The distance of the average color for the feature from given color as a numpy array.
        **EXAMPLE**
        >>> img = Image("lenna")
        >>> circs = img.findCircle()
        >>> d = circs.colorDistance(color=Color.BLUE)
        >>> print d
        """
        return spsd.cdist(self.meanColor(), [color])[:,0]
    def sortColorDistance(self, color = (0, 0, 0)):
        """
        Return a sorted FeatureSet with features closest to a given color first.
        Default is black, so sortColorDistance() will return darkest to brightest
        """
        return FeatureSet(sorted(self, key = lambda f: f.colorDistance(color)))
    def filter(self, filterarray):
        """
        **SUMMARY**
        Return a FeatureSet which is filtered on a numpy boolean array.  This
        will let you use the attribute functions to easily screen Features out
        of return FeatureSets.
        **PARAMETERS**
        * *filterarray* - A numpy array, matching  the size of the feature set,
          made of Boolean values, we return the true values and reject the False value.
        **RETURNS**
        The revised feature set.
        **EXAMPLE**
        Return all lines < 200px
        >>> my_lines.filter(my_lines.length() < 200) # returns all lines < 200px
        >>> my_blobs.filter(my_blobs.area() > 0.9 * my_blobs.length()**2) # returns blobs that are nearly square
        >>> my_lines.filter(abs(my_lines.angle()) < numpy.pi / 4) #any lines within 45 degrees of horizontal
        >>> my_corners.filter(my_corners.x() - my_corners.y() > 0) #only return corners in the upper diagonal of the image
        """
        return FeatureSet(list(np.array(self)[np.array(filterarray)]))
    def width(self):
        """
        **SUMMARY**
        Returns a nparray which is the width of all the objects in the FeatureSet.
        **RETURNS**
        A numpy array of width values.
        **EXAMPLE**
        >>> img = Image("NotLenna")
        >>> l = img.findLines()
        >>> l.width()
        """
        return np.array([f.width() for f in self])
    def height(self):
        """
        Returns a nparray which is the height of all the objects in the FeatureSet
        **RETURNS**
        A numpy array of width values.
        **EXAMPLE**
        >>> img = Image("NotLenna")
        >>> l = img.findLines()
        >>> l.height()
        """
        return np.array([f.height() for f in self])
    def crop(self):
        """
        **SUMMARY**
        Returns a nparray with the cropped features as SimpleCV images.
        **RETURNS**
        A numpy array of SimpleCV images, one cropped to each feature.
        **EXAMPLE**
        >>> img = Image("Lenna")
        >>> blobs = img.findBlobs(128)
        >>> for b in blobs:
        >>>   newImg = b.crop()
        >>>   newImg.show()
        >>>   time.sleep(1)
        """
        return np.array([f.crop() for f in self])
    def inside(self,region):
        """
        **SUMMARY**
        Return only the features inside the region, where region can be a bounding box,
        bounding circle, a list of tuples in a closed polygon, or any other feature.
        **PARAMETERS**
        * *region*
          * A bounding box - of the form (x,y,w,h) where x,y is the upper left corner
          * A bounding circle of the form (x,y,r)
          * A list of x,y tuples defining a closed polygon e.g. ((x,y),(x,y),....)
          * Any two dimensional feature (e.g. blobs, circle ...)
        **RETURNS**
        Returns a featureset of features that are inside the region.
        **EXAMPLE**
        >>> img = Image("Lenna")
        >>> blobs = img.findBlobs()
        >>> b = blobs[-1]
        >>> lines = img.findLines()
        >>> inside = lines.inside(b)
        **NOTE**
        This currently performs a bounding box test, not a full polygon test for speed.
        """
        fs = FeatureSet()
        for f in self:
            if(f.isContainedWithin(region)):
                fs.append(f)
        return fs
    def outside(self,region):
        """
        **SUMMARY**
        Return only the features outside the region, where region can be a bounding box,
        bounding circle, a list of tuples in a closed polygon, or any other feature.
        **PARAMETERS**
        * *region*
          * A bounding box - of the form (x,y,w,h) where x,y is the upper left corner
          * A bounding circle of the form (x,y,r)
          * A list of x,y tuples defining a closed polygon e.g. ((x,y),(x,y),....)
          * Any two dimensional feature (e.g. blobs, circle ...)
        **RETURNS**
        Returns a featureset of features that are outside the region.
        **EXAMPLE**
        >>> img = Image("Lenna")
        >>> blobs = img.findBlobs()
        >>> b = blobs[-1]
        >>> lines = img.findLines()
        >>> outside = lines.outside(b)
        **NOTE**
        This currently performs a bounding box test, not a full polygon test for speed.
        """
        fs = FeatureSet()
        for f in self:
            if(f.isNotContainedWithin(region)):
                fs.append(f)
        return fs
    def overlaps(self,region):
        """
        **SUMMARY**
        Return only the features that overlap the region, where region can be a bounding box,
        bounding circle, a list of tuples in a closed polygon, or any other feature.
        **PARAMETERS**
        * *region*
          * A bounding box - of the form (x,y,w,h) where x,y is the upper left corner
          * A bounding circle of the form (x,y,r)
          * A list of x,y tuples defining a closed polygon e.g. ((x,y),(x,y),....)
          * Any two dimensional feature (e.g. blobs, circle ...)
        **RETURNS**
        Returns a featureset of features that overlap the region.
        **EXAMPLE**
        >>> img = Image("Lenna")
        >>> blobs = img.findBlobs()
        >>> b = blobs[-1]
        >>> lines = img.findLines()
        >>> outside = lines.overlaps(b)
        **NOTE**
        This currently performs a bounding box test, not a full polygon test for speed.
        """
        fs = FeatureSet()
        for f in self:
            if( f.overlaps(region) ):
                fs.append(f)
        return fs
    def above(self,region):
        """
        **SUMMARY**
        Return only the features that are above the region, where region can be a bounding box,
        bounding circle, a list of tuples in a closed polygon, or any other feature.
        **PARAMETERS**
        * *region*
          * A bounding box - of the form (x,y,w,h) where x,y is the upper left corner
          * A bounding circle of the form (x,y,r)
          * A list of x,y tuples defining a closed polygon e.g. ((x,y),(x,y),....)
          * Any two dimensional feature (e.g. blobs, circle ...)
        **RETURNS**
        Returns a featureset of features that are above the region.
        **EXAMPLE**
        >>> img = Image("Lenna")
        >>> blobs = img.findBlobs()
        >>> b = blobs[-1]
        >>> lines = img.findLines()
        >>> outside = lines.above(b)
        **NOTE**
        This currently performs a bounding box test, not a full polygon test for speed.
        """
        fs = FeatureSet()
        for f in self:
            if(f.above(region)):
                fs.append(f)
        return fs
    def below(self,region):
        """
        **SUMMARY**
        Return only the features below the region, where region can be a bounding box,
        bounding circle, a list of tuples in a closed polygon, or any other feature.
        **PARAMETERS**
        * *region*
          * A bounding box - of the form (x,y,w,h) where x,y is the upper left corner
          * A bounding circle of the form (x,y,r)
          * A list of x,y tuples defining a closed polygon e.g. ((x,y),(x,y),....)
          * Any two dimensional feature (e.g. blobs, circle ...)
        **RETURNS**
        Returns a featureset of features that are below the region.
        **EXAMPLE**
        >>> img = Image("Lenna")
        >>> blobs = img.findBlobs()
        >>> b = blobs[-1]
        >>> lines = img.findLines()
        >>> inside = lines.below(b)
        **NOTE**
        This currently performs a bounding box test, not a full polygon test for speed.
        """
        fs = FeatureSet()
        for f in self:
            if(f.below(region)):
                fs.append(f)
        return fs
    def left(self,region):
        """
        **SUMMARY**
        Return only the features left of the region, where region can be a bounding box,
        bounding circle, a list of tuples in a closed polygon, or any other feature.
        **PARAMETERS**
        * *region*
          * A bounding box - of the form (x,y,w,h) where x,y is the upper left corner
          * A bounding circle of the form (x,y,r)
          * A list of x,y tuples defining a closed polygon e.g. ((x,y),(x,y),....)
          * Any two dimensional feature (e.g. blobs, circle ...)
        **RETURNS**
        Returns a featureset of features that are left of the region.
        **EXAMPLE**
        >>> img = Image("Lenna")
        >>> blobs = img.findBlobs()
        >>> b = blobs[-1]
        >>> lines = img.findLines()
        >>> left = lines.left(b)
        **NOTE**
        This currently performs a bounding box test, not a full polygon test for speed.
        """
        fs = FeatureSet()
        for f in self:
            if(f.left(region)):
                fs.append(f)
        return fs
    def right(self,region):
        """
        **SUMMARY**
        Return only the features right of the region, where region can be a bounding box,
        bounding circle, a list of tuples in a closed polygon, or any other feature.
        **PARAMETERS**
        * *region*
          * A bounding box - of the form (x,y,w,h) where x,y is the upper left corner
          * A bounding circle of the form (x,y,r)
          * A list of x,y tuples defining a closed polygon e.g. ((x,y),(x,y),....)
          * Any two dimensional feature (e.g. blobs, circle ...)
        **RETURNS**
        Returns a featureset of features that are right of the region.
        **EXAMPLE**
        >>> img = Image("Lenna")
        >>> blobs = img.findBlobs()
        >>> b = blobs[-1]
        >>> lines = img.findLines()
        >>> right = lines.right(b)
        **NOTE**
        This currently performs a bounding box test, not a full polygon test for speed.
        """
        fs = FeatureSet()
        for f in self:
            if(f.right(region)):
                fs.append(f)
        return fs
    def onImageEdge(self, tolerance=1):
        """
        **SUMMARY**
        The method returns a feature set of features that are on or "near" the edge of
        the image. This is really helpful for removing features that are edge effects.
        **PARAMETERS**
        * *tolerance* - the distance in pixels from the edge at which a feature
          qualifies as being "on" the edge of the image.
        **RETURNS**
        Returns a featureset of features that are on the edge of the image.
        **EXAMPLE**
        >>> img = Image("./sampleimages/EdgeTest1.png")
        >>> blobs = img.findBlobs()
        >>> es = blobs.onImageEdge()
        >>> es.draw(color=Color.RED)
        >>> img.show()
        """
        fs = FeatureSet()
        for f in self:
            if(f.onImageEdge(tolerance)):
                fs.append(f)
        return fs
    def notOnImageEdge(self, tolerance=1):
        """
        **SUMMARY**
        The method returns a feature set of features that are not on or "near" the edge of
        the image. This is really helpful for removing features that are edge effects.
        **PARAMETERS**
        * *tolerance* - the distance in pixels from the edge at which a feature
          qualifies as being "on" the edge of the image.
        **RETURNS**
        Returns a featureset of features that are not on the edge of the image.
        **EXAMPLE**
        >>> img = Image("./sampleimages/EdgeTest1.png")
        >>> blobs = img.findBlobs()
        >>> es = blobs.notOnImageEdge()
        >>> es.draw(color=Color.RED)
        >>> img.show()
        """
        fs = FeatureSet()
        for f in self:
            if(f.notOnImageEdge(tolerance)):
                fs.append(f)
        return fs
    def topLeftCorners(self):
        """
        **SUMMARY**
        This method returns the top left corner of each feature's bounding box.
        **RETURNS**
        A numpy array of x,y position values.
        **EXAMPLE**
        >>> img = Image("./sampleimages/EdgeTest1.png")
        >>> blobs = img.findBlobs()
        >>> tl = blobs.topLeftCorners()
        >>> print tl[0]
        """
        return np.array([f.topLeftCorner() for f in self])
    def bottomLeftCorners(self):
        """
        **SUMMARY**
        This method returns the bottom left corner of each feature's bounding box.
        **RETURNS**
        A numpy array of x,y position values.
        **EXAMPLE**
        >>> img = Image("./sampleimages/EdgeTest1.png")
        >>> blobs = img.findBlobs()
        >>> bl = blobs.bottomLeftCorners()
        >>> print bl[0]
        """
        return np.array([f.bottomLeftCorner() for f in self])
    def topRightCorners(self):
        """
        **SUMMARY**
        This method returns the top right corner of each feature's bounding box.
        **RETURNS**
        A numpy array of x,y position values.
        **EXAMPLE**
        >>> img = Image("./sampleimages/EdgeTest1.png")
        >>> blobs = img.findBlobs()
        >>> tr = blobs.topRightCorners()
        >>> print tr[0]
        """
        return np.array([f.topRightCorner() for f in self])
    def bottomRightCorners(self):
        """
        **SUMMARY**
        This method returns the bottom right corner of each feature's bounding box.
        **RETURNS**
        A numpy array of x,y position values.
        **EXAMPLE**
        >>> img = Image("./sampleimages/EdgeTest1.png")
        >>> blobs = img.findBlobs()
        >>> br = blobs.bottomRightCorners()
        >>> print br[0]
        """
        return np.array([f.bottomRightCorner() for f in self])
    def aspectRatios(self):
        """
        **SUMMARY**
        Return the aspect ratio of all the features in the feature set. For our purposes
        the aspect ratio is max(width,height)/min(width,height).
        **RETURNS**
        A numpy array of the aspect ratio of the features in the featureset.
        **EXAMPLE**
        >>> img = Image("OWS.jpg")
        >>> blobs = img.findBlobs(128)
        >>> print blobs.aspectRatios()
        """
        return np.array([f.aspectRatio() for f in self])
    def cluster(self,method="kmeans",properties=None,k=3):
        """
        
        **SUMMARY**
        This function clusters the blobs in the featureSet based on the properties. Properties can be "color", "shape" or "position" of blobs.
        Clustering is done using K-Means or Hierarchical clustering(Ward) algorithm.
        **PARAMETERS**
        
        * *properties* - It should be a list with any combination of "color", "shape", "position". properties = ["color","position"]. properties = ["position","shape"]. properties = ["shape"]
        * *method* - if method is "kmeans", it will cluster using K-Means algorithm, if the method is "hierarchical", no need to spicify the number of clusters
        * *k* - The number of clusters(kmeans).
        
        **RETURNS**
        A list of featureset, each being a cluster itself.
        **EXAMPLE**
          >>> img = Image("lenna")
          >>> blobs = img.findBlobs()
          >>> clusters = blobs.cluster(method="kmeans",properties=["color"],k=5)
          >>> for i in clusters:
          >>>     i.draw(color=Color.getRandom(),width=5)
          >>> img.show()
        
        """
        try :
            from sklearn.cluster import KMeans, Ward
            from sklearn import __version__
        except :
            logger.warning("install scikits-learning package")
            return
        X = [] #List of feature vector of each blob
        if not properties:
            properties = ['color','shape','position']
        if k > len(self):
            logger.warning("Number of clusters cannot be greater then the number of blobs in the featureset")
            return
        for i in self:
            featureVector = []
            if 'color' in properties:
                featureVector.extend(i.mAvgColor)
            if 'shape' in properties:
                featureVector.extend(i.mHu)
            if 'position' in properties:
                featureVector.extend(i.extents())
            if not featureVector :
                logger.warning("properties parameter is not specified properly")
                return
            X.append(featureVector)
        if method == "kmeans":
            
            # Ignore minor version numbers.
            sklearn_version = re.search(r'\d+\.\d+', __version__).group()
            
            if (float(sklearn_version) > 0.11):
                k_means = KMeans(init='random', n_clusters=k, n_init=10).fit(X)
            else:
                k_means = KMeans(init='random', k=k, n_init=10).fit(X)
            KClusters = [ FeatureSet([]) for i in range(k)]
            for i in range(len(self)):
                KClusters[k_means.labels_[i]].append(self[i])
            return KClusters
        if method == "hierarchical":
            ward = Ward(n_clusters=int(sqrt(len(self)))).fit(X) #n_clusters = sqrt(n)
            WClusters = [ FeatureSet([]) for i in range(int(sqrt(len(self))))]
            for i in range(len(self)):
                WClusters[ward.labels_[i]].append(self[i])
            return WClusters
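    # A hedged usage sketch for cluster(); the image name is hypothetical and
    # scikit-learn (with the old Ward estimator) must be installed. Kept as
    # comments so importing this module has no side effects.
    #
    #   from SimpleCV import Image, Color
    #   img = Image("coins.png")                          # hypothetical image
    #   blobs = img.findBlobs()
    #   if blobs:
    #       # K-Means on color alone, asking for three clusters.
    #       for c in blobs.cluster(method="kmeans", properties=["color"], k=3):
    #           c.draw(color=Color.getRandom(), width=4)
    #       # Hierarchical (Ward) clustering chooses its own cluster count,
    #       # roughly sqrt(number of blobs).
    #       w_clusters = blobs.cluster(method="hierarchical",
    #                                  properties=["shape", "position"])
    #   img.show()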
    @property
    def image(self):
        if not len(self):
            return None
        return self[0].image
    @image.setter
    def image(self, i):
        for f in self:
            f.image = i
### ----------------------------------------------------------------------------
### ----------------------------------------------------------------------------
### ----------------------------FEATURE CLASS-----------------------------------
### ----------------------------------------------------------------------------
### ----------------------------------------------------------------------------
class Feature(object):
    """
    **SUMMARY**
    The Feature object is an abstract class which real features descend from.
    Each feature object has:
    * a draw() method,
    * an image property, referencing the originating Image object
    * x and y coordinates
    * default functions for determining angle, area, meanColor, etc for FeatureSets
    * in the Feature class, these functions assume the feature is 1px
    """
    x = 0.00
    y = 0.00
    _mMaxX = None
    _mMaxY = None
    _mMinX = None
    _mMinY = None
    _mWidth = None
    _mHeight = None
    _mSrcImgW = None
    _mSrcImgH = None
    # This is 2.0 refactoring
    mBoundingBox = None # THIS SHALT BE TOP LEFT (X,Y) THEN W H i.e. [X,Y,W,H]
    mExtents = None # THIS SHALT BE [MAXX,MINX,MAXY,MINY]
    points = None  # THIS SHALT BE (x,y) tuples in the ORDER [(TopLeft),(TopRight),(BottomLeft),(BottomRight)]
    image = "" #parent image
    #points = []
    #boundingBox = []
    def __init__(self, i, at_x, at_y, points):
        #THE COVENANT IS THAT YOU PROVIDE THE POINTS IN THE SPECIFIED FORMAT AND ALL OTHER VALUES SHALT FLOW
        self.x = at_x
        self.y = at_y
        self.image = i
        self.points = points
        self._updateExtents(new_feature=True)
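    # A minimal sketch (an assumption, not part of SimpleCV) of a concrete
    # Feature subclass: honor the "covenant" above by passing the corner
    # points in the order [(TopLeft), (TopRight), (BottomLeft), (BottomRight)],
    # and every derived quantity (width, height, extents, corners, aspect
    # ratio) then follows from _updateExtents(). Kept as comments to avoid
    # adding a class to the API.
    #
    #   class RectangleFeature(Feature):              # hypothetical subclass
    #       def __init__(self, img, x, y, w, h):
    #           pts = [(x, y), (x + w, y), (x, y + h), (x + w, y + h)]
    #           cx = x + w / 2.0                      # x,y is the center point
    #           cy = y + h / 2.0
    #           super(RectangleFeature, self).__init__(img, cx, cy, pts)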
    def reassign(self, img):
        """
        **SUMMARY**
        Reassign the image of this feature and return an updated copy of the feature.
        **PARAMETERS**
        * *img* - the new image to which to assign the feature.
        .. Warning::
          THIS DOES NOT PERFORM A SIZE CHECK. IF YOUR NEW IMAGE IS NOT THE EXACT SAME SIZE YOU WILL CERTAINLY CAUSE ERRORS.
        **EXAMPLE**
        >>> img = Image("lenna")
        >>> img2 = img.invert()
        >>> l = img.findLines()
        >>> l2 = l[-1].reassign(img2)
        >>> l2.show()
        """
        retVal = copy.deepcopy(self)
        if( self.image.width != img.width or
            self.image.height != img.height ):
            warnings.warn("DON'T REASSIGN IMAGES OF DIFFERENT SIZES")
        retVal.image = img
        return retVal
    def corners(self):
        self._updateExtents()
        return self.points
    def coordinates(self):
        """
        **SUMMARY**
        Returns the x,y position of the feature. This is usually the center coordinate.
        **RETURNS**
        Returns an (x,y) tuple of the position of the feature.
        **EXAMPLE**
        >>> img = Image("aerospace.png")
        >>> blobs = img.findBlobs()
        >>> for b in blobs:
        >>>    print b.coordinates()
        """
        return np.array([self.x, self.y])
    def draw(self, color = Color.GREEN):
        """
        **SUMMARY**
        This method will draw the feature on the source image.
        **PARAMETERS**
        * *color* - The color, as an RGB tuple, with which to draw the feature.
        **RETURNS**
        Nothing.
        **EXAMPLE**
        >>> img = Image("RedDog2.jpg")
        >>> blobs = img.findBlobs()
        >>> blobs[-1].draw()
        >>> img.show()
        """
        self.image[self.x, self.y] = color
    def show(self, color = Color.GREEN):
        """
        **SUMMARY**
        This function will automatically draw the features on the image and show it.
        **RETURNS**
        Nothing.
        **EXAMPLE**
        >>> img = Image("logo")
        >>> feat = img.findBlobs()
        >>> feat[-1].show() #window pops up.
        """
        self.draw(color)
        self.image.show()
    def distanceFrom(self, point = (-1, -1)):
        """
        **SUMMARY**
        Given a point (defaults to the center of the image), return the euclidean distance of x,y from this point.
        **PARAMETERS**
        * *point* - The point, as an (x,y) tuple on the image to measure distance from.
        **RETURNS**
        The distance as a floating point value in pixels.
        **EXAMPLE**
        >>> img = Image("OWS.jpg")
        >>> blobs = img.findBlobs(128)
        >>> blobs[-1].distanceFrom(blobs[-2].coordinates())
        """
        if (point[0] == -1 or point[1] == -1):
            point = np.array(self.image.size()) / 2
        return spsd.euclidean(point, [self.x, self.y])
    def meanColor(self):
        """
        **SUMMARY**
        Return the average color within the feature as a tuple.
        **RETURNS**
        An RGB color tuple.
        **EXAMPLE**
        >>> img = Image("OWS.jpg")
        >>> blobs = img.findBlobs(128)
        >>> for b in blobs:
        >>>    if (b.meanColor() == Color.WHITE):
        >>>       print "Found a white thing"
        """
        return self.image[self.x, self.y]
    def colorDistance(self, color = (0, 0, 0)):
        """
        **SUMMARY**
        Return the euclidean color distance of the color tuple at x,y from a given color (default black).
        **PARAMETERS**
        * *color* - An RGB triplet from which to calculate the color distance.
        **RETURNS**
        A floating point color distance value.
        **EXAMPLE**
        >>> img = Image("OWS.jpg")
        >>> blobs = img.findBlobs(128)
        >>> for b in blobs:
        >>>    print b.colorDistance(Color.WHITE)
        """
        return spsd.euclidean(np.array(color), np.array(self.meanColor()))
    def angle(self):
        """
        **SUMMARY**
        Return the angle (theta) in degrees of the feature. The default is 0 (horizontal).
        .. Warning::
          This is not a valid operation for all features.
        **RETURNS**
        An angle value in degrees.
        **EXAMPLE**
        >>> img = Image("OWS.jpg")
        >>> blobs = img.findBlobs(128)
        >>> for b in blobs:
        >>>    if b.angle() == 0:
        >>>       print "I AM HORIZONTAL."
        **TODO**
        Double check that values are being returned consistently.
        """
        return 0
    def length(self):
        """
        **SUMMARY**
        This method returns the longest dimension of the feature (i.e max(width,height)).
        **RETURNS**
        A floating point length value.
        **EXAMPLE**
        >>> img = Image("OWS.jpg")
        >>> blobs = img.findBlobs(128)
        >>> for b in blobs:
        >>>    if b.length() > 200:
        >>>       print "OH MY! - WHAT A BIG FEATURE YOU HAVE!"
        >>>       print "---I bet you say that to all the features."
        **TODO**
        Should this be sqrt(x*x+y*y)?
        """
        return float(np.max([self.width(),self.height()]))
    def distanceToNearestEdge(self):
        """
        **SUMMARY**
        This method returns the distance, in pixels, from the nearest image edge.
        **RETURNS**
        The integer distance to the nearest edge.
        **EXAMPLE**
        >>> img = Image("../sampleimages/EdgeTest1.png")
        >>> b = img.findBlobs()
        >>> b[0].distanceToNearestEdge()
        """
        w = self.image.width
        h = self.image.height
        return np.min([self._mMinX,self._mMinY, w-self._mMaxX,h-self._mMaxY])
    def onImageEdge(self,tolerance=1):
        """
        **SUMMARY**
        This method returns True if the feature is within `tolerance`
        pixels of the nearest image edge.
        **PARAMETERS**
        * *tolerance* - the distance in pixels at which a feature qualifies
          as being on the image edge.
        **RETURNS**
        True if the feature is on the edge, False otherwise.
        **EXAMPLE**
        >>> img = Image("../sampleimages/EdgeTest1.png")
        >>> b = img.findBlobs()
        >>> if(b[0].onImageEdge()):
        >>>     print "HELP! I AM ABOUT TO FALL OFF THE IMAGE"
        """
        # this has to be one to deal with blob library weirdness that goes deep down to opencv
        return ( self.distanceToNearestEdge() <= tolerance )
    def notOnImageEdge(self,tolerance=1):
        """
        **SUMMARY**
        This method returns True if the feature is more than `tolerance`
        pixels away from the nearest image edge.
        **PARAMETERS**
        * *tolerance* - the distance in pixels at which a feature qualifies
          as not being on the image edge.
        **RETURNS**
        True if the feature is not on the edge of the image, False otherwise.
        **EXAMPLE**
        >>> img = Image("../sampleimages/EdgeTest1.png")
        >>> b = img.findBlobs()
        >>> if(b[0].notOnImageEdge()):
        >>>     print "I am safe and sound."
        """
        # this has to be one to deal with blob library weirdness that goes deep down to opencv
        return ( self.distanceToNearestEdge() > tolerance )
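    # A hedged sketch (comments only) showing how the three edge helpers above
    # relate; the image path is hypothetical.
    #
    #   from SimpleCV import Image
    #   img = Image("EdgeTest1.png")                  # hypothetical image
    #   blobs = img.findBlobs()
    #   if blobs:
    #       inner = [b for b in blobs if b.notOnImageEdge(tolerance=5)]
    #       for b in inner:
    #           # each surviving blob is strictly more than 5px from the border
    #           assert b.distanceToNearestEdge() > 5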
    def aspectRatio(self):
        """
        **SUMMARY**
        Return the aspect ratio of the feature, which for our purposes
        is max(width,height)/min(width,height).
        **RETURNS**
        A single floating point value of the aspect ratio.
        **EXAMPLE**
        >>> img = Image("OWS.jpg")
        >>> blobs = img.findBlobs(128)
        >>> blobs[0].aspectRatio()
        """
        self._updateExtents()
        return self.mAspectRatio
    def area(self):
        """
        **SUMMARY**
        Returns the area (in pixels) covered by the feature. For the base Feature class this is simply width() * height().
        **RETURNS**
        An integer area of the feature.
        **EXAMPLE**
        >>> img = Image("OWS.jpg")
        >>> blobs = img.findBlobs(128)
        >>> for b in blobs:
        >>>    if b.area() > 200:
        >>>       print b.area()
        """
        return self.width() * self.height()
    def width(self):
        """
        **SUMMARY**
        Returns the width of the feature.
        **RETURNS**
        An integer value for the feature's width.
        **EXAMPLE**
        >>> img = Image("OWS.jpg")
        >>> blobs = img.findBlobs(128)
        >>> for b in blobs:
        >>>    if b.width() > b.height():
        >>>       print "wider than tall"
        >>>       b.draw()
        >>> img.show()
        """
        self._updateExtents()
        return self._mWidth
    def height(self):
        """
        **SUMMARY**
        Returns the height of the feature.
        **RETURNS**
        An integer value of the feature's height.
        **EXAMPLE**
        >>> img = Image("OWS.jpg")
        >>> blobs = img.findBlobs(128)
        >>> for b in blobs:
        >>>    if b.width() > b.height():
        >>>       print "wider than tall"
        >>>       b.draw()
        >>> img.show()
        """
        self._updateExtents()
        return self._mHeight
    def crop(self):
        """
        **SUMMARY**
        This function crops the source image to the location of the feature and returns
        a new SimpleCV image.
        **RETURNS**
        A SimpleCV image that is cropped to the feature position and size.
        **EXAMPLE**
        >>> img = Image("OWS.jpg")
        >>> blobs = img.findBlobs(128)
        >>> big = blobs[-1].crop()
        >>> big.show()
        """
        return self.image.crop(self.x, self.y, self.width(), self.height(), centered = True)
    def __repr__(self):
        return "%s.%s at (%d,%d)" % (self.__class__.__module__, self.__class__.__name__, self.x, self.y)
    def _updateExtents(self, new_feature=False):
#    mBoundingBox = None # THIS SHALT BE TOP LEFT (X,Y) THEN W H i.e. [X,Y,W,H]
#    mExtents = None # THIS SHALT BE [MAXX,MINX,MAXY,MINY]
#    points = None  # THIS SHALT BE (x,y) tuples in the ORDER [(TopLeft),(TopRight),(BottomLeft),(BottomRight)]
        max_x = self._mMaxX
        min_x = self._mMinX
        max_y = self._mMaxY
        min_y = self._mMinY
        width = self._mWidth
        height = self._mHeight
        extents = self.mExtents
        bounding_box = self.mBoundingBox
        #if new_feature or None in [self._mMaxX, self._mMinX, self._mMaxY, self._mMinY,
        #            self._mWidth, self._mHeight, self.mExtents, self.mBoundingBox]:
        if new_feature or None in [max_x, min_x, max_y, min_y, width, height, extents, bounding_box]:
            max_x = max_y = float("-infinity")
            min_x = min_y = float("infinity")
            for p in self.points:
                if (p[0] > max_x):
                    max_x = p[0]
                if (p[0] < min_x):
                    min_x = p[0]
                if (p[1] > max_y):
                    max_y = p[1]
                if (p[1] < min_y):
                    min_y = p[1]
            width = max_x - min_x
            height = max_y - min_y
            if (width <= 0):
                width = 1
            if (height <= 0):
                height = 1
            self.mBoundingBox = [min_x, min_y, width, height]
            self.mExtents = [max_x, min_x, max_y, min_y]
            if width > height:
                self.mAspectRatio = float(width) / float(height)
            else:
                self.mAspectRatio = float(height) / float(width)
            self._mMaxX = max_x
            self._mMinX = min_x
            self._mMaxY = max_y
            self._mMinY = min_y
            self._mWidth = width
            self._mHeight = height
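    # A small worked example (as comments) of what _updateExtents() derives
    # from the corner points; the numbers below are illustrative only.
    #
    #   points = [(10, 20), (40, 20), (10, 80), (40, 80)]
    #   # min_x = 10, max_x = 40, min_y = 20, max_y = 80
    #   # width  = 40 - 10 = 30,  height = 80 - 20 = 60
    #   # mBoundingBox = [10, 20, 30, 60]     -> [x, y, w, h]
    #   # mExtents     = [40, 10, 80, 20]     -> [maxX, minX, maxY, minY]
    #   # mAspectRatio = 60.0 / 30.0 = 2.0    -> max(w,h) / min(w,h)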
    def boundingBox(self):
        """
        **SUMMARY**
        This function returns a rectangle which bounds the blob.
        **RETURNS**
        A list of [x, y, w, h] where (x, y) are the top left point of the rectangle
        and w, h are its width and height respectively.
        **EXAMPLE**
        >>> img = Image("OWS.jpg")
        >>> blobs = img.findBlobs(128)
        >>> print blobs[-1].boundingBox()
        """
        self._updateExtents()
        return self.mBoundingBox
    def extents(self):
        """
        **SUMMARY**
        This function returns the maximum and minimum x and y values for the feature and
        returns them as a tuple.
        **RETURNS**
        A tuple of the extents of the feature. The order is (maxX, minX, maxY, minY).
        **EXAMPLE**
        >>> img = Image("OWS.jpg")
        >>> blobs = img.findBlobs(128)
        >>> print blobs[-1].extents()
        """
        self._updateExtents()
        return self.mExtents
    def minY(self):
        """
        **SUMMARY**
        This method returns the minimum y value of the bounding box of
        the feature.
        **RETURNS**
        An integer value of the minimum y value of the feature.
        **EXAMPLE**
        >>> img = Image("OWS.jpg")
        >>> blobs = img.findBlobs(128)
        >>> print blobs[-1].minY()
        """
        self._updateExtents()
        return self._mMinY
    def maxY(self):
        """
        **SUMMARY**
        This method returns the maximum y value of the bounding box of
        the feature.
        **RETURNS**
        An integer value of the maximum y value of the feature.
        **EXAMPLE**
        >>> img = Image("OWS.jpg")
        >>> blobs = img.findBlobs(128)
        >>> print blobs[-1].maxY()
        """
        self._updateExtents()
        return self._mMaxY
    def minX(self):
        """
        **SUMMARY**
        This method returns the minimum x value of the bounding box of
        the feature.
        **RETURNS**
        An integer value of the minimum x value of the feature.
        **EXAMPLE**
        >>> img = Image("OWS.jpg")
        >>> blobs = img.findBlobs(128)
        >>> print blobs[-1].minX()
        """
        self._updateExtents()
        return self._mMinX
    def maxX(self):
        """
        **SUMMARY**
        This method returns the maximum x value of the bounding box of
        the feature.
        **RETURNS**
        An integer value of the maximum x value of the feature.
        **EXAMPLE**
        >>> img = Image("OWS.jpg")
        >>> blobs = img.findBlobs(128)
        >>> print blobs[-1].maxX()
        """
        self._updateExtents()
        return self._mMaxX
    def topLeftCorner(self):
        """
        **SUMMARY**
        This method returns the top left corner of the bounding box of
        the blob as an (x,y) tuple.
        **RETURNS**
        Returns a tuple of the top left corner.
        **EXAMPLE**
        >>> img = Image("OWS.jpg")
        >>> blobs = img.findBlobs(128)
        >>> print blobs[-1].topLeftCorner()
        """
        self._updateExtents()
        return (self._mMinX,self._mMinY)
    def bottomRightCorner(self):
        """
        **SUMMARY**
        This method returns the bottom right corner of the bounding box of
        the blob as an (x,y) tuple.
        **RETURNS**
        Returns a tuple of the bottom right corner.
        **EXAMPLE**
        >>> img = Image("OWS.jpg")
        >>> blobs = img.findBlobs(128)
        >>> print blobs[-1].bottomRightCorner()
        """
        self._updateExtents()
        return (self._mMaxX,self._mMaxY)
    def bottomLeftCorner(self):
        """
        **SUMMARY**
        This method returns the bottom left corner of the bounding box of
        the blob as an (x,y) tuple.
        **RETURNS**
        Returns a tuple of the bottom left corner.
        **EXAMPLE**
        >>> img = Image("OWS.jpg")
        >>> blobs = img.findBlobs(128)
        >>> print blobs[-1].bottomLeftCorner()
        """
        self._updateExtents()
        return (self._mMinX,self._mMaxY)
    def topRightCorner(self):
        """
        **SUMMARY**
        This method returns the top right corner of the bounding box of
        the blob as an (x,y) tuple.
        **RETURNS**
        Returns a tuple of the top right corner.
        **EXAMPLE**
        >>> img = Image("OWS.jpg")
        >>> blobs = img.findBlobs(128)
        >>> print blobs[-1].topRightCorner()
        """
        self._updateExtents()
        return (self._mMaxX,self._mMinY)
    def above(self,object):
        """
        **SUMMARY**
        Return true if the feature is above the object, where the object can be a bounding box,
        bounding circle, a list of tuples in a closed polygon, or any other feature.
        **PARAMETERS**
        * *object*
          * A bounding box - of the form (x,y,w,h) where x,y is the upper left corner
          * A bounding circle of the form (x,y,r)
          * A list of x,y tuples defining a closed polygon e.g. ((x,y),(x,y),....)
          * Any two dimensional feature (e.g. blobs, circle ...)
        **RETURNS**
        Returns a Boolean, True if the feature is above the object, False otherwise.
        **EXAMPLE**
        >>> img = Image("Lenna")
        >>> blobs = img.findBlobs()
        >>> b = blobs[0]
        >>> if( blobs[-1].above(b) ):
        >>>    print "above the biggest blob"
        """
        if( isinstance(object,Feature) ):
            return( self.maxY() < object.minY() )
        elif( isinstance(object,tuple) or isinstance(object,np.ndarray) ):
            return( self.maxY() < object[1]  )
        elif( isinstance(object,float) or isinstance(object,int) ):
            return( self.maxY() < object )
        else:
            logger.warning("SimpleCV did not recognize the input type to feature.above(). This method only takes another feature, an (x,y) tuple, or a ndarray type.")
            return None
    def below(self,object):
        """
        **SUMMARY**
        Return true if the feature is below the object, where the object can be a bounding box,
        bounding circle, a list of tuples in a closed polygon, or any other feature.
        **PARAMETERS**
        * *object*
          * A bounding box - of the form (x,y,w,h) where x,y is the upper left corner
          * A bounding circle of the form (x,y,r)
          * A list of x,y tuples defining a closed polygon e.g. ((x,y),(x,y),....)
          * Any two dimensional feature (e.g. blobs, circle ...)
        **RETURNS**
        Returns a Boolean, True if the feature is below the object, False otherwise.
        **EXAMPLE**
        >>> img = Image("Lenna")
        >>> blobs = img.findBlobs()
        >>> b = blobs[0]
        >>> if( blobs[-1].below(b) ):
        >>>    print "above the biggest blob"
        """
        if( isinstance(object,Feature) ):
            return( self.minY() > object.maxY() )
        elif( isinstance(object,tuple) or isinstance(object,np.ndarray) ):
            return( self.minY() > object[1]  )
        elif( isinstance(object,float) or isinstance(object,int) ):
            return( self.minY() > object )
        else:
            logger.warning("SimpleCV did not recognize the input type to feature.below(). This method only takes another feature, an (x,y) tuple, or a ndarray type.")
            return None
    def right(self,object):
        """
        **SUMMARY**
        Return true if the feature is to the right of the object, where the object can be a bounding box,
        bounding circle, a list of tuples in a closed polygon, or any other feature.
        **PARAMETERS**
        * *object*
          * A bounding box - of the form (x,y,w,h) where x,y is the upper left corner
          * A bounding circle of the form (x,y,r)
          * A list of x,y tuples defining a closed polygon e.g. ((x,y),(x,y),....)
          * Any two dimensional feature (e.g. blobs, circle ...)
        **RETURNS**
        Returns a Boolean, True if the feature is to the right of the object, False otherwise.
        **EXAMPLE**
        >>> img = Image("Lenna")
        >>> blobs = img.findBlobs()
        >>> b = blobs[0]
        >>> if( blobs[-1].right(b) ):
        >>>    print "right of the the blob"
        """
        if( isinstance(object,Feature) ):
            return( self.minX() > object.maxX() )
        elif( isinstance(object,tuple) or isinstance(object,np.ndarray) ):
            return( self.minX() > object[0]  )
        elif( isinstance(object,float) or isinstance(object,int) ):
            return( self.minX() > object )
        else:
            logger.warning("SimpleCV did not recognize the input type to feature.right(). This method only takes another feature, an (x,y) tuple, or a ndarray type.")
            return None
    def left(self,object):
        """
        **SUMMARY**
        Return true if the feature is to the left of the object, where the object can be a bounding box,
        bounding circle, a list of tuples in a closed polygon, or any other feature.
        **PARAMETERS**
        * *object*
          * A bounding box - of the form (x,y,w,h) where x,y is the upper left corner
          * A bounding circle of the form (x,y,r)
          * A list of x,y tuples defining a closed polygon e.g. ((x,y),(x,y),....)
          * Any two dimensional feature (e.g. blobs, circle ...)
        **RETURNS**
        Returns a Boolean, True if the feature is to the left of the object, False otherwise.
        **EXAMPLE**
        >>> img = Image("Lenna")
        >>> blobs = img.findBlobs()
        >>> b = blobs[0]
        >>> if( blobs[-1].left(b) ):
        >>>    print "left of  the biggest blob"
        """
        if( isinstance(object,Feature) ):
            return( self.maxX() < object.minX() )
        elif( isinstance(object,tuple) or isinstance(object,np.ndarray) ):
            return( self.maxX() < object[0]  )
        elif( isinstance(object,float) or isinstance(object,int) ):
            return( self.maxX() < object )
        else:
            logger.warning("SimpleCV did not recognize the input type to feature.left(). This method only takes another feature, an (x,y) tuple, or a ndarray type.")
            return None
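    # A hedged sketch (comments only) of the relational predicates above;
    # besides another Feature they also accept a bare coordinate value:
    # above()/below() compare against the y component, left()/right() against
    # the x component, and an (x, y) tuple or ndarray is reduced accordingly.
    #
    #   # f is some Feature with bounding box x in [10, 40], y in [20, 80]
    #   # f.above(100)       -> True   (maxY 80 < 100)
    #   # f.below(100)       -> False  (minY 20 is not > 100)
    #   # f.left((50, 0))    -> True   (maxX 40 < 50)
    #   # f.right(5)         -> True   (minX 10 > 5)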
    def contains(self,other):
        """
        **SUMMARY**
        Return true if the feature contains the object, where the object can be a bounding box,
        bounding circle, a list of tuples in a closed polygon, or any other feature.
        **PARAMETERS**
        * *other*
          * A bounding box - of the form (x,y,w,h) where x,y is the upper left corner
          * A bounding circle of the form (x,y,r)
          * A list of x,y tuples defining a closed polygon e.g. ((x,y),(x,y),....)
          * Any two dimensional feature (e.g. blobs, circle ...)
        **RETURNS**
        Returns a Boolean, True if the feature contains the object, False otherwise.
        **EXAMPLE**
        >>> img = Image("Lenna")
        >>> blobs = img.findBlobs()
        >>> b = blobs[0]
        >>> if( blobs[-1].contains(b) ):
        >>>    print "this blob is contained in the biggest blob"
        **NOTE**
        This currently performs a bounding box test, not a full polygon test for speed.
        """
        retVal = False
        bounds = self.points
        if( isinstance(other,Feature) ):# A feature
            retVal = True
            for p in other.points: # this isn't completely correct - only tests if points lie in poly, not edges.
                p2 = (int(p[0]),int(p[1]))
                retVal = self._pointInsidePolygon(p2,bounds)
                if( not retVal ):
                    break
        # a single point
        elif( (isinstance(other,tuple) and len(other)==2) or ( isinstance(other,np.ndarray) and other.shape[0]==2) ):
            retVal = self._pointInsidePolygon(other,bounds)
        elif( isinstance(other,tuple) and len(other)==3 ): # A circle
            #assume we are in x,y, r format
            retVal = True
            rr = other[2]*other[2]
            x = other[0]
            y = other[1]
            for p in bounds:
                test = ((x-p[0])*(x-p[0]))+((y-p[1])*(y-p[1]))
                if( test < rr ):
                    retVal = False
                    break
        elif( isinstance(other,tuple) and len(other)==4 and ( isinstance(other[0],float) or isinstance(other[0],int))):
            retVal = ( self.maxX() <= other[0]+other[2] and
                       self.minX() >= other[0] and
                       self.maxY() <= other[1]+other[3] and
                       self.minY() >= other[1] )
        elif(isinstance(other,list) and len(other) >= 4): # an arbitrary polygon
            #everything else ....
            retVal = True
            for p in other:
                test = self._pointInsidePolygon(p,bounds)
                if(not test):
                    retVal = False
                    break
        else:
            logger.warning("SimpleCV did not recognize the input type to features.contains. This method only takes another blob, an (x,y) tuple, or a ndarray type.")
            return False
        return retVal
    def overlaps(self, other):
        """
        **SUMMARY**
        Return true if the feature overlaps the object, where the object can be a bounding box,
        bounding circle, a list of tuples in a closed polygon, or any other feature.
        **PARAMETERS**
        * *other*
          * A bounding box - of the form (x,y,w,h) where x,y is the upper left corner
          * A bounding circle of the form (x,y,r)
          * A list of x,y tuples defining a closed polygon e.g. ((x,y),(x,y),....)
          * Any two dimensional feature (e.g. blobs, circle ...)
        **RETURNS**
        Returns a Boolean, True if the feature overlaps the object, False otherwise.
        **EXAMPLE**
        >>> img = Image("Lenna")
        >>> blobs = img.findBlobs()
        >>> b = blobs[0]
        >>> if( blobs[-1].overlaps(b) ):
        >>>    print "This blob overlaps the biggest blob"
        Returns true if this blob contains at least one point, part of a collection
        of points, or any part of a blob.
        **NOTE**
        This currently performs a bounding box test, not a full polygon test for speed.
       """
        retVal = False
        bounds = self.points
        if( isinstance(other,Feature) ):# A feature
            retVal = True
            for p in other.points: # this isn't completely correct - only tests if points lie in poly, not edges.
                retVal = self._pointInsidePolygon(p,bounds)
                if( retVal ):
                    break
        elif( (isinstance(other,tuple) and len(other)==2) or ( isinstance(other,np.ndarray) and other.shape[0]==2) ):
            retVal = self._pointInsidePolygon(other,bounds)
        elif( isinstance(other,tuple) and len(other)==3 and not isinstance(other[0],tuple)): # A circle
            #assume we are in x,y, r format
            retVal = False
            rr = other[2]*other[2]
            x = other[0]
            y = other[1]
            for p in bounds:
                test = ((x-p[0])*(x-p[0]))+((y-p[1])*(y-p[1]))
                if( test < rr ):
                    retVal = True
                    break
        elif( isinstance(other,tuple) and len(other)==4 and ( isinstance(other[0],float) or isinstance(other[0],int))):
            retVal = ( self.contains( (other[0],other[1] ) ) or # see if we contain any corner
                       self.contains( (other[0]+other[2],other[1] ) ) or
                       self.contains( (other[0],other[1]+other[3] ) ) or
                       self.contains( (other[0]+other[2],other[1]+other[3] ) ) )
        elif(isinstance(other,list) and len(other)  >= 3): # an arbitrary polygon
            #everything else ....
            retVal = False
            for p in other:
                test = self._pointInsidePolygon(p,bounds)
                if(test):
                    retVal = True
                    break
        else:
            logger.warning("SimpleCV did not recognize the input type to features.overlaps. This method only takes another blob, an (x,y) tuple, or a ndarray type.")
            return False
        return retVal
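    # A hedged sketch (comments only) of the shapes contains()/overlaps()
    # accept for 'other'; the feature f is hypothetical.
    #
    #   # f.contains((30, 30))                               # a point (x, y)
    #   # f.contains((25, 25, 100))                          # a circle (x, y, r)
    #   # f.overlaps((0, 0, 50, 50))                         # a box (x, y, w, h)
    #   # f.overlaps([(0, 0), (60, 0), (60, 60), (0, 60)])   # a polygon
    #   # f.contains(other_feature)                          # another Feature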
    def doesNotContain(self, other):
        """
        **SUMMARY**
        Return true if the feature does not contain the other object, where other can be a bounding box,
        bounding circle, a list of tuples in a closed polygon, or any other feature.
        **PARAMETERS**
        * *other*
          * A bounding box - of the form (x,y,w,h) where x,y is the upper left corner
          * A bounding circle of the form (x,y,r)
          * A list of x,y tuples defining a closed polygon e.g. ((x,y),(x,y),....)
          * Any two dimensional feature (e.g. blobs, circle ...)
        **RETURNS**
        Returns a Boolean, True if the feature does not contain the object, False otherwise.
        **EXAMPLE**
        >>> img = Image("Lenna")
        >>> blobs = img.findBlobs()
        >>> b = blobs[0]
        >>> if( blobs[-1].doesNotContain(b) ):
        >>>    print "above the biggest blob"
        Returns true if all of features points are inside this point.
        """
        return not self.contains(other)
    def doesNotOverlap( self, other):
        """
        **SUMMARY**
        Return true if the feature does not overlap the other object, where other can be a bounding box,
        bounding circle, a list of tuples in a closed polygon, or any other feature.
        **PARAMETERS**
        * *other*
          * A bounding box - of the form (x,y,w,h) where x,y is the upper left corner
          * A bounding circle of the form (x,y,r)
          * A list of x,y tuples defining a closed polygon e.g. ((x,y),(x,y),....)
          * Any two dimensional feature (e.g. blobs, circle ...)
        **RETURNS**
        Returns a Boolean, True if the feature does not Overlap  the object, False otherwise.
        **EXAMPLE**
        >>> img = Image("Lenna")
        >>> blobs = img.findBlobs()
        >>> b = blobs[0]
        >>> if( blobs[-1].doesNotOverlap(b) ):
        >>>    print "does not over overlap biggest blob"
        """
        return not self.overlaps( other)
    def isContainedWithin(self,other):
        """
        **SUMMARY**
        Return true if the feature is contained within the other object, where other can be a bounding box,
        bounding circle, a list of tuples in a closed polygon, or any other feature.
        **PARAMETERS**
        * *other*
          * A bounding box - of the form (x,y,w,h) where x,y is the upper left corner
          * A bounding circle of the form (x,y,r)
          * A list of x,y tuples defining a closed polygon e.g. ((x,y),(x,y),....)
          * Any two dimensional feature (e.g. blobs, circle ...)
        **RETURNS**
        Returns a Boolean, True if the feature is contained within the object, False otherwise.
        **EXAMPLE**
        >>> img = Image("Lenna")
        >>> blobs = img.findBlobs()
        >>> b = blobs[0]
        >>> if( blobs[-1].isContainedWithin(b) ):
        >>>    print "inside the blob"
        """
        retVal = True
        bounds = self.points
        if( isinstance(other,Feature) ): # another feature do the containment test
            retVal = other.contains(self)
        elif( isinstance(other,tuple) and len(other)==3 ): # a circle
            #assume we are in x,y, r format
            rr = other[2]*other[2] # radius squared
            x = other[0]
            y = other[1]
            for p in bounds:
                test = ((x-p[0])*(x-p[0]))+((y-p[1])*(y-p[1]))
                if( test > rr ):
                    retVal = False
                    break
        elif( isinstance(other,tuple) and len(other)==4 and  # a bounding box
            ( isinstance(other[0],float) or isinstance(other[0],int))): # we assume a tuple of four is (x,y,w,h)
            retVal = ( self.maxX() <= other[0]+other[2] and
                       self.minX() >= other[0] and
                       self.maxY() <= other[1]+other[3] and
                       self.minY() >= other[1] )
        elif(isinstance(other,list) and len(other) > 2 ): # an arbitrary polygon
            #everything else ....
            retVal = True
            for p in bounds:
                test = self._pointInsidePolygon(p,other)
                if(not test):
                    retVal = False
                    break
        else:
            logger.warning("SimpleCV did not recognize the input type to features.contains. This method only takes another blob, an (x,y) tuple, or a ndarray type.")
            retVal = False
        return retVal
    def isNotContainedWithin(self,shape):
        """
        **SUMMARY**
        Return true if the feature is not contained within the shape, where the shape can be a bounding box,
        bounding circle, a list of tuples in a closed polygon, or any other feature.
        **PARAMETERS**
        * *shape*
          * A bounding box - of the form (x,y,w,h) where x,y is the upper left corner
          * A bounding circle of the form (x,y,r)
          * A list of x,y tuples defining a closed polygon e.g. ((x,y),(x,y),....)
          * Any two dimensional feature (e.g. blobs, circle ...)
        **RETURNS**
        Returns a Boolean, True if the feature is not contained within the shape, False otherwise.
        **EXAMPLE**
        >>> img = Image("Lenna")
        >>> blobs = img.findBlobs()
        >>> b = blobs[0]
        >>> if( blobs[-1].isNotContainedWithin(b) ):
        >>>    print "Not inside the biggest blob"
        """
        return not self.isContainedWithin(shape)
    def _pointInsidePolygon(self,point,polygon):
        """
        Returns True if the tuple point (x,y) is inside the polygon of the form ((a,b),(c,d),...,(a,b)); the polygon should be closed.
        """
        # try:
        #     import cv2
        # except:
        #     logger.warning("Unable to import cv2")
        #     return False
        if( len(polygon) < 3 ):
            logger.warning("feature._pointInsidePolygon - this is not a valid polygon")
            return False
        if( not isinstance(polygon,list)):
            logger.warning("feature._pointInsidePolygon - this is not a valid polygon")
            return False
        #if( not isinstance(point,tuple) ):
            #if( len(point) == 2 ):
            #    point = tuple(point)
            #else:
            #    logger.warning("feature._pointInsidePolygon - this is not a valid point")
            #    return False
        #if( cv2.__version__ == '$Rev:4557'):
        counter = 0
        retVal = True
        p1 = None
        #print "point: " + str(point)
        poly = copy.deepcopy(polygon)
        poly.append(polygon[0])
        #for p2 in poly:
        N = len(poly)
        p1 = poly[0]
        for i in range(1,N+1):
            p2 = poly[i%N]
            if( point[1] > np.min((p1[1],p2[1])) ):
                if( point[1] <= np.max((p1[1],p2[1])) ):
                    if( point[0] <= np.max((p1[0],p2[0])) ):
                        if( p1[1] != p2[1] ):
                            test = float((point[1]-p1[1])*(p2[0]-p1[0]))/float(p2[1]-p1[1]) + p1[0]
                            if( p1[0] == p2[0] or point[0] <= test ):
                                counter = counter + 1
            p1 = p2
        if( counter % 2 == 0 ):
            retVal = False
            return retVal
        return retVal
        #else:
        #    result = cv2.pointPolygonTest(np.array(polygon,dtype='float32'),point,0)
        #    return result > 0 
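    # A small worked example (comments only) of the ray-casting test above:
    # cast a ray to the right of the point and count edge crossings; an odd
    # count means the point is inside the polygon.
    #
    #   square = [(0, 0), (10, 0), (10, 10), (0, 10)]
    #   # f._pointInsidePolygon((5, 5), square)    -> True   (1 crossing)
    #   # f._pointInsidePolygon((15, 5), square)   -> False  (0 crossings)
    #   # f._pointInsidePolygon((-5, 5), square)   -> False  (2 crossings)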
    def boundingCircle(self):
        """
        **SUMMARY**
        This function calculates the minimum bounding circle of the blob in the image
        as an (x,y,r) tuple
        **RETURNS**
        An (x,y,r) tuple where (x,y) is the center of the circle and r is the radius
        **EXAMPLE**
        >>> img = Image("RatMask.png")
        >>> blobs = img.findBlobs()
        >>> print blobs[-1].boundingCircle()
        """
        try:
            import cv2
        except:
            logger.warning("Unable to import cv2")
            return None
        # contour of the blob in image
        contour = self.contour()
        points = []
        # list of contour points converted to suitable format to pass into cv2.minEnclosingCircle()
        for pair in contour:
            points.append([[pair[0], pair[1]]])
        points = np.array(points)
        (cen, rad) = cv2.minEnclosingCircle(points)
        return (cen[0], cen[1], rad)
#---------------------------------------------
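
# A hedged, optional smoke test (not part of the original module): run this file
# directly to exercise a few of the helpers defined above. It assumes SimpleCV is
# installed and that the (hypothetical) image path below points at a real file.
if __name__ == "__main__":
    from SimpleCV import Image, Color

    sample = Image("sampleimages/blockhead.png")   # hypothetical sample image
    found = sample.findBlobs()
    if found:
        largest = found[-1]                        # findBlobs() sorts by area, largest last
        print "bounding box :", largest.boundingBox()
        print "aspect ratio :", largest.aspectRatio()
        print "on image edge:", largest.onImageEdge()
        largest.draw(color=Color.GREEN)
        sample.show()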
 | 
	bsd-3-clause | 