repo_name | path | copies | size | content | license
---|---|---|---|---|---|
icdishb/scikit-learn | setup.py | 7 | 6416 | #! /usr/bin/env python
#
# Copyright (C) 2007-2009 Cournapeau David <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
# License: 3-clause BSD
descr = """A set of python modules for machine learning and data mining"""
import sys
import os
import shutil
from distutils.command.clean import clean as Clean
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
# This is a bit (!) hackish: we are setting a global variable so that the main
# sklearn __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-learn to recursively
# build the compiled extensions in sub-packages are based on the Python import
# machinery.
builtins.__SKLEARN_SETUP__ = True
DISTNAME = 'scikit-learn'
DESCRIPTION = 'A set of python modules for machine learning and data mining'
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
MAINTAINER = 'Andreas Mueller'
MAINTAINER_EMAIL = '[email protected]'
URL = 'http://scikit-learn.org'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'http://sourceforge.net/projects/scikit-learn/files/'
# We can actually import a restricted version of sklearn that
# does not need the compiled code
import sklearn
VERSION = sklearn.__version__
# Optional setuptools features
# We need to import setuptools early, if we want setuptools features,
# as it monkey-patches the 'setup' function
# For some commands, use setuptools
SETUPTOOLS_COMMANDS = set([
'develop', 'release', 'bdist_egg', 'bdist_rpm',
'bdist_wininst', 'install_egg_info', 'build_sphinx',
'egg_info', 'easy_install', 'upload', 'bdist_wheel',
'--single-version-externally-managed',
])
if SETUPTOOLS_COMMANDS.intersection(sys.argv):
import setuptools
extra_setuptools_args = dict(
zip_safe=False, # the package can run out of an .egg file
include_package_data=True,
)
else:
extra_setuptools_args = dict()
# Custom clean command to remove build artifacts
class CleanCommand(Clean):
description = "Remove build artifacts from the source tree"
def run(self):
Clean.run(self)
if os.path.exists('build'):
shutil.rmtree('build')
for dirpath, dirnames, filenames in os.walk('sklearn'):
for filename in filenames:
if (filename.endswith('.so') or filename.endswith('.pyd')
or filename.endswith('.dll')
or filename.endswith('.pyc')):
os.unlink(os.path.join(dirpath, filename))
for dirname in dirnames:
if dirname == '__pycache__':
shutil.rmtree(os.path.join(dirpath, dirname))
cmdclass = {'clean': CleanCommand}
# Optional wheelhouse-uploader features
# To automate release of binary packages for scikit-learn we need a tool
# to download the packages generated by travis and appveyor workers (with
# version number matching the current release) and upload them all at once
# to PyPI at release time.
# The URLs of the artifact repositories are configured in the setup.cfg file.
WHEELHOUSE_UPLOADER_COMMANDS = set(['fetch_artifacts', 'upload_all'])
if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):
import wheelhouse_uploader.cmd
cmdclass.update(vars(wheelhouse_uploader.cmd))
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
# Avoid non-useful msg:
# "Ignoring attempt to set 'name' (from ... "
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('sklearn')
return config
def setup_package():
metadata = dict(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
cmdclass=cmdclass,
**extra_setuptools_args)
if (len(sys.argv) >= 2
and ('--help' in sys.argv[1:] or sys.argv[1]
in ('--help-commands', 'egg_info', '--version', 'clean'))):
# For these actions, NumPy is not required.
#
        # They need to succeed without NumPy, for example when pip is used to
        # install scikit-learn while NumPy is not yet present on the system.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
metadata['version'] = VERSION
else:
from numpy.distutils.core import setup
metadata['configuration'] = configuration
setup(**metadata)
if __name__ == "__main__":
setup_package()
| bsd-3-clause |
maheshakya/scikit-learn | sklearn/utils/setup.py | 296 | 2884 | import os
from os.path import join
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration('utils', parent_package, top_path)
config.add_subpackage('sparsetools')
cblas_libs, blas_info = get_blas_info()
cblas_compile_args = blas_info.pop('extra_compile_args', [])
cblas_includes = [join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])]
libraries = []
if os.name == 'posix':
libraries.append('m')
cblas_libs.append('m')
config.add_extension('sparsefuncs_fast', sources=['sparsefuncs_fast.c'],
libraries=libraries)
config.add_extension('arrayfuncs',
sources=['arrayfuncs.c'],
depends=[join('src', 'cholesky_delete.h')],
libraries=cblas_libs,
include_dirs=cblas_includes,
extra_compile_args=cblas_compile_args,
**blas_info
)
config.add_extension(
'murmurhash',
sources=['murmurhash.c', join('src', 'MurmurHash3.cpp')],
include_dirs=['src'])
config.add_extension('lgamma',
sources=['lgamma.c', join('src', 'gamma.c')],
include_dirs=['src'],
libraries=libraries)
config.add_extension('graph_shortest_path',
sources=['graph_shortest_path.c'],
include_dirs=[numpy.get_include()])
config.add_extension('fast_dict',
sources=['fast_dict.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension('seq_dataset',
sources=['seq_dataset.c'],
include_dirs=[numpy.get_include()])
config.add_extension('weight_vector',
sources=['weight_vector.c'],
include_dirs=cblas_includes,
libraries=cblas_libs,
**blas_info)
config.add_extension("_random",
sources=["_random.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension("_logistic_sigmoid",
sources=["_logistic_sigmoid.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
peterwilletts24/Python-Scripts | TRMM/trmm_plot_3hourly.py | 1 | 4007 | import matplotlib
#matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from matplotlib import rc
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
from matplotlib import cm
from mpl_toolkits.basemap import Basemap
from mpl_toolkits.basemap import cm as cm_base
import cPickle as pickle
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.colors as colors
from matplotlib.patches import Polygon
rc('font', family = 'serif', serif = 'cmr10')
rc('text', usetex=True)
rcParams['text.usetex']=True
rcParams['text.latex.unicode']=True
rcParams['font.family']='serif'
rcParams['font.serif']='cmr10'
import datetime
from dateutil import tz
min_contour = 0
max_contour = 3
tick_interval=0.3
lon_high = 102
lon_low = 64
lat_high= 30.
lat_low=-10.5
divisor=10 # for lat/lon rounding
sum_dom, latitude_domsingle, longitude_domsingle, time_dom, hours= pickle.load(open('/nfs/a90/eepdw/Data/Saved_data/TRMM/trmm_emb_pcpmean_by_hour.p', 'rb'))
# Calculate total at each lat,lon position
#mean_dom = np.mean(pcp_dom, axis=0)
#sum_dom = np.sum(pcp_dom, axis=0)
lon_mid=longitude_domsingle[90]
lat_mid=latitude_domsingle[80]
lons= longitude_domsingle[:]
lats = latitude_domsingle[:]
lons,lats = np.meshgrid(lons, lats)
#lon_0 = -nc.variables['true_lon'].getValue()
#lat_0 = nc.variables['true_lat'].getValue()
# create figure and axes instances
for i,h in enumerate(sum_dom):
fig = plt.figure(figsize=(8,8))
ax = fig.add_axes([0.1,0.1,0.8,0.8])
#Convert to India time
from_zone = tz.gettz('UTC')
to_zone = tz.gettz('Asia/Kolkata')
hours_utc = datetime.datetime(2011,8,1,int(hours[i]), 0, 0, ).replace(tzinfo=from_zone)
hours_local=hours_utc.astimezone(to_zone).strftime('%H%M')
m = Basemap(projection='mill',\
llcrnrlat=lat_low,urcrnrlat=lat_high,\
llcrnrlon=lon_low,urcrnrlon=lon_high,\
rsphere=6371229.,resolution='h',area_thresh=10000)
# draw coastlines, state and country boundaries, edge of map.
m.drawcoastlines(linewidth=0.5,color='#262626')
#m.drawstates()
m.drawcountries(linewidth=0.5,color='#262626')
# draw parallels.
parallels = np.arange(0.,90,divisor)
m.drawparallels(parallels,labels=[1,0,0,0],fontsize=10, color='#262626')
# draw meridians
meridians = np.arange(0.,360., divisor)
m.drawmeridians(meridians,labels=[0,0,0,1],fontsize=10, color='#262626')
#ny = mean_dom.shape[0]; nx = mean_dom.shape[1]
#lons, lats = m.makegrid(longitude_dom[1,:], latitude_dom[1,:]) # get lat/lons of ny by nx evenly space grid.
x, y = m(lons, lats) # compute map proj coordinates.
# draw filled contours.
clevs = np.linspace(min_contour, max_contour,256)
ticks = (np.arange(min_contour, max_contour+tick_interval,tick_interval))
cs = m.contourf(x,y,h, clevs, cmap=cm_base.s3pcpn_l, extend='both')
# add colorbar.
#cbar = m.colorbar(cs,location='bottom',pad="5%")
cbar = m.colorbar(cs,location='bottom',pad="5%")
cbar.set_ticklabels(["${%.1f}$" % t for t in ticks])
cbar.set_label('mm/h')
if int(hours[i])<10:
hours[i] = '0%s' % int(hours[i])
print hours[i]
plt.savefig('/nfs/a90/eepdw/Figures/TRMM/TRMM_mean_EMBRACE_period_%s_notitle.png' % hours[i], format='png', bbox_inches='tight')
plt.title('TRMM Rainfall Retrieval Mean at %s00 UTC (%s IST) for EMBRACE Period - 21 days from 21st August 2011' % (hours[i], hours_local), fontsize=16, color='#262626')
plt.savefig('/nfs/a90/eepdw/Figures/TRMM/TRMM_mean_EMBRACE_period_%s_title.png' % hours[i], format='png', bbox_inches='tight')
plt.title('%s00 UTC - %s IST' % (hours[i], hours_local), fontsize=16, color='#262626')
plt.savefig('/nfs/a90/eepdw/Figures/TRMM/TRMM_mean_EMBRACE_period_%s_short_title.png' % hours[i], format='png', bbox_inches='tight')
#plt.show()
| mit |
demis001/scikit-bio | skbio/stats/distance/tests/test_permanova.py | 13 | 4940 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from six import StringIO
from functools import partial
from unittest import TestCase, main
import numpy as np
import pandas as pd
from pandas.util.testing import assert_series_equal
from skbio import DistanceMatrix
from skbio.stats.distance import permanova
class TestPERMANOVA(TestCase):
"""All results were verified with R (vegan::adonis)."""
def setUp(self):
# Distance matrices with and without ties in the ranks, with 2 groups
# of equal size.
dm_ids = ['s1', 's2', 's3', 's4']
self.grouping_equal = ['Control', 'Control', 'Fast', 'Fast']
self.df = pd.read_csv(
StringIO('ID,Group\ns2,Control\ns3,Fast\ns4,Fast\ns5,Control\n'
's1,Control'), index_col=0)
self.dm_ties = DistanceMatrix([[0, 1, 1, 4],
[1, 0, 3, 2],
[1, 3, 0, 3],
[4, 2, 3, 0]], dm_ids)
self.dm_no_ties = DistanceMatrix([[0, 1, 5, 4],
[1, 0, 3, 2],
[5, 3, 0, 3],
[4, 2, 3, 0]], dm_ids)
# Test with 3 groups of unequal size.
self.grouping_unequal = ['Control', 'Treatment1', 'Treatment2',
'Treatment1', 'Control', 'Control']
# Equivalent grouping but with different labels -- groups should be
# assigned different integer labels but results should be the same.
self.grouping_unequal_relabeled = ['z', 42, 'abc', 42, 'z', 'z']
self.dm_unequal = DistanceMatrix(
[[0.0, 1.0, 0.1, 0.5678, 1.0, 1.0],
[1.0, 0.0, 0.002, 0.42, 0.998, 0.0],
[0.1, 0.002, 0.0, 1.0, 0.123, 1.0],
[0.5678, 0.42, 1.0, 0.0, 0.123, 0.43],
[1.0, 0.998, 0.123, 0.123, 0.0, 0.5],
[1.0, 0.0, 1.0, 0.43, 0.5, 0.0]],
['s1', 's2', 's3', 's4', 's5', 's6'])
# Expected series index is the same across all tests.
self.exp_index = ['method name', 'test statistic name', 'sample size',
'number of groups', 'test statistic', 'p-value',
'number of permutations']
# Stricter series equality testing than the default.
self.assert_series_equal = partial(assert_series_equal,
check_index_type=True,
check_series_type=True)
def test_call_ties(self):
# Ensure we get the same results if we rerun the method using the same
# inputs. Also ensure we get the same results if we run the method
# using a grouping vector or a data frame with equivalent groupings.
exp = pd.Series(index=self.exp_index,
data=['PERMANOVA', 'pseudo-F', 4, 2, 2.0, 0.671, 999],
name='PERMANOVA results')
for _ in range(2):
np.random.seed(0)
obs = permanova(self.dm_ties, self.grouping_equal)
self.assert_series_equal(obs, exp)
for _ in range(2):
np.random.seed(0)
obs = permanova(self.dm_ties, self.df, column='Group')
self.assert_series_equal(obs, exp)
def test_call_no_ties(self):
exp = pd.Series(index=self.exp_index,
data=['PERMANOVA', 'pseudo-F', 4, 2, 4.4, 0.332, 999],
name='PERMANOVA results')
np.random.seed(0)
obs = permanova(self.dm_no_ties, self.grouping_equal)
self.assert_series_equal(obs, exp)
def test_call_no_permutations(self):
exp = pd.Series(index=self.exp_index,
data=['PERMANOVA', 'pseudo-F', 4, 2, 4.4, np.nan, 0],
name='PERMANOVA results')
obs = permanova(self.dm_no_ties, self.grouping_equal, permutations=0)
self.assert_series_equal(obs, exp)
def test_call_unequal_group_sizes(self):
exp = pd.Series(
index=self.exp_index,
data=['PERMANOVA', 'pseudo-F', 6, 3, 0.578848, 0.645, 999],
name='PERMANOVA results')
np.random.seed(0)
obs = permanova(self.dm_unequal, self.grouping_unequal)
self.assert_series_equal(obs, exp)
np.random.seed(0)
obs = permanova(self.dm_unequal, self.grouping_unequal_relabeled)
self.assert_series_equal(obs, exp)
if __name__ == '__main__':
main()
| bsd-3-clause |
charanpald/sandbox | sandbox/predictors/DecisionTree.py | 1 | 4207 |
"""
A wrapper for the Decision Tree learner in scikits.learn with model selection
functionality.
"""
import numpy
import logging
from sandbox.util.Evaluator import Evaluator
from sandbox.util.Parameter import Parameter
from sandbox.util.Util import Util
from sandbox.predictors.AbstractWeightedPredictor import AbstractWeightedPredictor
class DecisionTree(AbstractWeightedPredictor):
def __init__(self, criterion="gini", maxDepth=10, minSplit=30, type="class"):
try:
from sklearn import tree
except ImportError as error:
logging.debug(error)
return
super(DecisionTree, self).__init__()
self.maxDepth = maxDepth
self.minSplit = minSplit
self.criterion = criterion
self.type = type
self.maxDepths = numpy.arange(1, 10)
self.minSplits = numpy.arange(10, 51, 10)
def setMinSplit(self, minSplit):
Parameter.checkInt(minSplit, 0, float('inf'))
self.minSplit = minSplit
def getMinSplit(self):
return self.minSplit
def getMinSplits(self):
return self.minSplits
def getMaxDepths(self):
return self.maxDepths
def setMaxDepth(self, maxDepth):
Parameter.checkInt(maxDepth, 1, float('inf'))
self.maxDepth = maxDepth
def getMaxDepth(self):
return self.maxDepth
def learnModel(self, X, y):
try:
from sklearn import tree
except ImportError as error:
logging.debug(error)
return
classes = numpy.unique(y)
if classes.shape[0] == 2:
self.worstResponse = classes[classes!=self.bestResponse][0]
if self.type == "class":
self.learner = tree.DecisionTreeClassifier(criterion=self.criterion, max_depth=self.maxDepth, min_samples_split=self.minSplit)
else:
self.learner = tree.DecisionTreeRegressor(criterion=self.criterion, max_depth=self.maxDepth, min_samples_split=self.minSplit)
self.learner = self.learner.fit(X, y)
def getLearner(self):
return self.learner
def getClassifier(self):
return self.learner
def getTree(self):
return self.learner.tree_
def predict(self, X):
predY = self.learner.predict(X)
return predY
def copy(self):
try:
from sklearn import tree
except ImportError as error:
logging.debug(error)
return
decisionTree = DecisionTree(criterion=self.criterion, maxDepth=self.maxDepth, minSplit=self.minSplit, type=self.type)
return decisionTree
@staticmethod
def generate(maxDepth=10, minSplit=30):
def generatorFunc():
decisionTree = DecisionTree()
decisionTree.setMaxDepth(maxDepth)
decisionTree.setMinSplit(minSplit)
return decisionTree
return generatorFunc
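    # A minimal usage sketch (hypothetical names: X is a numpy array of examples
    # as rows, y the corresponding -1/+1 labels; scikit-learn must be installed):
    #   makeTree = DecisionTree.generate(maxDepth=5, minSplit=20)
    #   learner = makeTree()
    #   learner.learnModel(X, y)
    #   predY = learner.predict(X)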
def parallelVfcv(self, X, y, idx, type="gini"):
"""
        Perform v-fold penalisation model selection using the decision tree
        learner and pick the best parameters, then train on the whole dataset
        using the best set of parameters.
:param X: The examples as rows
:type X: :class:`numpy.ndarray`
:param y: The binary -1/+1 labels
:type y: :class:`numpy.ndarray`
:param idx: A list of train/test splits
"""
Parameter.checkClass(X, numpy.ndarray)
Parameter.checkClass(y, numpy.ndarray)
paramDict = {}
paramDict["setMinSplit"] = self.minSplits
paramDict["setMaxDepth"] = self.maxDepths
return self.parallelModelSelect(X, y, idx, paramDict)
def getMetricMethod(self):
if self.type == "class":
return Evaluator.binaryError
else:
return Evaluator.rootMeanSqError
def __str__(self):
outputStr = self.type
outputStr += " maxDepth=" + str(self.maxDepth)
outputStr += " minSplit=" + str(self.minSplit)
outputStr += " criterion=" + str(self.criterion)
return outputStr
| gpl-3.0 |
sahat/bokeh | sphinx/source/tutorial/exercises/unemployment.py | 2 | 2107 | import numpy as np
import pandas as pd
from bokeh.plotting import *
from bokeh.objects import HoverTool
from bokeh.sampledata.unemployment1948 import data
from collections import OrderedDict
# Read in the data with pandas. Convert the year column to string
data['Year'] = [str(x) for x in data['Year']]
years = list(data['Year'])
months = ["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]
data = data.set_index('Year')
# this is the colormap from the original plot
colors = [
"#75968f", "#a5bab7", "#c9d9d3", "#e2e2e2", "#dfccce",
"#ddb7b1", "#cc7878", "#933b41", "#550b1d"
]
# Set up the data for plotting. We will need to have values for every
# pair of year/month names. Map the rate to a color.
month = []
year = []
color = []
rate = []
for y in years:
for m in months:
month.append(m)
year.append(y)
monthly_rate = data[m][y]
rate.append(monthly_rate)
color.append(colors[min(int(monthly_rate)-2, 8)])
# EXERCISE: create a `ColumnDataSource` with columns: month, year, color, rate
# EXERCISE: output to static HTML file
# EXERCISE: create a new figure
# EXERCISE: use the `rect` renderer with the following attributes:
# - x_range is years, y_range is months (reversed)
# - fill color for the rectangles is the 'color' field
# - line_color for the rectangles is None
# - tools are resize and hover tools
# - add a nice title, and set the plot_width and plot_height
rect('year', 'month', 0.95, 0.95, source=source,
x_range=years, y_range=list(reversed(months)),
color=
line_color=
tools=
title=
plot_width=
plot_height=
)
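# One possible way to fill in the blanks above -- a sketch only, not the official
# solution; the title, dimensions and tool list are illustrative choices, and the
# old bokeh API (ColumnDataSource, output_file, figure) is assumed:
#   source = ColumnDataSource(data=dict(month=month, year=year, color=color, rate=rate))
#   output_file('unemployment.html')
#   figure()
#   rect('year', 'month', 0.95, 0.95, source=source,
#        x_range=years, y_range=list(reversed(months)),
#        color='color',
#        line_color=None,
#        tools='resize,hover',
#        title='US Unemployment',
#        plot_width=900,
#        plot_height=400
#   )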
# EXERCISE: use grid(), axis(), etc. to style the plot. Some suggestions:
# - remove the axis and grid lines
# - remove the major ticks
# - make the tick labels smaller
# - set the x-axis orientation to vertical, or angled
# EXERCISE: configure the hover tool to display the month, year and rate
hover = [t for t in curplot().tools if isinstance(t, HoverTool)][0]
hover.tooltips = OrderedDict([
# fill me in
])
show() # show the plot | bsd-3-clause |
Jetson-TX1-AndroidTV/android_kernel_shield_tv_video4linux | scripts/tracing/dma-api/trace.py | 96 | 12420 | """Main program and stuff"""
#from pprint import pprint
from sys import stdin
import os.path
import re
from argparse import ArgumentParser
import cPickle as pickle
from collections import namedtuple
from plotting import plotseries, disp_pic
import smmu
class TracelineParser(object):
"""Parse the needed information out of an ftrace line"""
# <...>-6 [000] d..2 5.287079: dmadebug_iommu_map_page: device=sdhci-tegra.3, addr=0x01048000, size=4096 page=c13e7214 archdata=ed504640
def __init__(self):
self.pattern = re.compile("device=(?P<dev>.*), addr=(?P<addr>.*), size=(?P<size>.*) page=(?P<page>.*) archdata=(?P<archdata>.*)")
def parse(self, args):
args = self.pattern.match(args)
return (args.group("dev"), int(args.group("addr"), 16),
int(args.group("size")), int(args.group("page"), 16),
int(args.group("archdata"), 16))
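    # Given the argument portion of the sample line in the class docstring,
    # parse() would return (hex shown for readability):
    #   ("sdhci-tegra.3", 0x01048000, 4096, 0xc13e7214, 0xed504640)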
def biggest_indices(items, n):
"""Return list of indices of n biggest elements in items"""
with_indices = [(x, i) for i, x in enumerate(items)]
ordered = sorted(with_indices)
return [i for x, i in ordered[-n:]]
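# Illustrative example with plain Python values (not taken from a trace):
#   biggest_indices([5, 1, 9, 3], 2) returns [0, 2], the indices of 5 and 9.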
def by_indices(xs, ids):
"""Get elements from the list xs by their indices"""
return [xs[i] for i in ids]
"""Event represents one input line"""
Event = namedtuple("Event", ["time", "dev", "data", "delta"])
class Trace(object):
def __init__(self, args):
smmu.VERBOSITY = args.verbosity
self._args = args
self.devlist = []
self.events = []
self.metrics = {
"max_peak": self._usage_peak,
"activity_rate": self._usage_activity,
"average_mem": self._usage_avg
}
self.traceliner = TracelineParser()
@staticmethod
def get_metrics():
"""What filter metrics to get max users"""
return ["max_peak", "activity_rate", "average_mem"]
def show(self):
"""Shuffle events around, build plots, and show them"""
if self._args.max_plots:
evs = self.merge_events()
else:
evs = self.events
series, devlist = self.unload(evs)
if not self._args.no_plots:
self.plot(series, devlist)
def _get_usage(self, evs):
"""Return a metric of how active the events in evs are"""
return self.metrics[self._args.max_metric](evs)
def _usage_peak(self, evs):
"""Return the biggest peak"""
return max(e.data for e in evs)
def _usage_activity(self, evs):
"""Return the activity count: simply the length of the event list"""
return len(evs)
def _usage_avg(self, evs):
"""Return the average over all points"""
# FIXME: the data points are not uniform in time, so this might be
# somewhat off.
        return float(sum(e.data for e in evs)) / len(evs)
def merge_events(self):
"""Find out biggest users, keep them and flatten others to a single user"""
sizes = []
dev_evs = []
for i, dev in enumerate(self.devlist):
dev_evs.append([e for e in self.events if e.dev == dev])
sizes.append(self._get_usage(dev_evs[i]))
# indices of the devices
biggestix = biggest_indices(sizes, self._args.max_plots)
print biggestix
is_big = {}
for i, dev in enumerate(self.devlist):
is_big[dev] = i in biggestix
evs = []
for e in self.events:
if not is_big[e.dev]:
e = Event(e.time, "others", e.data, e.delta)
evs.append(e)
self.devlist.append("others")
return evs
def unload(self, events):
"""Prepare the event list for plotting
series ends up as [([time0], [data0]), ([time1], [data1]), ...]
"""
# ([x], [y]) for matplotlib
series = [([], []) for x in self.devlist]
devidx = dict([(d, i) for i, d in enumerate(self.devlist)])
for event in events:
devid = devidx[event.dev]
series[devid][0].append(event.time)
series[devid][1].append(event.data) # self.dev_data(event.dev))
series_out = []
devlist_out = []
for ser, dev in zip(series, self.devlist):
if len(ser[0]) > 0:
series_out.append(ser)
devlist_out.append(dev)
return series_out, devlist_out
def plot(self, series, devlist):
"""Display the plots"""
#series, devlist = flatten_axes(self.series, self.devlist,
# self._args.max_plots)
devinfo = (series, map(str, devlist))
allocfreeinfo = (self.allocsfrees, ["allocd", "freed", "current"])
plotseries(devinfo, allocfreeinfo)
#plotseries(devinfo)
def dev_data(self, dev):
"""what data to plot against time"""
return dev._cur_alloc
def _cache_hash(self, filename):
"""The trace files are probably not of the same size"""
return str(os.path.getsize(filename))
def load_cache(self):
"""Get the trace data from a database file, if one exists"""
has = self._cache_hash(self._args.filename)
try:
cache = open("trace." + has)
except IOError:
pass
else:
self._load_cache(pickle.load(cache))
return True
return False
def save_cache(self):
"""Store the raw trace data to a database"""
data = self._save_cache()
fh = open("trace." + self._cache_hash(self._args.filename), "w")
pickle.dump(data, fh)
def _save_cache(self):
"""Return the internal data that is needed to be pickled"""
return self.events, self.devlist, self.allocsfrees
def _load_cache(self, data):
"""Get the data from an unpickled object"""
self.events, self.devlist, self.allocsfrees = data
def load_events(self):
"""Get the internal data from a trace file or cache"""
if self._args.filename:
if self._args.cache and self.load_cache():
return
fh = open(self._args.filename)
else:
fh = stdin
self.parse(fh)
if self._args.cache and self._args.filename:
self.save_cache()
def parse(self, fh):
"""Parse the trace file in fh, store data to self"""
mems = {}
dev_by_name = {}
devlist = []
buf_owners = {}
events = []
allocsfrees = [([], []), ([], []), ([], [])] # allocs, frees, current
allocs = 0
frees = 0
curbufs = 0
mem_bytes = 1024 * 1024 * 1024
npages = mem_bytes / 4096
ncols = 512
le_pic = [0] * npages
lastupd = 0
for lineidx, line in enumerate(fh):
# no comments
if line.startswith("#"):
continue
taskpid, cpu, flags, timestamp, func, args = line.strip().split(None, 5)
func = func[:-len(":")]
# unneeded events may be there too
if not func.startswith("dmadebug"):
continue
if self._args.verbosity >= 3:
print line.rstrip()
timestamp = float(timestamp[:-1])
if timestamp < self._args.start:
continue
if timestamp >= self._args.end:
break
devname, addr, size, page, archdata = self.traceliner.parse(args)
if self._args.processes:
devname = taskpid.split("-")[0]
mapping = archdata
try:
memmap = mems[mapping]
except KeyError:
memmap = mem(mapping)
mems[mapping] = memmap
try:
dev = dev_by_name[devname]
except KeyError:
dev = smmu.Device(devname, memmap)
dev_by_name[devname] = dev
devlist.append(dev)
allocfuncs = ["dmadebug_map_page", "dmadebug_map_sg", "dmadebug_alloc_coherent"]
freefuncs = ["dmadebug_unmap_page", "dmadebug_unmap_sg", "dmadebug_free_coherent"]
ignfuncs = []
if timestamp-lastupd > 0.1:
# just some debug prints for now
lastupd = timestamp
print lineidx,timestamp
le_pic2 = [le_pic[i:i+ncols] for i in range(0, npages, ncols)]
#disp_pic(le_pic2)
# animating the bitmap would be cool
#for row in le_pic:
# for i, a in enumerate(row):
# pass
#row[i] = 0.09 * a
if func in allocfuncs:
pages = dev_by_name[devname].alloc(addr, size)
for p in pages:
le_pic[p] = 1
buf_owners[addr] = dev_by_name[devname]
allocs += 1
curbufs += 1
allocsfrees[0][0].append(timestamp)
allocsfrees[0][1].append(allocs)
elif func in freefuncs:
if addr not in buf_owners:
if self._args.verbosity >= 1:
print "warning: %s unmapping unmapped %s" % (dev, addr)
buf_owners[addr] = dev
# fixme: move this to bitmap handling
# get to know the owners of bits
# allocs/frees calls should be traced separately from maps?
# map_pages is traced per page :(
if buf_owners[addr] != dev and self._args.verbosity >= 2:
print "note: %s unmapping [%d,%d) mapped by %s" % (
dev, addr, addr+size, buf_owners[addr])
pages = buf_owners[addr].free(addr, size)
for p in pages:
le_pic[p] = 0
frees -= 1
curbufs -= 1
allocsfrees[1][0].append(timestamp)
allocsfrees[1][1].append(frees)
elif func not in ignfuncs:
raise ValueError("unhandled %s" % func)
allocsfrees[2][0].append(timestamp)
allocsfrees[2][1].append(curbufs)
events.append(Event(timestamp, dev, self.dev_data(dev), size))
self.events = events
self.devlist = devlist
self.allocsfrees = allocsfrees
le_pic2 = [le_pic[i:i+ncols] for i in range(0, npages, ncols)]
# FIXME: not quite ready yet
disp_pic(le_pic2)
return
def mem(asid):
"""Create a new memory object for the given asid space"""
SZ_2G = 2 * 1024 * 1024 * 1024
SZ_1M = 1 * 1024 * 1024
# arch/arm/mach-tegra/include/mach/iomap.h TEGRA_SMMU_(BASE|SIZE)
base = 0x80000000
size = SZ_2G - SZ_1M
return smmu.Memory(base, size, asid)
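# With these constants each ASID gets the IOVA window [0x80000000, 0xfff00000),
# i.e. 2 GiB minus 1 MiB of remappable space.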
def get_args():
"""Eat command line arguments, return argparse namespace for settings"""
parser = ArgumentParser()
parser.add_argument("filename", nargs="?",
help="trace file dump, stdin if not given")
parser.add_argument("-s", "--start", type=float, default=0,
help="start timestamp")
parser.add_argument("-e", "--end", type=float, default=1e9,
help="end timestamp")
parser.add_argument("-v", "--verbosity", action="count", default=0,
help="amount of extra information: once for warns (dup addrs), "
"twice for notices (different client in map/unmap), "
"three for echoing all back")
parser.add_argument("-p", "--processes", action="store_true",
help="use processes as memory clients instead of devices")
parser.add_argument("-n", "--no-plots", action="store_true",
help="Don't draw the plots, only read the trace")
parser.add_argument("-c", "--cache", action="store_true",
help="Pickle the data and make a cache file for fast reloading")
parser.add_argument("-m", "--max-plots", type=int,
help="Maximum number of clients to show; show biggest and sum others")
parser.add_argument("-M", "--max-metric", choices=Trace.get_metrics(),
default=Trace.get_metrics()[0],
help="Metric to use when choosing clients in --max-plots")
return parser.parse_args()
def main():
args = get_args()
trace = Trace(args)
trace.load_events()
trace.show()
if __name__ == "__main__":
main()
| gpl-2.0 |
xwolf12/scikit-learn | examples/bicluster/plot_spectral_biclustering.py | 403 | 2011 | """
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)
# Author: Kemal Eren <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
shape=(300, 300), n_clusters=n_clusters, noise=10,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralBiclustering(n_clusters=n_clusters, method='log',
random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.1f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
np.sort(model.column_labels_) + 1),
cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")
plt.show()
| bsd-3-clause |
scottpurdy/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/patches.py | 69 | 110325 | # -*- coding: utf-8 -*-
from __future__ import division
import math
import matplotlib as mpl
import numpy as np
import matplotlib.cbook as cbook
import matplotlib.artist as artist
import matplotlib.colors as colors
import matplotlib.transforms as transforms
from matplotlib.path import Path
# these are not available for the object inspector until after the
# class is built so we define an initial set here for the init
# function and they will be overridden after object definition
artist.kwdocd['Patch'] = """
================= ==============================================
Property Description
================= ==============================================
alpha float
animated [True | False]
antialiased or aa [True | False]
clip_box a matplotlib.transform.Bbox instance
clip_on [True | False]
edgecolor or ec any matplotlib color
facecolor or fc any matplotlib color
figure a matplotlib.figure.Figure instance
fill [True | False]
hatch unknown
label any string
linewidth or lw float
lod [True | False]
transform a matplotlib.transform transformation instance
visible [True | False]
zorder any number
================= ==============================================
"""
class Patch(artist.Artist):
"""
A patch is a 2D thingy with a face color and an edge color.
If any of *edgecolor*, *facecolor*, *linewidth*, or *antialiased*
are *None*, they default to their rc params setting.
"""
zorder = 1
def __str__(self):
return str(self.__class__).split('.')[-1]
def get_verts(self):
"""
Return a copy of the vertices used in this patch
If the patch contains Bézier curves, the curves will be
interpolated by line segments. To access the curves as
curves, use :meth:`get_path`.
"""
trans = self.get_transform()
path = self.get_path()
polygons = path.to_polygons(trans)
if len(polygons):
return polygons[0]
return []
def contains(self, mouseevent):
"""Test whether the mouse event occurred in the patch.
Returns T/F, {}
"""
# This is a general version of contains that should work on any
# patch with a path. However, patches that have a faster
# algebraic solution to hit-testing should override this
# method.
if callable(self._contains): return self._contains(self,mouseevent)
inside = self.get_path().contains_point(
(mouseevent.x, mouseevent.y), self.get_transform())
return inside, {}
def update_from(self, other):
"""
Updates this :class:`Patch` from the properties of *other*.
"""
artist.Artist.update_from(self, other)
self.set_edgecolor(other.get_edgecolor())
self.set_facecolor(other.get_facecolor())
self.set_fill(other.get_fill())
self.set_hatch(other.get_hatch())
self.set_linewidth(other.get_linewidth())
self.set_linestyle(other.get_linestyle())
self.set_transform(other.get_data_transform())
self.set_figure(other.get_figure())
self.set_alpha(other.get_alpha())
def get_extents(self):
"""
Return a :class:`~matplotlib.transforms.Bbox` object defining
the axis-aligned extents of the :class:`Patch`.
"""
return self.get_path().get_extents(self.get_transform())
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` applied
to the :class:`Patch`.
"""
return self.get_patch_transform() + artist.Artist.get_transform(self)
def get_data_transform(self):
return artist.Artist.get_transform(self)
def get_patch_transform(self):
return transforms.IdentityTransform()
def get_antialiased(self):
"""
Returns True if the :class:`Patch` is to be drawn with antialiasing.
"""
return self._antialiased
get_aa = get_antialiased
def get_edgecolor(self):
"""
Return the edge color of the :class:`Patch`.
"""
return self._edgecolor
get_ec = get_edgecolor
def get_facecolor(self):
"""
Return the face color of the :class:`Patch`.
"""
return self._facecolor
get_fc = get_facecolor
def get_linewidth(self):
"""
Return the line width in points.
"""
return self._linewidth
get_lw = get_linewidth
def get_linestyle(self):
"""
Return the linestyle. Will be one of ['solid' | 'dashed' |
'dashdot' | 'dotted']
"""
return self._linestyle
get_ls = get_linestyle
def set_antialiased(self, aa):
"""
Set whether to use antialiased rendering
ACCEPTS: [True | False] or None for default
"""
if aa is None: aa = mpl.rcParams['patch.antialiased']
self._antialiased = aa
def set_aa(self, aa):
"""alias for set_antialiased"""
return self.set_antialiased(aa)
def set_edgecolor(self, color):
"""
Set the patch edge color
ACCEPTS: mpl color spec, or None for default, or 'none' for no color
"""
if color is None: color = mpl.rcParams['patch.edgecolor']
self._edgecolor = color
def set_ec(self, color):
"""alias for set_edgecolor"""
return self.set_edgecolor(color)
def set_facecolor(self, color):
"""
Set the patch face color
ACCEPTS: mpl color spec, or None for default, or 'none' for no color
"""
if color is None: color = mpl.rcParams['patch.facecolor']
self._facecolor = color
def set_fc(self, color):
"""alias for set_facecolor"""
return self.set_facecolor(color)
def set_linewidth(self, w):
"""
Set the patch linewidth in points
ACCEPTS: float or None for default
"""
if w is None: w = mpl.rcParams['patch.linewidth']
self._linewidth = w
def set_lw(self, lw):
"""alias for set_linewidth"""
return self.set_linewidth(lw)
def set_linestyle(self, ls):
"""
Set the patch linestyle
ACCEPTS: ['solid' | 'dashed' | 'dashdot' | 'dotted']
"""
if ls is None: ls = "solid"
self._linestyle = ls
def set_ls(self, ls):
"""alias for set_linestyle"""
return self.set_linestyle(ls)
def set_fill(self, b):
"""
Set whether to fill the patch
ACCEPTS: [True | False]
"""
self.fill = b
def get_fill(self):
'return whether fill is set'
return self.fill
def set_hatch(self, h):
"""
Set the hatching pattern
hatch can be one of::
/ - diagonal hatching
\ - back diagonal
| - vertical
- - horizontal
# - crossed
x - crossed diagonal
Letters can be combined, in which case all the specified
hatchings are done. If same letter repeats, it increases the
density of hatching in that direction.
CURRENT LIMITATIONS:
1. Hatching is supported in the PostScript backend only.
2. Hatching is done with solid black lines of width 0.
ACCEPTS: [ '/' | '\\' | '|' | '-' | '#' | 'x' ]
"""
self._hatch = h
def get_hatch(self):
'Return the current hatching pattern'
return self._hatch
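    # For example (hypothetical patch), combining/repeating letters as described
    # in set_hatch above:
    #   p = Rectangle((0, 0), 1, 1, fill=False)
    #   p.set_hatch('//')   # repeated '/' gives denser diagonal hatching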
def draw(self, renderer):
'Draw the :class:`Patch` to the given *renderer*.'
if not self.get_visible(): return
#renderer.open_group('patch')
gc = renderer.new_gc()
if cbook.is_string_like(self._edgecolor) and self._edgecolor.lower()=='none':
gc.set_linewidth(0)
else:
gc.set_foreground(self._edgecolor)
gc.set_linewidth(self._linewidth)
gc.set_linestyle(self._linestyle)
gc.set_antialiased(self._antialiased)
self._set_gc_clip(gc)
gc.set_capstyle('projecting')
gc.set_url(self._url)
gc.set_snap(self._snap)
if (not self.fill or self._facecolor is None or
(cbook.is_string_like(self._facecolor) and self._facecolor.lower()=='none')):
rgbFace = None
gc.set_alpha(1.0)
else:
r, g, b, a = colors.colorConverter.to_rgba(self._facecolor, self._alpha)
rgbFace = (r, g, b)
gc.set_alpha(a)
if self._hatch:
gc.set_hatch(self._hatch )
path = self.get_path()
transform = self.get_transform()
tpath = transform.transform_path_non_affine(path)
affine = transform.get_affine()
renderer.draw_path(gc, tpath, affine, rgbFace)
#renderer.close_group('patch')
def get_path(self):
"""
Return the path of this patch
"""
raise NotImplementedError('Derived must override')
def get_window_extent(self, renderer=None):
return self.get_path().get_extents(self.get_transform())
artist.kwdocd['Patch'] = patchdoc = artist.kwdoc(Patch)
for k in ('Rectangle', 'Circle', 'RegularPolygon', 'Polygon', 'Wedge', 'Arrow',
'FancyArrow', 'YAArrow', 'CirclePolygon', 'Ellipse', 'Arc',
'FancyBboxPatch'):
artist.kwdocd[k] = patchdoc
# define Patch.__init__ after the class so that the docstring can be
# auto-generated.
def __patch__init__(self,
edgecolor=None,
facecolor=None,
linewidth=None,
linestyle=None,
antialiased = None,
hatch = None,
fill=True,
**kwargs
):
"""
The following kwarg properties are supported
%(Patch)s
"""
artist.Artist.__init__(self)
if linewidth is None: linewidth = mpl.rcParams['patch.linewidth']
if linestyle is None: linestyle = "solid"
if antialiased is None: antialiased = mpl.rcParams['patch.antialiased']
self.set_edgecolor(edgecolor)
self.set_facecolor(facecolor)
self.set_linewidth(linewidth)
self.set_linestyle(linestyle)
self.set_antialiased(antialiased)
self.set_hatch(hatch)
self.fill = fill
self._combined_transform = transforms.IdentityTransform()
if len(kwargs): artist.setp(self, **kwargs)
__patch__init__.__doc__ = cbook.dedent(__patch__init__.__doc__) % artist.kwdocd
Patch.__init__ = __patch__init__
class Shadow(Patch):
def __str__(self):
return "Shadow(%s)"%(str(self.patch))
def __init__(self, patch, ox, oy, props=None, **kwargs):
"""
Create a shadow of the given *patch* offset by *ox*, *oy*.
*props*, if not *None*, is a patch property update dictionary.
        If *None*, the shadow will have the same color as the face,
but darkened.
kwargs are
%(Patch)s
"""
Patch.__init__(self)
self.patch = patch
self.props = props
self._ox, self._oy = ox, oy
self._update_transform()
self._update()
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def _update(self):
self.update_from(self.patch)
if self.props is not None:
self.update(self.props)
else:
r,g,b,a = colors.colorConverter.to_rgba(self.patch.get_facecolor())
rho = 0.3
r = rho*r
g = rho*g
b = rho*b
self.set_facecolor((r,g,b,0.5))
self.set_edgecolor((r,g,b,0.5))
def _update_transform(self):
self._shadow_transform = transforms.Affine2D().translate(self._ox, self._oy)
def _get_ox(self):
return self._ox
def _set_ox(self, ox):
self._ox = ox
self._update_transform()
def _get_oy(self):
return self._oy
def _set_oy(self, oy):
self._oy = oy
self._update_transform()
def get_path(self):
return self.patch.get_path()
def get_patch_transform(self):
return self.patch.get_patch_transform() + self._shadow_transform
class Rectangle(Patch):
"""
Draw a rectangle with lower left at *xy* = (*x*, *y*) with
specified *width* and *height*.
"""
def __str__(self):
return self.__class__.__name__ \
+ "(%g,%g;%gx%g)" % (self._x, self._y, self._width, self._height)
def __init__(self, xy, width, height, **kwargs):
"""
*fill* is a boolean indicating whether to fill the rectangle
Valid kwargs are:
%(Patch)s
"""
Patch.__init__(self, **kwargs)
self._x = xy[0]
self._y = xy[1]
self._width = width
self._height = height
# Note: This cannot be calculated until this is added to an Axes
self._rect_transform = transforms.IdentityTransform()
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def get_path(self):
"""
Return the vertices of the rectangle
"""
return Path.unit_rectangle()
def _update_patch_transform(self):
"""NOTE: This cannot be called until after this has been added
to an Axes, otherwise unit conversion will fail. This
        makes it very important to call the accessor method and
not directly access the transformation member variable.
"""
x = self.convert_xunits(self._x)
y = self.convert_yunits(self._y)
width = self.convert_xunits(self._width)
height = self.convert_yunits(self._height)
bbox = transforms.Bbox.from_bounds(x, y, width, height)
self._rect_transform = transforms.BboxTransformTo(bbox)
def get_patch_transform(self):
self._update_patch_transform()
return self._rect_transform
def contains(self, mouseevent):
# special case the degenerate rectangle
if self._width==0 or self._height==0:
return False, {}
x, y = self.get_transform().inverted().transform_point(
(mouseevent.x, mouseevent.y))
return (x >= 0.0 and x <= 1.0 and y >= 0.0 and y <= 1.0), {}
def get_x(self):
"Return the left coord of the rectangle"
return self._x
def get_y(self):
"Return the bottom coord of the rectangle"
return self._y
def get_xy(self):
"Return the left and bottom coords of the rectangle"
return self._x, self._y
def get_width(self):
"Return the width of the rectangle"
return self._width
def get_height(self):
"Return the height of the rectangle"
return self._height
def set_x(self, x):
"""
Set the left coord of the rectangle
ACCEPTS: float
"""
self._x = x
def set_y(self, y):
"""
Set the bottom coord of the rectangle
ACCEPTS: float
"""
self._y = y
def set_xy(self, xy):
"""
Set the left and bottom coords of the rectangle
ACCEPTS: 2-item sequence
"""
self._x, self._y = xy
def set_width(self, w):
"""
        Set the width of the rectangle
ACCEPTS: float
"""
self._width = w
def set_height(self, h):
"""
        Set the height of the rectangle
ACCEPTS: float
"""
self._height = h
def set_bounds(self, *args):
"""
Set the bounds of the rectangle: l,b,w,h
ACCEPTS: (left, bottom, width, height)
"""
        if len(args)==1:
l,b,w,h = args[0]
else:
l,b,w,h = args
self._x = l
self._y = b
self._width = w
self._height = h
def get_bbox(self):
return transforms.Bbox.from_bounds(self._x, self._y, self._width, self._height)
xy = property(get_xy, set_xy)
class RegularPolygon(Patch):
"""
A regular polygon patch.
"""
def __str__(self):
return "Poly%d(%g,%g)"%(self._numVertices,self._xy[0],self._xy[1])
def __init__(self, xy, numVertices, radius=5, orientation=0,
**kwargs):
"""
Constructor arguments:
*xy*
A length 2 tuple (*x*, *y*) of the center.
*numVertices*
the number of vertices.
*radius*
The distance from the center to each of the vertices.
*orientation*
rotates the polygon (in radians).
Valid kwargs are:
%(Patch)s
"""
self._xy = xy
self._numVertices = numVertices
self._orientation = orientation
self._radius = radius
self._path = Path.unit_regular_polygon(numVertices)
self._poly_transform = transforms.Affine2D()
self._update_transform()
Patch.__init__(self, **kwargs)
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def _update_transform(self):
self._poly_transform.clear() \
.scale(self.radius) \
.rotate(self.orientation) \
.translate(*self.xy)
def _get_xy(self):
return self._xy
    def _set_xy(self, xy):
        self._xy = xy
        self._update_transform()
xy = property(_get_xy, _set_xy)
def _get_orientation(self):
return self._orientation
def _set_orientation(self, xy):
self._orientation = xy
orientation = property(_get_orientation, _set_orientation)
def _get_radius(self):
return self._radius
def _set_radius(self, xy):
self._radius = xy
radius = property(_get_radius, _set_radius)
def _get_numvertices(self):
return self._numVertices
def _set_numvertices(self, numVertices):
self._numVertices = numVertices
numvertices = property(_get_numvertices, _set_numvertices)
def get_path(self):
return self._path
def get_patch_transform(self):
self._update_transform()
return self._poly_transform
class PathPatch(Patch):
"""
A general polycurve path patch.
"""
def __str__(self):
return "Poly((%g, %g) ...)" % tuple(self._path.vertices[0])
def __init__(self, path, **kwargs):
"""
*path* is a :class:`matplotlib.path.Path` object.
Valid kwargs are:
%(Patch)s
.. seealso::
:class:`Patch`:
For additional kwargs
"""
Patch.__init__(self, **kwargs)
self._path = path
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def get_path(self):
return self._path
class Polygon(Patch):
"""
A general polygon patch.
"""
def __str__(self):
return "Poly((%g, %g) ...)" % tuple(self._path.vertices[0])
def __init__(self, xy, closed=True, **kwargs):
"""
*xy* is a numpy array with shape Nx2.
If *closed* is *True*, the polygon will be closed so the
starting and ending points are the same.
Valid kwargs are:
%(Patch)s
.. seealso::
:class:`Patch`:
For additional kwargs
"""
Patch.__init__(self, **kwargs)
xy = np.asarray(xy, np.float_)
self._path = Path(xy)
self.set_closed(closed)
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def get_path(self):
return self._path
def get_closed(self):
return self._closed
def set_closed(self, closed):
self._closed = closed
xy = self._get_xy()
if closed:
if len(xy) and (xy[0] != xy[-1]).any():
xy = np.concatenate([xy, [xy[0]]])
else:
if len(xy)>2 and (xy[0]==xy[-1]).all():
xy = xy[0:-1]
self._set_xy(xy)
def get_xy(self):
return self._path.vertices
def set_xy(self, vertices):
self._path = Path(vertices)
_get_xy = get_xy
_set_xy = set_xy
xy = property(
get_xy, set_xy, None,
"""Set/get the vertices of the polygon. This property is
provided for backward compatibility with matplotlib 0.91.x
only. New code should use
:meth:`~matplotlib.patches.Polygon.get_xy` and
:meth:`~matplotlib.patches.Polygon.set_xy` instead.""")
class Wedge(Patch):
"""
Wedge shaped patch.
"""
def __str__(self):
return "Wedge(%g,%g)"%(self.theta1,self.theta2)
def __init__(self, center, r, theta1, theta2, width=None, **kwargs):
"""
Draw a wedge centered at *x*, *y* center with radius *r* that
sweeps *theta1* to *theta2* (in degrees). If *width* is given,
then a partial wedge is drawn from inner radius *r* - *width*
to outer radius *r*.
Valid kwargs are:
%(Patch)s
"""
Patch.__init__(self, **kwargs)
self.center = center
self.r,self.width = r,width
self.theta1,self.theta2 = theta1,theta2
# Inner and outer rings are connected unless the annulus is complete
delta=theta2-theta1
if abs((theta2-theta1) - 360) <= 1e-12:
theta1,theta2 = 0,360
connector = Path.MOVETO
else:
connector = Path.LINETO
# Form the outer ring
arc = Path.arc(theta1,theta2)
if width is not None:
            # Partial annulus needs to draw the outer ring
# followed by a reversed and scaled inner ring
v1 = arc.vertices
v2 = arc.vertices[::-1]*float(r-width)/r
v = np.vstack([v1,v2,v1[0,:],(0,0)])
c = np.hstack([arc.codes,arc.codes,connector,Path.CLOSEPOLY])
c[len(arc.codes)]=connector
else:
# Wedge doesn't need an inner ring
v = np.vstack([arc.vertices,[(0,0),arc.vertices[0,:],(0,0)]])
c = np.hstack([arc.codes,[connector,connector,Path.CLOSEPOLY]])
# Shift and scale the wedge to the final location.
v *= r
v += np.asarray(center)
self._path = Path(v,c)
self._patch_transform = transforms.IdentityTransform()
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
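    # Example with illustrative values: a half annulus spanning 0-180 degrees,
    # outer radius 3 and ring width 1, would be Wedge((0, 0), 3, 0, 180, width=1).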
def get_path(self):
return self._path
# COVERAGE NOTE: Not used internally or from examples
class Arrow(Patch):
"""
An arrow patch.
"""
def __str__(self):
return "Arrow()"
_path = Path( [
[ 0.0, 0.1 ], [ 0.0, -0.1],
[ 0.8, -0.1 ], [ 0.8, -0.3],
[ 1.0, 0.0 ], [ 0.8, 0.3],
[ 0.8, 0.1 ], [ 0.0, 0.1] ] )
def __init__( self, x, y, dx, dy, width=1.0, **kwargs ):
"""
Draws an arrow, starting at (*x*, *y*), direction and length
given by (*dx*, *dy*) the width of the arrow is scaled by *width*.
Valid kwargs are:
%(Patch)s
"""
Patch.__init__(self, **kwargs)
L = np.sqrt(dx**2+dy**2) or 1 # account for div by zero
cx = float(dx)/L
sx = float(dy)/L
trans1 = transforms.Affine2D().scale(L, width)
trans2 = transforms.Affine2D.from_values(cx, sx, -sx, cx, 0.0, 0.0)
trans3 = transforms.Affine2D().translate(x, y)
trans = trans1 + trans2 + trans3
self._patch_transform = trans.frozen()
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def get_path(self):
return self._path
def get_patch_transform(self):
return self._patch_transform
class FancyArrow(Polygon):
"""
Like Arrow, but lets you set head width and head height independently.
"""
def __str__(self):
return "FancyArrow()"
def __init__(self, x, y, dx, dy, width=0.001, length_includes_head=False, \
head_width=None, head_length=None, shape='full', overhang=0, \
head_starts_at_zero=False,**kwargs):
"""
Constructor arguments
*length_includes_head*:
*True* if head is counted in calculating the length.
*shape*: ['full', 'left', 'right']
*overhang*:
distance that the arrow is swept back (0 overhang means
triangular shape).
*head_starts_at_zero*:
If *True*, the head starts being drawn at coordinate 0
instead of ending at coordinate 0.
Valid kwargs are:
%(Patch)s
"""
if head_width is None:
head_width = 3 * width
if head_length is None:
head_length = 1.5 * head_width
distance = np.sqrt(dx**2 + dy**2)
if length_includes_head:
length=distance
else:
length=distance+head_length
if not length:
verts = [] #display nothing if empty
else:
#start by drawing horizontal arrow, point at (0,0)
hw, hl, hs, lw = head_width, head_length, overhang, width
left_half_arrow = np.array([
[0.0,0.0], #tip
[-hl, -hw/2.0], #leftmost
[-hl*(1-hs), -lw/2.0], #meets stem
[-length, -lw/2.0], #bottom left
[-length, 0],
])
#if we're not including the head, shift up by head length
if not length_includes_head:
left_half_arrow += [head_length, 0]
#if the head starts at 0, shift up by another head length
if head_starts_at_zero:
left_half_arrow += [head_length/2.0, 0]
#figure out the shape, and complete accordingly
if shape == 'left':
coords = left_half_arrow
else:
right_half_arrow = left_half_arrow*[1,-1]
if shape == 'right':
coords = right_half_arrow
elif shape == 'full':
# The half-arrows contain the midpoint of the stem,
# which we can omit from the full arrow. Including it
# twice caused a problem with xpdf.
coords=np.concatenate([left_half_arrow[:-1],
right_half_arrow[-2::-1]])
else:
raise ValueError, "Got unknown shape: %s" % shape
cx = float(dx)/distance
sx = float(dy)/distance
M = np.array([[cx, sx],[-sx,cx]])
verts = np.dot(coords, M) + (x+dx, y+dy)
Polygon.__init__(self, map(tuple, verts), **kwargs)
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
class YAArrow(Patch):
"""
Yet another arrow class.
This is an arrow that is defined in display space and has a tip at
*x1*, *y1* and a base at *x2*, *y2*.
"""
def __str__(self):
return "YAArrow()"
def __init__(self, figure, xytip, xybase, width=4, frac=0.1, headwidth=12, **kwargs):
"""
Constructor arguments:
*xytip*
(*x*, *y*) location of arrow tip
*xybase*
(*x*, *y*) location the arrow base mid point
*figure*
The :class:`~matplotlib.figure.Figure` instance
(fig.dpi)
*width*
The width of the arrow in points
*frac*
The fraction of the arrow length occupied by the head
*headwidth*
The width of the base of the arrow head in points
Valid kwargs are:
%(Patch)s
"""
self.figure = figure
self.xytip = xytip
self.xybase = xybase
self.width = width
self.frac = frac
self.headwidth = headwidth
Patch.__init__(self, **kwargs)
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def get_path(self):
# Since this is dpi dependent, we need to recompute the path
# every time.
# the base vertices
x1, y1 = self.xytip
x2, y2 = self.xybase
k1 = self.width*self.figure.dpi/72./2.
k2 = self.headwidth*self.figure.dpi/72./2.
xb1, yb1, xb2, yb2 = self.getpoints(x1, y1, x2, y2, k1)
# a point on the segment 20% of the distance from the tip to the base
theta = math.atan2(y2-y1, x2-x1)
r = math.sqrt((y2-y1)**2. + (x2-x1)**2.)
xm = x1 + self.frac * r * math.cos(theta)
ym = y1 + self.frac * r * math.sin(theta)
xc1, yc1, xc2, yc2 = self.getpoints(x1, y1, xm, ym, k1)
xd1, yd1, xd2, yd2 = self.getpoints(x1, y1, xm, ym, k2)
xs = self.convert_xunits([xb1, xb2, xc2, xd2, x1, xd1, xc1, xb1])
ys = self.convert_yunits([yb1, yb2, yc2, yd2, y1, yd1, yc1, yb1])
return Path(zip(xs, ys))
def get_patch_transform(self):
return transforms.IdentityTransform()
def getpoints(self, x1,y1,x2,y2, k):
"""
For line segment defined by (*x1*, *y1*) and (*x2*, *y2*)
return the points on the line that is perpendicular to the
line and intersects (*x2*, *y2*) and the distance from (*x2*,
*y2*) of the returned points is *k*.
"""
x1,y1,x2,y2,k = map(float, (x1,y1,x2,y2,k))
if y2-y1 == 0:
return x2, y2+k, x2, y2-k
elif x2-x1 == 0:
return x2+k, y2, x2-k, y2
m = (y2-y1)/(x2-x1)
pm = -1./m
a = 1
b = -2*y2
c = y2**2. - k**2.*pm**2./(1. + pm**2.)
y3a = (-b + math.sqrt(b**2.-4*a*c))/(2.*a)
x3a = (y3a - y2)/pm + x2
y3b = (-b - math.sqrt(b**2.-4*a*c))/(2.*a)
x3b = (y3b - y2)/pm + x2
return x3a, y3a, x3b, y3b
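    # Worked example with illustrative numbers: for the horizontal segment from
    # (0, 0) to (1, 0) and k=2, the y2-y1 == 0 branch returns (1, 2, 1, -2),
    # i.e. the two points 2 units either side of (1, 0), perpendicular to the
    # segment.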
class CirclePolygon(RegularPolygon):
"""
A polygon-approximation of a circle patch.
"""
def __str__(self):
return "CirclePolygon(%d,%d)"%self.center
def __init__(self, xy, radius=5,
resolution=20, # the number of vertices
**kwargs):
"""
Create a circle at *xy* = (*x*, *y*) with given *radius*.
This circle is approximated by a regular polygon with
*resolution* sides. For a smoother circle drawn with splines,
see :class:`~matplotlib.patches.Circle`.
Valid kwargs are:
%(Patch)s
"""
RegularPolygon.__init__(self, xy,
resolution,
radius,
orientation=0,
**kwargs)
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
class Ellipse(Patch):
"""
A scale-free ellipse.
"""
def __str__(self):
return "Ellipse(%s,%s;%sx%s)"%(self.center[0],self.center[1],self.width,self.height)
def __init__(self, xy, width, height, angle=0.0, **kwargs):
"""
*xy*
center of ellipse
*width*
length of horizontal axis
*height*
length of vertical axis
*angle*
rotation in degrees (anti-clockwise)
Valid kwargs are:
%(Patch)s
"""
Patch.__init__(self, **kwargs)
self.center = xy
self.width, self.height = width, height
self.angle = angle
self._path = Path.unit_circle()
# Note: This cannot be calculated until this is added to an Axes
self._patch_transform = transforms.IdentityTransform()
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def _recompute_transform(self):
"""NOTE: This cannot be called until after this has been added
to an Axes, otherwise unit conversion will fail. This
makes it very important to call the accessor method and
not directly access the transformation member variable.
"""
center = (self.convert_xunits(self.center[0]),
self.convert_yunits(self.center[1]))
width = self.convert_xunits(self.width)
height = self.convert_yunits(self.height)
self._patch_transform = transforms.Affine2D() \
.scale(width * 0.5, height * 0.5) \
.rotate_deg(self.angle) \
.translate(*center)
def get_path(self):
"""
Return the path of the ellipse
"""
return self._path
def get_patch_transform(self):
self._recompute_transform()
return self._patch_transform
def contains(self,ev):
if ev.x is None or ev.y is None: return False,{}
x, y = self.get_transform().inverted().transform_point((ev.x, ev.y))
return (x*x + y*y) <= 1.0, {}
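# Usage sketch (illustrative, not part of the original module): Ellipse keeps
# a unit-circle path and lazily builds its patch transform, roughly
# Affine2D().scale(width/2, height/2).rotate_deg(angle).translate(xc, yc),
# which is why ``contains`` can simply invert the full transform and test
# x*x + y*y <= 1. Assuming an Axes instance ``ax`` is available:
#
#     e = Ellipse(xy=(0.5, 0.5), width=0.4, height=0.2, angle=30.)
#     ax.add_patch(e)   # the transform is only valid once added to an Axes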
class Circle(Ellipse):
"""
A circle patch.
"""
def __str__(self):
return "Circle((%g,%g),r=%g)"%(self.center[0],self.center[1],self.radius)
def __init__(self, xy, radius=5, **kwargs):
"""
Create true circle at center *xy* = (*x*, *y*) with given
*radius*. Unlike :class:`~matplotlib.patches.CirclePolygon`
which is a polygonal approximation, this uses Bézier splines
and is much closer to a scale-free circle.
Valid kwargs are:
%(Patch)s
"""
if 'resolution' in kwargs:
import warnings
warnings.warn('Circle is now scale free. Use CirclePolygon instead!', DeprecationWarning)
kwargs.pop('resolution')
self.radius = radius
Ellipse.__init__(self, xy, radius*2, radius*2, **kwargs)
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
class Arc(Ellipse):
"""
An elliptical arc. Because it performs various optimizations, it
can not be filled.
The arc must be used in an :class:`~matplotlib.axes.Axes`
instance---it can not be added directly to a
:class:`~matplotlib.figure.Figure`---because it is optimized to
only render the segments that are inside the axes bounding box
with high resolution.
"""
def __str__(self):
return "Arc(%s,%s;%sx%s)"%(self.center[0],self.center[1],self.width,self.height)
def __init__(self, xy, width, height, angle=0.0, theta1=0.0, theta2=360.0, **kwargs):
"""
The following args are supported:
*xy*
center of ellipse
*width*
length of horizontal axis
*height*
length of vertical axis
*angle*
rotation in degrees (anti-clockwise)
*theta1*
starting angle of the arc in degrees
*theta2*
ending angle of the arc in degrees
If *theta1* and *theta2* are not provided, the arc will form a
complete ellipse.
Valid kwargs are:
%(Patch)s
"""
fill = kwargs.pop('fill')
if fill:
raise ValueError("Arc objects can not be filled")
kwargs['fill'] = False
Ellipse.__init__(self, xy, width, height, angle, **kwargs)
self.theta1 = theta1
self.theta2 = theta2
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def draw(self, renderer):
"""
Ellipses are normally drawn using an approximation that uses
eight cubic bezier splines. The error of this approximation
is 1.89818e-6, according to this unverified source:
Lancaster, Don. Approximating a Circle or an Ellipse Using
Four Bezier Cubic Splines.
http://www.tinaja.com/glib/ellipse4.pdf
There is a use case where very large ellipses must be drawn
with very high accuracy, and it is too expensive to render the
entire ellipse with enough segments (either splines or line
segments). Therefore, in the case where either radius of the
ellipse is large enough that the error of the spline
approximation will be visible (greater than one pixel offset
from the ideal), a different technique is used.
In that case, only the visible parts of the ellipse are drawn,
with each visible arc using a fixed number of spline segments
(8). The algorithm proceeds as follows:
1. The points where the ellipse intersects the axes bounding
box are located. (This is done by performing an inverse
transformation on the axes bbox such that it is relative
to the unit circle -- this makes the intersection
calculation much easier than doing rotated ellipse
intersection directly).
This uses the "line intersecting a circle" algorithm
from:
Vince, John. Geometry for Computer Graphics: Formulae,
Examples & Proofs. London: Springer-Verlag, 2005.
2. The angles of each of the intersection points are
calculated.
3. Proceeding counterclockwise starting in the positive
x-direction, each of the visible arc-segments between the
pairs of vertices are drawn using the bezier arc
approximation technique implemented in
:meth:`matplotlib.path.Path.arc`.
"""
if not hasattr(self, 'axes'):
raise RuntimeError('Arcs can only be used in Axes instances')
self._recompute_transform()
# Get the width and height in pixels
width = self.convert_xunits(self.width)
height = self.convert_yunits(self.height)
width, height = self.get_transform().transform_point(
(width, height))
inv_error = (1.0 / 1.89818e-6) * 0.5
if width < inv_error and height < inv_error:
self._path = Path.arc(self.theta1, self.theta2)
return Patch.draw(self, renderer)
def iter_circle_intersect_on_line(x0, y0, x1, y1):
dx = x1 - x0
dy = y1 - y0
dr2 = dx*dx + dy*dy
D = x0*y1 - x1*y0
D2 = D*D
discrim = dr2 - D2
# Single (tangential) intersection
if discrim == 0.0:
x = (D*dy) / dr2
y = (-D*dx) / dr2
yield x, y
elif discrim > 0.0:
# The definition of "sign" here is different from
# np.sign: we never want to get 0.0
if dy < 0.0:
sign_dy = -1.0
else:
sign_dy = 1.0
sqrt_discrim = np.sqrt(discrim)
for sign in (1., -1.):
x = (D*dy + sign * sign_dy * dx * sqrt_discrim) / dr2
y = (-D*dx + sign * np.abs(dy) * sqrt_discrim) / dr2
yield x, y
def iter_circle_intersect_on_line_seg(x0, y0, x1, y1):
epsilon = 1e-9
if x1 < x0:
x0e, x1e = x1, x0
else:
x0e, x1e = x0, x1
if y1 < y0:
y0e, y1e = y1, y0
else:
y0e, y1e = y0, y1
x0e -= epsilon
y0e -= epsilon
x1e += epsilon
y1e += epsilon
for x, y in iter_circle_intersect_on_line(x0, y0, x1, y1):
if x >= x0e and x <= x1e and y >= y0e and y <= y1e:
yield x, y
# Transforms the axes box_path so that it is relative to the unit
# circle in the same way that it is relative to the desired
# ellipse.
box_path = Path.unit_rectangle()
box_path_transform = transforms.BboxTransformTo(self.axes.bbox) + \
self.get_transform().inverted()
box_path = box_path.transformed(box_path_transform)
PI = np.pi
TWOPI = PI * 2.0
RAD2DEG = 180.0 / PI
DEG2RAD = PI / 180.0
theta1 = self.theta1
theta2 = self.theta2
thetas = {}
# For each of the point pairs, there is a line segment
for p0, p1 in zip(box_path.vertices[:-1], box_path.vertices[1:]):
x0, y0 = p0
x1, y1 = p1
for x, y in iter_circle_intersect_on_line_seg(x0, y0, x1, y1):
theta = np.arccos(x)
if y < 0:
theta = TWOPI - theta
# Convert radians to degrees
theta *= RAD2DEG
if theta > theta1 and theta < theta2:
thetas[theta] = None
thetas = thetas.keys()
thetas.sort()
thetas.append(theta2)
last_theta = theta1
theta1_rad = theta1 * DEG2RAD
inside = box_path.contains_point((np.cos(theta1_rad), np.sin(theta1_rad)))
for theta in thetas:
if inside:
self._path = Path.arc(last_theta, theta, 8)
Patch.draw(self, renderer)
inside = False
else:
inside = True
last_theta = theta
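# Note (illustrative, not part of the original module): the spline error of
# 1.89818e-6 is relative to a unit circle, so the error reaches one pixel at
# a transformed radius of roughly 0.5 / 1.89818e-6 ~ 2.6e5 pixels. Arc.draw
# above uses exactly that bound:
#
#     inv_error = (1.0 / 1.89818e-6) * 0.5     # ~263,000 pixels
#     # width/height below the bound -> a single Path.arc(theta1, theta2)
#     # otherwise -> only the visible arc segments are drawn piecewise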
def bbox_artist(artist, renderer, props=None, fill=True):
"""
This is a debug function to draw a rectangle around the bounding
box returned by
:meth:`~matplotlib.artist.Artist.get_window_extent` of an artist,
to test whether the artist is returning the correct bbox.
*props* is a dict of rectangle props with the additional property
'pad' that sets the padding around the bbox in points.
"""
if props is None: props = {}
props = props.copy() # don't want to alter the pad externally
pad = props.pop('pad', 4)
pad = renderer.points_to_pixels(pad)
bbox = artist.get_window_extent(renderer)
l,b,w,h = bbox.bounds
l-=pad/2.
b-=pad/2.
w+=pad
h+=pad
r = Rectangle(xy=(l,b),
width=w,
height=h,
fill=fill,
)
r.set_transform(transforms.IdentityTransform())
r.set_clip_on( False )
r.update(props)
r.draw(renderer)
def draw_bbox(bbox, renderer, color='k', trans=None):
"""
This is a debug function to draw a rectangle around the bounding
box returned by
:meth:`~matplotlib.artist.Artist.get_window_extent` of an artist,
to test whether the artist is returning the correct bbox.
"""
l,b,w,h = bbox.get_bounds()
r = Rectangle(xy=(l,b),
width=w,
height=h,
edgecolor=color,
fill=False,
)
if trans is not None: r.set_transform(trans)
r.set_clip_on( False )
r.draw(renderer)
def _pprint_table(_table, leadingspace=2):
"""
Given the list of list of strings, return a string of REST table format.
"""
if leadingspace:
pad = ' '*leadingspace
else:
pad = ''
columns = [[] for cell in _table[0]]
for row in _table:
for column, cell in zip(columns, row):
column.append(cell)
col_len = [max([len(cell) for cell in column]) for column in columns]
lines = []
table_formatstr = pad + ' '.join([('=' * cl) for cl in col_len])
lines.append('')
lines.append(table_formatstr)
lines.append(pad + ' '.join([cell.ljust(cl) for cell, cl in zip(_table[0], col_len)]))
lines.append(table_formatstr)
lines.extend([(pad + ' '.join([cell.ljust(cl) for cell, cl in zip(row, col_len)]))
for row in _table[1:]])
lines.append(table_formatstr)
lines.append('')
return "\n".join(lines)
def _pprint_styles(_styles, leadingspace=2):
"""
A helper function for the _Style class. Given the dictionary of
(stylename : styleclass), return a formatted string listing all the
styles. Used to update the documentation.
"""
if leadingspace:
pad = ' '*leadingspace
else:
pad = ''
names, attrss, clss = [], [], []
import inspect
_table = [["Class", "Name", "Attrs"]]
for name, cls in sorted(_styles.items()):
args, varargs, varkw, defaults = inspect.getargspec(cls.__init__)
if defaults:
args = [(argname, argdefault) \
for argname, argdefault in zip(args[1:], defaults)]
else:
args = None
if args is None:
argstr = 'None'
else:
argstr = ",".join([("%s=%s" % (an, av)) for an, av in args])
#adding quotes for now to work around tex bug treating '-' as itemize
_table.append([cls.__name__, "'%s'"%name, argstr])
return _pprint_table(_table)
class _Style(object):
"""
A base class for the Styles. It is meant to be a container class,
where actual styles are declared as subclass of it, and it
provides some helper functions.
"""
def __new__(self, stylename, **kw):
"""
return the instance of the subclass with the given style name.
"""
# the "class" should have the _style_list attribute, which is
# a dictionary of (style name, style class) pairs.
_list = stylename.replace(" ","").split(",")
_name = _list[0].lower()
try:
_cls = self._style_list[_name]
except KeyError:
raise ValueError("Unknown style : %s" % stylename)
try:
_args_pair = [cs.split("=") for cs in _list[1:]]
_args = dict([(k, float(v)) for k, v in _args_pair])
except ValueError:
raise ValueError("Incorrect style argument : %s" % stylename)
_args.update(kw)
return _cls(**_args)
@classmethod
def get_styles(klass):
"""
A class method which returns a dictionary of available styles.
"""
return klass._style_list
@classmethod
def pprint_styles(klass):
"""
A class method which returns a string of the available styles.
"""
return _pprint_styles(klass._style_list)
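# Usage sketch (illustrative, not part of the original module): _Style.__new__
# acts as a small factory. A string such as "round, pad=0.2" is split on
# commas, the first token selects the class from _style_list, and the
# remaining "key=value" tokens become float keyword arguments, so for the
# BoxStyle subclass defined below the following are equivalent:
#
#     BoxStyle("round, pad=0.2")
#     BoxStyle("round", pad=0.2)
#     BoxStyle.Round(pad=0.2)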
class BoxStyle(_Style):
"""
:class:`BoxStyle` is a container class which defines several
boxstyle classes, which are used for :class:`FancyBboxPatch`.
A style object can be created as::
BoxStyle.Round(pad=0.2)
or::
BoxStyle("Round", pad=0.2)
or::
BoxStyle("Round, pad=0.2")
Following boxstyle classes are defined.
%(AvailableBoxstyles)s
An instance of any boxstyle class is a callable object,
whose call signature is::
__call__(self, x0, y0, width, height, mutation_size, aspect_ratio=1.)
and returns a :class:`Path` instance. *x0*, *y0*, *width* and
*height* specify the location and size of the box to be
drawn. *mutation_size* determines the overall size of the
mutation (by which I mean the transformation of the rectangle to
the fancy box). *aspect_ratio* determines the aspect-ratio of
the mutation.
.. plot:: mpl_examples/pylab_examples/fancybox_demo2.py
"""
_style_list = {}
class _Base(object):
"""
:class:`BBoxTransmuterBase` and its derivatives are used to make a
fancy box around a given rectangle. The :meth:`__call__` method
returns the :class:`~matplotlib.path.Path` of the fancy box. This
class is not an artist and actual drawing of the fancy box is done
by the :class:`FancyBboxPatch` class.
"""
# The derived classes are required to be able to be initialized
# w/o arguments, i.e., all its argument (except self) must have
# the default values.
def __init__(self):
"""
initialization.
"""
super(BoxStyle._Base, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
"""
The transmute method is the very core of the
:class:`BboxTransmuter` class and must be overridden in the
subclasses. It receives the location and size of the
rectangle, and the mutation_size, with which the amount of
padding etc. will be scaled. It returns a
:class:`~matplotlib.path.Path` instance.
"""
raise NotImplementedError('Derived must override')
def __call__(self, x0, y0, width, height, mutation_size,
aspect_ratio=1.):
"""
Given the location and size of the box, return the path of
the box around it.
- *x0*, *y0*, *width*, *height* : location and size of the box
- *mutation_size* : a reference scale for the mutation.
- *aspect_ratio* : aspect-ratio for the mutation.
"""
# The __call__ method is a thin wrapper around the transmute method
# and takes care of the aspect ratio.
if aspect_ratio is not None:
# Squeeze the given height by the aspect_ratio
y0, height = y0/aspect_ratio, height/aspect_ratio
# call transmute method with squeezed height.
path = self.transmute(x0, y0, width, height, mutation_size)
vertices, codes = path.vertices, path.codes
# Restore the height
vertices[:,1] = vertices[:,1] * aspect_ratio
return Path(vertices, codes)
else:
return self.transmute(x0, y0, width, height, mutation_size)
class Square(_Base):
"""
A simple square box.
"""
def __init__(self, pad=0.3):
"""
*pad*
amount of padding
"""
self.pad = pad
super(BoxStyle.Square, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
# width and height with padding added.
width, height = width + 2.*pad, \
height + 2.*pad,
# boundary of the padded box
x0, y0 = x0-pad, y0-pad,
x1, y1 = x0+width, y0 + height
cp = [(x0, y0), (x1, y0), (x1, y1), (x0, y1),
(x0, y0), (x0, y0)]
com = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY]
path = Path(cp, com)
return path
_style_list["square"] = Square
class LArrow(_Base):
"""
(left) Arrow Box
"""
def __init__(self, pad=0.3):
self.pad = pad
super(BoxStyle.LArrow, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
# width and height with padding added.
width, height = width + 2.*pad, \
height + 2.*pad,
# boundary of the padded box
x0, y0 = x0-pad, y0-pad,
x1, y1 = x0+width, y0 + height
dx = (y1-y0)/2.
dxx = dx*.5
# adjust x0. 1.4 <- sqrt(2)
x0 = x0 + pad / 1.4
cp = [(x0+dxx, y0), (x1, y0), (x1, y1), (x0+dxx, y1),
(x0+dxx, y1+dxx), (x0-dx, y0+dx), (x0+dxx, y0-dxx), # arrow
(x0+dxx, y0), (x0+dxx, y0)]
com = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.CLOSEPOLY]
path = Path(cp, com)
return path
_style_list["larrow"] = LArrow
class RArrow(LArrow):
"""
(right) Arrow Box
"""
def __init__(self, pad=0.3):
self.pad = pad
super(BoxStyle.RArrow, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
p = BoxStyle.LArrow.transmute(self, x0, y0,
width, height, mutation_size)
p.vertices[:,0] = 2*x0 + width - p.vertices[:,0]
return p
_style_list["rarrow"] = RArrow
class Round(_Base):
"""
A box with round corners.
"""
def __init__(self, pad=0.3, rounding_size=None):
"""
*pad*
amount of padding
*rounding_size*
rounding radius of corners. *pad* if None
"""
self.pad = pad
self.rounding_size = rounding_size
super(BoxStyle.Round, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
# size of the rounding corner
if self.rounding_size:
dr = mutation_size * self.rounding_size
else:
dr = pad
width, height = width + 2.*pad, \
height + 2.*pad,
x0, y0 = x0-pad, y0-pad,
x1, y1 = x0+width, y0 + height
# Round corners are implemented as quadratic bezier. eg.
# [(x0, y0-dr), (x0, y0), (x0+dr, y0)] for lower left corner.
cp = [(x0+dr, y0),
(x1-dr, y0),
(x1, y0), (x1, y0+dr),
(x1, y1-dr),
(x1, y1), (x1-dr, y1),
(x0+dr, y1),
(x0, y1), (x0, y1-dr),
(x0, y0+dr),
(x0, y0), (x0+dr, y0),
(x0+dr, y0)]
com = [Path.MOVETO,
Path.LINETO,
Path.CURVE3, Path.CURVE3,
Path.LINETO,
Path.CURVE3, Path.CURVE3,
Path.LINETO,
Path.CURVE3, Path.CURVE3,
Path.LINETO,
Path.CURVE3, Path.CURVE3,
Path.CLOSEPOLY]
path = Path(cp, com)
return path
_style_list["round"] = Round
class Round4(_Base):
"""
Another box with round edges.
"""
def __init__(self, pad=0.3, rounding_size=None):
"""
*pad*
amount of padding
*rounding_size*
rounding size of edges. *pad*/2 if None
"""
self.pad = pad
self.rounding_size = rounding_size
super(BoxStyle.Round4, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
# rounding size. Use half of the pad if not set.
if self.rounding_size:
dr = mutation_size * self.rounding_size
else:
dr = pad / 2.
width, height = width + 2.*pad - 2*dr, \
height + 2.*pad - 2*dr,
x0, y0 = x0-pad+dr, y0-pad+dr,
x1, y1 = x0+width, y0 + height
cp = [(x0, y0),
(x0+dr, y0-dr), (x1-dr, y0-dr), (x1, y0),
(x1+dr, y0+dr), (x1+dr, y1-dr), (x1, y1),
(x1-dr, y1+dr), (x0+dr, y1+dr), (x0, y1),
(x0-dr, y1-dr), (x0-dr, y0+dr), (x0, y0),
(x0, y0)]
com = [Path.MOVETO,
Path.CURVE4, Path.CURVE4, Path.CURVE4,
Path.CURVE4, Path.CURVE4, Path.CURVE4,
Path.CURVE4, Path.CURVE4, Path.CURVE4,
Path.CURVE4, Path.CURVE4, Path.CURVE4,
Path.CLOSEPOLY]
path = Path(cp, com)
return path
_style_list["round4"] = Round4
class Sawtooth(_Base):
"""
A sawtooth box.
"""
def __init__(self, pad=0.3, tooth_size=None):
"""
*pad*
amount of padding
*tooth_size*
size of the sawtooth. *pad*/2 if None
"""
self.pad = pad
self.tooth_size = tooth_size
super(BoxStyle.Sawtooth, self).__init__()
def _get_sawtooth_vertices(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
# size of sawtooth
if self.tooth_size is None:
tooth_size = self.pad * .5 * mutation_size
else:
tooth_size = self.tooth_size * mutation_size
tooth_size2 = tooth_size / 2.
width, height = width + 2.*pad - tooth_size, \
height + 2.*pad - tooth_size,
# the sizes of the vertical and horizontal sawtooth are
# separately adjusted to fit the given box size.
dsx_n = int(round((width - tooth_size) / (tooth_size * 2))) * 2
dsx = (width - tooth_size) / dsx_n
dsy_n = int(round((height - tooth_size) / (tooth_size * 2))) * 2
dsy = (height - tooth_size) / dsy_n
x0, y0 = x0-pad+tooth_size2, y0-pad+tooth_size2
x1, y1 = x0+width, y0 + height
bottom_saw_x = [x0] + \
[x0 + tooth_size2 + dsx*.5* i for i in range(dsx_n*2)] + \
[x1 - tooth_size2]
bottom_saw_y = [y0] + \
[y0 - tooth_size2, y0, y0 + tooth_size2, y0] * dsx_n + \
[y0 - tooth_size2]
right_saw_x = [x1] + \
[x1 + tooth_size2, x1, x1 - tooth_size2, x1] * dsx_n + \
[x1 + tooth_size2]
right_saw_y = [y0] + \
[y0 + tooth_size2 + dsy*.5* i for i in range(dsy_n*2)] + \
[y1 - tooth_size2]
top_saw_x = [x1] + \
[x1 - tooth_size2 - dsx*.5* i for i in range(dsx_n*2)] + \
[x0 + tooth_size2]
top_saw_y = [y1] + \
[y1 + tooth_size2, y1, y1 - tooth_size2, y1] * dsx_n + \
[y1 + tooth_size2]
left_saw_x = [x0] + \
[x0 - tooth_size2, x0, x0 + tooth_size2, x0] * dsy_n + \
[x0 - tooth_size2]
left_saw_y = [y1] + \
[y1 - tooth_size2 - dsy*.5* i for i in range(dsy_n*2)] + \
[y0 + tooth_size2]
saw_vertices = zip(bottom_saw_x, bottom_saw_y) + \
zip(right_saw_x, right_saw_y) + \
zip(top_saw_x, top_saw_y) + \
zip(left_saw_x, left_saw_y) + \
[(bottom_saw_x[0], bottom_saw_y[0])]
return saw_vertices
def transmute(self, x0, y0, width, height, mutation_size):
saw_vertices = self._get_sawtooth_vertices(x0, y0, width, height, mutation_size)
path = Path(saw_vertices)
return path
_style_list["sawtooth"] = Sawtooth
class Roundtooth(Sawtooth):
"""
A roundtooth(?) box.
"""
def __init__(self, pad=0.3, tooth_size=None):
"""
*pad*
amount of padding
*tooth_size*
size of the sawtooth. *pad*/2 if None
"""
super(BoxStyle.Roundtooth, self).__init__(pad, tooth_size)
def transmute(self, x0, y0, width, height, mutation_size):
saw_vertices = self._get_sawtooth_vertices(x0, y0, width, height, mutation_size)
cp = [Path.MOVETO] + ([Path.CURVE3, Path.CURVE3] * ((len(saw_vertices)-1)//2))
path = Path(saw_vertices, cp)
return path
_style_list["roundtooth"] = Roundtooth
__doc__ = cbook.dedent(__doc__) % \
{"AvailableBoxstyles": _pprint_styles(_style_list)}
class FancyBboxPatch(Patch):
"""
Draw a fancy box around a rectangle with lower left at *xy*=(*x*,
*y*) with specified width and height.
:class:`FancyBboxPatch` class is similar to :class:`Rectangle`
class, but it draws a fancy box around the rectangle. The
transformation of the rectangle box to the fancy box is delegated
to the :class:`BoxTransmuterBase` and its derived classes.
"""
def __str__(self):
return "%s(%g,%g;%gx%g)" % (self.__class__.__name__, self._x, self._y, self._width, self._height)
def __init__(self, xy, width, height,
boxstyle="round",
bbox_transmuter=None,
mutation_scale=1.,
mutation_aspect=None,
**kwargs):
"""
*xy* = lower left corner
*width*, *height*
*boxstyle* determines what kind of fancy box will be drawn. It
can be a string of the style name with a comma separated
attribute, or an instance of :class:`BoxStyle`. Following box
styles are available.
%(AvailableBoxstyles)s
*mutation_scale* : a value with which attributes of boxstyle
(e.g., pad) will be scaled. default=1.
*mutation_aspect* : The height of the rectangle will be
squeezed by this value before the mutation and the mutated
box will be stretched by the inverse of it. default=None.
Valid kwargs are:
%(Patch)s
"""
Patch.__init__(self, **kwargs)
self._x = xy[0]
self._y = xy[1]
self._width = width
self._height = height
if boxstyle == "custom":
if bbox_transmuter is None:
raise ValueError("bbox_transmuter argument is needed with custom boxstyle")
self._bbox_transmuter = bbox_transmuter
else:
self.set_boxstyle(boxstyle)
self._mutation_scale=mutation_scale
self._mutation_aspect=mutation_aspect
kwdoc = dict()
kwdoc["AvailableBoxstyles"]=_pprint_styles(BoxStyle._style_list)
kwdoc.update(artist.kwdocd)
__init__.__doc__ = cbook.dedent(__init__.__doc__) % kwdoc
del kwdoc
def set_boxstyle(self, boxstyle=None, **kw):
"""
Set the box style.
*boxstyle* can be a string with boxstyle name with optional
comma-separated attributes. Alternatively, the attrs can
be provided as keywords::
set_boxstyle("round,pad=0.2")
set_boxstyle("round", pad=0.2)
Old attrs simply are forgotten.
Without argument (or with *boxstyle* = None), it returns
available box styles.
ACCEPTS: [ %(AvailableBoxstyles)s ]
"""
if boxstyle==None:
return BoxStyle.pprint_styles()
if isinstance(boxstyle, BoxStyle._Base):
self._bbox_transmuter = boxstyle
elif callable(boxstyle):
self._bbox_transmuter = boxstyle
else:
self._bbox_transmuter = BoxStyle(boxstyle, **kw)
kwdoc = dict()
kwdoc["AvailableBoxstyles"]=_pprint_styles(BoxStyle._style_list)
kwdoc.update(artist.kwdocd)
set_boxstyle.__doc__ = cbook.dedent(set_boxstyle.__doc__) % kwdoc
del kwdoc
def set_mutation_scale(self, scale):
"""
Set the mutation scale.
ACCEPTS: float
"""
self._mutation_scale=scale
def get_mutation_scale(self):
"""
Return the mutation scale.
"""
return self._mutation_scale
def set_mutation_aspect(self, aspect):
"""
Set the aspect ratio of the bbox mutation.
ACCEPTS: float
"""
self._mutation_aspect=aspect
def get_mutation_aspect(self):
"""
Return the aspect ratio of the bbox mutation.
"""
return self._mutation_aspect
def get_boxstyle(self):
"Return the boxstyle object"
return self._bbox_transmuter
def get_path(self):
"""
Return the mutated path of the rectangle
"""
_path = self.get_boxstyle()(self._x, self._y,
self._width, self._height,
self.get_mutation_scale(),
self.get_mutation_aspect())
return _path
# Following methods are borrowed from the Rectangle class.
def get_x(self):
"Return the left coord of the rectangle"
return self._x
def get_y(self):
"Return the bottom coord of the rectangle"
return self._y
def get_width(self):
"Return the width of the rectangle"
return self._width
def get_height(self):
"Return the height of the rectangle"
return self._height
def set_x(self, x):
"""
Set the left coord of the rectangle
ACCEPTS: float
"""
self._x = x
def set_y(self, y):
"""
Set the bottom coord of the rectangle
ACCEPTS: float
"""
self._y = y
def set_width(self, w):
"""
Set the width of the rectangle
ACCEPTS: float
"""
self._width = w
def set_height(self, h):
"""
Set the height of the rectangle
ACCEPTS: float
"""
self._height = h
def set_bounds(self, *args):
"""
Set the bounds of the rectangle: l,b,w,h
ACCEPTS: (left, bottom, width, height)
"""
if len(args)==1:
l,b,w,h = args[0]
else:
l,b,w,h = args
self._x = l
self._y = b
self._width = w
self._height = h
def get_bbox(self):
return transforms.Bbox.from_bounds(self._x, self._y, self._width, self._height)
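# Usage sketch (illustrative, not part of the original module), assuming an
# Axes instance ``ax`` is available:
#
#     bb = FancyBboxPatch((0.2, 0.2), width=0.6, height=0.3,
#                         boxstyle="round,pad=0.1", mutation_scale=1.)
#     ax.add_patch(bb)
#     bb.set_boxstyle("sawtooth", pad=0.2)   # replaces the previous style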
from matplotlib.bezier import split_bezier_intersecting_with_closedpath
from matplotlib.bezier import get_intersection, inside_circle, get_parallels
from matplotlib.bezier import make_wedged_bezier2
from matplotlib.bezier import split_path_inout, get_cos_sin
class ConnectionStyle(_Style):
"""
:class:`ConnectionStyle` is a container class which defines
several connectionstyle classes, which are used to create a path
between two points. These are mainly used with
:class:`FancyArrowPatch`.
A connectionstyle object can be either created as::
ConnectionStyle.Arc3(rad=0.2)
or::
ConnectionStyle("Arc3", rad=0.2)
or::
ConnectionStyle("Arc3, rad=0.2")
The following classes are defined
%(AvailableConnectorstyles)s
An instance of any connection style class is a callable object,
whose call signature is::
__call__(self, posA, posB, patchA=None, patchB=None, shrinkA=2., shrinkB=2.)
and it returns a :class:`Path` instance. *posA* and *posB* are
tuples of x,y coordinates of the two points to be
connected. If *patchA* (or *patchB*) is given, the returned path is
clipped so that it starts (or ends) at the boundary of the
patch. The path is further shrunk by *shrinkA* (or *shrinkB*)
which is given in points.
"""
_style_list = {}
class _Base(object):
"""
A base class for connectionstyle classes. The derived classes need
to implement a *connect* method whose call signature is::
connect(posA, posB)
where posA and posB are tuples of x, y coordinates to be
connected. The method needs to return a path connecting the two
points. This base class defines a __call__ method, and a few
helper methods.
"""
class SimpleEvent:
def __init__(self, xy):
self.x, self.y = xy
def _clip(self, path, patchA, patchB):
"""
Clip the path to the boundary of the patchA and patchB.
The starting point of the path needs to be inside of
patchA and the end point inside patchB. The *contains*
method of each patch object is used to test whether the point
is inside the patch.
"""
if patchA:
def insideA(xy_display):
#xy_display = patchA.get_data_transform().transform_point(xy_data)
xy_event = ConnectionStyle._Base.SimpleEvent(xy_display)
return patchA.contains(xy_event)[0]
try:
left, right = split_path_inout(path, insideA)
except ValueError:
right = path
path = right
if patchB:
def insideB(xy_display):
#xy_display = patchB.get_data_transform().transform_point(xy_data)
xy_event = ConnectionStyle._Base.SimpleEvent(xy_display)
return patchB.contains(xy_event)[0]
try:
left, right = split_path_inout(path, insideB)
except ValueError:
left = path
path = left
return path
def _shrink(self, path, shrinkA, shrinkB):
"""
Shrink the path by fixed size (in points) with shrinkA and shrinkB
"""
if shrinkA:
x, y = path.vertices[0]
insideA = inside_circle(x, y, shrinkA)
left, right = split_path_inout(path, insideA)
path = right
if shrinkB:
x, y = path.vertices[-1]
insideB = inside_circle(x, y, shrinkB)
left, right = split_path_inout(path, insideB)
path = left
return path
def __call__(self, posA, posB,
shrinkA=2., shrinkB=2., patchA=None, patchB=None):
"""
Calls the *connect* method to create a path between *posA*
and *posB*. The path is clipped and shrunk.
"""
path = self.connect(posA, posB)
clipped_path = self._clip(path, patchA, patchB)
shrinked_path = self._shrink(clipped_path, shrinkA, shrinkB)
return shrinked_path
class Arc3(_Base):
"""
Creates a simple quadratic bezier curve between two
points. The curve is created so that the middle control point
(C1) is located at the same distance from the start (C0) and
end (C2) points, and the distance of C1 to the line
connecting C0-C2 is *rad* times the distance of C0-C2.
"""
def __init__(self, rad=0.):
"""
*rad*
curvature of the curve.
"""
self.rad = rad
def connect(self, posA, posB):
x1, y1 = posA
x2, y2 = posB
x12, y12 = (x1 + x2)/2., (y1 + y2)/2.
dx, dy = x2 - x1, y2 - y1
f = self.rad
cx, cy = x12 + f*dy, y12 - f*dx
vertices = [(x1, y1),
(cx, cy),
(x2, y2)]
codes = [Path.MOVETO,
Path.CURVE3,
Path.CURVE3]
return Path(vertices, codes)
_style_list["arc3"] = Arc3
class Angle3(_Base):
"""
Creates a simple quadratic bezier curve between two
points. The middle control point is placed at the
intersection of two lines which cross the start (or
end) point and have an angle of angleA (or angleB).
"""
def __init__(self, angleA=90, angleB=0):
"""
*angleA*
starting angle of the path
*angleB*
ending angle of the path
"""
self.angleA = angleA
self.angleB = angleB
def connect(self, posA, posB):
x1, y1 = posA
x2, y2 = posB
cosA, sinA = math.cos(self.angleA/180.*math.pi),\
math.sin(self.angleA/180.*math.pi),
cosB, sinB = math.cos(self.angleB/180.*math.pi),\
math.sin(self.angleB/180.*math.pi),
cx, cy = get_intersection(x1, y1, cosA, sinA,
x2, y2, cosB, sinB)
vertices = [(x1, y1), (cx, cy), (x2, y2)]
codes = [Path.MOVETO, Path.CURVE3, Path.CURVE3]
return Path(vertices, codes)
_style_list["angle3"] = Angle3
class Angle(_Base):
"""
Creates a piecewise continuous quadratic bezier path between
two points. The path has one passing-through point placed at
the intersection of two lines which cross the start
(or end) point and have an angle of angleA (or angleB). The
connecting edges are rounded with *rad*.
"""
def __init__(self, angleA=90, angleB=0, rad=0.):
"""
*angleA*
starting angle of the path
*angleB*
ending angle of the path
*rad*
rounding radius of the edge
"""
self.angleA = angleA
self.angleB = angleB
self.rad = rad
def connect(self, posA, posB):
x1, y1 = posA
x2, y2 = posB
cosA, sinA = math.cos(self.angleA/180.*math.pi),\
math.sin(self.angleA/180.*math.pi),
cosB, sinB = math.cos(self.angleB/180.*math.pi),\
-math.sin(self.angleB/180.*math.pi),
cx, cy = get_intersection(x1, y1, cosA, sinA,
x2, y2, cosB, sinB)
vertices = [(x1, y1)]
codes = [Path.MOVETO]
if self.rad == 0.:
vertices.append((cx, cy))
codes.append(Path.LINETO)
else:
vertices.extend([(cx - self.rad * cosA, cy - self.rad * sinA),
(cx, cy),
(cx + self.rad * cosB, cy + self.rad * sinB)])
codes.extend([Path.LINETO, Path.CURVE3, Path.CURVE3])
vertices.append((x2, y2))
codes.append(Path.LINETO)
return Path(vertices, codes)
_style_list["angle"] = Angle
class Arc(_Base):
"""
Creates a piecewise continuous quadratic bezier path between
two points. The path can have two passing-through points, a
point placed at the distance of armA and angle of angleA from
point A, another point with respect to point B. The edges are
rounded with *rad*.
"""
def __init__(self, angleA=0, angleB=0, armA=None, armB=None, rad=0.):
"""
*angleA* :
starting angle of the path
*angleB* :
ending angle of the path
*armA* :
length of the starting arm
*armB* :
length of the ending arm
*rad* :
rounding radius of the edges
"""
self.angleA = angleA
self.angleB = angleB
self.armA = armA
self.armB = armB
self.rad = rad
def connect(self, posA, posB):
x1, y1 = posA
x2, y2 = posB
vertices = [(x1, y1)]
rounded = []
codes = [Path.MOVETO]
if self.armA:
cosA = math.cos(self.angleA/180.*math.pi)
sinA = math.sin(self.angleA/180.*math.pi)
#x_armA, y_armB
d = self.armA - self.rad
rounded.append((x1 + d*cosA, y1 + d*sinA))
d = self.armA
rounded.append((x1 + d*cosA, y1 + d*sinA))
if self.armB:
cosB = math.cos(self.angleB/180.*math.pi)
sinB = math.sin(self.angleB/180.*math.pi)
x_armB, y_armB = x2 + self.armB*cosB, y2 + self.armB*sinB
if rounded:
xp, yp = rounded[-1]
dx, dy = x_armB - xp, y_armB - yp
dd = (dx*dx + dy*dy)**.5
rounded.append((xp + self.rad*dx/dd, yp + self.rad*dy/dd))
vertices.extend(rounded)
codes.extend([Path.LINETO,
Path.CURVE3,
Path.CURVE3])
else:
xp, yp = vertices[-1]
dx, dy = x_armB - xp, y_armB - yp
dd = (dx*dx + dy*dy)**.5
d = dd - self.rad
rounded = [(xp + d*dx/dd, yp + d*dy/dd),
(x_armB, y_armB)]
if rounded:
xp, yp = rounded[-1]
dx, dy = x2 - xp, y2 - yp
dd = (dx*dx + dy*dy)**.5
rounded.append((xp + self.rad*dx/dd, yp + self.rad*dy/dd))
vertices.extend(rounded)
codes.extend([Path.LINETO,
Path.CURVE3,
Path.CURVE3])
vertices.append((x2, y2))
codes.append(Path.LINETO)
return Path(vertices, codes)
_style_list["arc"] = Arc
__doc__ = cbook.dedent(__doc__) % \
{"AvailableConnectorstyles": _pprint_styles(_style_list)}
class ArrowStyle(_Style):
"""
:class:`ArrowStyle` is a container class which defines several
arrowstyle classes, which are used to create an arrow path along a
given path. These are mainly used with :class:`FancyArrowPatch`.
An arrowstyle object can be created either as::
ArrowStyle.Fancy(head_length=.4, head_width=.4, tail_width=.4)
or::
ArrowStyle("Fancy", head_length=.4, head_width=.4, tail_width=.4)
or::
ArrowStyle("Fancy, head_length=.4, head_width=.4, tail_width=.4")
The following classes are defined
%(AvailableArrowstyles)s
An instance of any arrow style class is a callable object,
whose call signature is::
__call__(self, path, mutation_size, linewidth, aspect_ratio=1.)
and it returns a tuple of a :class:`Path` instance and a boolean
value. *path* is a :class:`Path` instance along which the arrow
will be drawn. *mutation_size* and *aspect_ratio* have the same
meaning as in :class:`BoxStyle`. *linewidth* is a line width to be
stroked. This is meant to be used to correct the location of the
head so that it does not overshoot the destination point, but not all
classes support it.
.. plot:: mpl_examples/pylab_examples/fancyarrow_demo.py
"""
_style_list = {}
class _Base(object):
"""
Arrow Transmuter Base class
ArrowTransmuterBase and its derivatives are used to make a fancy
arrow around a given path. The __call__ method returns a path
(which will be used to create a PathPatch instance) and a boolean
value indicating the path is open therefore is not fillable. This
class is not an artist and actual drawing of the fancy arrow is
done by the FancyArrowPatch class.
"""
# The derived classes are required to be able to be initialized
# w/o arguments, i.e., all its argument (except self) must have
# the default values.
def __init__(self):
super(ArrowStyle._Base, self).__init__()
@staticmethod
def ensure_quadratic_bezier(path):
""" Some ArrowStyle class only wokrs with a simple
quaratic bezier curve (created with Arc3Connetion or
Angle3Connector). This static method is to check if the
provided path is a simple quadratic bezier curve and returns
its control points if true.
"""
segments = list(path.iter_segments())
assert len(segments) == 2
assert segments[0][1] == Path.MOVETO
assert segments[1][1] == Path.CURVE3
return list(segments[0][0]) + list(segments[1][0])
def transmute(self, path, mutation_size, linewidth):
"""
The transmute method is the very core of the ArrowStyle
class and must be overridden in the subclasses. It receives the
path object along which the arrow will be drawn, and the
mutation_size, with which the arrow head etc. will
be scaled. It returns a Path instance. The linewidth may be
used to adjust the path so that it does not pass beyond
the given points.
"""
raise NotImplementedError('Derived must override')
def __call__(self, path, mutation_size, linewidth,
aspect_ratio=1.):
"""
The __call__ method is a thin wrapper around the transmute method
and takes care of the aspect ratio.
"""
if aspect_ratio is not None:
# Squeeze the given height by the aspect_ratio
vertices, codes = path.vertices[:], path.codes[:]
# Squeeze the height
vertices[:,1] = vertices[:,1] / aspect_ratio
path_shrinked = Path(vertices, codes)
# call transmute method with squeezed height.
path_mutated, closed = self.transmute(path_shrinked, mutation_size,
linewidth)
vertices, codes = path_mutated.vertices, path_mutated.codes
# Restore the height
vertices[:,1] = vertices[:,1] * aspect_ratio
return Path(vertices, codes), closed
else:
return self.transmute(path, mutation_size, linewidth)
class _Curve(_Base):
"""
A simple arrow which will work with any path instance. The
returned path is simply a concatenation of the original path and at
most two paths representing the arrow heads at the begin point and
at the end point. The returned path is not closed and is only meant
to be stroked.
"""
def __init__(self, beginarrow=None, endarrow=None,
head_length=.2, head_width=.1):
"""
The arrows are drawn if *beginarrow* and/or *endarrow* are
true. *head_length* and *head_width* determine the size of
the arrow relative to the *mutation scale*.
"""
self.beginarrow, self.endarrow = beginarrow, endarrow
self.head_length, self.head_width = \
head_length, head_width
super(ArrowStyle._Curve, self).__init__()
def _get_pad_projected(self, x0, y0, x1, y1, linewidth):
# when no arrow head is drawn
dx, dy = x0 - x1, y0 - y1
cp_distance = math.sqrt(dx**2 + dy**2)
# padx_projected, pady_projected : amount of pad to account
# projection of the wedge
padx_projected = (.5*linewidth)
pady_projected = (.5*linewidth)
# apply pad for projected edge
ddx = padx_projected * dx / cp_distance
ddy = pady_projected * dy / cp_distance
return ddx, ddy
def _get_arrow_wedge(self, x0, y0, x1, y1,
head_dist, cos_t, sin_t, linewidth
):
"""
Return the paths for arrow heads. Since arrow lines are
drawn with capstyle=projecting, the arrow goes beyond the
desired point. This method also returns the amount by which
the path should be shrunk so that it does not overshoot.
"""
# arrow from x0, y0 to x1, y1
dx, dy = x0 - x1, y0 - y1
cp_distance = math.sqrt(dx**2 + dy**2)
# padx_projected, pady_projected : amount of pad to account for
# the overshooting of the projection of the wedge
padx_projected = (.5*linewidth / cos_t)
pady_projected = (.5*linewidth / sin_t)
# apply pad for projected edge
ddx = padx_projected * dx / cp_distance
ddy = pady_projected * dy / cp_distance
# offset for arrow wedge
dx, dy = dx / cp_distance * head_dist, dy / cp_distance * head_dist
dx1, dy1 = cos_t * dx + sin_t * dy, -sin_t * dx + cos_t * dy
dx2, dy2 = cos_t * dx - sin_t * dy, sin_t * dx + cos_t * dy
vertices_arrow = [(x1+ddx+dx1, y1+ddy+dy1),
(x1+ddx, y1+ddy),
(x1+ddx+dx2, y1+ddy+dy2)]
codes_arrow = [Path.MOVETO,
Path.LINETO,
Path.LINETO]
return vertices_arrow, codes_arrow, ddx, ddy
def transmute(self, path, mutation_size, linewidth):
head_length, head_width = self.head_length * mutation_size, \
self.head_width * mutation_size
head_dist = math.sqrt(head_length**2 + head_width**2)
cos_t, sin_t = head_length / head_dist, head_width / head_dist
# begin arrow
x0, y0 = path.vertices[0]
x1, y1 = path.vertices[1]
if self.beginarrow:
verticesA, codesA, ddxA, ddyA = \
self._get_arrow_wedge(x1, y1, x0, y0,
head_dist, cos_t, sin_t,
linewidth)
else:
verticesA, codesA = [], []
#ddxA, ddyA = self._get_pad_projected(x1, y1, x0, y0, linewidth)
ddxA, ddyA = 0., 0., #self._get_pad_projected(x1, y1, x0, y0, linewidth)
# end arrow
x2, y2 = path.vertices[-2]
x3, y3 = path.vertices[-1]
if self.endarrow:
verticesB, codesB, ddxB, ddyB = \
self._get_arrow_wedge(x2, y2, x3, y3,
head_dist, cos_t, sin_t,
linewidth)
else:
verticesB, codesB = [], []
ddxB, ddyB = 0., 0. #self._get_pad_projected(x2, y2, x3, y3, linewidth)
# this simple code will not work if ddx, ddy is greater than the
# separation between vertices.
vertices = np.concatenate([verticesA + [(x0+ddxA, y0+ddyA)],
path.vertices[1:-1],
[(x3+ddxB, y3+ddyB)] + verticesB])
codes = np.concatenate([codesA,
path.codes,
codesB])
p = Path(vertices, codes)
return p, False
class Curve(_Curve):
"""
A simple curve without any arrow head.
"""
def __init__(self):
super(ArrowStyle.Curve, self).__init__( \
beginarrow=False, endarrow=False)
_style_list["-"] = Curve
class CurveA(_Curve):
"""
An arrow with a head at its begin point.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveA, self).__init__( \
beginarrow=True, endarrow=False,
head_length=head_length, head_width=head_width )
_style_list["<-"] = CurveA
class CurveB(_Curve):
"""
An arrow with a head at its end point.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveB, self).__init__( \
beginarrow=False, endarrow=True,
head_length=head_length, head_width=head_width )
#_style_list["->"] = CurveB
_style_list["->"] = CurveB
class CurveAB(_Curve):
"""
An arrow with heads both at the begin and the end point.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveAB, self).__init__( \
beginarrow=True, endarrow=True,
head_length=head_length, head_width=head_width )
#_style_list["<->"] = CurveAB
_style_list["<->"] = CurveAB
class _Bracket(_Base):
def __init__(self, bracketA=None, bracketB=None,
widthA=1., widthB=1.,
lengthA=0.2, lengthB=0.2,
angleA=None, angleB=None,
scaleA=None, scaleB=None
):
self.bracketA, self.bracketB = bracketA, bracketB
self.widthA, self.widthB = widthA, widthB
self.lengthA, self.lengthB = lengthA, lengthB
self.angleA, self.angleB = angleA, angleB
self.scaleA, self.scaleB= scaleA, scaleB
def _get_bracket(self, x0, y0,
cos_t, sin_t, width, length,
):
# arrow from x0, y0 to x1, y1
from matplotlib.bezier import get_normal_points
x1, y1, x2, y2 = get_normal_points(x0, y0, cos_t, sin_t, width)
dx, dy = length * cos_t, length * sin_t
vertices_arrow = [(x1+dx, y1+dy),
(x1, y1),
(x2, y2),
(x2+dx, y2+dy)]
codes_arrow = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO]
return vertices_arrow, codes_arrow
def transmute(self, path, mutation_size, linewidth):
if self.scaleA is None:
scaleA = mutation_size
else:
scaleA = self.scaleA
if self.scaleB is None:
scaleB = mutation_size
else:
scaleB = self.scaleB
vertices_list, codes_list = [], []
if self.bracketA:
x0, y0 = path.vertices[0]
x1, y1 = path.vertices[1]
cos_t, sin_t = get_cos_sin(x1, y1, x0, y0)
verticesA, codesA = self._get_bracket(x0, y0, cos_t, sin_t,
self.widthA*scaleA,
self.lengthA*scaleA)
vertices_list.append(verticesA)
codes_list.append(codesA)
vertices_list.append(path.vertices)
codes_list.append(path.codes)
if self.bracketB:
x0, y0 = path.vertices[-1]
x1, y1 = path.vertices[-2]
cos_t, sin_t = get_cos_sin(x1, y1, x0, y0)
verticesB, codesB = self._get_bracket(x0, y0, cos_t, sin_t,
self.widthB*scaleB,
self.lengthB*scaleB)
vertices_list.append(verticesB)
codes_list.append(codesB)
vertices = np.concatenate(vertices_list)
codes = np.concatenate(codes_list)
p = Path(vertices, codes)
return p, False
class BracketB(_Bracket):
"""
An arrow with a bracket([) at its end.
"""
def __init__(self, widthB=1., lengthB=0.2, angleB=None):
"""
*widthB*
width of the bracket
*lengthB*
length of the bracket
*angleB*
angle between the bracket and the line
"""
super(ArrowStyle.BracketB, self).__init__(None, True,
widthB=widthB, lengthB=lengthB, angleB=angleB )
#_style_list["-["] = BracketB
_style_list["-["] = BracketB
class Simple(_Base):
"""
A simple arrow. Only works with a quadratic bezier curve.
"""
def __init__(self, head_length=.5, head_width=.5, tail_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
*tail_width*
width of the arrow tail
"""
self.head_length, self.head_width, self.tail_width = \
head_length, head_width, tail_width
super(ArrowStyle.Simple, self).__init__()
def transmute(self, path, mutation_size, linewidth):
x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)
# divide the path into a head and a tail
head_length = self.head_length * mutation_size
in_f = inside_circle(x2, y2, head_length)
arrow_path = [(x0, y0), (x1, y1), (x2, y2)]
arrow_out, arrow_in = \
split_bezier_intersecting_with_closedpath(arrow_path,
in_f,
tolerence=0.01)
# head
head_width = self.head_width * mutation_size
head_l, head_r = make_wedged_bezier2(arrow_in, head_width/2.,
wm=.5)
# tail
tail_width = self.tail_width * mutation_size
tail_left, tail_right = get_parallels(arrow_out, tail_width/2.)
head_right, head_left = head_r, head_l
patch_path = [(Path.MOVETO, tail_right[0]),
(Path.CURVE3, tail_right[1]),
(Path.CURVE3, tail_right[2]),
(Path.LINETO, head_right[0]),
(Path.CURVE3, head_right[1]),
(Path.CURVE3, head_right[2]),
(Path.CURVE3, head_left[1]),
(Path.CURVE3, head_left[0]),
(Path.LINETO, tail_left[2]),
(Path.CURVE3, tail_left[1]),
(Path.CURVE3, tail_left[0]),
(Path.LINETO, tail_right[0]),
(Path.CLOSEPOLY, tail_right[0]),
]
path = Path([p for c, p in patch_path], [c for c, p in patch_path])
return path, True
_style_list["simple"] = Simple
class Fancy(_Base):
"""
A fancy arrow. Only works with a quadratic bezier curve.
"""
def __init__(self, head_length=.4, head_width=.4, tail_width=.4):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
*tail_width*
width of the arrow tail
"""
self.head_length, self.head_width, self.tail_width = \
head_length, head_width, tail_width
super(ArrowStyle.Fancy, self).__init__()
def transmute(self, path, mutation_size, linewidth):
x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)
# divide the path into a head and a tail
head_length = self.head_length * mutation_size
arrow_path = [(x0, y0), (x1, y1), (x2, y2)]
# path for head
in_f = inside_circle(x2, y2, head_length)
path_out, path_in = \
split_bezier_intersecting_with_closedpath(arrow_path,
in_f,
tolerence=0.01)
path_head = path_in
# path for tail
in_f = inside_circle(x2, y2, head_length*.8)
path_out, path_in = \
split_bezier_intersecting_with_closedpath(arrow_path,
in_f,
tolerence=0.01)
path_tail = path_out
# head
head_width = self.head_width * mutation_size
head_l, head_r = make_wedged_bezier2(path_head, head_width/2.,
wm=.6)
# tail
tail_width = self.tail_width * mutation_size
tail_left, tail_right = make_wedged_bezier2(path_tail,
tail_width*.5,
w1=1., wm=0.6, w2=0.3)
# path for the start point of the tail
in_f = inside_circle(x0, y0, tail_width*.3)
path_in, path_out = \
split_bezier_intersecting_with_closedpath(arrow_path,
in_f,
tolerence=0.01)
tail_start = path_in[-1]
head_right, head_left = head_r, head_l
patch_path = [(Path.MOVETO, tail_start),
(Path.LINETO, tail_right[0]),
(Path.CURVE3, tail_right[1]),
(Path.CURVE3, tail_right[2]),
(Path.LINETO, head_right[0]),
(Path.CURVE3, head_right[1]),
(Path.CURVE3, head_right[2]),
(Path.CURVE3, head_left[1]),
(Path.CURVE3, head_left[0]),
(Path.LINETO, tail_left[2]),
(Path.CURVE3, tail_left[1]),
(Path.CURVE3, tail_left[0]),
(Path.LINETO, tail_start),
(Path.CLOSEPOLY, tail_start),
]
patch_path2 = [(Path.MOVETO, tail_right[0]),
(Path.CURVE3, tail_right[1]),
(Path.CURVE3, tail_right[2]),
(Path.LINETO, head_right[0]),
(Path.CURVE3, head_right[1]),
(Path.CURVE3, head_right[2]),
(Path.CURVE3, head_left[1]),
(Path.CURVE3, head_left[0]),
(Path.LINETO, tail_left[2]),
(Path.CURVE3, tail_left[1]),
(Path.CURVE3, tail_left[0]),
(Path.CURVE3, tail_start),
(Path.CURVE3, tail_right[0]),
(Path.CLOSEPOLY, tail_right[0]),
]
path = Path([p for c, p in patch_path], [c for c, p in patch_path])
return path, True
_style_list["fancy"] = Fancy
class Wedge(_Base):
"""
Wedge(?) shape. Only works with a quadratic bezier curve. The
begin point has a width of the tail_width and the end point has a
width of 0. At the middle, the width is shrink_factor*tail_width.
"""
def __init__(self, tail_width=.3, shrink_factor=0.5):
"""
*tail_width*
width of the tail
*shrink_factor*
fraction of the arrow width at the middle point
"""
self.tail_width = tail_width
self.shrink_factor = shrink_factor
super(ArrowStyle.Wedge, self).__init__()
def transmute(self, path, mutation_size, linewidth):
x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)
arrow_path = [(x0, y0), (x1, y1), (x2, y2)]
b_plus, b_minus = make_wedged_bezier2(arrow_path,
self.tail_width * mutation_size / 2.,
wm=self.shrink_factor)
patch_path = [(Path.MOVETO, b_plus[0]),
(Path.CURVE3, b_plus[1]),
(Path.CURVE3, b_plus[2]),
(Path.LINETO, b_minus[2]),
(Path.CURVE3, b_minus[1]),
(Path.CURVE3, b_minus[0]),
(Path.CLOSEPOLY, b_minus[0]),
]
path = Path([p for c, p in patch_path], [c for c, p in patch_path])
return path, True
_style_list["wedge"] = Wedge
__doc__ = cbook.dedent(__doc__) % \
{"AvailableArrowstyles": _pprint_styles(_style_list)}
class FancyArrowPatch(Patch):
"""
A fancy arrow patch. It draws an arrow using the :class:`ArrowStyle`.
"""
def __str__(self):
return self.__class__.__name__ \
+ "FancyArrowPatch(%g,%g,%g,%g,%g,%g)" % tuple(self._q_bezier)
def __init__(self, posA=None, posB=None,
path=None,
arrowstyle="simple",
arrow_transmuter=None,
connectionstyle="arc3",
connector=None,
patchA=None,
patchB=None,
shrinkA=2.,
shrinkB=2.,
mutation_scale=1.,
mutation_aspect=None,
**kwargs):
"""
If *posA* and *posB* are given, a path connecting the two points is
created according to the connectionstyle. The path will be
clipped with *patchA* and *patchB* and further shrunk by
*shrinkA* and *shrinkB*. An arrow is drawn along this
resulting path using the *arrowstyle* parameter. If *path* is
provided, an arrow is drawn along this path and *patchA*,
*patchB*, *shrinkA*, and *shrinkB* are ignored.
The *connectionstyle* describes how *posA* and *posB* are
connected. It can be an instance of the ConnectionStyle class
(matplotlib.patches.ConnectionStyle) or a string of the
connectionstyle name, with optional comma-separated
attributes. The following connection styles are available.
%(AvailableConnectorstyles)s
The *arrowstyle* describes how the fancy arrow will be
drawn. It can be a string of the available arrowstyle names,
with optional comma-separated attributes, or an
ArrowStyle instance. The optional attributes are meant to be
scaled with the *mutation_scale*. The following arrow styles are
available.
%(AvailableArrowstyles)s
*mutation_scale* : a value with which attributes of arrowstyle
(e.g., head_length) will be scaled. default=1.
*mutation_aspect* : The height of the rectangle will be
squeezed by this value before the mutation and the mutated
box will be stretched by the inverse of it. default=None.
Valid kwargs are:
%(Patch)s
"""
if posA is not None and posB is not None and path is None:
self._posA_posB = [posA, posB]
if connectionstyle is None:
connectionstyle = "arc3"
self.set_connectionstyle(connectionstyle)
elif posA is None and posB is None and path is not None:
self._posA_posB = None
self._connector = None
else:
raise ValueError("either posA and posB, or path need to provided")
self.patchA = patchA
self.patchB = patchB
self.shrinkA = shrinkA
self.shrinkB = shrinkB
Patch.__init__(self, **kwargs)
self._path_original = path
self.set_arrowstyle(arrowstyle)
self._mutation_scale=mutation_scale
self._mutation_aspect=mutation_aspect
#self._draw_in_display_coordinate = True
kwdoc = dict()
kwdoc["AvailableArrowstyles"]=_pprint_styles(ArrowStyle._style_list)
kwdoc["AvailableConnectorstyles"]=_pprint_styles(ConnectionStyle._style_list)
kwdoc.update(artist.kwdocd)
__init__.__doc__ = cbook.dedent(__init__.__doc__) % kwdoc
del kwdoc
def set_positions(self, posA, posB):
""" set the begin end end positions of the connecting
path. Use current vlaue if None.
"""
if posA is not None: self._posA_posB[0] = posA
if posB is not None: self._posA_posB[1] = posB
def set_patchA(self, patchA):
""" set the begin patch.
"""
self.patchA = patchA
def set_patchB(self, patchB):
""" set the begin patch
"""
self.patchB = patchB
def set_connectionstyle(self, connectionstyle, **kw):
"""
Set the connection style.
*connectionstyle* can be a string with connectionstyle name with optional
comma-separated attributes. Alternatively, the attrs can
be provided as keywords.
set_connectionstyle("arc,angleA=0,armA=30,rad=10")
set_connectionstyle("arc", angleA=0,armA=30,rad=10)
Old attrs simply are forgotten.
Without argument (or with connectionstyle=None), return
available styles as a list of strings.
"""
if connectionstyle==None:
return ConnectionStyle.pprint_styles()
if isinstance(connectionstyle, ConnectionStyle._Base):
self._connector = connectionstyle
elif callable(connectionstyle):
# we may need check the calling convention of the given function
self._connector = connectionstyle
else:
self._connector = ConnectionStyle(connectionstyle, **kw)
def get_connectionstyle(self):
"""
Return the ConnectionStyle instance
"""
return self._connector
def set_arrowstyle(self, arrowstyle=None, **kw):
"""
Set the arrow style.
*arrowstyle* can be a string with arrowstyle name with optional
comma-separated attributes. Alternatively, the attrs can
be provided as keywords.
set_arrowstyle("Fancy,head_length=0.2")
set_arrowstyle("fancy", head_length=0.2)
Old attrs simply are forgotten.
Without argument (or with arrowstyle=None), return
available box styles as a list of strings.
"""
if arrowstyle==None:
return ArrowStyle.pprint_styles()
if isinstance(arrowstyle, ArrowStyle._Base):
self._arrow_transmuter = arrowstyle
else:
self._arrow_transmuter = ArrowStyle(arrowstyle, **kw)
def get_arrowstyle(self):
"""
Return the arrowstyle object
"""
return self._arrow_transmuter
def set_mutation_scale(self, scale):
"""
Set the mutation scale.
ACCEPTS: float
"""
self._mutation_scale=scale
def get_mutation_scale(self):
"""
Return the mutation scale.
"""
return self._mutation_scale
def set_mutation_aspect(self, aspect):
"""
Set the aspect ratio of the bbox mutation.
ACCEPTS: float
"""
self._mutation_aspect=aspect
def get_mutation_aspect(self):
"""
Return the aspect ratio of the bbox mutation.
"""
return self._mutation_aspect
def get_path(self):
"""
return the path of the arrow in the data coordinate. Use
get_path_in_displaycoord() method to retrieve the arrow path
in the display coord.
"""
_path = self.get_path_in_displaycoord()
return self.get_transform().inverted().transform_path(_path)
def get_path_in_displaycoord(self):
"""
Return the mutated path of the arrow in the display coord
"""
if self._posA_posB is not None:
posA = self.get_transform().transform_point(self._posA_posB[0])
posB = self.get_transform().transform_point(self._posA_posB[1])
_path = self.get_connectionstyle()(posA, posB,
patchA=self.patchA,
patchB=self.patchB,
shrinkA=self.shrinkA,
shrinkB=self.shrinkB
)
else:
_path = self.get_transform().transform_path(self._path_original)
_path, closed = self.get_arrowstyle()(_path,
self.get_mutation_scale(),
self.get_linewidth(),
self.get_mutation_aspect()
)
if not closed:
self.fill = False
return _path
def draw(self, renderer):
if not self.get_visible(): return
#renderer.open_group('patch')
gc = renderer.new_gc()
fill_orig = self.fill
path = self.get_path_in_displaycoord()
affine = transforms.IdentityTransform()
if cbook.is_string_like(self._edgecolor) and self._edgecolor.lower()=='none':
gc.set_linewidth(0)
else:
gc.set_foreground(self._edgecolor)
gc.set_linewidth(self._linewidth)
gc.set_linestyle(self._linestyle)
gc.set_antialiased(self._antialiased)
self._set_gc_clip(gc)
gc.set_capstyle('round')
if (not self.fill or self._facecolor is None or
(cbook.is_string_like(self._facecolor) and self._facecolor.lower()=='none')):
rgbFace = None
gc.set_alpha(1.0)
else:
r, g, b, a = colors.colorConverter.to_rgba(self._facecolor, self._alpha)
rgbFace = (r, g, b)
gc.set_alpha(a)
if self._hatch:
gc.set_hatch(self._hatch )
renderer.draw_path(gc, path, affine, rgbFace)
self.fill = fill_orig
#renderer.close_group('patch')
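# Usage sketch (illustrative, not part of the original module): putting the
# pieces together, a fancy arrow between two points, assuming an Axes
# instance ``ax`` is available:
#
#     arrow = FancyArrowPatch(posA=(0.2, 0.2), posB=(0.8, 0.8),
#                             connectionstyle="arc3,rad=0.2",
#                             arrowstyle="simple,head_width=0.5",
#                             mutation_scale=20.)
#     ax.add_patch(arrow)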
| agpl-3.0 |
LaurentRDC/scikit-ued | skued/time_series/selections.py | 1 | 11570 | # -*- coding: utf-8 -*-
"""
Selection masks for assembling time-series.
"""
from abc import ABCMeta, abstractmethod, abstractproperty
import matplotlib.patches as mpatches
import matplotlib.transforms as mtransforms
import numpy as np
class Selection(metaclass=ABCMeta):
"""
Abstract base class for time-series selection masks.
In the context of ultrafast electron/x-ray scattering, time-series are
assembled by integrating over a portion of scattering patterns for each
time-delay. This class is the generalization of selecting a rectangular
area of scattering patterns to arbitrary shapes, e.g. disks, tori, etc.
.. versionadded:: 2.0.2
Parameters
----------
shape : 2-tuple
Shape of scattering patterns on which the selection will be applied.
See also
--------
RectSelection : rectangular selection
DiskSelection : circular disk selection
RingSelection : ring selection, i.e. 2-torus
ArbitrarySelection : arbitrary selection
"""
def __init__(self, shape, *args, **kwargs):
self.shape = shape
@abstractmethod
def __array__(self, *args, **kwargs):
"""Cast as a NumPy array."""
pass
def mpatch(self, *args, **kwargs):
"""
Matplotlib patch associated with this selection.
keyword arguments are passed to the appropriate `Matplotlib.patches.Patch`
subclass.
By default, a patch drawing a rectangle around the bounding box is used.
.. versionadded:: 2.0.3
"""
top, bottom, left, right = self.bounding_box
return mpatches.Rectangle(
xy=(left, top), width=right - left, height=bottom - top, angle=0, **kwargs
)
# The method below should be specialized for subclasses.
@property
def bounding_box(self):
"""
Returns the array bounding box.
Returns
-------
r1, r2 : int
Row-wise bounds
c1, c2 : int
Column-wise bounds
"""
selection = self.__array__()
# The idea is to add values along rows and columns.
# Since True ~ 1, and False ~ 0, we can determine
# the bounding box by finding minimum and maximum indices
# where the projection is nonzero
# Warning : nonzero() returns a tuple!
(row_nonzero,) = np.sum(selection, axis=1).nonzero()
(col_nonzero,) = np.sum(selection, axis=0).nonzero()
# In case of empty selection (i.e. all False), min and max will fail
# Therefore, we replace with trivial value
if row_nonzero.size == 0:
row_nonzero = np.array([0])
if col_nonzero.size == 0:
col_nonzero = np.array([0])
r1, r2 = np.min(row_nonzero), np.max(row_nonzero) + 1
c1, c2 = np.min(col_nonzero), np.max(col_nonzero) + 1
return (r1, r2, c1, c2)
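# Illustrative usage sketch (added annotation, not part of the original
# module): given a boolean mask obtained from any concrete Selection and a
# stack of scattering patterns of shape (n_delays, *shape) -- `stack` and
# `selection` are assumed names used only for illustration -- a time-series
# can be assembled with
#     mask = np.asarray(selection)
#     trace = stack[:, mask].sum(axis=1)
# which integrates each pattern over the selected pixels, one value per delay.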
class ArbitrarySelection(Selection):
"""
    Arbitrary selection mask, represented by a boolean array.
.. versionadded:: 2.0.2
Parameters
----------
array : ndarray, ndim 2, dtype bool
Boolean array that evaluates to `True` on valid selections.
"""
def __init__(self, array):
self._array = np.asarray(array, dtype=bool)
super().__init__(shape=self._array.shape)
def __array__(self, *args, **kwargs):
return self._array
class RectSelection(Selection):
"""
Rectangular selection mask. Note that rectangular bounds are *inclusive*,
contrary to normal numpy index selections.
.. versionadded:: 2.0.2
Parameters
----------
shape : 2-tuple
Shape of the scattering patterns from which data will be selected.
r1, r2 : int
Row indices that bound the selection, inclusively.
c1, c2 : int
Column indices that bound the selection, inclusively.
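    Examples
    --------
    A minimal illustrative sketch (added example; the shape and bounds are
    made-up values):
    >>> import numpy as np
    >>> sel = RectSelection(shape=(64, 64), r1=10, r2=20, c1=10, c2=20)
    >>> mask = np.asarray(sel)   # boolean mask, True inside the selection
    >>> int(mask.sum())          # inclusive bounds: 11 x 11 pixels
    121
    >>> sel.bounding_box         # (r1, r2 + 1, c1, c2 + 1)
    (10, 21, 10, 21)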
"""
def __init__(self, shape, r1, r2, c1, c2):
super().__init__(shape=shape)
self._bbox = (r1, r2 + 1, c1, c2 + 1)
@property
def bounding_box(self):
"""
Returns the array bounding box.
Returns
-------
r1, r2 : int
Row-wise bounds
c1, c2 : int
Column-wise bounds
"""
return self._bbox
def __array__(self, *args, **kwargs):
arr = np.zeros(shape=self.shape, dtype=bool)
r1, r2, c1, c2 = self._bbox
arr[r1:r2, c1:c2] = True
return arr
class DiskSelection(Selection):
"""
Disk selection mask.
.. versionadded:: 2.0.2
Parameters
----------
shape : 2-tuple
Shape of the scattering patterns from which data will be selected.
center : 2-tuple of ints
Center (row, col) of the selection.
radius : float
Radius of the selection.
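    Examples
    --------
    An illustrative sketch (added example; the values are made up):
    >>> import numpy as np
    >>> sel = DiskSelection(shape=(64, 64), center=(32, 32), radius=5)
    >>> mask = np.asarray(sel)
    >>> bool(mask[32, 32]), bool(mask[32, 40])  # inside vs. outside the disk
    (True, False)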
"""
def __init__(self, shape, center, radius):
super().__init__(shape=shape)
self._center = center
self._radius = radius
@property
def bounding_box(self):
"""
Returns the array bounding box.
Returns
-------
r1, r2 : int
Row-wise bounds
c1, c2 : int
Column-wise bounds
"""
rc, cc = self._center
return (
rc - self._radius,
rc + self._radius + 1,
cc - self._radius,
cc + self._radius + 1,
)
def __array__(self, *args, **kwargs):
center_row, center_col = self._center
selection = np.zeros(shape=self.shape, dtype=bool)
cc, rr = np.meshgrid(
np.arange(0, self.shape[0], dtype=int) - center_col,
np.arange(0, self.shape[1], dtype=int) - center_row,
)
distance = np.sqrt(rr ** 2 + cc ** 2)
selection[distance <= self._radius] = True
return selection
def mpatch(self, **kwargs):
"""
Circular patch. Keyword arguments are passed
to `matplotlib.patches.Circle`.
.. versionadded:: 2.0.3
Returns
-------
patch : matplotlib.patches.Circle
"""
y, x = self._center
return mpatches.Circle(xy=(x, y), radius=self._radius, angle=0, **kwargs)
class RingSelection(Selection):
"""
Ring selection mask, i.e. 2-torus.
.. versionadded:: 2.0.2
Parameters
----------
shape : 2-tuple
Shape of the scattering patterns from which data will be selected.
center : 2-tuple of ints
Center (row, col) of the selection.
inner_radius : float
Inner radius of the selection.
outer_radius : float
Outer radius of the selection.
"""
def __init__(self, shape, center, inner_radius, outer_radius):
if inner_radius > outer_radius:
raise ValueError("Inner radius cannot be larger than outer radius.")
super().__init__(shape=shape)
self._center = center
self._inner_radius = inner_radius
self._outer_radius = outer_radius
@property
def bounding_box(self):
"""
Returns the array bounding box.
Returns
-------
r1, r2 : int
Row-wise bounds
c1, c2 : int
Column-wise bounds
"""
rc, cc = self._center
return (
rc - self._outer_radius,
rc + self._outer_radius + 1,
cc - self._outer_radius,
cc + self._outer_radius + 1,
)
def __array__(self, *args, **kwargs):
center_row, center_col = self._center
selection = np.zeros(shape=self.shape, dtype=bool)
cc, rr = np.meshgrid(
np.arange(0, self.shape[0], dtype=int) - center_col,
np.arange(0, self.shape[1], dtype=int) - center_row,
)
distance = np.sqrt(rr ** 2 + cc ** 2)
selection[
np.logical_and(
distance >= self._inner_radius, distance <= self._outer_radius
)
] = True
return selection
# TODO: make new patch class
def mpatch(self, **kwargs):
"""
Toroidal patch. Keyword arguments are passed
to `matplotlib.patches.Circle`.
.. versionadded:: 2.0.3
Returns
-------
inner : matplotlib.patches.Circle
outer : matplotlib.patches.Circle
"""
y, x = self._center
inner_circ = mpatches.Circle(xy=(x, y), radius=self._inner_radius, **kwargs)
outer_circ = mpatches.Circle(xy=(x, y), radius=self._outer_radius, **kwargs)
return inner_circ, outer_circ
class RingArcSelection(Selection):
"""
Selection patch for a partial 2-torus.
.. versionadded:: 2.0.5
Parameters
----------
shape : 2-tuple
Shape of the scattering patterns from which data will be selected.
center : 2-tuple of ints
Center (row, col) of the selection.
inner_radius : float
Inner radius of the selection.
outer_radius : float
Outer radius of the selection.
angle : float
Rotation of the ring in degrees.
theta1, theta2 : float
Starting and ending angles of the 2-torus in degrees, relative to ``angle``.
"""
def __init__(
self, shape, center, inner_radius, outer_radius, angle=0, theta1=0, theta2=360
):
if inner_radius > outer_radius:
raise ValueError("Inner radius cannot be larger than outer radius.")
super().__init__(shape=shape)
self._center = center
self._inner_radius = inner_radius
self._outer_radius = outer_radius
self._angle = angle
self._theta1 = theta1
self._theta2 = theta2
@property
def bounding_box(self):
"""
Returns the array bounding box.
Returns
-------
r1, r2 : int
Row-wise bounds
c1, c2 : int
Column-wise bounds
"""
rc, cc = self._center
return (
rc - self._outer_radius,
rc + self._outer_radius + 1,
cc - self._outer_radius,
cc + self._outer_radius + 1,
)
def __array__(self, *args, **kwargs):
center_row, center_col = self._center
selection = np.zeros(shape=self.shape, dtype=bool)
cc, rr = np.meshgrid(
np.arange(0, self.shape[0], dtype=int) - center_col,
np.arange(0, self.shape[1], dtype=int) - center_row,
)
distance = np.sqrt(rr ** 2 + cc ** 2)
angle = np.rad2deg(np.arctan2(rr, cc)) + self._angle
angle[:] = np.mod(angle, 360)
distance_criteria = np.logical_and(
distance >= self._inner_radius, distance <= self._outer_radius
)
angle_criteria = np.logical_and(angle >= self._theta1, angle <= self._theta2)
selection[np.logical_and(angle_criteria, distance_criteria)] = True
return selection
def mpatch(self, **kwargs):
"""
Partial toroidal patch. Keyword arguments are passed
to `matplotlib.patches.Arc`.
Returns
-------
        inner : matplotlib.patches.Arc
        outer : matplotlib.patches.Arc
"""
y, x = self._center
arc = lambda radius: mpatches.Arc(
xy=(x, y),
width=2 * radius,
height=2 * radius,
angle=self._angle,
theta1=self._theta1,
theta2=self._theta2,
**kwargs
)
inner_arc = arc(self._inner_radius)
outer_arc = arc(self._outer_radius)
        return inner_arc, outer_arc
| mit |
Scribery/cockpit | bots/learn/learn1.py | 2 | 6831 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of Cockpit.
#
# Copyright (C) 2017 Slavek Kabrda
#
# Cockpit is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# Cockpit is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Cockpit; If not, see <http://www.gnu.org/licenses/>.
# WARNING: As you change this code increment this version number so
# the machine learning model uses a new place to store the model
FILENAME = "tests-learn-2.nn"
import collections
import gzip
import pickle
import operator
import os
import re
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler
# The threshold for predicting based on learned data
PREDICT_THRESHHOLD = 0.70
def load(directory):
path = os.path.join(directory, FILENAME)
if not os.path.exists(path):
return None
with gzip.open(path, 'rb') as fp:
network = pickle.load(fp)
return network
def save(directory, model):
path = os.path.join(directory, FILENAME)
with gzip.open(path + ".tmp", 'wb') as fp:
pickle.dump(model, fp)
os.rename(path + ".tmp", path)
return path
# -----------------------------------------------------------------------------
# The Neural Network
class NNWithScaler1:
def __init__(self, tokenizer):
self.tokenizer = tokenizer
self.network = MLPClassifier(hidden_layer_sizes=(500, 500))
self.scaler = StandardScaler()
def predict(self, item):
features, unused = self.digest(item)
X = self.scaler.transform([features])
return self.network.predict(X)[0]
def predict_proba(self, item):
features, unused = self.digest(item)
X = self.scaler.transform([features])
return self.network.predict_proba(X)[0]
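    # Added note (illustrative): predict_proba returns [P(not merged), P(merged)]
    # because the classifier was trained on boolean labels; callers outside this
    # file (not shown here) would typically compare the second entry against
    # PREDICT_THRESHHOLD, e.g. flake = proba[1] >= PREDICT_THRESHHOLD.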
def digest(self, item):
tokens = self.tokenizer.tokenize(item['log'])
features = []
# firstly output features for contexts
for context in self.tokenizer.contexts:
features.append(int(item['context'] == context))
# secondly output features for tests
for test in self.tokenizer.tests:
features.append(int(item['test'] == test))
# thirdly output features for tokens
for token in self.tokenizer.tokens:
features.append(tokens[token])
# When "merged" is none it's unknown, lets use -1 for that
result = item.get('merged')
if result:
result = 1
elif result is not None:
result = 0
return features, result
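    # Added note (illustrative): the feature vector built above is a
    # concatenation of one-hot context indicators, one-hot test indicators and
    # raw token counts.  For example, with 2 known contexts, 2 known tests and
    # 3 tracked tokens, an item might digest to
    #     [1, 0,   0, 1,   5, 0, 2]
    # and the label is 1 (merged, i.e. flake), 0 (not merged) or None (unknown).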
def train(self, items):
# For sanity checking
not_merged = 0
merged = 0
# Create the data set
X, y = [], []
for i, item in enumerate(items):
if item.get('status') != "failure":
continue
features, result = self.digest(item)
if result == 0:
not_merged += 1
elif result == 1:
merged += 1
else:
continue
X.append(features)
y.append(result)
# Some validation now
if merged + not_merged < 100:
raise RuntimeError("too few training data points: {0}".format(merged + not_merged))
if not_merged < 100:
raise RuntimeError("too little of training data represents non-flakes: {0}".format(not_merged))
if merged < 100:
raise RuntimeError("too little of training data represents flakes: {0}".format(merged))
# Actual neural network training
self.scaler.fit(X)
X = self.scaler.transform(X)
self.network.fit(X, y)
# -----------------------------------------------------------------------------
# The Tokenizer
SPLIT_RE = re.compile(r'[\s]')
TOKEN_REGEXES = {
'SpecialToken:Number': re.compile(r'^\d+(\.\d+)?$'),
'SpecialToken:Percent': re.compile(r'^\d+(\.\d+)?%$'),
'SpecialToken:Time': re.compile(r'\d\d:\d\d:\d\d'),
'SpecialToken:UUID': re.compile(r'^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$', re.I),
'SpecialToken:WSCerts': re.compile(r'^/etc/cockpit/ws-certs.d/0-self-signed.cert:[\w/\+]+$'),
}
class Tokenizer1:
top_tokens = 9000
def __init__(self):
self.tokens = collections.defaultdict(int)
self.contexts = set()
self.tests = set()
def tokenize(self, log):
def noise(line):
return line.startswith("Journal extracted") or \
line.startswith("Wrote ") or \
line.startswith("Warning: Permanently added") or \
line.startswith("not ok ") or \
line.startswith("# Flake") or \
line.startswith("# ---------------") or \
line.strip() == "#"
# Filter out noise lines
log = "\n".join(filter(lambda x: not noise(x), log.split('\n')))
result = [ ]
def unify_token(token):
if len(token) < 4:
return None
elif len(set(token)) == 1:
                # omit stuff like "--------------------------"
return None
else:
for name, regex in TOKEN_REGEXES.items():
if regex.match(token):
return name
return token
split = SPLIT_RE.split(log)
for i in split:
unified = unify_token(i)
if unified is not None:
result.append(unified)
tokens = collections.defaultdict(int)
for token in result:
tokens[token] = tokens.get(token, 0) + 1
return tokens
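    # Added example (made-up log line, not real data): tokenizing
    #     "ok 12 at 10:32:01 100% done"
    # drops the tokens shorter than 4 characters, maps "10:32:01" to
    # 'SpecialToken:Time' and "100%" to 'SpecialToken:Percent', and keeps
    # "done" as-is, yielding
    #     {'SpecialToken:Time': 1, 'SpecialToken:Percent': 1, 'done': 1}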
def parse(self, items, verbose=False):
tokens = collections.defaultdict(int)
contexts = set()
tests = set()
for item in items:
for token, count in self.tokenize(item['log']).items():
tokens[token] = tokens.get(token, 0) + count
contexts.add(item['context'])
tests.add(item['test'])
# Get the top number of tokens
usetokens = []
for token, count in sorted(tokens.items(), key=operator.itemgetter(1), reverse=True):
usetokens.append(token)
if len(usetokens) == self.top_tokens:
break
self.tokens = usetokens
self.contexts = sorted(contexts)
self.tests = sorted(tests)
| lgpl-2.1 |
abhishekgahlot/scikit-learn | sklearn/neighbors/regression.py | 39 | 10464 | """Nearest Neighbor Regression"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from .base import _get_weights, _check_weights, NeighborsBase, KNeighborsMixin
from .base import RadiusNeighborsMixin, SupervisedFloatMixin
from ..base import RegressorMixin
from ..utils import check_array
class KNeighborsRegressor(NeighborsBase, KNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on k-nearest neighbors.
The target is predicted by local interpolation of the targets
associated of the nearest neighbors in the training set.
Parameters
----------
n_neighbors : int, optional (default = 5)
        Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsRegressor
>>> neigh = KNeighborsRegressor(n_neighbors=2)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
RadiusNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
neighbors, neighbor `k+1` and `k`, have identical distances but
        different labels, the results will depend on the ordering of the
training data.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array or matrix, shape = [n_samples, n_features]
Returns
-------
        y : array of float, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.mean(_y[neigh_ind], axis=1)
else:
y_pred = np.empty((X.shape[0], _y.shape[1]), dtype=np.float)
denom = np.sum(weights, axis=1)
for j in range(_y.shape[1]):
num = np.sum(_y[neigh_ind, j] * weights, axis=1)
y_pred[:, j] = num / denom
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
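    # Added note (illustrative): with weights (e.g. the 'distance' option) the
    # loop above computes, per output column, the weighted mean
    #     y_hat = sum_i(w_i * y_i) / sum_i(w_i)
    # over the k nearest neighbors; with uniform weights this reduces to the
    # plain np.mean branch.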
class RadiusNeighborsRegressor(NeighborsBase, RadiusNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on neighbors within a fixed radius.
The target is predicted by local interpolation of the targets
associated of the nearest neighbors in the training set.
Parameters
----------
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsRegressor
>>> neigh = RadiusNeighborsRegressor(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
KNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
p=p, metric=metric, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array or matrix, shape = [n_samples, n_features]
Returns
-------
        y : array of float, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.radius_neighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.array([np.mean(_y[ind, :], axis=0)
for ind in neigh_ind])
else:
y_pred = np.array([(np.average(_y[ind, :], axis=0,
weights=weights[i]))
for (i, ind) in enumerate(neigh_ind)])
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
| bsd-3-clause |
eclee25/flu-SDI-simulations-age | age_perc_vaxcov_viz.py | 1 | 4602 | #!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: 8/23/13
###Purpose: visualize results of time-based epidemic simulations
#### pairs with age_perc_T_time.py
###Import data: pickled datasets
###Command Line: python age_perc_T_time.py
##############################################
####### notes #######
### codebook of age class codes
# '1' - Toddlers: 0-2
# '2' - Preschool: 3-4
# '3' - Children: 5-18
# '4' - Adults: 19-64
# '5' - Seniors: 65+ (community)
# '6' - Elders: 65+ (nursing home)
# There are only 94 "elders" in the Vancouver network, and they all reside in one nursing home, so they can be combined with the seniors for analysis purposes (all_elderly).
### packages/modules ###
import pickle
import matplotlib.pyplot as plt
import numpy as np
### pickled data parameters ###
numsims = 50 # number of simulations
vaxcovlist = np.linspace(0, .2, num=21, endpoint=True) # vax coverage
# T = 0.065 # ~20% AR in naive population
T = 0.077 # ~40% AR in naive population
cov_fixed = 0.245 # reference value for OR vs vaxeff plot
### import pickled data ###
pname1 = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/Age_Based_Simulations/Pickled/d_epiOR_cov_%ssims_T%.3f_eff%.3f-%.3f' %(numsims, T, (min(vaxcovlist)/cov_fixed), (max(vaxcovlist)/cov_fixed))
pname3 = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/Age_Based_Simulations/Pickled/covepi_cov_%ssims_T%.3f_eff%.3f-%.3f' %(numsims, T, (min(vaxcovlist)/cov_fixed), (max(vaxcovlist)/cov_fixed))
pname4 = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/Age_Based_Simulations/Pickled/effepi_cov_%ssims_T%.3f_eff%.3f-%.3f' %(numsims, T, (min(vaxcovlist)/cov_fixed), (max(vaxcovlist)/cov_fixed))
pname5 = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/Age_Based_Simulations/Pickled/d_episize_cov_%ssims_T%.3f_eff%.3f-%.3f' %(numsims, T, (min(vaxcovlist)/cov_fixed), (max(vaxcovlist)/cov_fixed))
pname6 = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/Age_Based_Simulations/Pickled/d_simepi_cov_%ssims_T%.3f_eff%.3f-%.3f' %(numsims, T, (min(vaxcovlist)/cov_fixed), (max(vaxcovlist)/cov_fixed))
pname7 = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/Age_Based_Simulations/Pickled/d_numepi_cov_%ssims_T%.3f_eff%.3f-%.3f' %(numsims, T, (min(vaxcovlist)/cov_fixed), (max(vaxcovlist)/cov_fixed))
d_epiOR = pickle.load(open(pname1, "rb"))
cov_epi = pickle.load(open(pname3, "rb"))
eff_epi = pickle.load(open(pname4, "rb"))
d_episize = pickle.load(open(pname5, "rb"))
d_simepi = pickle.load(open(pname6, "rb"))
d_numepi = pickle.load(open(pname7, "rb"))
##############################################
### RESULTS: OR by vax efficacy w/ error bars ###
# plot
plt.errorbar(eff_epi, [np.mean(d_epiOR[cov]) for cov in cov_epi], yerr = [np.std(d_epiOR[cov]) for cov in cov_epi], marker = 'o', color = 'black', linestyle = 'None')
plt.xlabel('random vax efficacy (cov = 0.245)')
plt.ylabel('OR, child:adult')
figname = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/Age_Based_Simulations/Figures/epiOR_cov_%ssims_T%.3f_eff%.3f-%.3f.png' %(numsims, T, (min(vaxcovlist)/cov_fixed), (max(vaxcovlist)/cov_fixed))
# plt.savefig(figname)
# plt.close()
plt.show()
##############################################
### DIAGNOSTICS: epidemic size w/ error by T ###
# grab episize info from epidemic results
for cov in sorted(cov_epi):
d_episize[cov] = [d_simepi[key][2] for key in d_simepi if cov == key[0]]
### plot episize by vax efficacy ###
plt.errorbar(eff_epi, [np.mean(d_episize[cov]) for cov in cov_epi], yerr=[np.std(d_episize[cov]) for cov in cov_epi], marker='o', color='black', linestyle='None')
plt.xlabel('random vax efficacy (cov = 0.245)')
plt.ylabel('epidemic size')
figname = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/Age_Based_Simulations/Figures/episize_cov_%ssims_T%.3f_eff%.3f-%.3f.png'%(numsims, T, (min(vaxcovlist)/cov_fixed), (max(vaxcovlist)/cov_fixed))
# plt.savefig(figname)
# plt.close()
plt.show()
##############################################
### DIAGNOSTICS: number of epidemics by T ###
# grab number of epidemics from epidemic results
for cov in sorted(cov_epi):
d_numepi[cov] = len([d_simepi[key][2] for key in d_simepi if cov == key[0]])
# plot number of epidemics by vax efficacy
plt.plot(eff_epi, [d_numepi[cov] for cov in cov_epi], marker='o', color='black')
plt.xlabel('random vax efficacy (cov = 0.245)')
plt.ylabel('number of epidemics')
figname = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/Age_Based_Simulations/Figures/numepi_cov_%ssims_T%.3f_eff%.3f-%.3f.png' %(numsims, T, (min(vaxcovlist)/cov_fixed), (max(vaxcovlist)/cov_fixed))
# plt.savefig(figname)
# plt.close()
plt.show()
| mit |
liangz0707/scikit-learn | sklearn/manifold/tests/test_isomap.py | 226 | 3941 | from itertools import product
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from sklearn import datasets
from sklearn import manifold
from sklearn import neighbors
from sklearn import pipeline
from sklearn import preprocessing
from sklearn.utils.testing import assert_less
eigen_solvers = ['auto', 'dense', 'arpack']
path_methods = ['auto', 'FW', 'D']
def test_isomap_simple_grid():
# Isomap should preserve distances when all neighbors are used
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# distances from each point to all others
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
assert_array_almost_equal(G, G_iso)
def test_isomap_reconstruction_error():
# Same setup as in test_isomap_simple_grid, with an added dimension
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# add noise in a third dimension
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(Npts, 1)
X = np.concatenate((X, noise), 1)
# compute input kernel
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
centerer = preprocessing.KernelCenterer()
K = centerer.fit_transform(-0.5 * G ** 2)
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
# compute output kernel
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
K_iso = centerer.fit_transform(-0.5 * G_iso ** 2)
# make sure error agrees
reconstruction_error = np.linalg.norm(K - K_iso) / Npts
assert_almost_equal(reconstruction_error,
clf.reconstruction_error())
def test_transform():
n_samples = 200
n_components = 10
noise_scale = 0.01
# Create S-curve dataset
X, y = datasets.samples_generator.make_s_curve(n_samples, random_state=0)
# Compute isomap embedding
iso = manifold.Isomap(n_components, 2)
X_iso = iso.fit_transform(X)
# Re-embed a noisy version of the points
rng = np.random.RandomState(0)
noise = noise_scale * rng.randn(*X.shape)
X_iso2 = iso.transform(X + noise)
# Make sure the rms error on re-embedding is comparable to noise_scale
assert_less(np.sqrt(np.mean((X_iso - X_iso2) ** 2)), 2 * noise_scale)
def test_pipeline():
# check that Isomap works fine as a transformer in a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('isomap', manifold.Isomap()),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
| bsd-3-clause |
rspavel/spack | var/spack/repos/builtin/packages/py-misopy/package.py | 5 | 1114 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyMisopy(PythonPackage):
"""MISO (Mixture of Isoforms) is a probabilistic framework that
quantitates the expression level of alternatively spliced genes from
RNA-Seq data, and identifies differentially regulated isoforms or exons
across samples."""
homepage = "http://miso.readthedocs.io/en/fastmiso/"
url = "https://pypi.io/packages/source/m/misopy/misopy-0.5.4.tar.gz"
version('0.5.4', sha256='377a28b0c254b1920ffdc2d89cf96c3a21cadf1cf148ee6d6ef7a88ada067dfc')
depends_on('py-setuptools', type='build')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-matplotlib', type=('build', 'run'))
depends_on('samtools')
depends_on('bedtools2')
| lgpl-2.1 |
unnikrishnankgs/va | venv/lib/python3.5/site-packages/tensorflow/models/transformer/example.py | 21 | 2118 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from scipy import ndimage
import tensorflow as tf
from spatial_transformer import transformer
import numpy as np
import matplotlib.pyplot as plt
# %% Create a batch of three images (1600 x 1200)
# %% Image retrieved from:
# %% https://raw.githubusercontent.com/skaae/transformer_network/master/cat.jpg
im = ndimage.imread('cat.jpg')
im = im / 255.
im = im.reshape(1, 1200, 1600, 3)
im = im.astype('float32')
# %% Let the output size of the transformer be half the image size.
out_size = (600, 800)
# %% Simulate batch
batch = np.append(im, im, axis=0)
batch = np.append(batch, im, axis=0)
num_batch = 3
x = tf.placeholder(tf.float32, [None, 1200, 1600, 3])
x = tf.cast(batch, 'float32')
# %% Create localisation network and convolutional layer
with tf.variable_scope('spatial_transformer_0'):
# %% Create a fully-connected layer with 6 output nodes
n_fc = 6
W_fc1 = tf.Variable(tf.zeros([1200 * 1600 * 3, n_fc]), name='W_fc1')
# %% Zoom into the image
initial = np.array([[0.5, 0, 0], [0, 0.5, 0]])
initial = initial.astype('float32')
initial = initial.flatten()
b_fc1 = tf.Variable(initial_value=initial, name='b_fc1')
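    # Added annotation (not in the original example): the six bias values form
    # a 2x3 affine matrix [[s, 0, t_x], [0, s, t_y]] in the transformer's
    # normalized coordinates, so [[0.5, 0, 0], [0, 0.5, 0]] samples the central
    # half of the image, i.e. a 2x zoom on the centre.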
h_fc1 = tf.matmul(tf.zeros([num_batch, 1200 * 1600 * 3]), W_fc1) + b_fc1
h_trans = transformer(x, h_fc1, out_size)
# %% Run session
sess = tf.Session()
sess.run(tf.global_variables_initializer())
y = sess.run(h_trans, feed_dict={x: batch})
# plt.imshow(y[0])
| bsd-2-clause |
gyllstar/appleseed | ext/results/plot_preinstall.py | 2 | 8454 | import numpy as np
import matplotlib.pyplot as plt
import scipy as sp
import scipy.stats
import os, glob
#from argparse import ArgumentParser
#parser = ArgumentParser(description="plot_loss_rate PCount Results")
#parser.add_argument("--file", dest="data_file",type=str,help="data file to used to generate plot_loss_rate",default='pcount-results-l10-u5.csv ')
#args = parser.parse_args()
def compute_standard_deviation(error,n):
zval=1.645
n = n**0.5
tmp = float(error) * n
tmp = float(tmp)/zval
return tmp
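# Added note (illustrative): compute_standard_deviation inverts a 90% confidence
# half-width, error = 1.645 * s / sqrt(n), back to the sample standard deviation
# s.  For example, error=0.5 with n=25 gives s = 0.5 * 5 / 1.645, roughly 1.52.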
def mean_confidence_interval2(data, confidence=0.95):
a = 1.0*np.array(data)
n = len(a)
m, se = np.mean(a), scipy.stats.sem(a)
h = se * sp.stats.t._ppf((1+confidence)/2., n-1)
return m, m-h, m+h
def mean_confidence_interval(data, confidence=0.95):
a = 1.0*np.array(data)
n = len(a)
m, se = np.mean(a), scipy.stats.sem(a)
h = se * sp.stats.t._ppf((1+confidence)/2., n-1)
return m,h
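# Added note (illustrative): mean_confidence_interval returns (mean, half-width),
# which plugs directly into plt.errorbar(..., yerr=half_width) below, whereas
# mean_confidence_interval2 returns the interval endpoints (mean, lower, upper).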
def compute_ptree_indices(basic_data,merger_data):
all_ptrees=basic_data['ptrees']
indx=0
new_ptree_num=True
prev_ptree_num=int(all_ptrees[0])
ptree_indices=[]
for data in range(0,len(all_ptrees)):
curr_ptree_num = int(all_ptrees[indx])
if curr_ptree_num != prev_ptree_num:
ptree_indices.append(indx-1)
prev_ptree_num = curr_ptree_num
indx+=1
ptree_indices.append(len(all_ptrees)-1)
return ptree_indices
def plot_msgs(graph_num,basic_file,merger_file):
#num_primary_trees,num_affected_trees,total_pt_nodes,total_bt_nodes,total_overlap_nodes,total_msgs,total_unique_edges,total_garbag
basic_data = np.genfromtxt(basic_file, delimiter=',',names=['ptrees', 'num_affected','pt_nodes', 'bt_nodes','overlap','msgs','unique_edges', 'garbage' ])
#merger_data = np.genfromtxt(merger_file, delimiter=',', names=['ptrees', 'num_affected','pt_nodes', 'bt_nodes','overlap','msgs','unique_edges', 'garbage' ])
merger_data = np.genfromtxt(merger_file, delimiter=',', names=['ptrees', 'num_affected','pt_nodes', 'bt_nodes','overlap','msgs','unique_edges', 'garbage', 'num_edges','num_pt_edges','avg_pt_link_load','num_pt_reuses'])
#total_num_graph_edges,total_num_pt_edges,avg_ptree_link_load,num_pt_reuse_rules
all_ptrees=basic_data['ptrees']
all_affected_trees=basic_data['num_affected']
all_b_msgs=basic_data['msgs']
all_b_garbage=basic_data['garbage']
all_m_msgs=merger_data['msgs']
all_m_garbage=merger_data['garbage']
all_pt_nodes = basic_data['pt_nodes']
all_bt_nodes = basic_data['bt_nodes']
ptree_indices=compute_ptree_indices(basic_data, merger_data)
i=0
b_msgs=[]
m_msgs=[]
b_garbage=[]
m_garbage=[]
b_msgs_error=[]
m_msgs_error=[]
b_garbage_error=[]
m_garbage_error=[]
num_affected_trees=[]
num_affected_trees_error=[]
num_pt_nodes=[]
num_pt_nodes_error=[]
num_bt_nodes=[]
num_bt_nodes_error=[]
num_ptrees=[]
for j in ptree_indices:
num_ptrees.append(all_ptrees[i])
b_msg_tmp = all_b_msgs[i:j]
m_msg_tmp = all_m_msgs[i:j]
b_garbage_tmp = all_b_garbage[i:j]
m_garbage_tmp = all_m_garbage[i:j]
affected_trees_tmp = all_affected_trees[i:j]
pt_nodes_tmp = all_pt_nodes[i:j]
bt_nodes_tmp = all_bt_nodes[i:j]
b_mn,b_err = mean_confidence_interval(b_msg_tmp)
b_msgs.append(b_mn)
b_msgs_error.append(b_err)
m_mn,m_err = mean_confidence_interval(m_msg_tmp)
m_msgs.append(m_mn)
m_msgs_error.append(m_err)
b_mn,b_err = mean_confidence_interval(b_garbage_tmp)
b_garbage.append(b_mn)
b_garbage_error.append(b_err)
m_mn,m_err = mean_confidence_interval(m_garbage_tmp)
m_garbage.append(m_mn)
m_garbage_error.append(m_err)
mn,err = mean_confidence_interval(affected_trees_tmp)
num_affected_trees.append(mn)
num_affected_trees_error.append(err)
mn,err = mean_confidence_interval(pt_nodes_tmp)
#mn=mn/float(num_ptrees[-1])
#err=err/float(num_ptrees[-1])
mn=mn/float(num_ptrees[-1])
err=err/float(num_ptrees[-1])
num_pt_nodes.append(mn)
num_pt_nodes_error.append(err)
mn,err = mean_confidence_interval(bt_nodes_tmp)
mn=mn/float(num_affected_trees[-1])
err=err/float(num_affected_trees[-1])
num_bt_nodes.append(mn)
num_bt_nodes_error.append(err)
i=j+1
plt.clf()
print '\n\t experiment stats'
print '\t ---------------------------------------------------------------------------------------------------------------'
print '\t\t debugging: data file ptrees indices: %s' %(ptree_indices)
print '\t\t number of ptrees=%s' %(num_ptrees)
print '\t\t number of basic msgs=%s'%(b_msgs)
print '\t\t number of merger msgs=%s' %(m_msgs)
avg_gap=[]
percent_pt_node_reuse=[]
for cnt in range(0,len(b_msgs)):
gap = b_msgs[cnt] - m_msgs[cnt]
avg_gap.append(gap)
msgs_per_bt = b_msgs[cnt]/float(num_affected_trees[cnt])
avg_pt_reuses = num_bt_nodes[cnt] - msgs_per_bt
percent_pt_node_reuse.append(avg_pt_reuses/float(num_bt_nodes[cnt]))
print '\t\t basic - merger = %s' %(avg_gap)
print '\t\t number of affected trees = %s' %(num_affected_trees)
print '\t\t mean # nodes in PT tree = %s' %(num_pt_nodes)
print '\t\t mean # nodes in BT tree = %s' %(num_bt_nodes)
print '\t\t mean percent of PT node reuse = %s, + individual results=%s' %(np.mean(percent_pt_node_reuse),percent_pt_node_reuse)
print '\t ---------------------------------------------------------------------------------------------------------------\n'
x_upper_bound = int(graph_num)/2+1
plt.errorbar(num_ptrees, b_msgs, yerr=b_msgs_error, linewidth=1, marker="o", color='black',label="basic")
plt.errorbar(num_ptrees, m_msgs, yerr=m_msgs_error, linewidth=1, marker="o", color='blue',label="merger")
plt.xlabel("Number of Primary Trees",fontsize=14)
plt.ylabel("Number of Control Messages",fontsize=14)
plt.xlim(0,x_upper_bound)
plt.legend(loc='upper left')
#plt.show()
fig_name = figs_folder + 'msgs-ieee%s' %(graph_num) + ".pdf"
print "\t writing results to %s" %(fig_name)
plt.savefig(fig_name,bbox_inches='tight')
plt.clf()
plt.errorbar(num_ptrees, b_garbage, yerr=b_garbage_error, linewidth=1, marker="o", color='black',label="basic")
plt.errorbar(num_ptrees, m_garbage, yerr=m_garbage_error, linewidth=1, marker="o", color='blue',label="merger")
plt.xlabel("Number of Primary Trees",fontsize=14)
plt.ylabel("Number of Stale Flow Entries",fontsize=14)
plt.xlim(0,x_upper_bound)
plt.legend(loc='upper left')
#plt.show()
fig_name = figs_folder + 'garbage-ieee%s' %(graph_num) + ".pdf"
print "\t writing results to %s" %(fig_name)
plt.savefig(fig_name,bbox_inches='tight')
plt.clf()
plt.errorbar(num_ptrees, num_affected_trees, yerr=num_affected_trees_error, linewidth=1, marker="o", color='red')
plt.xlabel("Number of Primary Trees",fontsize=14)
plt.ylabel("Number of Affected Trees",fontsize=14)
plt.legend(loc='upper left')
#plt.show()
fig_name = figs_folder + 'affected-trees-ieee%s' %(graph_num) + ".pdf"
print "\t writing results to %s" %(fig_name)
plt.savefig(fig_name,bbox_inches='tight')
plt.clf()
plt.scatter(all_ptrees,all_b_msgs,marker='x',color='black',s=40,label="basic")
plt.scatter(all_ptrees,all_b_msgs,marker='o',color='blue',s=10,label="merger")
plt.xlabel("Number of Primary Trees",fontsize=14)
plt.ylabel("Number of Control Messages",fontsize=14)
plt.legend(loc='upper left')
fig_name = figs_folder + 'scatter-msgs-ieee%s' %(graph_num) + ".pdf"
print "\t writing results to %s" %(fig_name)
plt.savefig(fig_name,bbox_inches='tight')
#os._exit(0)
def get_graph_num(file_name):
# input: 'backup-msg-merger-ieee30
substr = file_name.split("-")[-1]
num_as_str = substr.lstrip('ieee')
return int(num_as_str)
def plot_graph_num(graph_num,file_names):
basic_file=None
merger_file = None
for f in file_names:
base_name = f.split(".")[0]
base_name = base_name.split("/")[-1]
file_num = get_graph_num(base_name)
if file_num == graph_num:
if 'basic' in f:
basic_file = f
elif 'merger' in f:
merger_file = f
if basic_file == None or merger_file == None:
print '\n skipping IEEE %s because missing data files' %(graph_num)
return
print '\n\n plotting IEEE %s with files %s %s' %(graph_num,merger_file,basic_file)
plot_msgs(graph_num,basic_file,merger_file) #maybe # of affected trees
msgs_folder = 'msgs/'
figs_folder = 'msgs/figs/'
base_file_pattern = 'backup-msg-*'
ieee_graph_nums = [14,30,57,118,300]
file_names = glob.glob(msgs_folder + base_file_pattern + '.csv')
for graph_num in ieee_graph_nums:
plot_graph_num(graph_num, file_names) | gpl-3.0 |
wavelets/office-nfl-pool | make_predictions.py | 6 | 14756 | """
make_predictions.py
This is the main program that runs the prediction.
Output:
'excel_files/prediction_season2015_week{i}.xlsx'
in which the 'i' is the week number of the prediction.
Usage:
1. Edit the file excel_files/season2015_datasheet.xlsx
so that it contains the recent game outcomes.
2. Run this function using Python 3:
`python3 make_predictions.py`
Dependencies:
'transform.py'
Helper functions to merge, smooth, and add lags
to our data (using Pandas).
'data/nfl_season2008to2014.csv'
NFL game outcomes pulled from Wikipedia, augmented
with data donated by TeamRankings.com
'excel_files/season2015_datasheet.xlsx'
A partially filled spreadsheet of game data
that you must fill out as the season goes.
"""
from __future__ import print_function
import os
import numpy as np
import pandas as pd
from datetime import date
from openpyxl import load_workbook, styles
from sklearn import ensemble, linear_model
# Our local module
import transform
season2015_filename = os.path.join('excel_files', 'season2015_datasheet.xlsx')
prior_seasons_filename = os.path.join('data', 'nfl_season2008to2014.csv')
output_filename = os.path.join('excel_files', 'prediction.xlsx')
print("Here we go...")
print("Reading historical data from {}".format(prior_seasons_filename))
print("Reading this season's data from {}".format(season2015_filename))
##--------------------------------------------------- Read the Data ------##
# - Combine the historical data with this season's data.
# --> this part is the most involved because we have to be
# sure the column names are the same when we join things up.
#
# - Read the new datasheet for this season
# --> columns: Week, Date, Day of Week,
# Home Team, Home Points, Away Team, Away Points,
# Vegas Spread, Home Fumbles, Home Penalty Yards,
# Away Fumbles, Away Penalty Yards
season2015_bygame = pd.read_excel(season2015_filename)
# - Read the data from seasons 2008-2014
# --> columns: Season, Category, Week, Team, Opponent, AtHome,
# Points, PointsAllowed, Date, Stadium, Overtime,
# VegasSpread, VegasTotal, VegasWin, Interceptions,
# Sacks, Fumbles, PenaltyYards
prior_data_byteam = pd.read_csv(prior_seasons_filename)
del prior_data_byteam['Stadium'], prior_data_byteam['Overtime']
del prior_data_byteam['VegasTotal'], prior_data_byteam['VegasWin']
##---------------------------------------------- Transform the Data ------##
# 1. Make each game entry in the new dataset into two rows (one per opponent)
home = season2015_bygame[[
'Week', 'Home Team', 'Away Team',
'Home Points', 'Away Points',
'Vegas Spread', 'Home Fumbles', 'Home Penalty Yards']].copy()
# for away, swap the order of the Home/Away Team and Points
away = season2015_bygame[[
'Week', 'Away Team', 'Home Team', # Swap 'Away' and 'Home'
'Away Points', 'Home Points',
'Vegas Spread', 'Away Fumbles', 'Away Penalty Yards']].copy()
columns = [
'Week', 'Team', 'Opponent', 'Points', 'PointsAllowed',
'VegasSpread', 'Fumbles', 'PenaltyYards']
home.columns = columns
away.columns = columns
home['AtHome'] = True
away['AtHome'] = False
# When assigning to existing columns you must use
# '.ix' '.iloc' or '.loc' because the '[]' operator
# sometimes returns a copy of the contents instead
# of accessing the original contents.
away.ix[:, 'VegasSpread'] = - away.VegasSpread
season2015_byteam = pd.concat([home, away])
# The 'pd.concat' means now some row indices may be
# duplicated. Reset them, and since we don't care what they
# were originally, drop the auto-generated column with the old indices.
season2015_byteam.reset_index(drop=True, inplace=True)
# There are no 'bye' weeks in the schedule, so
# add them ourselves by joining against another
# DataFrame that has every combination of Week, Team.
# Entries without an opponent are bye weeks.
all_teams = list(home.Team.unique())
all_weeks = list(home.Week.unique())
all_teams_weeks = pd.DataFrame(dict(
Team = np.repeat(all_teams, len(all_weeks)),
Week = all_weeks * len(all_teams)
))
season2015_byteam = season2015_byteam.merge(
all_teams_weeks, on=['Team', 'Week'], how='outer')
season2015_byteam['Season'] = 2015 # Add the season
# Make the 'bye' weeks be at home
season2015_byteam.ix[season2015_byteam.AtHome.isnull(), 'AtHome'] = True
# 2. Combine the two datasets.
# (make sure the matching columns are spelled the same)
df = pd.concat([prior_data_byteam, season2015_byteam])
df.reset_index(drop=True, inplace=True)
del prior_data_byteam, season2015_byteam, home, away
# 3. Add new derived columns
transform.add_derived_columns(df)
# 3a. Add a rolling mean / weighted moving average to the data.
transform.add_rolling_mean(df, ['Fumbles', 'Interceptions', 'Sacks'], prefix='m_')
transform.add_ewma(df, ['PenaltyYards', 'Points', 'PointsAllowed'], prefix='ewma_')
# 3b. Add lags so data on a given row has information from prior games.
transform.add_lag(df, [
'm_Fumbles', 'm_Interceptions', 'm_Sacks',
'ewma_PenaltyYards', 'ewma_Points', 'ewma_PointsAllowed'
],
prefix='lag_')
# 3c. Select out the columns we want in the model
df = df[[
# 'Spread' is our dependent variable
'Spread',
# These won't necessarily be in the model but we need them to convert between
# 'bygame' (one row per game) and 'byteam' (one row per team) format
'Season', 'Week', 'Team', 'Opponent', 'AtHome',
# This we want if we ever are going to predict points
'Points', 'PointsAllowed',
# These we do want in the model
'VegasSpread', 'LastWkBye',
'lag_m_Fumbles', 'lag_m_Interceptions',
'lag_m_Sacks', 'lag_ewma_PenaltyYards',
'lag_ewma_Points', 'lag_ewma_PointsAllowed'
]]
# 4. Convert to 'bygame' (one row per game) format
df = transform.from_byteam_to_bygame(
df,
dont_mirror=['Spread', 'VegasSpread', 'Points', 'PointsAllowed'])
##----------------------------------------- Build the Picking Model ------##
# Separate the dependent and independent variables.
win = df.Spread > 0
# Pick the columns to use... ::NOW YOU:: ...you can edit these...
input_df = df[[
'Home', # 'Away', # maybe overkill to add the teams...
# ... but you can comment them out
'H_LastWkBye', 'H_lag_m_Fumbles', 'H_lag_ewma_PenaltyYards',
'H_lag_ewma_Points', 'H_lag_ewma_PointsAllowed',
'A_LastWkBye', 'A_lag_m_Fumbles', 'A_lag_ewma_PenaltyYards',
'A_lag_ewma_Points', 'A_lag_ewma_PointsAllowed'
]]
# Convert to dummy variables
input_data = pd.get_dummies(input_df)
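# Added note (illustrative): get_dummies one-hot encodes the categorical columns,
# e.g. a 'Home' value of 'Bears' (made-up name) becomes a 0/1 column 'Home_Bears',
# while the numeric lag columns pass through unchanged.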
# Discard 'bye' weeks and weeks that have nulls
# thanks to the lag operation
viable_input = input_data.notnull().all(axis=1)
# Split the training and test set.
# - Future weeks have no spread data but do have an 'Away' team
# (meaning they're not 'bye' weeks)
future_weeks = (df.Spread.isnull() & df.Away.notnull())
# Set up the gradient boosting classifier model
gbc_model = ensemble.GradientBoostingClassifier(max_depth=5)
# Train
train = viable_input & ~future_weeks
gbc_model.fit(input_data[train], win[train])
# Make the prediction
if sum(viable_input & future_weeks) == 0:
print("No viable data available for prediction.")
print("Columns are: {}\n".format(input_df.columns))
# The output is one column per output category (in our case False, True)
prediction = gbc_model.predict_proba(input_data[viable_input & future_weeks])
win_probability = prediction[:, 1]
# Merge the prediction back into the other data
result = df[viable_input & future_weeks][['Season', 'Week', 'Home', 'Away']]
result['WinProbability'] = win_probability
result.sort(['Season', 'Week','WinProbability'], inplace=True)
result['Confidence'] = result.groupby(['Season','Week']).WinProbability.rank()
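# Added note (illustrative): rank() is ascending, so within each week the game
# with the highest predicted win probability gets the largest confidence value,
# e.g. probabilities [0.55, 0.80, 0.62] rank to confidences [1, 3, 2].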
# Rename the columns for merge with the season2015_bygame (want the date and day of week)
result.columns = ['Season', 'Week', 'Home Team', 'Away Team', 'Win Probability', 'Confidence']
result = result.merge(season2015_bygame, on=['Week', 'Home Team', 'Away Team'])
result = result[[
'Season', 'Week', 'Home Team', 'Away Team',
'Date', 'Day of Week', 'Win Probability', 'Confidence']]
result.sort('Date', inplace=True)
##-------------------------------- Build the Score Prediction Model ------##
# Recreate a 'by team' view -- but this time each row has the opponent's
# moving average data as well.
#
# Remember to correctly swap all 'Home' and 'Away' labels for 'Team' and 'Opponent'
# in the home team, or the opposite for the away team.
home = df.copy()
away = df.copy()
home.columns = ['Team' if c =='Home' else 'Opponent' if c=='Away' else c for c in home.columns]
away.columns = ['Team' if c =='Away' else 'Opponent' if c=='Home' else c for c in away.columns]
home.columns = [c[2:] if c.startswith('H_') else 'O_' + c[2:] if c.startswith('A_') else c for c in home.columns]
away.columns = [c[2:] if c.startswith('A_') else 'O_' + c[2:] if c.startswith('H_') else c for c in away.columns]
home.columns = ['_' if c =='PointsAllowed' else c for c in home.columns]
away.columns = ['_' if c =='Points' else 'Points' if c=='PointsAllowed' else c for c in away.columns]
home['AtHome'] = True
away['AtHome'] = False
df_byteam = pd.concat([home, away])
# Exclude the rows where 'Team' is null (duplicate bye weeks)
df_byteam = df_byteam[df_byteam.Team.notnull()]
# Reset the index since now there are duplicate row indices
df_byteam.reset_index(drop=True, inplace=True)
## Prepare the data
# Separate out the dependent variable
points = df_byteam.Points
# Pick the columns to use... ::NOW YOU:: ...you can edit...
input_df_byteam = df_byteam[[
'Team', #'Opponent', Again maybe overkill and overfit...but you can change it.
'AtHome',
'LastWkBye', 'lag_m_Fumbles', 'lag_ewma_PenaltyYards',
'lag_ewma_Points', 'lag_ewma_PointsAllowed',
'O_LastWkBye', 'O_lag_m_Fumbles', 'O_lag_ewma_PenaltyYards',
'O_lag_ewma_Points', 'O_lag_ewma_PointsAllowed'
]]
# Convert to dummy variables
input_data_byteam = pd.get_dummies(input_df_byteam)
# Discard 'bye' weeks and weeks that have nulls
# thanks to the lag operation (axis=1 means do it row-wise)
viable_input = input_data_byteam.notnull().all(axis=1)
# Split the training and test set -- make 'future_weeks' be the test data
# - Future weeks have no Points data but do have an 'Opponent'
# (meaning they're not 'bye' weeks)
future_weeks = df_byteam.Points.isnull() & df_byteam.Opponent.notnull()
# Set up the ridge regression model
ridge_model = linear_model.Ridge()
# Train
train = viable_input & ~future_weeks
ridge_model.fit(input_data_byteam[train], points[train])
# Make the prediction
if sum(viable_input & future_weeks) == 0:
print("No viable data available for prediction.")
print("Columns are: {}\n".format(input_df_byteam.columns))
# The output is one column per output category (in our case False, True)
predicted_points = ridge_model.predict(input_data_byteam[viable_input & future_weeks])
# Merge the prediction back into the other data
result_byteam = df_byteam[viable_input & future_weeks][['Season', 'Week', 'Team', 'Opponent', 'AtHome']]
result_byteam['Predicted Points'] = predicted_points
home_result = result_byteam[result_byteam.AtHome]
away_result = result_byteam[~result_byteam.AtHome]
home_result.columns = ['Home' if c == 'Team' else 'Away' if c == 'Opponent' else c for c in home_result.columns]
away_result.columns = ['Away' if c == 'Team' else 'Home' if c == 'Opponent' else c for c in away_result.columns]
away_result.columns = [c +' Allowed' if c.endswith('Points') else c for c in away_result.columns]
del home_result['AtHome'], away_result['AtHome']
points_result = home_result.merge(away_result, on=['Season', 'Week', 'Home', 'Away'])
points_result.columns = [c + ' Team' if c in ('Home', 'Away') else c for c in points_result.columns]
# And finally, merge the points_result with the previously determined win result.
result = result.merge(points_result, on=['Season', 'Week', 'Home Team', 'Away Team'])
##------------------------------------------------- Output to Excel ------##
#
# Default for Pandas is to draw a border around the header columns.
# The below explicitly turns that off.
# (Otherwise writing to .xlsx breaks for some reason)
pd.core.format.header_style = None
# Open an excel workbook and append to it.
file_already_exists = os.path.isfile(output_filename)
if file_already_exists:
book = load_workbook(output_filename)
with pd.ExcelWriter(output_filename, engine='openpyxl') as workbook:
if file_already_exists:
workbook.book = book
workbook.sheets = dict((ws.title, ws) for ws in book.worksheets)
sheet_name = date.today().strftime('prediction on %d %b %Y')
result.to_excel(
workbook,
sheet_name=sheet_name,
index=False) # don't show the row numbers
sheet = workbook.sheets[sheet_name]
# Set column widths. You have to change this if you add columns
col_widths = zip('ABCDEFGHIJ', (9, 16, 12, 20, 12, 20, 12, 16, 14, 14))
for col, width in col_widths:
sheet.column_dimensions[col].width = width
# Bold and center the headers
cell = col + "1"
try:
sheet[cell].font = styles.Font(bold=True)
sheet[cell].alignment = styles.Alignment(horizontal = 'center')
except TypeError:
sheet[cell].style.font = styles.fonts.Font()
sheet[cell].style.font_style = styles.fonts.Font.bold = True
sheet[cell].style.alignment = styles.alignment.Alignment()
sheet[cell].style.alignment_style = styles.alignment.Alignment.HORIZONTAL_CENTER
# Make a horizontal boundary between each week
try:
separator = styles.borders.Border(bottom=styles.borders.Side(style='thin'))
except AttributeError:
separator = styles.borders.Border()
separator.border_style = styles.borders.Border.BORDER_THIN
for row_offset in result[['Week','Home Team']].groupby('Week').count().cumsum().values:
for col_offset in range(result.shape[1]):
try:
sheet.cell( row=row_offset[0]+1, column=col_offset+1
).border = separator
except AttributeError:
sheet.cell( row=row_offset[0]+1, column=col_offset+1
).style.border = separator
# Finished.
print("Finished.")
print("The prediction is in {} on tab {}\n".format(output_filename, sheet_name))
| mit |
kchodorow/tensorflow | tensorflow/contrib/learn/python/learn/estimators/_sklearn.py | 153 | 6723 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""sklearn cross-support."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import numpy as np
import six
def _pprint(d):
return ', '.join(['%s=%s' % (key, str(value)) for key, value in d.items()])
class _BaseEstimator(object):
"""This is a cross-import when sklearn is not available.
Adopted from sklearn.BaseEstimator implementation.
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py
"""
def get_params(self, deep=True):
"""Get parameters for this estimator.
Args:
deep: boolean, optional
If `True`, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns:
params : mapping of string to any
Parameter names mapped to their values.
"""
out = dict()
param_names = [name for name in self.__dict__ if not name.startswith('_')]
for key in param_names:
value = getattr(self, key, None)
if isinstance(value, collections.Callable):
continue
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as pipelines). The former have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
Args:
**params: Parameters.
Returns:
self
Raises:
ValueError: If params contain invalid names.
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
def __repr__(self):
class_name = self.__class__.__name__
return '%s(%s)' % (class_name,
_pprint(self.get_params(deep=False)),)
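# Illustrative usage sketch (not part of the original module): how the
# get_params/set_params contract above behaves, including the nested
# ``<component>__<parameter>`` form. The toy estimator and attribute names
# below are assumptions for illustration only.
#
#   class _ToyEstimator(_BaseEstimator):
#     def __init__(self, alpha=1.0, base=None):
#       self.alpha = alpha
#       self.base = base
#
#   est = _ToyEstimator(base=_ToyEstimator())
#   est.get_params(deep=True)   # includes 'base__alpha' from the nested walk
#   est.set_params(alpha=0.5, base__alpha=2.0)   # updates both levels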
# pylint: disable=old-style-class
class _ClassifierMixin():
"""Mixin class for all classifiers."""
pass
class _RegressorMixin():
"""Mixin class for all regression estimators."""
pass
class _TransformerMixin():
"""Mixin class for all transformer estimators."""
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting.
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
Examples:
>>> from sklearn.svm import LinearSVC
>>> from sklearn.exceptions import NotFittedError
>>> try:
... LinearSVC().predict([[1, 2], [2, 3], [3, 4]])
... except NotFittedError as e:
... print(repr(e))
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
NotFittedError('This LinearSVC instance is not fitted yet',)
Copied from
  https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/exceptions.py
"""
# pylint: enable=old-style-class
def _accuracy_score(y_true, y_pred):
score = y_true == y_pred
return np.average(score)
def _mean_squared_error(y_true, y_pred):
if len(y_true.shape) > 1:
y_true = np.squeeze(y_true)
if len(y_pred.shape) > 1:
y_pred = np.squeeze(y_pred)
return np.average((y_true - y_pred)**2)
def _train_test_split(*args, **options):
# pylint: disable=missing-docstring
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
if test_size is None and train_size is None:
train_size = 0.75
elif train_size is None:
train_size = 1 - test_size
train_size = int(train_size * args[0].shape[0])
np.random.seed(random_state)
indices = np.random.permutation(args[0].shape[0])
train_idx, test_idx = indices[:train_size], indices[train_size:]
result = []
for x in args:
result += [x.take(train_idx, axis=0), x.take(test_idx, axis=0)]
return tuple(result)
# If "TENSORFLOW_SKLEARN" flag is defined then try to import from sklearn.
TRY_IMPORT_SKLEARN = os.environ.get('TENSORFLOW_SKLEARN', False)
if TRY_IMPORT_SKLEARN:
# pylint: disable=g-import-not-at-top,g-multiple-import,unused-import
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin, TransformerMixin
from sklearn.metrics import accuracy_score, log_loss, mean_squared_error
from sklearn.cross_validation import train_test_split
try:
from sklearn.exceptions import NotFittedError
except ImportError:
try:
from sklearn.utils.validation import NotFittedError
except ImportError:
pass
else:
# Naive implementations of sklearn classes and functions.
BaseEstimator = _BaseEstimator
ClassifierMixin = _ClassifierMixin
RegressorMixin = _RegressorMixin
TransformerMixin = _TransformerMixin
accuracy_score = _accuracy_score
log_loss = None
mean_squared_error = _mean_squared_error
train_test_split = _train_test_split
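# Usage note (assumption, not from the original file): exporting the
# TENSORFLOW_SKLEARN environment variable before this module is imported makes
# the real scikit-learn implementations above win, e.g.
#
#   os.environ['TENSORFLOW_SKLEARN'] = '1'
#
# otherwise BaseEstimator, train_test_split, etc. resolve to the naive
# fallbacks defined in this file.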
| apache-2.0 |
magic2du/contact_matrix | Contact_maps/mnist_psuedo_ipython_dl_ppi/code/DL_Stacked_Model_Mnist_Psuedo_05_18_2015.py | 1 | 39575 |
# coding: utf-8
# In[1]:
# this part imports the libraries used throughout the script
import sys
sys.path.append('../../../libs/')
sys.path.append('../libs/')
sys.path.append('../../libs/')
from DL_libs import *
import csv
import cPickle
import gzip
import math
import os
import os.path
import pdb, PIL
import pickle
import random
import time
import numpy
import numpy as np
#import pandas as pd
import scipy.stats as ss
import sklearn
from dateutil import parser
from datetime import timedelta
from itertools import izip #new
from sklearn import cross_validation, preprocessing, svm
from sklearn.cross_validation import KFold, train_test_split
from sklearn.svm import LinearSVC, SVC
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
import IO_class
from IO_class import FileOperator
from dlnn.io_func.model_io import _nnet2file, _cfg2file, _file2nnet, log
from dlnn.utils.learn_rates import _lrate2file, _file2lrate
from dlnn.models.dnn import DNN
from dlnn.models.dropout_nnet import DNN_Dropout
from dlnn.models.sda import SdA, Sda_xy
import theano.tensor as T
from dlnn.utils.network_config import NetworkConfig
from dlnn.utils.sda_config import SdAConfig
from dlnn.utils.utils import parse_arguments, save_two_integers, read_two_integers
from dlnn.learning.sgd import train_sgd, validate_by_minibatch
from dlnn.utils.utils import shared_dataset_X
from numpy import dtype, shape
from dlnn.factories import Sda_factory, Sda_xy_factory, DNN_factory, Parellel_Sda_factory
# In[6]:
from DL_libs import performance_score, Preprocessing_Scaler_with_mean_point5, cal_epochs, pretrain_a_Sda_with_estop, train_a_Sda, trainSda
#filename = 'SUCCESS_log_CrossValidation_load_DL_remoteFisherM1_DL_RE_US_DL_RE_US_1_1_19MAY2014.txt'
#filename = 'listOfDDIsHaveOver2InterfacesHave40-75_Examples_2010_real_selected.txt' #for testing
# In[2]:
# set settings for this script
settings = {}
settings['with_auc_score'] = False
settings['reduce_ratio'] = 1
settings['SVM'] = 1
settings['SVM_RBF'] = 1
settings['SVM_POLY'] = 1
settings['DL'] = 1
settings['Log'] = 1
settings['SAE_Log'] = 0
settings['SAE_SVM'] = 0
settings['SAE_SVM_RBF'] = 0
settings['SAE_SVM_POLY'] = 0
settings['DL_S'] = 1
settings['SAE_S_SVM'] = 1
settings['SAE_S_SVM_RBF'] = 1
settings['SAE_S_SVM_POLY'] = 0
settings['number_iterations'] = 1
settings['finetune_lr'] = 0.01
settings['batch_size'] = 30
settings['pretraining_interations'] = 20000#10000
settings['pretrain_lr'] = 0.001
#settings['training_epochs'] = 300 #300
settings['training_interations'] = 50002 #300
settings['hidden_layers_sizes'] = [1000, 1000]
settings['corruption_levels'] = [0, 0]
settings['number_of_training'] = [10000]#[1000, 2500, 5000, 7500, 10000] # use all examples
settings['test_set_from_test'] = True
settings['Sda_new'] = 1
settings['DL_xy'] = 1
settings['SAE_SVM'] = 1
settings['SAE_SVM_COMBO'] = 0
settings['SVM_RBF'] = 1
settings['SAE_SVM_RBF'] = 0
settings['SAE_SVM_RBF_COMBO'] = 0
settings['SVM_POLY'] = 0
settings['DL_S'] = 1
settings['DL_U'] = 0
settings['DL_S_new'] = 1
settings['Sda_xy_with_first'] = 1
settings['DL_S_new_contraction'] = 1
settings['DL_S_new_sparsity'] = 1
settings['DL_S_new_weight_decay'] = 2
settings['DL_S_new_Drop_out'] = 1
cfg = settings.copy()
cfg['learning_rate'] = 0.001 # this is for pretraining
#cfg['train-data'] = "/home/du/Dropbox/Project/libs/dlnn/cmds/train.pickle.gz,partition=600m"
#cfg['batch_size'] = 100
cfg['wdir'] = '/home/du/Documents/tmp'
cfg['param-output-file']= 'sda.mdl'
cfg['sparsity'] = 0
cfg['sparsity_weight'] = 0
# 1 means use equal weight for X and Y; 0 means scale the weight of Y to 1/(size of X); None means use the original vector.
cfg['n_outs'] = 1
#cfg['lrate'] ='C:0.08:5 ' #'D:1:0.5:0.05,0.005:5000'
cfg['lrate_pre'] ='D:0.01:0.5:0.05,0.005:'
'''
constant --lrate="C:l:n"
Eg. C:0.08:15
run n iterations with lrate = l unchanged
newbob
--lrate="D:l:c:dh,ds:n"
Eg. D:0.08:0.5:0.05,0.05:8
starts with the learning rate l; if the validation error reduction between two consecutive epochs is less than dh, the learning rate is scaled by c during each of the remaining epochs. Training finally terminates when the validation error reduction between two consecutive epochs falls below ds. n is the minimum epoch number after which scaling can be performed.
min-rate newbob
--lrate="MD:l:c:dh,ml:n"
Eg. MD:0.08:0.5:0.05,0.0002:8
the same as newbob, except that training terminates when the learning rate value falls below ml
fixed newbob
--lrate="FD:l:c:eh,es"
Eg. FD:0.08:0.5:10,6 starts with the learning rate l; after eh epochs, the learning rate starts to be scaled by c. Training terminates after doing another es epochs once scaling starts.
'''
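# Illustrative lrate strings, assembled per the formats documented above (the
# second one mirrors how this script later combines lrate_pre with the computed
# epoch count; the numbers are examples, not tuned values):
#   cfg['lrate'] = 'C:0.08:15'                        # constant 0.08 for 15 epochs
#   cfg['lrate'] = 'D:0.01:0.5:0.05,0.005:' + str(50) # newbob, scaling allowed after 50 epochs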
import logging
import time
current_date = time.strftime("%m_%d_%Y")
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logname = 'log_DL_handwritten_digits' + current_date + '.log'
handler = logging.FileHandler(logname)
handler.setLevel(logging.DEBUG)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
#logger.debug('This message should go to the log file')
for key, value in settings.items():
logger.info(key +': '+ str(value))
# In[3]:
f = gzip.open('mnist.pkl.gz', 'rb')
train_set, valid_set, test_set = cPickle.load(f)
X_train,y_train = train_set
X_valid,y_valid = valid_set
X_total=np.vstack((X_train, X_valid))
X_total = np.array(X_total, dtype= theano.config.floatX)
print 'sample size', X_total.shape
y_total = np.concatenate([y_train, y_valid])
# In[5]:
################## generate data from training set###################
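# Pair construction: draw 100k random index pairs (A, B) from the 60k combined
# train+validation images; a pair is labelled positive when digit A is exactly
# one greater than digit B, and the negatives are truncated to the same count
# so the two classes stay balanced.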
array_A =[]
array_B =[]
for i in range(100000):
array_A.append(np.random.random_integers(0, 59999))
array_B.append(np.random.random_integers(0, 59999))
pos_index = []
neg_index = []
for index in xrange(100000):
if y_total[array_A[index]] - y_total[array_B[index]] == 1:
pos_index.append(index)
else:
neg_index.append(index)
print 'number of positive examples', len(pos_index)
selected_neg_index= neg_index[ : len(pos_index)]
array_A = np.array(array_A)
array_B = np.array(array_B)
index_for_positive_image_A = array_A[pos_index]
index_for_positive_image_B = array_B[pos_index]
index_for_neg_image_A = array_A[selected_neg_index]
index_for_neg_image_B = array_B[selected_neg_index]
X_pos_A = X_total[index_for_positive_image_A]
X_pos_B = X_total[index_for_positive_image_B]
X_pos_whole = np.hstack((X_pos_A,X_pos_B))
X_neg_A = X_total[index_for_neg_image_A]
X_neg_B = X_total[index_for_neg_image_B]
X_neg_whole = np.hstack((X_neg_A, X_neg_B))
print X_pos_A.shape, X_pos_B.shape, X_pos_whole.shape
print X_neg_A.shape, X_neg_B.shape, X_neg_whole.shape
X_whole = np.vstack((X_pos_whole, X_neg_whole))
print X_whole.shape
y_pos = np.ones(X_pos_whole.shape[0])
y_neg = np.zeros(X_neg_whole.shape[0])
y_whole = np.concatenate([y_pos,y_neg])
print y_whole
# In[7]:
#pylab.imshow(imageB.reshape(28, 28), cmap="Greys")
# In[8]:
def saveAsCsv(with_auc_score, fname, score_dict, arguments): #new
newfile = False
if os.path.isfile('report_' + fname + '.csv'):
pass
else:
newfile = True
csvfile = open('report_' + fname + '.csv', 'a+')
writer = csv.writer(csvfile)
if newfile == True:
writer.writerow(['no.', 'number_of_training', 'method', 'isTest']+ score_dict.keys()) #, 'AUC'])
for arg in arguments:
writer.writerow([i for i in arg])
csvfile.close()
def run_models(settings = None):
analysis_scr = []
with_auc_score = settings['with_auc_score']
n_outs = settings['n_outs']
for subset_no in xrange(1,settings['number_iterations']+1):
print("Subset:", subset_no)
################## generate data ###################
array_A =[]
array_B =[]
for i in range(100000):
array_A.append(np.random.random_integers(0, 59999))
array_B.append(np.random.random_integers(0, 59999))
pos_index = []
neg_index = []
for index in xrange(100000):
if y_total[array_A[index]] - y_total[array_B[index]] == 1:
pos_index.append(index)
else:
neg_index.append(index)
print 'number of positive examples', len(pos_index)
selected_neg_index= neg_index[ : len(pos_index)]
array_A = np.array(array_A)
array_B = np.array(array_B)
index_for_positive_image_A = array_A[pos_index]
index_for_positive_image_B = array_B[pos_index]
index_for_neg_image_A = array_A[selected_neg_index]
index_for_neg_image_B = array_B[selected_neg_index]
X_pos_A = X_total[index_for_positive_image_A]
X_pos_B = X_total[index_for_positive_image_B]
X_pos_whole = np.hstack((X_pos_A,X_pos_B))
X_neg_A = X_total[index_for_neg_image_A]
X_neg_B = X_total[index_for_neg_image_B]
X_neg_whole = np.hstack((X_neg_A, X_neg_B))
print X_pos_A.shape, X_pos_B.shape, X_pos_whole.shape
print X_neg_A.shape, X_neg_B.shape, X_neg_whole.shape
X_whole = np.vstack((X_pos_whole, X_neg_whole))
print X_whole.shape
y_pos = np.ones(X_pos_whole.shape[0])
y_neg = np.zeros(X_neg_whole.shape[0])
y_whole = np.concatenate([y_pos,y_neg])
print y_whole.shape
x_train_pre_validation, x_test, y_train_pre_validation, y_test = train_test_split(X_whole,y_whole, test_size=0.2, random_state=211)
for number_of_training in settings['number_of_training']:
x_train, x_validation, y_train, y_validation = train_test_split(x_train_pre_validation[:number_of_training],
y_train_pre_validation[:number_of_training],\
test_size=0.2, random_state=21)
'''
x_train, x_validation, y_train, y_validation = train_test_split(x_train_pre_validation[:],
y_train_pre_validation[:],\
test_size=0.4, random_state=21)
'''
print x_train.shape, y_train.shape, x_validation.shape, \
y_validation.shape, x_test.shape, y_test.shape
x_train_minmax, x_validation_minmax, x_test_minmax = x_train, x_validation, x_test
train_X_reduced = x_train
train_y_reduced = y_train
test_X = x_test
test_y = y_test
y_train_minmax = y_train
y_validation_minmax = y_validation
y_test_minmax = y_test
###original data###
################ end of data ####################
standard_scaler = preprocessing.StandardScaler().fit(train_X_reduced)
scaled_train_X = standard_scaler.transform(train_X_reduced)
scaled_test_X = standard_scaler.transform(test_X)
if settings['SVM']:
print "SVM"
Linear_SVC = LinearSVC(C=1, penalty="l2")
Linear_SVC.fit(scaled_train_X, y_train)
predicted_test_y = Linear_SVC.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append((subset_no, number_of_training, 'SVM', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = Linear_SVC.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append(( subset_no,number_of_training, 'SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SVM_RBF']:
print "SVM_RBF"
L1_SVC_RBF_Selector = SVC(C=1, gamma=0.01, kernel='rbf').fit(scaled_train_X, y_train)
predicted_test_y = L1_SVC_RBF_Selector.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append((subset_no, number_of_training, 'SVM_RBF', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append((subset_no,number_of_training, 'SVM_RBF', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SVM_POLY']:
print "SVM_POLY"
L1_SVC_POLY_Selector = SVC(C=1, kernel='poly').fit(scaled_train_X, train_y_reduced)
predicted_test_y = L1_SVC_POLY_Selector.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append(( subset_no, number_of_training,'SVM_POLY', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_POLY_Selector.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append((subset_no, number_of_training,'SVM_POLY', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['Log']:
print "Log"
log_clf_l2 = sklearn.linear_model.LogisticRegression(C=1, penalty='l2')
log_clf_l2.fit(scaled_train_X, train_y_reduced)
predicted_test_y = log_clf_l2.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append((subset_no,number_of_training, 'Log', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = log_clf_l2.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append((subset_no, number_of_training,'Log', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
# direct deep learning
finetune_lr = settings['finetune_lr']
batch_size = settings['batch_size']
pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
#pretrain_lr=0.001
pretrain_lr = settings['pretrain_lr']
training_epochs = cal_epochs(settings['training_interations'], x_train_minmax, batch_size = batch_size)
hidden_layers_sizes = settings['hidden_layers_sizes']
corruption_levels = settings['corruption_levels']
settings['lrate'] = settings['lrate_pre'] + str(training_epochs)
if settings['DL']:
print "direct deep learning"
sda = trainSda(x_train_minmax, y_train,
x_validation_minmax, y_validation,
x_test_minmax, test_y,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr, n_outs = n_outs
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
test_predicted = sda.predict(x_test_minmax)
isTest = True; #new
analysis_scr.append((subset_no,number_of_training, 'DL', isTest) + tuple(performance_score(y_test, test_predicted).values()))
training_predicted = sda.predict(x_train_minmax)
isTest = False; #new
analysis_scr.append((subset_no,number_of_training, 'DL', isTest) + tuple(performance_score(y_train, training_predicted).values()))
####transformed original data####
x = train_X_reduced
a_MAE_original = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
new_x_train_minmax_A = a_MAE_original.transform(train_X_reduced)
new_x_test_minmax_A = a_MAE_original.transform(x_test_minmax)
standard_scaler = preprocessing.StandardScaler().fit(new_x_train_minmax_A)
new_x_train_scaled = standard_scaler.transform(new_x_train_minmax_A)
new_x_test_scaled = standard_scaler.transform(new_x_test_minmax_A)
new_x_train_combo = np.hstack((scaled_train_X, new_x_train_scaled))
new_x_test_combo = np.hstack((scaled_test_X, new_x_test_scaled))
if settings['SAE_SVM']:
# SAE_SVM
print 'SAE followed by SVM'
Linear_SVC = LinearSVC(C=1, penalty="l2")
Linear_SVC.fit(new_x_train_scaled, train_y_reduced)
predicted_test_y = Linear_SVC.predict(new_x_test_scaled)
isTest = True; #new
analysis_scr.append(( subset_no, number_of_training,'SAE_SVM', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = Linear_SVC.predict(new_x_train_scaled)
isTest = False; #new
analysis_scr.append(( subset_no, number_of_training,'SAE_SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SAE_Log']:
print 'SAE followed by Log'
log_clf_l2 = sklearn.linear_model.LogisticRegression(C=1, penalty='l2')
log_clf_l2.fit(new_x_train_scaled, train_y_reduced)
predicted_test_y = log_clf_l2.predict(new_x_test_scaled)
isTest = True; #new
analysis_scr.append((subset_no,number_of_training, 'SAE_Log', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = log_clf_l2.predict(new_x_train_scaled)
isTest = False; #new
analysis_scr.append((subset_no, number_of_training,'SAE_Log', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SAE_SVM_RBF']:
# SAE_SVM
print 'SAE followed by SVM RBF'
L1_SVC_RBF_Selector = SVC(C=1, gamma=0.01, kernel='rbf').fit(new_x_train_scaled, train_y_reduced)
predicted_test_y = L1_SVC_RBF_Selector.predict(new_x_test_scaled)
isTest = True; #new
analysis_scr.append((subset_no, number_of_training, 'SAE_SVM_RBF', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(new_x_train_scaled)
isTest = False; #new
analysis_scr.append((subset_no, number_of_training, 'SAE_SVM_RBF', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SAE_SVM_POLY']:
# SAE_SVM
print 'SAE followed by SVM POLY'
L1_SVC_RBF_Selector = SVC(C=1, kernel='poly').fit(new_x_train_scaled, train_y_reduced)
predicted_test_y = L1_SVC_RBF_Selector.predict(new_x_test_scaled)
isTest = True; #new
analysis_scr.append((subset_no, number_of_training,'SAE_SVM_POLY', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(new_x_train_scaled)
isTest = False; #new
analysis_scr.append((subset_no, number_of_training, 'SAE_SVM_POLY', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
#### separated transformed data ####
y_test = test_y
print 'deep learning using split network'
# get the new representation for A set. first 784-D
pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
x = x_train_minmax[:, :x_train_minmax.shape[1]/2]
print "original shape for A", x.shape
a_MAE_A = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes = [x/2 for x in hidden_layers_sizes], corruption_levels=corruption_levels)
new_x_train_minmax_A = a_MAE_A.transform(x_train_minmax[:, :x_train_minmax.shape[1]/2])
x = x_train_minmax[:, x_train_minmax.shape[1]/2:]
print "original shape for B", x.shape
a_MAE_B = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes = [x/2 for x in hidden_layers_sizes], corruption_levels=corruption_levels)
new_x_train_minmax_B = a_MAE_B.transform(x_train_minmax[:, x_train_minmax.shape[1]/2:])
new_x_test_minmax_A = a_MAE_A.transform(x_test_minmax[:, :x_test_minmax.shape[1]/2])
new_x_test_minmax_B = a_MAE_B.transform(x_test_minmax[:, x_test_minmax.shape[1]/2:])
new_x_validation_minmax_A = a_MAE_A.transform(x_validation_minmax[:, :x_validation_minmax.shape[1]/2])
new_x_validation_minmax_B = a_MAE_B.transform(x_validation_minmax[:, x_validation_minmax.shape[1]/2:])
new_x_train_minmax_whole = np.hstack((new_x_train_minmax_A, new_x_train_minmax_B))
new_x_test_minmax_whole = np.hstack((new_x_test_minmax_A, new_x_test_minmax_B))
new_x_validationt_minmax_whole = np.hstack((new_x_validation_minmax_A, new_x_validation_minmax_B))
standard_scaler = preprocessing.StandardScaler().fit(new_x_train_minmax_whole)
new_x_train_minmax_whole_scaled = standard_scaler.transform(new_x_train_minmax_whole)
new_x_test_minmax_whole_scaled = standard_scaler.transform(new_x_test_minmax_whole)
if settings['DL_S']:
# deep learning using split network
sda_transformed = trainSda(new_x_train_minmax_whole, y_train,
new_x_validationt_minmax_whole, y_validation ,
new_x_test_minmax_whole, y_test,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
predicted_test_y = sda_transformed.predict(new_x_test_minmax_whole)
y_test = test_y
isTest = True; #new
analysis_scr.append((subset_no, number_of_training,'DL_S', isTest) + tuple(performance_score(y_test, predicted_test_y, with_auc_score).values()))
training_predicted = sda_transformed.predict(new_x_train_minmax_whole)
isTest = False; #new
analysis_scr.append((subset_no,number_of_training, 'DL_S', isTest) + tuple(performance_score(y_train, training_predicted, with_auc_score).values()))
if settings['SAE_S_SVM']:
print 'SAE_S followed by SVM'
Linear_SVC = LinearSVC(C=1, penalty="l2")
Linear_SVC.fit(new_x_train_minmax_whole_scaled, train_y_reduced)
predicted_test_y = Linear_SVC.predict(new_x_test_minmax_whole_scaled)
isTest = True; #new
analysis_scr.append(( subset_no, number_of_training,'SAE_S_SVM', isTest) + tuple(performance_score(test_y, predicted_test_y, with_auc_score).values())) #new
predicted_train_y = Linear_SVC.predict(new_x_train_minmax_whole_scaled)
isTest = False; #new
analysis_scr.append(( subset_no,number_of_training, 'SAE_S_SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y, with_auc_score).values()))
if settings['SAE_S_SVM_RBF']:
print 'SAE S followed by SVM RBF'
L1_SVC_RBF_Selector = SVC(C=1, gamma=0.01, kernel='rbf').fit(new_x_train_minmax_whole_scaled, train_y_reduced)
predicted_test_y = L1_SVC_RBF_Selector.predict(new_x_test_minmax_whole_scaled)
isTest = True; #new
analysis_scr.append((subset_no, number_of_training, 'SAE_S_SVM_RBF', isTest) + tuple(performance_score(test_y, predicted_test_y, with_auc_score).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(new_x_train_minmax_whole_scaled)
isTest = False; #new
analysis_scr.append((subset_no, number_of_training,'SAE_S_SVM_RBF', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y, with_auc_score).values()))
if settings['SAE_S_SVM_POLY']:
# SAE_SVM
print 'SAE S followed by SVM POLY'
L1_SVC_RBF_Selector = SVC(C=1, kernel='poly').fit(new_x_train_minmax_whole_scaled, train_y_reduced)
predicted_test_y = L1_SVC_RBF_Selector.predict(new_x_test_minmax_whole_scaled)
isTest = True; #new
analysis_scr.append((subset_no, number_of_training,'SAE_S_SVM_POLY', isTest) + tuple(performance_score(test_y, predicted_test_y, with_auc_score).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(new_x_train_minmax_whole_scaled)
isTest = False; #new
analysis_scr.append((subset_no, number_of_training,'SAE_S_SVM_POLY', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y, with_auc_score).values()))
settings['epoch_number'] = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
# deep xy autoencoders
settings['n_ins'] = x_train_minmax.shape[1]
if settings['DL_xy']:
cfg = settings.copy()
cfg['weight_y'] = 0.1
print 'DL_xy'
train_x = x_train_minmax; train_y = y_train_minmax
sdaf = Sda_xy_factory(cfg)
sdaf.sda.pretraining(train_x, train_y)
dnnf = DNN_factory(cfg)
dnnf.dnn.load_pretrain_from_Sda(sdaf.sda)
dnnf.dnn.finetuning((x_train_minmax, y_train_minmax),(x_validation_minmax, y_validation_minmax))
training_predicted = dnnf.dnn.predict(x_train_minmax)
y_train = y_train_minmax
isTest = False; #new
analysis_scr.append((subset_no, number_of_training, 'DL_xy', isTest) + tuple(performance_score(train_y_reduced, training_predicted, with_auc_score).values()))
test_predicted = dnnf.dnn.predict(x_test_minmax)
y_test = test_y
isTest = True; #new
analysis_scr.append((subset_no, number_of_training, 'DL_xy', isTest) + tuple(performance_score(test_y, test_predicted, with_auc_score).values()))
if settings['Sda_xy_with_first']:
cfg = settings.copy()
cfg['weight_y'] = 0.1
cfg['firstlayer_xy'] = 1
print 'firstlayer_xy'
train_x = x_train_minmax; train_y = y_train_minmax
sdaf = Sda_xy_factory(cfg)
sdaf.sda.pretraining(train_x, train_y)
dnnf = DNN_factory(cfg)
dnnf.dnn.load_pretrain_from_Sda(sdaf.sda)
dnnf.dnn.finetuning((x_train_minmax, y_train_minmax),(x_validation_minmax, y_validation_minmax))
training_predicted = dnnf.dnn.predict(x_train_minmax)
y_train = y_train_minmax
isTest = False; #new
analysis_scr.append((subset_no, number_of_training, 'Sda_xy_with_first', isTest) + tuple(performance_score(train_y_reduced, training_predicted, with_auc_score).values()))
test_predicted = dnnf.dnn.predict(x_test_minmax)
y_test = test_y
isTest = True; #new
analysis_scr.append((subset_no, number_of_training, 'Sda_xy_with_first', isTest) + tuple(performance_score(test_y, test_predicted, with_auc_score).values()))
if settings['Sda_new']:
print 'Sda_new'
cfg = settings.copy()
train_x = x_train_minmax; train_y = y_train_minmax
cfg['n_ins'] = train_x.shape[1]
sdaf = Sda_factory(cfg)
sda = sdaf.sda.pretraining(train_x = train_x)
sdaf.dnn.finetuning((x_train_minmax, y_train_minmax),(x_validation_minmax, y_validation_minmax))
training_predicted = sdaf.dnn.predict(x_train_minmax)
y_train = y_train_minmax
isTest = False; #new
analysis_scr.append((subset_no, number_of_training, 'Sda_new', isTest) + tuple(performance_score(train_y_reduced, training_predicted, with_auc_score).values()))
test_predicted = sdaf.dnn.predict(x_test_minmax)
y_test = test_y
isTest = True; #new
analysis_scr.append((subset_no, number_of_training, 'Sda_new', isTest) + tuple(performance_score(test_y, test_predicted, with_auc_score).values()))
if settings['DL_S_new']:
# deep learning using split network
print 'new deep learning using split network'
cfg = settings.copy()
p_sda = Parellel_Sda_factory(cfg)
p_sda.supervised_training(x_train_minmax, x_validation_minmax, y_train_minmax, y_validation_minmax)
isTest = False #new
training_predicted = p_sda.predict(x_train_minmax)
y_train = y_train_minmax
analysis_scr.append((subset_no, number_of_training, 'DL_S_new', isTest) + tuple(performance_score(train_y_reduced, training_predicted, with_auc_score).values()))
isTest = True #new
y_test = test_y
test_predicted = p_sda.predict(x_test_minmax)
analysis_scr.append((subset_no, number_of_training, 'DL_S_new', isTest) + tuple(performance_score(test_y, test_predicted, with_auc_score).values()))
if settings['DL_S_new_contraction']:
print 'DL_S_new_contraction'
cfg = settings.copy()
cfg['contraction_level'] = 0.01
p_sda = Parellel_Sda_factory(cfg)
p_sda.supervised_training(x_train_minmax, x_validation_minmax, y_train_minmax, y_validation_minmax)
isTest = False #new
training_predicted = p_sda.predict(x_train_minmax)
y_train = y_train_minmax
analysis_scr.append((subset_no, number_of_training, 'DL_S_new_contraction', isTest) + tuple(performance_score(train_y_reduced, training_predicted, with_auc_score).values()))
isTest = True #new
y_test = test_y
test_predicted = p_sda.predict(x_test_minmax)
analysis_scr.append((subset_no, number_of_training, 'DL_S_new_contraction', isTest) + tuple(performance_score(test_y, test_predicted, with_auc_score).values()))
if settings['DL_S_new_sparsity'] == 1:
print 'DL_S_new_sparsity'
cfg = settings.copy()
cfg['sparsity'] = 0.01
cfg['sparsity_weight'] = 0.01
p_sda = Parellel_Sda_factory(cfg)
p_sda.supervised_training(x_train_minmax, x_validation_minmax, y_train_minmax, y_validation_minmax)
isTest = False #new
training_predicted = p_sda.predict(x_train_minmax)
y_train = y_train_minmax
analysis_scr.append((subset_no, number_of_training, 'DL_S_new_sparsity', isTest) + tuple(performance_score(train_y_reduced, training_predicted, with_auc_score).values()))
isTest = True #new
y_test = test_y
test_predicted = p_sda.predict(x_test_minmax)
analysis_scr.append((subset_no, number_of_training, 'DL_S_new_sparsity', isTest) + tuple(performance_score(test_y, test_predicted, with_auc_score).values()))
if settings['DL_S_new_weight_decay'] == 2:
cfg = settings.copy()
cfg['l2_reg'] =0.01
print 'l2_reg'
p_sda = Parellel_Sda_factory(cfg)
p_sda.supervised_training(x_train_minmax, x_validation_minmax, y_train_minmax, y_validation_minmax)
isTest = False #new
training_predicted = p_sda.predict(x_train_minmax)
y_train = y_train_minmax
analysis_scr.append((subset_no, number_of_training, 'l2_reg', isTest) + tuple(performance_score(train_y_reduced, training_predicted, with_auc_score).values()))
isTest = True #new
y_test = test_y
test_predicted = p_sda.predict(x_test_minmax)
analysis_scr.append((subset_no, number_of_training, 'l2_reg', isTest) + tuple(performance_score(test_y, test_predicted, with_auc_score).values()))
if settings['DL_S_new_weight_decay'] == 1:
print 'l1_reg'
cfg = settings.copy()
cfg['l1_reg'] =0.01
p_sda = Parellel_Sda_factory(cfg)
p_sda.supervised_training(x_train_minmax, x_validation_minmax, y_train_minmax, y_validation_minmax)
isTest = False #new
training_predicted = p_sda.predict(x_train_minmax)
y_train = y_train_minmax
analysis_scr.append((subset_no, number_of_training, 'l1_reg', isTest) + tuple(performance_score(train_y_reduced, training_predicted, with_auc_score).values()))
isTest = True #new
y_test = test_y
test_predicted = p_sda.predict(x_test_minmax)
analysis_scr.append((subset_no, number_of_training, 'l1_reg', isTest) + tuple(performance_score(test_y, test_predicted, with_auc_score).values()))
if settings['DL_S_new_Drop_out'] == 1:
cfg = settings.copy()
cfg['dropout_factor'] = 0.5
print 'DL_S_new_Drop_out'
p_sda = Parellel_Sda_factory(cfg)
p_sda.supervised_training(x_train_minmax, x_validation_minmax, y_train_minmax, y_validation_minmax)
isTest = False #new
training_predicted = p_sda.predict(x_train_minmax)
y_train = y_train_minmax
analysis_scr.append((subset_no, number_of_training, 'DL_S_new_Drop_out', isTest) + tuple(performance_score(train_y_reduced, training_predicted, with_auc_score).values()))
isTest = True #new
y_test = test_y
test_predicted = p_sda.predict(x_test_minmax)
analysis_scr.append((subset_no, number_of_training, 'DL_S_new_Drop_out', isTest) + tuple(performance_score(test_y, test_predicted, with_auc_score).values()))
report_name = 'DL_handwritten_digits' + '_size_'.join(map(str, hidden_layers_sizes)) + \
'_' + str(pretrain_lr) + '_' + str(finetune_lr) + '_' + \
'_' + str(settings['pretraining_interations']) + '_' + current_date
saveAsCsv(with_auc_score, report_name, performance_score(test_y, predicted_test_y, with_auc_score), analysis_scr)
return sda, a_MAE_original, a_MAE_A, a_MAE_B, analysis_scr
# In[9]:
sda, a_MAE_original, a_MAE_A, a_MAE_B, analysis_scr = run_models(cfg)
# In[ ]:
# In[31]:
'''
weights_map_to_input_space = []
StackedNNobject = sda
image_dimension_x = 28*2
image_dimension_y = 28
if isinstance(StackedNNobject, SdA) or isinstance(StackedNNobject, MultipleAEs):
weights_product = StackedNNobject.dA_layers[0].W.get_value(borrow=True)
image = PIL.Image.fromarray(tile_raster_images(
X=weights_product.T,
img_shape=(image_dimension_x, image_dimension_y), tile_shape=(10, 10),
tile_spacing=(1, 1)))
sample_image_path = 'hidden_0_layer_weights.png'
image.save(sample_image_path)
weights_map_to_input_space.append(weights_product)
for i_layer in range(1, len(StackedNNobject.dA_layers)):
i_weigths = StackedNNobject.dA_layers[i_layer].W.get_value(borrow=True)
weights_product = np.dot(weights_product, i_weigths)
weights_map_to_input_space.append(weights_product)
image = PIL.Image.fromarray(tile_raster_images(
X=weights_product.T,
img_shape=(image_dimension_x, image_dimension_y), tile_shape=(10, 10),
tile_spacing=(1, 1)))
sample_image_path = 'hidden_'+ str(i_layer)+ '_layer_weights.png'
image.save(sample_image_path)
'''
# In[18]:
| gpl-2.0 |
soylentdeen/cuddly-weasel | MoogMCMC/MoogMCMC_v3.py | 1 | 3010 | '''
Free Parameters:
B = B_field in kG, allowed to vary between 0.0 and 4.0
T = T_eff in K, allowed to vary between 3000.0 and 5000.0
logg = logg in dex, allowed to vary between 3.0 and 5.0
v = velocity shift (RV + BVC) in km/s, allowed to vary between -60.0 and 60.0
maybe veiling and continuum?
'''
import Moog961
import numpy as np
import emcee
import corner
import time
import matplotlib.pyplot as plt
b_true=1.62
t_true=4570
g_true=3.09
v_true=0.0
fig1 = plt.figure(0)
fig1.clear()
ax1 = fig1.add_axes([0.1, 0.1, 0.8, 0.8])
def lnprior(theta): #Priors for Bayesian Model, assuming flat priors
B, T, logg, v = theta
if 0.0 < B < 4.0 and 3000.0 < T < 5000.0 and 3.0 < logg < 5.0 and -60.0 < v <60.0:
return 0.0
return -np.inf
def lnlike(theta): #Log-likelihood for the posterior, using chi-squared
B, T, logg, v = theta
retval = Score.calc_lnlike(Bfield=B, Teff=T, logg=logg, rv=v, ax=None)
#synth = #casey's code to call the synth model
    #return -0.5 * np.sum(((obs - synth) / obs) ** 2.)
return retval
def lnprob(theta): #Log-posterior (prior + likelihood) used by the sampler when accepting or rejecting a proposed point.
lp = lnprior(theta)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(theta)
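# Note: lnprob(theta) is the log-posterior handed to emcee's EnsembleSampler
# below; the sampler evaluates it at each proposed theta = (B, T, logg, v) and
# a return value of -np.inf simply rejects points outside the prior bounds.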
Score = Moog961.Score(directory='../MusicMaker/vsini_10/Model_T4*', suffix='', observed='../Tutorial/Blended/Model_T4570_G3.09_B1.62.fits')
mastered = Score.master()
#combine observed phrases into a master phrase
compositeObservedSpectrum, compositeObservedLabel = Score.listen()
blended, blendedLabel = Score.blend(desiredParameters={"TEFF":4570, "LOGG":3.09, "BFIELD":1.62})
compositeObservedLabel.Spectrum.plot(ax=ax1)
blendedLabel[0].Spectrum.plot(ax=ax1)
fig1.show()
raw_input()
"""
#MCMC parameters
nwalkers = 8 #number of walkers used in fitting, must be even and >= 2x ndim
ndim = 4 #number of parameters being fit
#Initialize the chain
pos_min = np.array([1.62, 4570.0, 3.09, 0.0])
pos_max = np.array([1.62, 4570.0, 3.09, 0.0])
psize = pos_max - pos_min
pos = [pos_min + psize*np.random.rand(ndim) for i in range(nwalkers)]
#Setup ensemble sampler from emcee
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=())
#Perform and time burn-in phase
print 'Running first burn-in.'
time0 = time.time()
pos, prob, state = sampler.run_mcmc(pos, 1)
sampler.reset()
time1=time.time()
print 'First burn-in finished, '+str(time1-time0)+' seconds elapsed.'
print 'Running second burn-in.'
p=pos[np.argmax(prob)]
pos=[p+(psize/100)*np.random.randn(ndim) for i in xrange(nwalkers)]
pos, prob, state = sampler.run_mcmc(pos, 1)
sampler.reset()
time2=time.time()
print 'Second burn-in finished, '+str(time2-time1)+' seconds elapsed.'
print 'Burn-in time was '+str(time2-time0)+' seconds.'
#Perform MCMC fit
time0 = time.time()
pos, prob, state = sampler.run_mcmc(pos, 1)
time1=time.time()
print 'Fitting time was '+str(time1-time0)+' seconds.'
samples = sampler.flatchain
fig =corner.corner(samples, labels=["$B$","$T_{eff}$","$logg$","$RV$"], truths=[b_true,t_true,g_true,v_true])
fig.savefig('test_2.png')
"""
| mit |
JanNash/sms-tools | lectures/03-Fourier-properties/plots-code/convolution-2.py | 24 | 1259 | import matplotlib.pyplot as plt
import numpy as np
from scipy.fftpack import fft, fftshift
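# Illustrates the windowing/convolution theorem: the DFT of the product x1*x2
# (bottom-left panel) matches the circular convolution of the individual
# spectra X1 and X2 (bottom-right panel), up to a 1/N scale factor that the
# fixed dB offset below compensates for.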
plt.figure(1, figsize=(9.5, 7))
M = 64
N = 64
x1 = np.hanning(M)
x2 = np.cos(2*np.pi*2/M*np.arange(M))
y1 = x1*x2
mY1 = 20 * np.log10(np.abs(fftshift(fft(y1, N))))
plt.subplot(3,2,1)
plt.title('x1 (hanning)')
plt.plot(np.arange(-M/2, M/2), x1, 'b', lw=1.5)
plt.axis([-M/2,M/2,0,1])
plt.subplot(3,2,2)
plt.title('x2 (cosine)')
plt.plot(np.arange(-M/2, M/2),x2, 'b', lw=1.5)
plt.axis([-M/2,M/2,-1,1])
mX1 = 20 * np.log10(np.abs(fftshift(fft(x1, M)))/M)
plt.subplot(3,2,3)
plt.title('X1')
plt.plot(np.arange(-N/2, N/2),mX1, 'r', lw=1.5)
plt.axis([-N/2,N/2,-80,max(mX1)])
mX2 = 20 * np.log10(np.abs(fftshift(fft(x2, M)))/M)
plt.subplot(3,2,4)
plt.title('X2')
plt.plot(np.arange(-N/2, N/2),mX2, 'r', lw=1.5)
plt.axis([-N/2,N/2,-80,max(mX2)])
plt.subplot(3,2,5)
plt.title('DFT(x1 x x2)')
plt.plot(np.arange(-N/2, N/2),mY1, 'r', lw=1.5)
plt.axis([-N/2,N/2,-80,max(mY1)])
Y2 = np.convolve(fftshift(fft(x1, M)), fftshift(fft(x2, M)))
mY2 = 20 * np.log10(np.abs(Y2)) - 40
plt.subplot(3,2,6)
plt.title('X1 * X2')
plt.plot(np.arange(-N/2, N/2),mY2[M/2:M+M/2], 'r', lw=1.5)
plt.axis([-N/2,N/2,-80,max(mY2)])
plt.tight_layout()
plt.savefig('convolution-2.png')
plt.show()
| agpl-3.0 |
stggh/PyZeroCore | doc/conf.py | 1 | 10657 | # -*- coding: utf-8 -*-
#
# PyZeroCore documentation build configuration file, created by
# sphinx-quickstart on Wed Jan 13 17:11:12 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import six
import os
import shlex
import sphinx_rtd_theme
from recommonmark.parser import CommonMarkParser
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
# Scipy and Numpy packages cannot be installed on readthedocs.io
# https://read-the-docs.readthedocs.io/en/latest/faq.html#i-get-import-errors-on-libraries-that-depend-on-c-modules
if six.PY3:
from unittest.mock import MagicMock
else:
from mock import Mock as MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
# MOCK_MODULES = ['numpy', 'scipy', 'scipy.special', 'numpy.linalg', 'scipy.ndimage', 'scipy.ndimage.interpolation',
# 'scipy.linalg', 'scipy.integrate', 'scipy.optimize']
#
MOCK_MODULES = []
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
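# Every name listed in MOCK_MODULES is replaced with a Mock in sys.modules, so
# sphinx autodoc can import project code that depends on compiled extensions
# even when those packages are unavailable on the documentation build host.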
source_parsers = {
'.md': CommonMarkParser,
}
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'matplotlib.sphinxext.plot_directive'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = ['.rst', '.md']
# The encoding of source files.
source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'PyZeroCore'
copyright = '2017, PyZeroCore team'
author = 'PyZeroCore team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
highlight_language = "python3"
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyZeroCoredoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PyZeroCore.tex', 'PyZeroCore Documentation',
'PyZeroCore team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pyzerocore', 'PyZeroCore Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PyZeroCore', 'PyZeroCore Documentation',
author, 'PyZeroCore', 'Python implementation of Stehman and Woo\'s zero-core contribution model', 'photodetachment spectra'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
required_symlinks = [
('examples', '../examples/')
]
autodoc_member_order = 'bysource'
# supress the "nonlocal image URI found" warnings
import sphinx.environment
from docutils.utils import get_source_line
def _warn_node(self, msg, node, **kwargs):
if not msg.startswith('nonlocal image URI found:'):
self._warnfunc(msg, '%s:%s' % get_source_line(node))
sphinx.environment.BuildEnvironment.warn_node = _warn_node
| gpl-3.0 |
tkarna/cofs | test/tracerEq/test_h-advection_mes.py | 2 | 7568 | """
Testing 3D horizontal advection of tracers
"""
from thetis import *
import numpy
from scipy import stats
import pytest
def run(refinement, **model_options):
print_output('--- running refinement {:}'.format(refinement))
# domain dimensions - channel in x-direction
lx = 15.0e3
ly = 6.0e3/refinement
area = lx*ly
depth = 40.0
u = 1.0
# mesh
n_layers = 3*refinement
nx = 6*refinement + 1
ny = 1 # constant -- channel
mesh2d = RectangleMesh(nx, ny, lx, ly)
# simulation run time
t_end = 3000.0
# initial time
t_init = 0.0
t_export = (t_end - t_init)/8.0
# outputs
outputdir = 'outputs'
# bathymetry
p1_2d = get_functionspace(mesh2d, 'CG', 1)
bathymetry_2d = Function(p1_2d, name='Bathymetry')
bathymetry_2d.assign(depth)
solverobj = solver.FlowSolver(mesh2d, bathymetry_2d, n_layers)
options = solverobj.options
options.use_nonlinear_equations = False
options.use_ale_moving_mesh = False
options.use_lax_friedrichs_velocity = True
options.lax_friedrichs_velocity_scaling_factor = Constant(1.0)
options.use_lax_friedrichs_tracer = False
options.horizontal_velocity_scale = Constant(abs(u))
options.no_exports = True
options.output_directory = outputdir
options.simulation_end_time = t_end
options.simulation_export_time = t_export
options.solve_salinity = True
options.use_implicit_vertical_diffusion = False
options.use_bottom_friction = False
options.use_limiter_for_tracers = True
options.fields_to_export = ['salt_3d']
options.update(model_options)
uv_expr = as_vector((u, 0, 0))
bnd_salt_3d = {'value': Constant(0.0), 'uv': uv_expr}
bnd_uv_3d = {'uv': uv_expr}
solverobj.bnd_functions['salt'] = {
1: bnd_salt_3d,
2: bnd_salt_3d,
}
solverobj.bnd_functions['momentum'] = {
1: bnd_uv_3d,
2: bnd_uv_3d,
}
solverobj.create_equations()
t = t_init # simulation time
x0 = 0.3*lx
sigma = 1600.
xyz = SpatialCoordinate(solverobj.mesh)
t_const = Constant(t)
ana_salt_expr = exp(-(xyz[0] - x0 - u*t_const)**2/sigma**2)
salt_ana = Function(solverobj.function_spaces.H, name='salt analytical')
salt_ana_p1 = Function(solverobj.function_spaces.P1, name='salt analytical')
p1dg_ho = get_functionspace(solverobj.mesh, 'DG',
options.polynomial_degree + 2, vfamily='DG',
vdegree=options.polynomial_degree + 2)
salt_ana_ho = Function(p1dg_ho, name='salt analytical')
uv_init = Function(solverobj.function_spaces.U, name='initial uv')
uv_init.project(uv_expr)
solverobj.assign_initial_conditions(uv_3d=uv_init, salt=ana_salt_expr)
# export analytical solution
if not options.no_exports:
out_salt_ana = File(os.path.join(options.output_directory, 'salt_ana.pvd'))
def export_func():
if not options.no_exports:
solverobj.export()
# update analytical solution to correct time
t_const.assign(t)
salt_ana.project(ana_salt_expr)
out_salt_ana.write(salt_ana_p1.project(salt_ana))
# export initial conditions
export_func()
# custom time loop that solves tracer equation only
ti = solverobj.timestepper.timesteppers.salt_expl
i = 0
iexport = 1
next_export_t = t + solverobj.options.simulation_export_time
while t < t_end - 1e-8:
ti.advance(t)
t += solverobj.dt
i += 1
if t >= next_export_t - 1e-8:
print_output('{:3d} i={:5d} t={:8.2f} s salt={:8.2f}'.format(iexport, i, t, norm(solverobj.fields.salt_3d)))
export_func()
next_export_t += solverobj.options.simulation_export_time
iexport += 1
# project analytical solution on high order mesh
t_const.assign(t)
salt_ana_ho.project(ana_salt_expr)
# compute L2 norm
l2_err = errornorm(salt_ana_ho, solverobj.fields.salt_3d)/numpy.sqrt(area)
print_output('L2 error {:.12f}'.format(l2_err))
return l2_err
def run_convergence(ref_list, saveplot=False, **options):
"""Runs test for a list of refinements and computes error convergence rate"""
polynomial_degree = options.get('polynomial_degree', 1)
l2_err = []
for r in ref_list:
l2_err.append(run(r, **options))
x_log = numpy.log10(numpy.array(ref_list, dtype=float)**-1)
y_log = numpy.log10(numpy.array(l2_err))
    setup_name = 'h-advection'
def check_convergence(x_log, y_log, expected_slope, field_str, saveplot):
slope_rtol = 0.20
slope, intercept, r_value, p_value, std_err = stats.linregress(x_log, y_log)
if saveplot:
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(5, 5))
# plot points
ax.plot(x_log, y_log, 'k.')
x_min = x_log.min()
x_max = x_log.max()
offset = 0.05*(x_max - x_min)
npoints = 50
xx = numpy.linspace(x_min - offset, x_max + offset, npoints)
yy = intercept + slope*xx
# plot line
ax.plot(xx, yy, linestyle='--', linewidth=0.5, color='k')
ax.text(xx[2*int(npoints/3)], yy[2*int(npoints/3)], '{:4.2f}'.format(slope),
verticalalignment='top',
horizontalalignment='left')
ax.set_xlabel('log10(dx)')
ax.set_ylabel('log10(L2 error)')
ax.set_title(' '.join([setup_name, field_str, 'degree={:}'.format(polynomial_degree)]))
ref_str = 'ref-' + '-'.join([str(r) for r in ref_list])
degree_str = 'o{:}'.format(polynomial_degree)
imgfile = '_'.join(['convergence', setup_name, field_str, ref_str, degree_str])
imgfile += '.png'
imgdir = create_directory('plots')
imgfile = os.path.join(imgdir, imgfile)
print_output('saving figure {:}'.format(imgfile))
plt.savefig(imgfile, dpi=200, bbox_inches='tight')
if expected_slope is not None:
err_msg = '{:}: Wrong convergence rate {:.4f}, expected {:.4f}'.format(setup_name, slope, expected_slope)
assert slope > expected_slope*(1 - slope_rtol), err_msg
print_output('{:}: convergence rate {:.4f} PASSED'.format(setup_name, slope))
else:
print_output('{:}: {:} convergence rate {:.4f}'.format(setup_name, field_str, slope))
return slope
check_convergence(x_log, y_log, polynomial_degree+1, 'salt', saveplot)
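# The short sketch below is illustrative only and is not used by the tests: it
# shows how the log10(error) vs log10(dx) regression in check_convergence
# recovers the convergence rate when the error decays as dx**expected_slope.
def _demo_expected_slope(expected_slope=2.0):
    """Minimal sketch: synthetic errors err = 0.5*dx**p give back slope ~= p."""
    dx = 1.0/numpy.array([1, 2, 3, 4], dtype=float)
    err = 0.5*dx**expected_slope
    slope, intercept, r_value, p_value, std_err = stats.linregress(
        numpy.log10(dx), numpy.log10(err))
    return slope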
# ---------------------------
# standard tests for pytest
# ---------------------------
@pytest.fixture(params=[1])
def polynomial_degree(request):
return request.param
@pytest.mark.parametrize(('stepper', 'use_ale'),
[('LeapFrog', True),
('SSPRK22', True)])
def test_horizontal_advection(polynomial_degree, stepper, use_ale):
run_convergence([1, 2, 3], polynomial_degree=polynomial_degree,
timestepper_type=stepper,
use_ale_moving_mesh=use_ale)
# ---------------------------
# run individual setup for debugging
# ---------------------------
if __name__ == '__main__':
run_convergence([1, 2, 3], polynomial_degree=0,
warped_mesh=True,
element_family='dg-dg',
timestepper_type='SSPRK22',
use_ale_moving_mesh=True,
no_exports=False, saveplot=True)
| mit |
mlperf/training_results_v0.5 | v0.5.0/google/cloud_v3.8/gnmt-tpuv3-8/code/gnmt/model/t2t/setup.py | 3 | 2648 | """Install tensor2tensor."""
from setuptools import find_packages
from setuptools import setup
setup(
name='tensor2tensor',
version='1.11.0',
description='Tensor2Tensor',
author='Google Inc.',
author_email='[email protected]',
url='http://github.com/tensorflow/tensor2tensor',
license='Apache 2.0',
packages=find_packages(),
package_data={
'tensor2tensor.data_generators': ['test_data/*'],
'tensor2tensor.data_generators.wikisum': ['test_data/*'],
'tensor2tensor.visualization': [
'attention.js', 'TransformerVisualization.ipynb'
],
},
scripts=[
'tensor2tensor/bin/t2t-trainer',
'tensor2tensor/bin/t2t-datagen',
'tensor2tensor/bin/t2t-decoder',
'tensor2tensor/bin/t2t-make-tf-configs',
'tensor2tensor/bin/t2t-exporter',
'tensor2tensor/bin/t2t-query-server',
'tensor2tensor/bin/t2t-insights-server',
'tensor2tensor/bin/t2t-avg-all',
'tensor2tensor/bin/t2t-bleu',
'tensor2tensor/bin/t2t-translate-all',
],
install_requires=[
'bz2file',
'dopamine-rl',
'flask',
'future',
'gevent',
'google-api-python-client',
'gunicorn',
'gym',
'h5py',
'mesh-tensorflow',
'numpy',
'oauth2client',
'opencv-python',
'requests',
'scipy',
'six',
'sympy',
'tensorflow-probability',
'tfds-nightly',
'tqdm',
],
extras_require={
'tensorflow': ['tensorflow>=1.12.0'],
'tensorflow_gpu': ['tensorflow-gpu>=1.12.0'],
'tensorflow-hub': ['tensorflow-hub>=0.1.1'],
'tests': [
'absl-py',
'pytest>=3.8.0',
'mock',
'pylint',
'jupyter',
'gsutil',
'matplotlib',
# Need atari extras for Travis tests, but because gym is already in
# install_requires, pip skips the atari extras, so we instead do an
# explicit pip install gym[atari] for the tests.
# 'gym[atari]',
],
'allen': ['Pillow==5.1.0', 'pandas==0.23.0'],
},
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
dependency_links=[
'git+https://github.com/tensorflow/cleverhans.git#egg=cleverhans'
],
keywords='tensorflow machine learning',
)
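# Usage sketch (not part of this setup script): with the extras_require entries
# above, a TensorFlow backend and the test dependencies would typically be
# pulled in via pip extras, e.g.
#   pip install tensor2tensor[tensorflow]
#   pip install tensor2tensor[tensorflow_gpu]
#   pip install tensor2tensor[tests]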
| apache-2.0 |
baklanovp/pystella | tests/test_lc.py | 1 | 10556 | import numpy as np
import unittest
import pystella as ps
__author__ = 'bakl'
def lc_create(bname, m=-19, tbeg=0., tend=200., n=10, is_err=False):
time = np.linspace(0. + tbeg, tend + tbeg, n)
mags = m * np.linspace(0.1, 1., n)
band = ps.Band(bname)
if is_err:
errs = m * np.linspace(0.01, 0.3, n)
return ps.LightCurve(band, time, mags, errs)
else:
return ps.LightCurve(band, time, mags)
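# Example (illustrative only):
#   lc = lc_create('V', m=-19, n=5)          # 5-point light curve in band 'V'
#   lc_e = lc_create('V', n=5, is_err=True)  # same, with magnitude errors attached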
class TestLightCurve(unittest.TestCase):
def test_BandName(self):
band = 'U'
lc = lc_create(band)
self.assertEqual(band, lc.Band.Name,
msg="It should be equal band names.\n \
Now band is %s but lc.Band.Name is %s." % (band, lc.Band.Name))
def test_LC_interp(self):
lc = lc_create('U', tbeg=0.)
time = np.linspace(10, 50, 5)
lc_interp = ps.rf.lc.LC_interp(lc, time)
self.assertEqual(len(time), lc_interp.Length, msg='The length of Interp LC should be equal len(time)')
# plot
from matplotlib import pyplot as plt
fig, ax = plt.subplots(1, 1)
ax.plot(lc.Time, lc.Mag, label='Origin')
ax.plot(lc_interp.Time, lc_interp.Mag, marker='o', label='Interpolated')
ax.invert_yaxis()
ax.legend()
plt.show()
    def test_lc_leastsq(self):
        # NOTE: this is a stub -- the least-squares time-shift fit itself is not
        # exercised here; we only build a shifted pair of curves and sanity-check it.
        dt_init = 10.  # intended initial guess for the time shift
        lc1 = lc_create('U', tbeg=0.)
        lc2 = lc_create('U', tbeg=dt_init)
        self.assertEqual(lc1.Length, lc2.Length,
                         msg='Both light curves should have the same number of points')
def test_lc_copy(self):
ps.band.Band.load_settings()
lc1 = lc_create('U', tbeg=0.)
lc2 = lc1.copy()
self.assertEqual(lc1.Length, lc2.Length, msg='The length of copy should be equal the original length')
self.assertEqual(lc1.Band.Name, lc2.Band.Name, msg='The lc of copy should be equal the original lc')
np.testing.assert_array_equal(lc1.Time, lc2.Time)
np.testing.assert_array_equal(lc1.Mag, lc2.Mag)
def test_lc_copy_filter(self):
tlim = (10., 99.)
ps.band.Band.load_settings()
# Time
lc1 = lc_create('V', m=-19, tbeg=1., tend=200., n=10, is_err=False)
lc2 = lc1.copy(f=lambda x: (tlim[0] <= x.Time) & (x.Time <= tlim[1]))
self.assertGreater(lc1.Length, lc2.Length, msg='The length of copy should be equal the original length')
self.assertEqual(lc1.Band.Name, lc2.Band.Name, msg='The lc of copy should be equal the original lc')
self.assertTrue(np.any(lc2.Time >= tlim[0]), msg='The lc.Time should be greater the lower limit')
self.assertTrue(np.any(lc2.Time <= tlim[1]), msg='The lc.Time should be smaller the lower limit')
# Magnitude
maglim = (-18., -10.)
lc3 = lc1.copy(f=lambda x: (maglim[0] <= x.Mag) & (x.Mag <= maglim[1]))
self.assertGreater(lc1.Length, lc3.Length, msg='The length of copy should be equal the original length')
self.assertEqual(lc1.Band.Name, lc3.Band.Name, msg='The lc of copy should be equal the original lc')
self.assertTrue(np.any(lc3.Mag >= maglim[0]), msg='The lc.Mag should be greater the lower limit')
self.assertTrue(np.any(lc3.Mag <= maglim[1]), msg='The lc.Mag should be smaller the lower limit')
def test_lc_clone(self):
lc1 = lc_create('U', tbeg=0.)
lc2, tshift, mshift = lc1.clone()
self.assertEqual(lc1.Length, lc2.Length, msg='The length of clone should be equal the original length')
self.assertEqual(lc1.Band.Name, lc2.Band.Name, msg='The band of clone should be equal the original band')
np.testing.assert_array_equal(lc1.Time, lc2.Time)
np.testing.assert_array_equal(lc1.Mag, lc2.Mag)
def test_lc_clone_add_err(self):
lc1 = lc_create('U', tbeg=0.)
err = [1] * lc1.Length
lc2, tshift, mshift = lc1.clone(err=err)
self.assertEqual(lc1.Length, lc2.Length, msg='The length of clone should be equal the original length')
np.testing.assert_array_equal(err, lc2.MagErr)
np.testing.assert_array_equal(lc1.Mag, lc2.Mag)
def test_lc_bol(self):
import matplotlib.pyplot as plt
from scipy.integrate import simps
m1 = ps.Stella('cat_R500_M15_Ni006_E12', path='data/stella')
curves = m1.curves(bands=['bol'], t_diff=1.0000001)
# ax = ps.light_curve_plot.curves_plot(curves, xlim=(0.7, 1), ylim=(-14, -24), is_line=False)
ax = ps.lcp.curves_plot(curves, xlim=(-10, 155), ylim=(-14, -24), is_line=False)
# tt
tt1 = m1.get_tt().load()
t = tt1['time']
ax.plot(t, tt1['Mbol'], label='tt-bolometric LC ', color='red', lw=2, ls=':')
# ph
if True:
ph = m1.get_ph()
m_bol = []
for t, spec in ph:
lum = simps(spec.Flux[::-1], spec.Freq[::-1])
bol = 4.75 - 2.5 * np.log10(np.abs(lum) / 3.86e33)
m_bol.append(bol)
ax.plot(ph.Time, m_bol, label='ph-bolometric LC ', color='green', lw=2, ls='-.')
ax.legend()
plt.show()
import warnings
warnings.warn("Should be check for shorck breakout")
def test_bol_Uni(self):
import matplotlib.pyplot as plt
m1 = ps.Stella('cat_R500_M15_Ni006_E12', path='data/stella')
fig, ax = plt.subplots()
# Bol
curves1 = m1.curves(bands=['bol'], wlrange=(1e0, 42.), is_nfrus=False)
for lc in curves1:
color = 'blue'
ax.plot(lc.Time, lc.Mag, label=lc.Band.Name, color=color, linewidth=2, ls='--')
band03kEv = ps.BandUni(name='bol', wlrange=(1e0, 42.), length=300)
wl_ab = np.min(band03kEv.wl2args), np.max(band03kEv.wl2args)
curves2 = m1.curves(bands=[band03kEv], is_nfrus=False, wl_ab=wl_ab)
for lc in curves2:
color = 'red'
ax.plot(lc.Time, lc.Mag, label=lc.Band.Name, color=color, linewidth=2, ls=':')
ax.invert_yaxis()
#
ax.legend()
# ax.set_ylim(-14, -24)
plt.show()
import warnings
warnings.warn("Should be check for shorck breakout")
class TestSetLightCurve(unittest.TestCase):
def test_SetLightCurve_BandNames(self):
bands = ['U', 'B', 'V']
curves = ps.SetLightCurve()
for b in bands:
curves.add(lc_create(b))
        self.assertCountEqual(bands, curves.BandNames,
                              msg="Error for band names.\nInitial bands are %s but curves.BandNames is %s."
                              % (' '.join(bands), ' '.join(curves.BandNames)))
def test_SetLightCurve_save_true(self):
bands = ['U', 'B', 'V']
curves = ps.SetLightCurve()
for b in bands:
curves.add(lc_create(b))
res = ps.curves_save(curves, 'tmp_curves')
self.assertTrue(res, msg="Error: curves_save should return True")
def test_SetLightCurve_save_read(self):
bands = ['U', 'B', 'V']
curves = ps.SetLightCurve()
for b in bands:
curves.add(lc_create(b))
ps.curves_save(curves, 'tmp_curves')
read = ps.curves_read('tmp_curves')
self.assertTrue((np.array(curves.BandNames == read.BandNames)).all(),
msg="Error for the initial band names [%s] "
"VS secondary BandNames are %s."
% (' '.join(curves.BandNames), ' '.join(read.BandNames)))
self.assertTrue(np.allclose(curves.TimeCommon, read.TimeCommon),
msg="Error for the initial TimeCommon of Bands.\n \
Now band were %s but BandNames are %s."
% (' '.join(curves.BandNames), ' '.join(read.BandNames)))
# todo correct testing
# self.assertSequenceEqual(curves.TimeCommon, read.TimeCommon, msg="The time columns are not equal")
def test_SetLightCurve_save_true_with_errors(self):
bands = ['U', 'B', 'V']
curves = ps.SetLightCurve()
for b in bands:
curves.add(lc_create(b, is_err=True))
curves.add(lc_create('I'))
res = ps.curves_save(curves, 'tmp_curves')
self.assertTrue(res, msg="Error: curves_save should return True")
def test_SetLightCurve_save_NoIsCommonTime(self):
bands = ['U', 'B', 'V']
curves = ps.SetLightCurve()
for b in bands:
curves.add(lc_create(b))
curves.add(lc_create('TimeDif', tbeg=1.))
res = ps.curves_save(curves, 'tmp_curves_2')
self.assertTrue(res, msg="Error: curves_save should return True")
def test_SetLightCurve_copy_tmlim(self):
ps.band.Band.load_settings()
bands = ['U', 'B', 'V']
curves = ps.SetLightCurve()
for b in bands:
curves.add(lc_create(b, m=-19, tbeg=0., tend=200., n=10, is_err=False))
curves.add(lc_create('R', tbeg=1.))
tlim = (10, 99)
mlim = (10, -18)
curves_cut = curves.copy_tmlim(tlim=tlim, mlim=mlim)
self.assertTrue(curves_cut.TimeMin >= tlim[0])
self.assertTrue(curves_cut.TimeMax <= tlim[1])
def test_SetLightCurve_clone_add_err(self):
bands = ['U', 'B', 'V']
bname = bands[1]
curves = ps.SetLightCurve()
for b in bands:
curves.add(lc_create(b))
curves.add(lc_create('TimeDif', tbeg=1.))
lc = curves[bname]
# Time
t = np.ones(lc.Length)
curves_clone = curves.clone(t=t)
self.assertEqual(curves_clone.Length, curves.Length,
                         msg=f'The length of the clone {curves_clone.Length} should equal the original length {curves.Length}')
lc_clone = curves_clone[bname]
np.testing.assert_array_equal(t, lc_clone.Time)
# Mag
mag = np.ones(lc.Length)
curves_clone = curves.clone(m=mag)
self.assertEqual(curves_clone.Length, curves.Length,
                         msg=f'The length of the clone {curves_clone.Length} should equal the original length {curves.Length}')
lc_clone = curves_clone[bname]
np.testing.assert_array_equal(mag, lc_clone.Mag)
# Err
err = np.ones(lc.Length)
curves_clone = curves.clone(err=err)
self.assertEqual(curves_clone.Length, curves.Length,
                         msg=f'The length of the clone {curves_clone.Length} should equal the original length {curves.Length}')
lc_clone = curves_clone[bname]
np.testing.assert_array_equal(err, lc_clone.MagErr)
def main():
unittest.main()
if __name__ == '__main__':
main()
| mit |
wesm/statsmodels | scikits/statsmodels/examples/tut_ols.py | 1 | 2850 | '''Examples OLS
Note: uncomment plt.show() to display graphs
'''
import numpy as np
#from scipy import stats
import scikits.statsmodels.api as sm
import matplotlib
#matplotlib.use('Qt4Agg')#, warn=True) #for Spyder
import matplotlib.pyplot as plt
from scikits.statsmodels.sandbox.regression.predstd import wls_prediction_std
#fix a seed for these examples
np.random.seed(9876789)
# OLS non-linear curve but linear in parameters
# ---------------------------------------------
nsample = 50
sig = 0.5
x1 = np.linspace(0, 20, nsample)
X = np.c_[x1, np.sin(x1), (x1-5)**2, np.ones(nsample)]
beta = [0.5, 0.5, -0.02, 5.]
y_true = np.dot(X, beta)
y = y_true + sig * np.random.normal(size=nsample)
plt.figure()
plt.plot(x1, y, 'o', x1, y_true, 'b-')
res = sm.OLS(y, X).fit()
print res.params
print res.bse
#current bug predict requires call to model.results
#print res.model.predict
prstd, iv_l, iv_u = wls_prediction_std(res)
plt.plot(x1, res.fittedvalues, 'r--.')
plt.plot(x1, iv_u, 'r--')
plt.plot(x1, iv_l, 'r--')
plt.title('blue: true, red: OLS')
print res.summary()
#OLS with dummy variables
#------------------------
sig = 1.
#suppose observations from 3 groups
xg = np.zeros(nsample, int)
xg[20:40] = 1
xg[40:] = 2
print xg
dummy = (xg[:,None] == np.unique(xg)).astype(float)
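# Broadcasting above: comparing the (nsample, 1) group labels against the unique
# labels [0, 1, 2] yields an (nsample, 3) indicator matrix, e.g. a row with
# xg == 1 becomes [0., 1., 0.]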
#use group 0 as benchmark
X = np.c_[x1, dummy[:,1:], np.ones(nsample)]
beta = [1., 3, -3, 10]
y_true = np.dot(X, beta)
y = y_true + sig * np.random.normal(size=nsample)
plt.figure()
plt.plot(x1, y, 'o', x1, y_true, 'b-')
plt.figure()
plt.plot(x1, y, 'o', x1, y_true, 'b-')
res2 = sm.OLS(y, X).fit()
print res2.params
print res2.bse
#current bug predict requires call to model.results
#print res.model.predict
prstd, iv_l, iv_u = wls_prediction_std(res2)
plt.plot(x1, res2.fittedvalues, 'r--.')
plt.plot(x1, iv_u, 'r--')
plt.plot(x1, iv_l, 'r--')
plt.title('blue: true, red: OLS')
#print res.summary()
R = [[0, 1, 0, 0],
[0, 0, 1, 0]]
# F test joint hypothesis R * beta = 0
# i.e. coefficient on both dummy variables equal zero
print res2.f_test(R)
# strongly rejected Null of identical constant in 3 groups
#<F test: F=124.19050615860911, p=2.87411973729e-019, df_denom=46, df_num=2>
# see also: help(res2.f_test)
# t test for Null hypothesis effects of 2nd and 3rd group add to zero
R = [0, 1, -1, 0]
print res2.t_test(R)
# don't reject Null at 5% confidence level (note one sided p-value)
#<T test: effect=1.0363792917100714, sd=0.52675137730463362, t=1.9674923243925513, p=0.027586676754860262, df_denom=46>
# OLS with small group effects
beta = [1., 0.3, -0.0, 10]
y_true = np.dot(X, beta)
y = y_true + sig * np.random.normal(size=nsample)
res3 = sm.OLS(y, X).fit()
print res3.f_test(R)
# don't reject Null of identical constant in 3 groups
#<F test: F=1.9715385826285652, p=0.15083366806, df_denom=46, df_num=2>
#plt.draw()
#plt.show()
| bsd-3-clause |
fire-rs-laas/fire-rs-saop | main.py | 1 | 3864 | """Main client of fire_rs library"""
from python.fire_rs.geodata import environment
import python.fire_rs.firemodel.propagation as propagation
from mpl_toolkits.mplot3d import Axes3D
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.colors import LightSource
from matplotlib.ticker import FuncFormatter
from matplotlib import cm, pyplot
import numpy as np
import random
def display():
area = [[535000.0, 540000.0], [6235000.0, 6240000.0]]
area = [[530000.0, 535000.0], [6230000.0, 6235000.0]]
# area = [[525000.0, 530000.0], [6225000.0, 6230000.0]] # Produces error!
# area = [[525000.0, 530000.0], [6225000.0, 6250000.0]]
ignition_point = (100, 100)
area_wind = (10, np.pi)
# Light a fire and propagate it over the whole area
env = propagation.Environment(area, wind_speed=area_wind[0], wind_dir=area_wind[1])
prop = propagation.propagate(env, *ignition_point)
prop.plot(blocking=True)
def step_by_step():
area = [[530000.0, 535000.0], [6230000.0, 6235000.0]]
ignition_point = (100, 100)
ignition_time = 0
area_wind = (10, np.pi)
# Light a fire and propagate it over the whole area
env = propagation.Environment(area, wind_speed=area_wind[0], wind_dir=area_wind[1])
propag = propagation.FirePropagation(env)
propag.set_ignition_point(*ignition_point, ignition_time)
plt.figure().gca(aspect='equal', xlabel="X position [m]", ylabel="Y position [m]")
# todo: axes and directions are wrong when using imshow to display an array (see workarounds in GeoData)
p = plt.imshow(propag.ignitions().data.view('float64'))
plt.title("Wildfire propagation")
next_step = ignition_time
while not propag.propagation_finished:
propag.propagate(next_step)
next_step += 300
p.set_data(propag.ignitions().data.view('float64'))
plt.pause(0.1)
def multiple_ignitions():
area = [[480060.0, 489060.0], [6210074.0, 6217074.0]]
ignitions = [propagation.TimedPoint(486000.0, 6214000.0, 0.),
propagation.TimedPoint(486000.0, 6214100.0, 0.),
propagation.TimedPoint(486000.0, 6214150.0, 0.),
propagation.TimedPoint(486000.0, 6214200.0, 0.),
propagation.TimedPoint(486000.0, 6214250.0, 0.),
propagation.TimedPoint(486000.0, 6214300.0, 0.),
propagation.TimedPoint(486000.0, 6214350.0, 0.),
propagation.TimedPoint(486000.0, 6214430.0, 0.)]
area_wind = (10, np.pi)
# Light a fire and propagate it over the whole area
env = propagation.Environment(area, wind_speed=area_wind[0], wind_dir=area_wind[1])
fireprop = propagation.FirePropagation(env)
for ignition in ignitions:
fireprop.set_ignition_point(ignition)
fireprop.propagate(1200+9600+5000)
fireprop.plot(blocking=False)
low = 159.5
high = 160.5
ignitions2 = []
ignition_array = fireprop.prop_data['ignition'] / 60
low_bound = ignition_array > low
up_bound = ignition_array < high
valid_points = low_bound & up_bound
selected_points = []
for hor in range(ignition_array.shape[0]):
for ver in range(ignition_array.shape[1]):
if valid_points[hor, ver]:
ignitions2.append(propagation.TimedPoint(*fireprop.prop_data.coordinates((hor, ver)), 80.))
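    # Equivalent vectorized selection (sketch, not used here):
    #   for hor, ver in np.argwhere(valid_points):
    #       ignitions2.append(propagation.TimedPoint(*fireprop.prop_data.coordinates((hor, ver)), 80.))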
print(ignitions2)
ignitions = random.sample(ignitions2, 50)
# Light a fire and propagate it over the whole area
env = propagation.Environment(area, wind_speed=area_wind[0], wind_dir=area_wind[1])
fireprop = propagation.FirePropagation(env)
for ignition in ignitions:
fireprop.set_ignition_point(ignition)
fireprop.propagate(1200+5000)
fireprop.plot(blocking=False)
print(ignitions)
if __name__ == "__main__":
# step_by_step()
multiple_ignitions()
| bsd-2-clause |
wlamond/scikit-learn | sklearn/covariance/robust_covariance.py | 3 | 30206 | """
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet
from ..utils import check_random_state, check_array
# Minimum Covariance Determinant
# Implementation of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
verbose=False, cov_computation_method=empirical_covariance,
random_state=None):
"""C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data set in which we look for the n_support observations whose
scatter matrix has minimum determinant.
n_support : int, > n_samples / 2
Number of observations to compute the robust estimates of location
and covariance from.
remaining_iterations : int, optional
Number of iterations to perform.
According to [Rouseeuw1999]_, two iterations are sufficient to get
close to the minimum, and we never need more than 30 to reach
convergence.
initial_estimates : 2-tuple, optional
Initial estimates of location and shape from which to run the c_step
procedure:
- initial_estimates[0]: an initial location estimate
- initial_estimates[1]: an initial covariance estimate
verbose : boolean, optional
Verbose mode.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Returns
-------
location : array-like, shape (n_features,)
Robust location estimates.
covariance : array-like, shape (n_features, n_features)
Robust covariance estimates.
    det : float
        Log-determinant of the robust covariance estimate.
    support : array-like, shape (n_samples,)
        A mask for the `n_support` observations whose scatter matrix has
        minimum determinant.
    dist : array-like, shape (n_samples,)
        Squared Mahalanobis distances of all observations with respect to
        the returned location and covariance estimates.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
X = np.asarray(X)
random_state = check_random_state(random_state)
return _c_step(X, n_support, remaining_iterations=remaining_iterations,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state)
def _c_step(X, n_support, random_state, remaining_iterations=30,
initial_estimates=None, verbose=False,
cov_computation_method=empirical_covariance):
n_samples, n_features = X.shape
# Initialisation
support = np.zeros(n_samples, dtype=bool)
if initial_estimates is None:
# compute initial robust estimates from a random subset
support[random_state.permutation(n_samples)[:n_support]] = True
else:
# get initial robust estimates from the function parameters
location = initial_estimates[0]
covariance = initial_estimates[1]
# run a special iteration for that case (to get an initial support)
precision = linalg.pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(1)
# compute new estimates
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(0)
covariance = cov_computation_method(X_support)
# Iterative procedure for Minimum Covariance Determinant computation
det = fast_logdet(covariance)
previous_det = np.inf
while (det < previous_det) and (remaining_iterations > 0):
# save old estimates values
previous_location = location
previous_covariance = covariance
previous_det = det
previous_support = support
# compute a new support from the full data set mahalanobis distances
precision = linalg.pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
# compute new estimates
support = np.zeros(n_samples, dtype=bool)
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(axis=0)
covariance = cov_computation_method(X_support)
det = fast_logdet(covariance)
# update remaining iterations for early stopping
remaining_iterations -= 1
previous_dist = dist
dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
# Catch computation errors
if np.isinf(det):
raise ValueError(
"Singular covariance matrix. "
"Please check that the covariance matrix corresponding "
"to the dataset is full rank and that MinCovDet is used with "
"Gaussian-distributed data (or at least data drawn from a "
"unimodal, symmetric distribution.")
# Check convergence
if np.allclose(det, previous_det):
# c_step procedure converged
if verbose:
print("Optimal couple (location, covariance) found before"
" ending iterations (%d left)" % (remaining_iterations))
results = location, covariance, det, support, dist
elif det > previous_det:
# determinant has increased (should not happen)
warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
% (det, previous_det), RuntimeWarning)
results = previous_location, previous_covariance, \
previous_det, previous_support, previous_dist
# Check early stopping
if remaining_iterations == 0:
if verbose:
print('Maximum number of iterations reached')
results = location, covariance, det, support, dist
return results
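# Note on the `dist` arrays computed above: they hold squared Mahalanobis
# distances, d2(x) = (x - location)^T precision (x - location), evaluated
# row-wise with the (np.dot(X_centered, precision) * X_centered).sum(axis=1)
# idiom.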
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
verbose=False,
cov_computation_method=empirical_covariance,
random_state=None):
"""Finds the best pure subset of observations to compute MCD from it.
The purpose of this function is to find the best sets of n_support
observations with respect to a minimization of their covariance
matrix determinant. Equivalently, it removes n_samples-n_support
observations to construct what we call a pure data set (i.e. not
containing outliers). The list of the observations of the pure
data set is referred to as the `support`.
Starting from a random support, the pure data set is found by the
c_step procedure introduced by Rousseeuw and Van Driessen in
[Rouseeuw1999]_.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data (sub)set in which we look for the n_support purest observations.
n_support : int, [(n + p + 1)/2] < n_support < n
The number of samples the pure data set must contain.
select : int, int > 0
Number of best candidates results to return.
n_trials : int, nb_trials > 0 or 2-tuple
Number of different initial sets of observations from which to
run the algorithm.
Instead of giving a number of trials to perform, one can provide a
list of initial estimates that will be used to iteratively run
c_step procedures. In this case:
- n_trials[0]: array-like, shape (n_trials, n_features)
is the list of `n_trials` initial location estimates
- n_trials[1]: array-like, shape (n_trials, n_features, n_features)
is the list of `n_trials` initial covariances estimates
n_iter : int, nb_iter > 0
Maximum number of iterations for the c_step procedure.
(2 is enough to be close to the final solution. "Never" exceeds 20).
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
verbose : boolean, default False
Control the output verbosity.
See Also
---------
c_step
Returns
-------
best_locations : array-like, shape (select, n_features)
The `select` location estimates computed from the `select` best
supports found in the data set (`X`).
best_covariances : array-like, shape (select, n_features, n_features)
The `select` covariance estimates computed from the `select`
best supports found in the data set (`X`).
    best_supports : array-like, shape (select, n_samples)
        The `select` best supports found in the data set (`X`).
    best_ds : array-like, shape (select, n_samples)
        The squared Mahalanobis distances associated with each of the
        `select` best candidates.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
random_state = check_random_state(random_state)
n_samples, n_features = X.shape
if isinstance(n_trials, numbers.Integral):
run_from_estimates = False
elif isinstance(n_trials, tuple):
run_from_estimates = True
estimates_list = n_trials
n_trials = estimates_list[0].shape[0]
else:
raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
" integer, got %s (%s)" % (n_trials, type(n_trials)))
# compute `n_trials` location and shape estimates candidates in the subset
all_estimates = []
if not run_from_estimates:
# perform `n_trials` computations from random initial supports
for j in range(n_trials):
all_estimates.append(
_c_step(
X, n_support, remaining_iterations=n_iter, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
else:
# perform computations from every given initial estimates
for j in range(n_trials):
initial_estimates = (estimates_list[0][j], estimates_list[1][j])
all_estimates.append(_c_step(
X, n_support, remaining_iterations=n_iter,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \
zip(*all_estimates)
# find the `n_best` best results among the `n_trials` ones
index_best = np.argsort(all_dets_sub)[:select]
best_locations = np.asarray(all_locs_sub)[index_best]
best_covariances = np.asarray(all_covs_sub)[index_best]
best_supports = np.asarray(all_supports_sub)[index_best]
best_ds = np.asarray(all_ds_sub)[index_best]
return best_locations, best_covariances, best_supports, best_ds
def fast_mcd(X, support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None):
"""Estimates the Minimum Covariance Determinant matrix.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
`[n_sample + n_features + 1] / 2`.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Notes
-----
    The FastMCD algorithm has been introduced by Rousseeuw and Van Driessen
in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
1999, American Statistical Association and the American Society
for Quality, TECHNOMETRICS".
The principle is to compute robust estimates and random subsets before
pooling them into a larger subsets, and finally into the full data set.
Depending on the size of the initial sample, we have one, two or three
such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [Rouseeuw1999]_,
see the MinCovDet object.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
Returns
-------
location : array-like, shape (n_features,)
Robust location of the data.
covariance : array-like, shape (n_features, n_features)
Robust covariance of the features.
    support : array-like, type boolean, shape (n_samples,)
        A mask of the observations that have been used to compute
        the robust location and covariance estimates of the data set.
    dist : array-like, shape (n_samples,)
        Squared Mahalanobis distances of the observations with respect to
        the robust location and covariance estimates.
"""
random_state = check_random_state(random_state)
X = check_array(X, ensure_min_samples=2, estimator='fast_mcd')
n_samples, n_features = X.shape
# minimum breakdown value
if support_fraction is None:
n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
else:
n_support = int(support_fraction * n_samples)
# 1-dimensional case quick computation
# (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
# Regression and Outlier Detection, John Wiley & Sons, chapter 4)
if n_features == 1:
if n_support < n_samples:
# find the sample shortest halves
X_sorted = np.sort(np.ravel(X))
diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
halves_start = np.where(diff == np.min(diff))[0]
# take the middle points' mean to get the robust location estimate
location = 0.5 * (X_sorted[n_support + halves_start]
+ X_sorted[halves_start]).mean()
support = np.zeros(n_samples, dtype=bool)
X_centered = X - location
support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
covariance = np.asarray([[np.var(X[support])]])
location = np.array([location])
# get precision matrix in an optimized way
precision = linalg.pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
else:
support = np.ones(n_samples, dtype=bool)
covariance = np.asarray([[np.var(X)]])
location = np.asarray([np.mean(X)])
X_centered = X - location
# get precision matrix in an optimized way
precision = linalg.pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
# Starting FastMCD algorithm for p-dimensional case
if (n_samples > 500) and (n_features > 1):
# 1. Find candidate supports on subsets
# a. split the set in subsets of size ~ 300
n_subsets = n_samples // 300
n_samples_subsets = n_samples // n_subsets
samples_shuffle = random_state.permutation(n_samples)
h_subset = int(np.ceil(n_samples_subsets *
(n_support / float(n_samples))))
# b. perform a total of 500 trials
n_trials_tot = 500
# c. select 10 best (location, covariance) for each subset
n_best_sub = 10
n_trials = max(10, n_trials_tot // n_subsets)
n_best_tot = n_subsets * n_best_sub
all_best_locations = np.zeros((n_best_tot, n_features))
try:
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
        except MemoryError:
            # The above is too big. Try again with something much smaller (and
            # less optimal): fewer candidates per subset. The reduced sizes must
            # be set *before* re-allocating, and the locations array has to be
            # shrunk consistently with the covariances array.
            n_best_sub = 2
            n_best_tot = n_subsets * n_best_sub
            all_best_locations = np.zeros((n_best_tot, n_features))
            all_best_covariances = np.zeros((n_best_tot, n_features,
                                             n_features))
for i in range(n_subsets):
low_bound = i * n_samples_subsets
high_bound = low_bound + n_samples_subsets
current_subset = X[samples_shuffle[low_bound:high_bound]]
best_locations_sub, best_covariances_sub, _, _ = select_candidates(
current_subset, h_subset, n_trials,
select=n_best_sub, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
all_best_locations[subset_slice] = best_locations_sub
all_best_covariances[subset_slice] = best_covariances_sub
# 2. Pool the candidate supports into a merged set
# (possibly the full dataset)
n_samples_merged = min(1500, n_samples)
h_merged = int(np.ceil(n_samples_merged *
(n_support / float(n_samples))))
if n_samples > 1500:
n_best_merged = 10
else:
n_best_merged = 1
# find the best couples (location, covariance) on the merged set
selection = random_state.permutation(n_samples)[:n_samples_merged]
locations_merged, covariances_merged, supports_merged, d = \
select_candidates(
X[selection], h_merged,
n_trials=(all_best_locations, all_best_covariances),
select=n_best_merged,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 3. Finally get the overall best (locations, covariance) couple
if n_samples < 1500:
# directly get the best couple (location, covariance)
location = locations_merged[0]
covariance = covariances_merged[0]
support = np.zeros(n_samples, dtype=bool)
dist = np.zeros(n_samples)
support[selection] = supports_merged[0]
dist[selection] = d[0]
else:
# select the best couple on the full dataset
locations_full, covariances_full, supports_full, d = \
select_candidates(
X, n_support,
n_trials=(locations_merged, covariances_merged),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
elif n_features > 1:
# 1. Find the 10 best couples (location, covariance)
# considering two iterations
n_trials = 30
n_best = 10
locations_best, covariances_best, _, _ = select_candidates(
X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 2. Select the best couple on the full dataset amongst the 10
locations_full, covariances_full, supports_full, d = select_candidates(
X, n_support, n_trials=(locations_best, covariances_best),
select=1, cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
return location, covariance, support, dist
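# Illustrative sketch (not part of the library API): running fast_mcd on a small
# contaminated Gaussian sample. Only the raw, uncorrected estimates come back.
def _demo_fast_mcd(seed=0):
    rng = np.random.RandomState(seed)
    X = rng.randn(100, 2)
    X[:10] += 7.  # plant a few obvious outliers
    location, covariance, support, dist = fast_mcd(X, random_state=rng)
    # `support` flags the observations used for the raw estimates and `dist`
    # holds squared Mahalanobis distances for every observation.
    return location, covariance, support.sum()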
class MinCovDet(EmpiricalCovariance):
"""Minimum Covariance Determinant (MCD): robust estimator of covariance.
The Minimum Covariance Determinant covariance estimator is to be applied
on Gaussian-distributed data, but could still be relevant on data
drawn from a unimodal, symmetric distribution. It is not meant to be used
with multi-modal data (the algorithm used to fit a MinCovDet object is
likely to fail in such a case).
One should consider projection pursuit methods to deal with multi-modal
datasets.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored.
assume_centered : Boolean
If True, the support of the robust location and the covariance
estimates is computed, and a covariance estimate is recomputed from
it, without centering the data.
Useful to work with data whose mean is significantly equal to
zero but is not exactly zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
[n_sample + n_features + 1] / 2
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
raw_location_ : array-like, shape (n_features,)
The raw robust estimated location before correction and re-weighting.
raw_covariance_ : array-like, shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
raw_support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
location_ : array-like, shape (n_features,)
Estimated robust location
covariance_ : array-like, shape (n_features, n_features)
Estimated robust covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the robust estimates of location and shape.
dist_ : array-like, shape (n_samples,)
Mahalanobis distances of the training set (on which `fit` is called)
observations.
References
----------
.. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
J. Am Stat Ass, 79:871, 1984.`
.. [Rouseeuw1999] `A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS`
.. [Butler1993] `R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`
"""
_nonrobust_covariance = staticmethod(empirical_covariance)
def __init__(self, store_precision=True, assume_centered=False,
support_fraction=None, random_state=None):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.support_fraction = support_fraction
self.random_state = random_state
def fit(self, X, y=None):
"""Fits a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
        y : not used, present for API consistency purposes.
Returns
-------
self : object
Returns self.
"""
X = check_array(X, ensure_min_samples=2, estimator='MinCovDet')
random_state = check_random_state(self.random_state)
n_samples, n_features = X.shape
# check that the empirical covariance is full rank
if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
warnings.warn("The covariance matrix associated to your dataset "
"is not full rank")
# compute and store raw estimates
raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
X, support_fraction=self.support_fraction,
cov_computation_method=self._nonrobust_covariance,
random_state=random_state)
if self.assume_centered:
raw_location = np.zeros(n_features)
raw_covariance = self._nonrobust_covariance(X[raw_support],
assume_centered=True)
# get precision matrix in an optimized way
precision = linalg.pinvh(raw_covariance)
raw_dist = np.sum(np.dot(X, precision) * X, 1)
self.raw_location_ = raw_location
self.raw_covariance_ = raw_covariance
self.raw_support_ = raw_support
self.location_ = raw_location
self.support_ = raw_support
self.dist_ = raw_dist
# obtain consistency at normal models
self.correct_covariance(X)
# re-weight estimator
self.reweight_covariance(X)
return self
def correct_covariance(self, data):
"""Apply a correction to raw Minimum Covariance Determinant estimates.
Correction using the empirical correction factor suggested
by Rousseeuw and Van Driessen in [Rouseeuw1984]_.
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
covariance_corrected : array-like, shape (n_features, n_features)
Corrected robust covariance estimate.
"""
correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
covariance_corrected = self.raw_covariance_ * correction
self.dist_ /= correction
return covariance_corrected
def reweight_covariance(self, data):
"""Re-weight raw Minimum Covariance Determinant estimates.
Re-weight observations using Rousseeuw's method (equivalent to
deleting outlying observations from the data set before
computing location and covariance estimates). [Rouseeuw1984]_
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
location_reweighted : array-like, shape (n_features, )
Re-weighted robust location estimate.
covariance_reweighted : array-like, shape (n_features, n_features)
Re-weighted robust covariance estimate.
support_reweighted : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the re-weighted robust location and covariance estimates.
"""
n_samples, n_features = data.shape
mask = self.dist_ < chi2(n_features).isf(0.025)
if self.assume_centered:
location_reweighted = np.zeros(n_features)
else:
location_reweighted = data[mask].mean(0)
covariance_reweighted = self._nonrobust_covariance(
data[mask], assume_centered=self.assume_centered)
support_reweighted = np.zeros(n_samples, dtype=bool)
support_reweighted[mask] = True
self._set_covariance(covariance_reweighted)
self.location_ = location_reweighted
self.support_ = support_reweighted
X_centered = data - self.location_
self.dist_ = np.sum(
np.dot(X_centered, self.get_precision()) * X_centered, 1)
return location_reweighted, covariance_reweighted, support_reweighted
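# Illustrative sketch (not part of the library API): typical MinCovDet usage.
def _demo_min_cov_det(seed=0):
    rng = np.random.RandomState(seed)
    X = rng.randn(200, 3)
    X[:20] += 5.  # contaminate 10% of the rows
    mcd = MinCovDet(random_state=rng).fit(X)
    # robust estimates, largely unaffected by the planted outliers
    return mcd.location_, mcd.covariance_, mcd.support_.sum()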
| bsd-3-clause |
florianziemen/bin | read_huss.py | 1 | 3485 | #!/usr/bin/env python
import gdal
import math
from gdalconst import *
import struct
from netCDF4 import Dataset
import matplotlib.pylab as mpl
import numpy as np
import sys
import glob
increment = 30.
big_rows = big_cols = 10010
outfile = Dataset("big_file.nc", 'w', format='NETCDF4')
#print outfile.file_format
fdx = outfile.createDimension('x', big_cols)
fdy = outfile.createDimension('y', big_rows)
fvx = outfile.createVariable('x','f4',('x',))
fvy = outfile.createVariable('y','f4',('y',))
out_height = outfile.createVariable('height','f4',('y','x'), fill_value=-9.e9)
out_height.coordinates = "y x" ;
count = outfile.createVariable('count','i4',('y','x'), fill_value=-9.e9)
count.coordinates = "y x" ;
count[:] = 0
# out_height[:]=0.
x_offset = 1000010.8
y_offset = 1000028.1
fvx[:] = np.arange (big_cols) * increment + (x_offset + increment/2.)
fvy[:] = np.arange (big_rows) * increment + (y_offset + increment/2.)
for filename in glob.glob("thick_*.agr"): # .grid
dataset = gdal.Open(filename, GA_ReadOnly)
driver = dataset.GetDriver().LongName
geotransform = dataset.GetGeoTransform()
band = dataset.GetRasterBand(1)
bandtype = gdal.GetDataTypeName(band.DataType)
scanline = band.ReadRaster( 0, 0, band.XSize, 1,band.XSize, 1, band.DataType)
cols = dataset.RasterXSize
rows = dataset.RasterYSize
bands = dataset.RasterCount
fincrement = - geotransform[-1]
if fincrement != increment:
print "GRID INCREMENTS DON'T MATCH! EXPECTING " + str(increment) + " GOT " + str(fincrement)
print filename
continue
# sys.exit(666)
xll = geotransform [0]
xur = xll + cols * increment # remap to ur corner.
yur = geotransform [3]
yll = yur - rows * increment # remap to ll corner.
data = band.ReadAsArray(0, 0, cols, rows)
#mpl.imshow(data, interpolation = "nearest")
#mpl.colorbar()
#mpl.show()
# print geotransform
xpos = np.arange(cols) * increment + xll + increment *.5
ypos = np.arange(rows) * increment + yll + increment *.5
writesingle = False
if writesingle :
single_outfilename = "test.nc"
single_outfile = Dataset(single_outfilename, 'w', format='NETCDF4')
# print single_outfile.file_format
lon = single_outfile.createDimension('lon', cols)
lat = single_outfile.createDimension('lat', rows)
latitudes = single_outfile.createVariable('lat','f4',('lat',))
longitudes = single_outfile.createVariable('lon','f4',('lon',))
height = single_outfile.createVariable('height','f4',('lat','lon'))
print xpos.shape
print ypos.shape
print data.shape
longitudes[:] = xpos
latitudes[:] = ypos
height[:] = data[::-1,:]
single_outfile.close()
# print xpos.shape
# print ypos.shape
# print data.shape
xmin = int(math.floor((xpos[0] - x_offset) / increment))
xmax = int(xmin + cols)
ymin = int(math.floor((ypos[0] - y_offset) / increment))
ymax = int(ymin + rows)
if xmin > 0 and xmax < big_cols and ymin > 0 and ymax < big_rows:
# print (xmin, xmax, ymin, ymax)
out_height[ymin:ymax,xmin:xmax] = np.where(data[::-1,:]> 0 , data[::-1,:], out_height[ymin:ymax,xmin:xmax] )
count[ymin:ymax,xmin:xmax] += (data [::-1,:] > 0 )
# out_height[:] = np.where(count[:] > 0 , ( out_height[:] / count[:] ), out_height[:])
out_height[:]=np.where(count[:],out_height[:],0)
outfile.close()
| gpl-3.0 |
massmutual/scikit-learn | doc/conf.py | 210 | 8446 | # -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
# -- General configuration ---------------------------------------------------
# Try to override the matplotlib configuration as early as possible
try:
import gen_rst
except:
pass
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['gen_rst',
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.pngmath', 'numpy_ext.numpydoc',
'sphinx.ext.linkcode',
]
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('scikit-learn')
copyright = u('2010 - 2014, scikit-learn developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
u('scikit-learn developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctests_flags = True
def generate_example_rst(app, what, name, obj, options, lines):
# generate empty examples files, so that we don't get
# inclusion errors if there are no examples for a class / module
examples_path = os.path.join(app.srcdir, "modules", "generated",
"%s.examples" % name)
if not os.path.exists(examples_path):
# touch file
open(examples_path, 'w').close()
def setup(app):
# to hide/show the prompt in code examples:
app.add_javascript('js/copybutton.js')
app.connect('autodoc-process-docstring', generate_example_rst)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
u'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
| bsd-3-clause |
BabeNovelty/numpy | numpy/linalg/linalg.py | 31 | 75612 | """Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError', 'multi_dot']
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, transpose, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot,
add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size,
finfo, errstate, geterrobj, longdouble, rollaxis, amin, amax, product, abs,
broadcast, atleast_2d, intp, asanyarray, isscalar
)
from numpy.lib import triu, asfarray
from numpy.linalg import lapack_lite, _umath_linalg
from numpy.matrixlib.defmatrix import matrix_power
from numpy.compat import asbytes
# For Python2/3 compatibility
_N = asbytes('N')
_V = asbytes('V')
_A = asbytes('A')
_S = asbytes('S')
_L = asbytes('L')
fortran_int = intc
# Error object
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's exception.Exception
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError('Singular matrix')
numpy.linalg.LinAlgError: Singular matrix
"""
pass
# Dealing with errors in _umath_linalg
_linalg_error_extobj = None
def _determine_error_states():
global _linalg_error_extobj
errobj = geterrobj()
bufsize = errobj[0]
with errstate(invalid='call', over='ignore',
divide='ignore', under='ignore'):
invalid_call_errmask = geterrobj()[1]
_linalg_error_extobj = [bufsize, invalid_call_errmask, None]
_determine_error_states()
def _raise_linalgerror_singular(err, flag):
raise LinAlgError("Singular matrix")
def _raise_linalgerror_nonposdef(err, flag):
raise LinAlgError("Matrix is not positive definite")
def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
raise LinAlgError("Eigenvalues did not converge")
def _raise_linalgerror_svd_nonconvergence(err, flag):
raise LinAlgError("SVD did not converge")
def get_linalg_error_extobj(callback):
extobj = list(_linalg_error_extobj)
extobj[2] = callback
return extobj
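# Illustrative-only sketch (not part of the library; the helper name is ours): the
# extobj built above is a plain [bufsize, errmask, callback] list, and the check
# below shows that the requested callback lands in slot 2 before the list is
# handed to a gufunc via its `extobj=` argument.
def _extobj_demo():
    extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
    assert extobj[2] is _raise_linalgerror_singular   # callback installed in slot 2
    assert extobj[:2] == _linalg_error_extobj[:2]     # bufsize and error mask reused
    return extobj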
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single : single,
double : double,
csingle : single,
cdouble : double}
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _linalgRealType(t):
"""Cast the type t to either double or cdouble."""
return double
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
result_type = single
is_complex = False
for a in arrays:
if issubclass(a.dtype.type, inexact):
if isComplexType(a.dtype.type):
is_complex = True
rt = _realType(a.dtype.type, default=None)
if rt is None:
# unsupported inexact scalar
raise TypeError("array type %s is unsupported in linalg" %
(a.dtype.name,))
else:
rt = double
if rt is double:
result_type = double
if is_complex:
t = cdouble
result_type = _complex_types_map[result_type]
else:
t = double
return t, result_type
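# Illustrative-only sketch (not part of the library; the helper name is ours): in
# this lite implementation _commonType always computes in double/cdouble, while
# result_t records the (possibly single-precision) type the caller should get back.
def _common_type_demo():
    a = array([1.0, 2.0], dtype=single)
    b = array([1.0 + 0j], dtype=csingle)
    t, result_t = _commonType(a, b)
    assert t is cdouble         # computation type handed to LAPACK
    assert result_t is csingle  # result type reported back to the caller
    return t, result_t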
# _fastCopyAndTranpose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.type is type:
cast_arrays = cast_arrays + (_fastCT(a),)
else:
cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def _assertRank2(*arrays):
for a in arrays:
if len(a.shape) != 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'two-dimensional' % len(a.shape))
def _assertRankAtLeast2(*arrays):
for a in arrays:
if len(a.shape) < 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'at least two-dimensional' % len(a.shape))
def _assertSquareness(*arrays):
for a in arrays:
if max(a.shape) != min(a.shape):
raise LinAlgError('Array must be square')
def _assertNdSquareness(*arrays):
for a in arrays:
if max(a.shape[-2:]) != min(a.shape[-2:]):
raise LinAlgError('Last 2 dimensions of the array must be square')
def _assertFinite(*arrays):
for a in arrays:
if not (isfinite(a).all()):
raise LinAlgError("Array must not contain infs or NaNs")
def _assertNoEmpty2d(*arrays):
for a in arrays:
if a.size == 0 and product(a.shape[-2:]) == 0:
raise LinAlgError("Arrays cannot be empty")
# Linear equations
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=len(b.shape))``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorinv, einsum
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a, wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
allaxes = list(range(0, an))
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(an-b.ndim):]
prod = 1
for k in oldshape:
prod *= k
a = a.reshape(-1, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : (..., M, M) array_like
Coefficient matrix.
b : {(..., M,), (..., M, K)}, array_like
Ordinate or "dependent variable" values.
Returns
-------
x : {(..., M,), (..., M, K)} ndarray
Solution to the system a x = b. Returned shape is identical to `b`.
Raises
------
LinAlgError
If `a` is singular or not square.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The solutions are computed using LAPACK routine _gesv
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 22.
Examples
--------
Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
>>> a = np.array([[3,1], [1,2]])
>>> b = np.array([9,8])
>>> x = np.linalg.solve(a, b)
>>> x
array([ 2., 3.])
Check that the solution is correct:
>>> np.allclose(np.dot(a, x), b)
True
"""
a, _ = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
b, wrap = _makearray(b)
t, result_t = _commonType(a, b)
# We use the b = (..., M,) logic, only if the number of extra dimensions
# match exactly
if b.ndim == a.ndim - 1:
if a.shape[-1] == 0 and b.shape[-1] == 0:
# Legal, but the ufunc cannot handle the 0-sized inner dims
# let the ufunc handle all wrong cases.
a = a.reshape(a.shape[:-1])
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve1
else:
if b.size == 0:
if (a.shape[-1] == 0 and b.shape[-2] == 0) or b.shape[-1] == 0:
a = a[:,:1].reshape(a.shape[:-1] + (1,))
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve
signature = 'DD->D' if isComplexType(t) else 'dd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
r = gufunc(a, b, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
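# Illustrative-only sketch (not part of the library; the helper name is ours):
# solve() broadcasts over stacked systems, selecting the (..., M) or (..., M, K)
# gufunc as described above; assumes numpy is importable as np.
def _solve_broadcast_demo():
    import numpy as np
    a = np.array([[[3., 1.], [1., 2.]],
                  [[2., 0.], [0., 2.]]])   # two independent 2x2 systems
    b = np.array([[9., 8.],
                  [2., 4.]])               # matching (..., M) right-hand sides
    x = np.linalg.solve(a, b)              # shape (2, 2): one solution per system
    assert np.allclose(np.einsum('...ij,...j->...i', a, x), b)
    return x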
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i. e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
`a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorsolve
Examples
--------
>>> a = np.eye(4*6)
>>> a.shape = (4, 6, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> b = np.random.randn(4, 6)
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6)
>>> a.shape = (24, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> b = np.random.randn(24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
a = asarray(a)
oldshape = a.shape
prod = 1
if ind > 0:
invshape = oldshape[ind:] + oldshape[:ind]
for k in oldshape[ind:]:
prod *= k
else:
raise ValueError("Invalid ind argument.")
a = a.reshape(prod, -1)
ia = inv(a)
return ia.reshape(*invshape)
# Matrix inversion
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : (..., M, M) array_like
Matrix to be inverted.
Returns
-------
ainv : (..., M, M) ndarray or matrix
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is not square or inversion fails.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
Examples
--------
>>> from numpy.linalg import inv
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = inv(a)
>>> np.allclose(np.dot(a, ainv), np.eye(2))
True
>>> np.allclose(np.dot(ainv, a), np.eye(2))
True
If a is a matrix object, then the return value is a matrix as well:
>>> ainv = inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
Inverses of several matrices can be computed at once:
>>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])
>>> inv(a)
array([[[-2. , 1. ],
[ 1.5, -0.5]],
[[-5. , 2. ],
[ 3. , -1. ]]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
if a.shape[-1] == 0:
# The inner array is 0x0, the ufunc cannot handle this case
return wrap(empty_like(a, dtype=result_t))
signature = 'D->D' if isComplexType(t) else 'd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)
return wrap(ainv.astype(result_t, copy=False))
# Cholesky decomposition
def cholesky(a):
"""
Cholesky decomposition.
Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
where `L` is lower-triangular and .H is the conjugate transpose operator
(which is the ordinary transpose if `a` is real-valued). `a` must be
Hermitian (symmetric if real-valued) and positive-definite. Only `L` is
actually returned.
Parameters
----------
a : (..., M, M) array_like
Hermitian (symmetric if all elements are real), positive-definite
input matrix.
Returns
-------
L : (..., M, M) array_like
        Lower-triangular Cholesky factor of `a`. Returns a
matrix object if `a` is a matrix object.
Raises
------
LinAlgError
If the decomposition fails, for example, if `a` is not
positive-definite.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \\mathbf{x} = \\mathbf{b}
(when `A` is both Hermitian/symmetric and positive-definite).
First, we solve for :math:`\\mathbf{y}` in
.. math:: L \\mathbf{y} = \\mathbf{b},
and then for :math:`\\mathbf{x}` in
.. math:: L.H \\mathbf{x} = \\mathbf{y}.
Examples
--------
>>> A = np.array([[1,-2j],[2j,5]])
>>> A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> L = np.linalg.cholesky(A)
>>> L
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> np.dot(L, L.T.conj()) # verify that L * L.H = A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
>>> np.linalg.cholesky(A) # an ndarray object is returned
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> # But a matrix object is returned if A is a matrix object
    >>> np.linalg.cholesky(np.matrix(A))
matrix([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
"""
extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
gufunc = _umath_linalg.cholesky_lo
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = gufunc(a, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
# QR decompostion
def qr(a, mode='reduced'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like, shape (M, N)
Matrix to be factored.
mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
If K = min(M, N), then
'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
'complete' : returns q, r with dimensions (M, M), (M, N)
'r' : returns r only with dimensions (K, N)
'raw' : returns h, tau with dimensions (N, M), (K,)
'full' : alias of 'reduced', deprecated
'economic' : returns h from 'raw', deprecated.
        The options 'reduced', 'complete', and 'raw' are new in numpy 1.8,
see the notes for more information. The default is 'reduced' and to
maintain backward compatibility with earlier versions of numpy both
it and the old default 'full' can be omitted. Note that array h
returned in 'raw' mode is transposed for calling Fortran. The
'economic' mode is deprecated. The modes 'full' and 'economic' may
be passed using only the first letter for backwards compatibility,
but all others must be spelled out. See the Notes for more
explanation.
Returns
-------
q : ndarray of float or complex, optional
A matrix with orthonormal columns. When mode = 'complete' the
result is an orthogonal/unitary matrix depending on whether or not
a is real/complex. The determinant may be either +/- 1 in that
case.
r : ndarray of float or complex, optional
The upper-triangular matrix.
(h, tau) : ndarrays of np.double or np.cdouble, optional
The array h contains the Householder reflectors that generate q
along with r. The tau array contains scaling factors for the
reflectors. In the deprecated 'economic' mode only h is returned.
Raises
------
LinAlgError
If factoring fails.
Notes
-----
This is an interface to the LAPACK routines dgeqrf, zgeqrf,
dorgqr, and zungqr.
For more information on the qr factorization, see for example:
http://en.wikipedia.org/wiki/QR_factorization
Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
`a` is of type `matrix`, all the return values will be matrices too.
New 'reduced', 'complete', and 'raw' options for mode were added in
Numpy 1.8 and the old option 'full' was made an alias of 'reduced'. In
addition the options 'full' and 'economic' were deprecated. Because
'full' was the previous default and 'reduced' is the new default,
backward compatibility can be maintained by letting `mode` default.
The 'raw' option was added so that LAPACK routines that can multiply
arrays by q using the Householder reflectors can be used. Note that in
this case the returned arrays are of type np.double or np.cdouble and
the h array is transposed to be FORTRAN compatible. No routines using
the 'raw' return are currently exposed by numpy, but some are available
in lapack_lite and just await the necessary work.
Examples
--------
>>> a = np.random.randn(9, 6)
>>> q, r = np.linalg.qr(a)
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
>>> r3 = np.linalg.qr(a, mode='economic')
>>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
True
>>> # But only triu parts are guaranteed equal when mode='economic'
>>> np.allclose(r, np.triu(r3[:6,:6], k=0))
True
Example illustrating a common use of `qr`: solving of least squares
problems
What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
and you'll see that it should be y0 = 0, m = 1.) The answer is provided
by solving the over-determined matrix equation ``Ax = b``, where::
A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
x = array([[y0], [m]])
b = array([[1], [0], [2], [1]])
If A = qr such that q is orthonormal (which is always possible via
Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
however, we simply use `lstsq`.)
>>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
>>> A
array([[0, 1],
[1, 1],
[1, 1],
[2, 1]])
>>> b = np.array([1, 0, 2, 1])
    >>> q, r = np.linalg.qr(A)
    >>> p = np.dot(q.T, b)
    >>> np.dot(np.linalg.inv(r), p)
array([ 1.1e-16, 1.0e+00])
"""
if mode not in ('reduced', 'complete', 'r', 'raw'):
if mode in ('f', 'full'):
# 2013-04-01, 1.8
msg = "".join((
"The 'full' option is deprecated in favor of 'reduced'.\n",
"For backward compatibility let mode default."))
warnings.warn(msg, DeprecationWarning)
mode = 'reduced'
elif mode in ('e', 'economic'):
# 2013-04-01, 1.8
msg = "The 'economic' option is deprecated.",
warnings.warn(msg, DeprecationWarning)
mode = 'economic'
else:
raise ValueError("Unrecognized mode '%s'" % mode)
a, wrap = _makearray(a)
_assertRank2(a)
_assertNoEmpty2d(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
mn = min(m, n)
tau = zeros((mn,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeqrf
routine_name = 'zgeqrf'
else:
lapack_routine = lapack_lite.dgeqrf
routine_name = 'dgeqrf'
# calculate optimal size of work data 'work'
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# do qr decomposition
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# handle modes that don't return q
if mode == 'r':
r = _fastCopyAndTranspose(result_t, a[:, :mn])
return wrap(triu(r))
if mode == 'raw':
return a, tau
if mode == 'economic':
if t != result_t :
a = a.astype(result_t, copy=False)
return wrap(a.T)
# generate q from a
if mode == 'complete' and m > n:
mc = m
q = empty((m, m), t)
else:
mc = mn
q = empty((n, m), t)
q[:n] = a
if isComplexType(t):
lapack_routine = lapack_lite.zungqr
routine_name = 'zungqr'
else:
lapack_routine = lapack_lite.dorgqr
routine_name = 'dorgqr'
# determine optimal lwork
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# compute q
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
q = _fastCopyAndTranspose(result_t, q[:mc])
r = _fastCopyAndTranspose(result_t, a[:, :mc])
return wrap(q), wrap(triu(r))
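# Illustrative-only sketch (not part of the library; the helper name is ours): the
# shapes produced by the different qr() modes documented above; assumes numpy is
# importable as np and provides the 1.8+ 'complete'/'raw' modes.
def _qr_modes_demo():
    import numpy as np
    a = np.random.randn(5, 3)
    q, r = np.linalg.qr(a)                      # 'reduced': (5, 3), (3, 3)
    qc, rc = np.linalg.qr(a, mode='complete')   # 'complete': (5, 5), (5, 3)
    h, tau = np.linalg.qr(a, mode='raw')        # 'raw': h is (N, M), tau is (K,)
    assert q.shape == (5, 3) and r.shape == (3, 3)
    assert qc.shape == (5, 5) and rc.shape == (5, 3)
    assert h.shape == (3, 5) and tau.shape == (3,)
    assert np.allclose(a, np.dot(q, r))         # reduced factors reproduce a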
# Eigenvalues
def eigvals(a):
"""
Compute the eigenvalues of a general matrix.
Main difference between `eigvals` and `eig`: the eigenvectors aren't
returned.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues will be computed.
Returns
-------
w : (..., M,) ndarray
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered, nor are they necessarily
real for real matrices.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
Examples
--------
Illustration, using the fact that the eigenvalues of a diagonal matrix
are its diagonal elements, that multiplying a matrix on the left
by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
``A``:
>>> from numpy import linalg as LA
>>> x = np.random.random()
>>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
(1.0, 1.0, 0.0)
Now multiply a diagonal matrix by Q on one side and by Q.T on the other:
>>> D = np.diag((-1,1))
>>> LA.eigvals(D)
array([-1., 1.])
>>> A = np.dot(Q, D)
>>> A = np.dot(A, Q.T)
>>> LA.eigvals(A)
array([ 1., -1.])
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->D' if isComplexType(t) else 'd->D'
w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)
if not isComplexType(t):
if all(w.imag == 0):
w = w.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
return w.astype(result_t, copy=False)
def eigvalsh(a, UPLO='L'):
"""
Compute the eigenvalues of a Hermitian or real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues are to be
computed.
UPLO : {'L', 'U'}, optional
        Specifies whether the calculation is done with the lower triangular
        part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : (..., M,) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
eigvals : eigenvalues of general real or complex arrays.
eig : eigenvalues and right eigenvectors of general real or complex
arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues are computed using LAPACK routines _syevd, _heevd
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> LA.eigvalsh(a)
array([ 0.17157288, 5.82842712])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigvalsh_lo
else:
gufunc = _umath_linalg.eigvalsh_up
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->d' if isComplexType(t) else 'd->d'
w = gufunc(a, signature=signature, extobj=extobj)
return w.astype(_realType(result_t), copy=False)
def _convertarray(a):
t, result_t = _commonType(a)
a = _fastCT(a.astype(t))
return a, t, result_t
# Eigenvectors
def eig(a):
"""
Compute the eigenvalues and right eigenvectors of a square array.
Parameters
----------
a : (..., M, M) array
Matrices for which the eigenvalues and right eigenvectors will
be computed
Returns
-------
w : (..., M) array
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered. The resulting
        array will be of complex type, unless the imaginary part is
        zero, in which case it is cast to a real type. When `a` is real
the resulting eigenvalues will be real (0 imaginary part) or
occur in conjugate pairs
v : (..., M, M) array
The normalized (unit "length") eigenvectors, such that the
column ``v[:,i]`` is the eigenvector corresponding to the
eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvals : eigenvalues of a non-symmetric array.
eigh : eigenvalues and eigenvectors of a symmetric or Hermitian
(conjugate symmetric) array.
eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)
array.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
`v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
`v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]``
for :math:`i \\in \\{0,...,M-1\\}`.
The array `v` of eigenvectors may not be of maximum rank, that is, some
of the columns may be linearly dependent, although round-off error may
obscure that fact. If the eigenvalues are all different, then theoretically
the eigenvectors are linearly independent. Likewise, the (complex-valued)
matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,
if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate
transpose of `a`.
Finally, it is emphasized that `v` consists of the *right* (as in
right-hand side) eigenvectors of `a`. A vector `y` satisfying
``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*
eigenvector of `a`, and, in general, the left and right eigenvectors
of a matrix are not necessarily the (perhaps conjugate) transposes
of each other.
References
----------
G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
Academic Press, Inc., 1980, Various pp.
Examples
--------
>>> from numpy import linalg as LA
(Almost) trivial example with real e-values and e-vectors.
>>> w, v = LA.eig(np.diag((1, 2, 3)))
>>> w; v
array([ 1., 2., 3.])
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Real matrix possessing complex e-values and e-vectors; note that the
e-values are complex conjugates of each other.
>>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
>>> w; v
array([ 1. + 1.j, 1. - 1.j])
array([[ 0.70710678+0.j , 0.70710678+0.j ],
[ 0.00000000-0.70710678j, 0.00000000+0.70710678j]])
Complex-valued matrix with real e-values (but complex-valued e-vectors);
note that a.conj().T = a, i.e., a is Hermitian.
>>> a = np.array([[1, 1j], [-1j, 1]])
>>> w, v = LA.eig(a)
>>> w; v
array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0}
array([[ 0.00000000+0.70710678j, 0.70710678+0.j ],
[ 0.70710678+0.j , 0.00000000+0.70710678j]])
Be careful about round-off error!
>>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
>>> # Theor. e-values are 1 +/- 1e-9
>>> w, v = LA.eig(a)
>>> w; v
array([ 1., 1.])
array([[ 1., 0.],
[ 0., 1.]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->DD' if isComplexType(t) else 'd->DD'
w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj)
if not isComplexType(t) and all(w.imag == 0.0):
w = w.real
vt = vt.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
vt = vt.astype(result_t, copy=False)
return w.astype(result_t, copy=False), wrap(vt)
def eigh(a, UPLO='L'):
"""
Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.
Returns two objects, a 1-D array containing the eigenvalues of `a`, and
a 2-D square array or matrix (depending on the input type) of the
corresponding eigenvectors (in columns).
Parameters
----------
a : (..., M, M) array
Hermitian/Symmetric matrices whose eigenvalues and
eigenvectors are to be computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : (..., M) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
v : {(..., M, M) ndarray, (..., M, M) matrix}
The column ``v[:, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[i]``. Will return a matrix object if `a` is
a matrix object.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eig : eigenvalues and right eigenvectors for non-symmetric arrays.
eigvals : eigenvalues of non-symmetric arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues/eigenvectors are computed using LAPACK routines _syevd,
_heevd
The eigenvalues of real symmetric or complex Hermitian matrices are
always real. [1]_ The array `v` of (column) eigenvectors is unitary
and `a`, `w`, and `v` satisfy the equations
``dot(a, v[:, i]) = w[i] * v[:, i]``.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 222.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> a
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(a)
>>> w; v
array([ 0.17157288, 5.82842712])
array([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j])
>>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
array([ 0.+0.j, 0.+0.j])
>>> A = np.matrix(a) # what happens if input is a matrix object
>>> A
matrix([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(A)
>>> w; v
array([ 0.17157288, 5.82842712])
matrix([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigh_lo
else:
gufunc = _umath_linalg.eigh_up
signature = 'D->dD' if isComplexType(t) else 'd->dd'
w, vt = gufunc(a, signature=signature, extobj=extobj)
w = w.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return w, wrap(vt)
# Singular value decomposition
def svd(a, full_matrices=1, compute_uv=1):
"""
Singular Value Decomposition.
Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`
are unitary and `s` is a 1-d array of `a`'s singular values.
Parameters
----------
a : (..., M, N) array_like
A real or complex matrix of shape (`M`, `N`) .
full_matrices : bool, optional
If True (default), `u` and `v` have the shapes (`M`, `M`) and
(`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`)
and (`K`, `N`), respectively, where `K` = min(`M`, `N`).
compute_uv : bool, optional
Whether or not to compute `u` and `v` in addition to `s`. True
by default.
Returns
-------
u : { (..., M, M), (..., M, K) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
s : (..., K) array
The singular values for every matrix, sorted in descending order.
v : { (..., N, N), (..., K, N) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
Raises
------
LinAlgError
If SVD computation does not converge.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The decomposition is performed using LAPACK routine _gesdd
The SVD is commonly written as ``a = U S V.H``. The `v` returned
by this function is ``V.H`` and ``u = U``.
If ``U`` is a unitary matrix, it means that it
satisfies ``U.H = inv(U)``.
The rows of `v` are the eigenvectors of ``a.H a``. The columns
of `u` are the eigenvectors of ``a a.H``. For row ``i`` in
`v` and column ``i`` in `u`, the corresponding eigenvalue is
``s[i]**2``.
If `a` is a `matrix` object (as opposed to an `ndarray`), then so
are all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
Reconstruction based on full SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=True)
>>> U.shape, V.shape, s.shape
((9, 9), (6, 6), (6,))
>>> S = np.zeros((9, 6), dtype=complex)
>>> S[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
Reconstruction based on reduced SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=False)
>>> U.shape, V.shape, s.shape
((9, 6), (6, 6), (6,))
>>> S = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
m = a.shape[-2]
n = a.shape[-1]
if compute_uv:
if full_matrices:
if m < n:
gufunc = _umath_linalg.svd_m_f
else:
gufunc = _umath_linalg.svd_n_f
else:
if m < n:
gufunc = _umath_linalg.svd_m_s
else:
gufunc = _umath_linalg.svd_n_s
signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
u, s, vt = gufunc(a, signature=signature, extobj=extobj)
u = u.astype(result_t, copy=False)
s = s.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return wrap(u), s, wrap(vt)
else:
if m < n:
gufunc = _umath_linalg.svd_m
else:
gufunc = _umath_linalg.svd_n
signature = 'D->d' if isComplexType(t) else 'd->d'
s = gufunc(a, signature=signature, extobj=extobj)
s = s.astype(_realType(result_t), copy=False)
return s
def cond(x, p=None):
"""
Compute the condition number of a matrix.
This function is capable of returning the condition number using
one of seven different norms, depending on the value of `p` (see
Parameters below).
Parameters
----------
x : (M, N) array_like
The matrix whose condition number is sought.
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
Order of the norm:
===== ============================
p norm for matrices
===== ============================
None 2-norm, computed directly using the ``SVD``
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 2-norm (largest sing. value)
-2 smallest singular value
===== ============================
inf means the numpy.inf object, and the Frobenius norm is
the root-of-sum-of-squares norm.
Returns
-------
c : {float, inf}
The condition number of the matrix. May be infinite.
See Also
--------
numpy.linalg.norm
Notes
-----
The condition number of `x` is defined as the norm of `x` times the
norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
(root-of-sum-of-squares) or one of a number of other matrix norms.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
Academic Press, Inc., 1980, pg. 285.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
>>> a
array([[ 1, 0, -1],
[ 0, 1, 0],
[ 1, 0, 1]])
>>> LA.cond(a)
1.4142135623730951
>>> LA.cond(a, 'fro')
3.1622776601683795
>>> LA.cond(a, np.inf)
2.0
>>> LA.cond(a, -np.inf)
1.0
>>> LA.cond(a, 1)
2.0
>>> LA.cond(a, -1)
1.0
>>> LA.cond(a, 2)
1.4142135623730951
>>> LA.cond(a, -2)
0.70710678118654746
>>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))
0.70710678118654746
"""
x = asarray(x) # in case we have a matrix
if p is None:
s = svd(x, compute_uv=False)
return s[0]/s[-1]
else:
return norm(x, p)*norm(inv(x), p)
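# Illustrative-only sketch (not part of the library; the helper name is ours): for
# p=None, cond() is exactly the ratio of the extreme singular values computed
# above; assumes numpy is importable as np.
def _cond_demo():
    import numpy as np
    a = np.array([[1., 0., -1.], [0., 1., 0.], [1., 0., 1.]])
    s = np.linalg.svd(a, compute_uv=False)
    assert np.allclose(np.linalg.cond(a), s[0] / s[-1])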
def matrix_rank(M, tol=None):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of SVD singular values of the array that are
greater than `tol`.
Parameters
----------
M : {(M,), (M, N)} array_like
array of <=2 dimensions
tol : {None, float}, optional
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for linear
least squares [2].
This default threshold is designed to detect rank deficiency accounting for
the numerical errors of the SVD computation. Imagine that there is a column
in `M` that is an exact (in floating point) linear combination of other
columns in `M`. Computing the SVD on `M` will not produce a singular value
exactly equal to 0 in general: any difference of the smallest SVD value from
0 will be caused by numerical imprecision in the calculation of the SVD.
Our threshold for small SVD values takes this numerical imprecision into
account, and the default threshold will detect such numerical rank
deficiency. The threshold may declare a matrix `M` rank deficient even if
the linear combination of some columns of `M` is not exactly equal to
another column of `M` but only numerically very close to another column of
`M`.
We chose our default threshold because it is in wide use. Other thresholds
are possible. For example, elsewhere in the 2007 edition of *Numerical
recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance values
to detect *effective* rank deficiency. The most useful measure of the
tolerance depends on the operations you intend to use on your matrix. For
example, if your data come from uncertain measurements with uncertainties
greater than floating point epsilon, choosing a tolerance near that
uncertainty may be preferable. The tolerance may be absolute if the
uncertainties are absolute rather than relative.
References
----------
    .. [1] MATLAB reference documentation, "Rank"
http://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = asarray(M)
if M.ndim > 2:
raise TypeError('array should have 2 or fewer dimensions')
if M.ndim < 2:
return int(not all(M==0))
S = svd(M, compute_uv=False)
if tol is None:
tol = S.max() * max(M.shape) * finfo(S.dtype).eps
return sum(S > tol)
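# Illustrative-only sketch (not part of the library; the helper name is ours): an
# explicit `tol` overrides the default SVD-based threshold discussed in the notes
# above; assumes numpy is importable as np.
def _matrix_rank_tol_demo():
    import numpy as np
    a = np.diag([1.0, 1e-5, 1e-12])
    assert np.linalg.matrix_rank(a) == 3             # default tol keeps both small values
    assert np.linalg.matrix_rank(a, tol=1e-8) == 2   # explicit tol drops 1e-12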
# Generalized inverse
def pinv(a, rcond=1e-15 ):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
Parameters
----------
a : (M, N) array_like
Matrix to be pseudo-inverted.
rcond : float
Cutoff for small singular values.
Singular values smaller (in modulus) than
`rcond` * largest_singular_value (again, in modulus)
are set to zero.
Returns
-------
B : (N, M) ndarray
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
of A's so-called singular values, (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
a = a.conjugate()
u, s, vt = svd(a, 0)
m = u.shape[0]
n = vt.shape[1]
cutoff = rcond*maximum.reduce(s)
for i in range(min(n, m)):
if s[i] > cutoff:
s[i] = 1./s[i]
else:
            s[i] = 0.
res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u)))
return wrap(res)
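# Illustrative-only sketch (not part of the library; the helper name is ours):
# singular values below rcond * s.max() are zeroed by the loop above, so a nearly
# singular matrix still gets a finite pseudo-inverse; assumes numpy is importable
# as np.
def _pinv_rcond_demo():
    import numpy as np
    a = np.diag([1.0, 1e-20])     # second singular value is below the default rcond
    p = np.linalg.pinv(a)         # rcond=1e-15 treats 1e-20 as exactly zero
    assert np.allclose(p, np.diag([1.0, 0.0]))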
# Determinant
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : (..., M, M) array_like
Input array, has to be a square 2-D array.
Returns
-------
sign : (...) array_like
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logdet : (...) array_like
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
See Also
--------
det
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
.. versionadded:: 1.6.0.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logdet) = np.linalg.slogdet(a)
>>> (sign, logdet)
(-1, 0.69314718055994529)
>>> sign * np.exp(logdet)
-2.0
Computing log-determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> sign, logdet = np.linalg.slogdet(a)
>>> (sign, logdet)
(array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
>>> sign * np.exp(logdet)
array([-2., -3., -8.])
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
"""
a = asarray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
real_t = _realType(result_t)
signature = 'D->Dd' if isComplexType(t) else 'd->dd'
sign, logdet = _umath_linalg.slogdet(a, signature=signature)
if isscalar(sign):
sign = sign.astype(result_t)
else:
sign = sign.astype(result_t, copy=False)
if isscalar(logdet):
logdet = logdet.astype(real_t)
else:
logdet = logdet.astype(real_t, copy=False)
return sign, logdet
def det(a):
"""
Compute the determinant of an array.
Parameters
----------
a : (..., M, M) array_like
Input array to compute determinants for.
Returns
-------
det : (...) array_like
Determinant of `a`.
See Also
--------
    slogdet : Another way to represent the determinant, more suitable
for large matrices where underflow/overflow may occur.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0
Computing determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> np.linalg.det(a)
array([-2., -3., -8.])
"""
a = asarray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = _umath_linalg.det(a, signature=signature)
if isscalar(r):
r = r.astype(result_t)
else:
r = r.astype(result_t, copy=False)
return r
# Linear Least Squares
def lstsq(a, b, rcond=-1):
"""
Return the least-squares solution to a linear matrix equation.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
be under-, well-, or over- determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : (M, N) array_like
"Coefficient" matrix.
b : {(M,), (M, K)} array_like
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
Singular values are set to zero if they are smaller than `rcond`
times the largest singular value of `a`.
Returns
-------
x : {(N,), (N, K)} ndarray
Least-squares solution. If `b` is two-dimensional,
the solutions are in the `K` columns of `x`.
residuals : {(), (1,), (K,)} ndarray
Sums of residuals; squared Euclidean 2-norm for each column in
``b - a*x``.
If the rank of `a` is < N or M <= N, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : (min(M, N),) ndarray
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y)[0]
    >>> print(m, c)
1.0 -0.95
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> plt.legend()
>>> plt.show()
"""
import math
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = len(b.shape) == 1
if is_1d:
b = b[:, newaxis]
_assertRank2(a, b)
m = a.shape[0]
n = a.shape[1]
n_rhs = b.shape[1]
ldb = max(n, m)
if m != b.shape[0]:
raise LinAlgError('Incompatible dimensions')
t, result_t = _commonType(a, b)
result_real_t = _realType(result_t)
real_t = _linalgRealType(t)
bstar = zeros((ldb, n_rhs), t)
bstar[:b.shape[0], :n_rhs] = b.copy()
a, bstar = _fastCopyAndTranspose(t, a, bstar)
a, bstar = _to_native_byte_order(a, bstar)
s = zeros((min(m, n),), real_t)
nlvl = max( 0, int( math.log( float(min(m, n))/2. ) ) + 1 )
iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zgelsd
lwork = 1
rwork = zeros((lwork,), real_t)
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, rwork, iwork, 0)
lwork = int(abs(work[0]))
rwork = zeros((lwork,), real_t)
a_real = zeros((m, n), real_t)
bstar_real = zeros((ldb, n_rhs,), real_t)
results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m,
bstar_real, ldb, s, rcond,
0, rwork, -1, iwork, 0)
lrwork = int(rwork[0])
work = zeros((lwork,), t)
rwork = zeros((lrwork,), real_t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, rwork, iwork, 0)
else:
lapack_routine = lapack_lite.dgelsd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, iwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, iwork, 0)
if results['info'] > 0:
raise LinAlgError('SVD did not converge in Linear Least Squares')
resids = array([], result_real_t)
if is_1d:
x = array(ravel(bstar)[:n], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = array([sum(abs(ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
resids = array([sum((ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
else:
resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
st = s[:min(n, m)].astype(result_real_t, copy=True)
return wrap(x), wrap(resids), results['rank'], st
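# Illustrative-only sketch (not part of the library; the helper name is ours): the
# residuals return value is non-empty only when `a` has full column rank and more
# rows than columns, as documented above; assumes numpy is importable as np
# (newer releases may warn about the rcond default).
def _lstsq_residuals_demo():
    import numpy as np
    A = np.vstack([np.arange(4.), np.ones(4)]).T   # 4x2, full column rank
    y = np.array([-1., 0.2, 0.9, 2.1])
    x, resids, rank, s = np.linalg.lstsq(A, y)
    assert rank == 2 and resids.shape == (1,)      # 1-D b gives a (1,)-shaped residual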
def _multi_svd_norm(x, row_axis, col_axis, op):
"""Compute a function of the singular values of the 2-D matrices in `x`.
This is a private utility function used by numpy.linalg.norm().
Parameters
----------
x : ndarray
row_axis, col_axis : int
The axes of `x` that hold the 2-D matrices.
op : callable
This should be either numpy.amin or numpy.amax or numpy.sum.
Returns
-------
result : float or ndarray
        If `x` is 2-D, the return value is a float.
Otherwise, it is an array with ``x.ndim - 2`` dimensions.
The return values are either the minimum or maximum or sum of the
singular values of the matrices, depending on whether `op`
is `numpy.amin` or `numpy.amax` or `numpy.sum`.
"""
if row_axis > col_axis:
row_axis -= 1
y = rollaxis(rollaxis(x, col_axis, x.ndim), row_axis, -1)
result = op(svd(y, compute_uv=0), axis=-1)
return result
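# Illustrative-only sketch (not part of the library; the helper name is ours): the
# nuclear norm handled by norm(..., 'nuc') below is _multi_svd_norm with op=sum,
# i.e. the sum of the singular values; assumes a numpy that supports 'nuc' (1.10+).
def _nuclear_norm_demo():
    import numpy as np
    a = np.arange(6.).reshape(2, 3)
    s = np.linalg.svd(a, compute_uv=False)
    assert np.allclose(np.linalg.norm(a, 'nuc'), s.sum())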
def norm(x, ord=None, axis=None, keepdims=False):
"""
Matrix or vector norm.
This function is able to return one of eight different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : array_like
Input array. If `axis` is None, `x` must be 1-D or 2-D.
ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object.
axis : {int, 2-tuple of ints, None}, optional
If `axis` is an integer, it specifies the axis of `x` along which to
compute the vector norms. If `axis` is a 2-tuple, it specifies the
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
is 1-D) or a matrix norm (when `x` is 2-D) is returned.
keepdims : bool, optional
If this is set to True, the axes which are normed over are left in the
result as dimensions with size one. With this option the result will
broadcast correctly against the original `x`.
.. versionadded:: 1.10.0
Returns
-------
n : float or ndarray
Norm of the matrix or vector(s).
Notes
-----
For values of ``ord <= 0``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
'nuc' nuclear norm --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
The nuclear norm is the sum of the singular values.
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.norm(a)
7.745966692414834
>>> LA.norm(b)
7.745966692414834
>>> LA.norm(b, 'fro')
7.745966692414834
>>> LA.norm(a, np.inf)
4
>>> LA.norm(b, np.inf)
9
>>> LA.norm(a, -np.inf)
0
>>> LA.norm(b, -np.inf)
2
>>> LA.norm(a, 1)
20
>>> LA.norm(b, 1)
7
>>> LA.norm(a, -1)
-4.6566128774142013e-010
>>> LA.norm(b, -1)
6
>>> LA.norm(a, 2)
7.745966692414834
>>> LA.norm(b, 2)
7.3484692283495345
>>> LA.norm(a, -2)
nan
>>> LA.norm(b, -2)
1.8570331885190563e-016
>>> LA.norm(a, 3)
5.8480354764257312
>>> LA.norm(a, -3)
nan
Using the `axis` argument to compute vector norms:
>>> c = np.array([[ 1, 2, 3],
... [-1, 1, 4]])
>>> LA.norm(c, axis=0)
array([ 1.41421356, 2.23606798, 5. ])
>>> LA.norm(c, axis=1)
array([ 3.74165739, 4.24264069])
>>> LA.norm(c, ord=1, axis=1)
array([6, 6])
Using the `axis` argument to compute matrix norms:
>>> m = np.arange(8).reshape(2,2,2)
>>> LA.norm(m, axis=(1,2))
array([ 3.74165739, 11.22497216])
>>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
(3.7416573867739413, 11.224972160321824)
"""
x = asarray(x)
# Immediately handle some default, simple, fast, and common cases.
if axis is None:
ndim = x.ndim
if ((ord is None) or
(ord in ('f', 'fro') and ndim == 2) or
(ord == 2 and ndim == 1)):
x = x.ravel(order='K')
if isComplexType(x.dtype.type):
sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)
else:
sqnorm = dot(x, x)
ret = sqrt(sqnorm)
if keepdims:
ret = ret.reshape(ndim*[1])
return ret
# Normalize the `axis` argument to a tuple.
nd = x.ndim
if axis is None:
axis = tuple(range(nd))
elif not isinstance(axis, tuple):
try:
axis = int(axis)
except:
raise TypeError("'axis' must be None, an integer or a tuple of integers")
axis = (axis,)
if len(axis) == 1:
if ord == Inf:
return abs(x).max(axis=axis, keepdims=keepdims)
elif ord == -Inf:
return abs(x).min(axis=axis, keepdims=keepdims)
elif ord == 0:
# Zero norm
return (x != 0).sum(axis=axis, keepdims=keepdims)
elif ord == 1:
# special case for speedup
return add.reduce(abs(x), axis=axis, keepdims=keepdims)
elif ord is None or ord == 2:
# special case for speedup
s = (x.conj() * x).real
return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))
else:
try:
ord + 1
except TypeError:
raise ValueError("Invalid norm order for vectors.")
if x.dtype.type is longdouble:
# Convert to a float type, so integer arrays give
# float results. Don't apply asfarray to longdouble arrays,
# because it will downcast to float64.
absx = abs(x)
else:
absx = x if isComplexType(x.dtype.type) else asfarray(x)
if absx.dtype is x.dtype:
absx = abs(absx)
else:
# if the type changed, we can safely overwrite absx
abs(absx, out=absx)
absx **= ord
return add.reduce(absx, axis=axis, keepdims=keepdims) ** (1.0 / ord)
elif len(axis) == 2:
row_axis, col_axis = axis
if row_axis < 0:
row_axis += nd
if col_axis < 0:
col_axis += nd
if not (0 <= row_axis < nd and 0 <= col_axis < nd):
raise ValueError('Invalid axis %r for an array with shape %r' %
(axis, x.shape))
if row_axis == col_axis:
raise ValueError('Duplicate axes given.')
if ord == 2:
ret = _multi_svd_norm(x, row_axis, col_axis, amax)
elif ord == -2:
ret = _multi_svd_norm(x, row_axis, col_axis, amin)
elif ord == 1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
elif ord == Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
elif ord == -1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
elif ord == -Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
elif ord in [None, 'fro', 'f']:
ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))
elif ord == 'nuc':
ret = _multi_svd_norm(x, row_axis, col_axis, sum)
else:
raise ValueError("Invalid norm order for matrices.")
if keepdims:
ret_shape = list(x.shape)
ret_shape[axis[0]] = 1
ret_shape[axis[1]] = 1
ret = ret.reshape(ret_shape)
return ret
else:
raise ValueError("Improper number of dimensions to norm.")
# multi_dot
def multi_dot(arrays):
"""
Compute the dot product of two or more arrays in a single function call,
while automatically selecting the fastest evaluation order.
`multi_dot` chains `numpy.dot` and uses optimal parenthesization
of the matrices [1]_ [2]_. Depending on the shapes of the matrices,
this can speed up the multiplication a lot.
If the first argument is 1-D it is treated as a row vector.
If the last argument is 1-D it is treated as a column vector.
The other arguments must be 2-D.
Think of `multi_dot` as::
def multi_dot(arrays): return functools.reduce(np.dot, arrays)
Parameters
----------
arrays : sequence of array_like
If the first argument is 1-D it is treated as row vector.
If the last argument is 1-D it is treated as column vector.
The other arguments must be 2-D.
Returns
-------
output : ndarray
Returns the dot product of the supplied arrays.
See Also
--------
dot : dot multiplication with two arguments.
References
----------
.. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378
.. [2] http://en.wikipedia.org/wiki/Matrix_chain_multiplication
Examples
--------
`multi_dot` allows you to write::
>>> from numpy.linalg import multi_dot
>>> # Prepare some data
    >>> A = np.random.random((10000, 100))
    >>> B = np.random.random((100, 1000))
    >>> C = np.random.random((1000, 5))
    >>> D = np.random.random((5, 333))
>>> # the actual dot multiplication
>>> multi_dot([A, B, C, D])
instead of::
>>> np.dot(np.dot(np.dot(A, B), C), D)
>>> # or
>>> A.dot(B).dot(C).dot(D)
Example: multiplication costs of different parenthesizations
------------------------------------------------------------
The cost for a matrix multiplication can be calculated with the
following function::
def cost(A, B): return A.shape[0] * A.shape[1] * B.shape[1]
Let's assume we have three matrices
    :math:`A_{10x100}, B_{100x5}, C_{5x50}`.
The costs for the two different parenthesizations are as follows::
cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 7500
cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000
"""
n = len(arrays)
# optimization only makes sense for len(arrays) > 2
if n < 2:
raise ValueError("Expecting at least two arrays.")
elif n == 2:
return dot(arrays[0], arrays[1])
arrays = [asanyarray(a) for a in arrays]
# save original ndim to reshape the result array into the proper form later
ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim
# Explicitly convert vectors to 2D arrays to keep the logic of the internal
# _multi_dot_* functions as simple as possible.
if arrays[0].ndim == 1:
arrays[0] = atleast_2d(arrays[0])
if arrays[-1].ndim == 1:
arrays[-1] = atleast_2d(arrays[-1]).T
_assertRank2(*arrays)
# _multi_dot_three is much faster than _multi_dot_matrix_chain_order
if n == 3:
result = _multi_dot_three(arrays[0], arrays[1], arrays[2])
else:
order = _multi_dot_matrix_chain_order(arrays)
result = _multi_dot(arrays, order, 0, n - 1)
# return proper shape
if ndim_first == 1 and ndim_last == 1:
return result[0, 0] # scalar
elif ndim_first == 1 or ndim_last == 1:
return result.ravel() # 1-D
else:
return result
def _multi_dot_three(A, B, C):
"""
Find the best order for three arrays and do the multiplication.
For three arguments `_multi_dot_three` is approximately 15 times faster
than `_multi_dot_matrix_chain_order`
"""
# cost1 = cost((AB)C)
cost1 = (A.shape[0] * A.shape[1] * B.shape[1] + # (AB)
A.shape[0] * B.shape[1] * C.shape[1]) # (--)C
    # cost2 = cost(A(BC))
cost2 = (B.shape[0] * B.shape[1] * C.shape[1] + # (BC)
A.shape[0] * A.shape[1] * C.shape[1]) # A(--)
if cost1 < cost2:
return dot(dot(A, B), C)
else:
return dot(A, dot(B, C))
def _multi_dot_matrix_chain_order(arrays, return_costs=False):
"""
    Return a np.array that encodes the optimal order of multiplications.
The optimal order array is then used by `_multi_dot()` to do the
multiplication.
Also return the cost matrix if `return_costs` is `True`
The implementation CLOSELY follows Cormen, "Introduction to Algorithms",
Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices.
cost[i, j] = min([
cost[prefix] + cost[suffix] + cost_mult(prefix, suffix)
for k in range(i, j)])
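
    Illustration (hand-computed, using the same hypothetical shapes as the
    module example ``A_{10x100}, B_{100x5}, C_{5x50}``, i.e.
    ``p = [10, 100, 5, 50]``)::

        m[0, 1] = 10*100*5 = 5000         # cost of (AB)
        m[1, 2] = 100*5*50 = 25000        # cost of (BC)
        m[0, 2] = min(25000 + 10*100*50,  # split k=0: A(BC) -> 75000
                      5000  + 10*5*50)    # split k=1: (AB)C -> 7500
                = 7500, with s[0, 2] = 1

    so the returned order encodes the cheaper parenthesization (AB)C.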
"""
n = len(arrays)
# p stores the dimensions of the matrices
# Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]
p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]
# m is a matrix of costs of the subproblems
# m[i,j]: min number of scalar multiplications needed to compute A_{i..j}
m = zeros((n, n), dtype=double)
# s is the actual ordering
# s[i, j] is the value of k at which we split the product A_i..A_j
s = empty((n, n), dtype=intp)
for l in range(1, n):
for i in range(n - l):
j = i + l
m[i, j] = Inf
for k in range(i, j):
q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1]
if q < m[i, j]:
m[i, j] = q
s[i, j] = k # Note that Cormen uses 1-based index
return (s, m) if return_costs else s
def _multi_dot(arrays, order, i, j):
"""Actually do the multiplication with the given order."""
if i == j:
return arrays[i]
else:
return dot(_multi_dot(arrays, order, i, order[i, j]),
_multi_dot(arrays, order, order[i, j] + 1, j))
| bsd-3-clause |
yonglehou/scikit-learn | sklearn/setup.py | 225 | 2856 | import os
from os.path import join
import warnings
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
import numpy
libraries = []
if os.name == 'posix':
libraries.append('m')
config = Configuration('sklearn', parent_package, top_path)
config.add_subpackage('__check_build')
config.add_subpackage('svm')
config.add_subpackage('datasets')
config.add_subpackage('datasets/tests')
config.add_subpackage('feature_extraction')
config.add_subpackage('feature_extraction/tests')
config.add_subpackage('cluster')
config.add_subpackage('cluster/tests')
config.add_subpackage('covariance')
config.add_subpackage('covariance/tests')
config.add_subpackage('cross_decomposition')
config.add_subpackage('decomposition')
config.add_subpackage('decomposition/tests')
config.add_subpackage("ensemble")
config.add_subpackage("ensemble/tests")
config.add_subpackage('feature_selection')
config.add_subpackage('feature_selection/tests')
config.add_subpackage('utils')
config.add_subpackage('utils/tests')
config.add_subpackage('externals')
config.add_subpackage('mixture')
config.add_subpackage('mixture/tests')
config.add_subpackage('gaussian_process')
config.add_subpackage('gaussian_process/tests')
config.add_subpackage('neighbors')
config.add_subpackage('neural_network')
config.add_subpackage('preprocessing')
config.add_subpackage('manifold')
config.add_subpackage('metrics')
config.add_subpackage('semi_supervised')
config.add_subpackage("tree")
config.add_subpackage("tree/tests")
config.add_subpackage('metrics/tests')
config.add_subpackage('metrics/cluster')
config.add_subpackage('metrics/cluster/tests')
# add cython extension module for isotonic regression
config.add_extension(
'_isotonic',
sources=['_isotonic.c'],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
    # some libs need cblas; a Fortran-compiled BLAS will not be sufficient
blas_info = get_info('blas_opt', 0)
if (not blas_info) or (
('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])):
config.add_library('cblas',
sources=[join('src', 'cblas', '*.c')])
warnings.warn(BlasNotFoundError.__doc__)
    # the following packages depend on cblas, so they have to be built
# after the above.
config.add_subpackage('linear_model')
config.add_subpackage('utils')
# add the test directory
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
darcamo/sir_colormaps | simulate_metis_scenario.py | 1 | 18421 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# NOTE: You need pyphysim to run this file.
# https://github.com/darcamo/pyphysim
"""
Simulator for the SINRs of a dense indoor scenario.
The scenario is a very simplified version of the Test Case 2 from the METIS
project. Only one floor of one building is simulated and only the indoor
access points are considered.
"""
# xxxxxxxxxx Add the parent folder to the python path. xxxxxxxxxxxxxxxxxxxx
import sys
import os
try:
parent_dir = os.path.split(os.path.abspath(os.path.dirname(__file__)))[0]
sys.path.append(parent_dir)
except NameError:
sys.path.append('../')
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxx Import Statements xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
import math
import numpy as np
from matplotlib import pyplot as plt
# import matplotlib as mpl
from pyphysim.util.conversion import dB2Linear, dBm2Linear, linear2dB
# from pyphysim.cell import shapes
from pyphysim.comm import pathloss
from pyphysim.comm.channels import calc_thermal_noise_power_dBm
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
def calc_room_positions_square(side_length, num_rooms):
"""
Calculate the central positions of the square rooms.
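
    Examples
    --------
    A small hand-checked sketch (2 x 2 grid of 10 m rooms, centered at the
    origin; real and imaginary parts are the x and y coordinates, and the
    exact array formatting may differ slightly)::

        calc_room_positions_square(10., 4)
        # -> array([-5.+5.j,  5.+5.j, -5.-5.j,  5.-5.j])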
"""
sqrt_num_rooms = int(math.sqrt(num_rooms))
if sqrt_num_rooms ** 2 != num_rooms:
raise ValueError("num_rooms must be a perfect square number")
int_positions = np.unravel_index(np.arange(num_rooms), (sqrt_num_rooms,
sqrt_num_rooms))
room_positions = (side_length * (int_positions[1] + 1j *
int_positions[0][::-1] - 0.5-0.5j))
# Shift the room positions so that the origin becomes the center of all
# rooms
shift = side_length * (sqrt_num_rooms - 1) // 2
room_positions = (room_positions
- shift - 1j * shift
+ side_length / 2. + 1j * side_length / 2.)
return room_positions
def plot_all_rooms(all_rooms, ax=None):
"""
Plot all Rectangle shapes in `all_rooms` using the `ax` axis.
Parameters
----------
ax : matplotlib axis.
The axis where the rooms will be plotted.
all_rooms : iterable of shape.Rectangle objects
The rooms to be plotted.
Returns
-------
ax : Matplotlib axes
The axes used to plot.
"""
standalone = False
if ax is None:
_, ax = plt.subplots(figsize=(8, 6))
standalone = True
for room in all_rooms:
room.plot(ax)
if standalone is True:
# Do some extra stuff like setting the plot axis limits.
# First we get all vertices of all rectangles.
all_vertices = np.vstack([r.vertices for r in all_rooms])
xmin = all_vertices.real.min()
xmax = all_vertices.real.max()
        ymin = all_vertices.imag.min()
        ymax = all_vertices.imag.max()
ax.set_ylim([ymin, ymax])
ax.set_xlim([xmin, xmax])
ax.set_xlabel("Position X coordinate")
ax.set_ylabel("Position Y coordinate")
ax.set_title("Plot of all Rooms")
return ax
def calc_num_walls(side_length, room_positions, ap_positions):
"""
Calculate the number of walls between each room to each AP.
This is used to calculated the wall losses as well as the indoor
pathloss.
Parameters
----------
side_length : float
The side length of the square room.
room_positions : 2D complex numpy array
The positions of all rooms in the grid.
ap_positions : 1D complex numpy array
The positions of access points in the grid.
Returns
-------
num_walls : 2D numpy array of ints
The number of walls from each room to access point.
"""
all_positions_diffs = (room_positions.reshape(-1, 1)
- 1.0001*ap_positions.reshape(1, -1))
num_walls \
= np.round(
np.absolute(np.real(all_positions_diffs / side_length)) +
np.absolute(np.imag(all_positions_diffs / side_length))
).astype(int)
return num_walls
def prepare_sinr_array_for_color_plot(
sinr_array, num_rooms_per_side, num_discrete_positions_per_room):
"""
Parameters
----------
    sinr_array : 4D numpy array
        The SINR values, with shape (num_rooms_per_side, num_rooms_per_side,
        num_discrete_positions_per_room, num_discrete_positions_per_room).
    num_rooms_per_side : int
        Number of rooms along one side of the square grid.
    num_discrete_positions_per_room : int
        Number of discrete user positions per room (along one dimension).

    Returns
    -------
    2D numpy array
        The same SINR values rearranged into a square matrix with side
        num_rooms_per_side * num_discrete_positions_per_room, suitable
        for a color (heatmap) plot.
"""
out = np.swapaxes(sinr_array, 1, 2).reshape(
[num_rooms_per_side * num_discrete_positions_per_room,
num_rooms_per_side * num_discrete_positions_per_room],
order='C')
return out
def get_ap_positions(room_positions, decimation=1):
"""
Get the array of AccessPoint positions for the desired decimation and
room_positions.
Each access point is placed in the center of the room where it is
located. The value of `decimation` controls the frequency of APs. A
value of 1 means one AP in each room. A value of 2 means one AP each 2
rooms and so on.
The valid decimation values are 1, 2, 4 and 9. Any other value will
raise an exception.
Parameters
----------
room_positions : 2D numpy array with shape (n, n)
The positions of each room.
decimation : int
The decimation (in number of room) of the APs.
Returns
-------
    ap_positions : 1D complex numpy array
        The positions of the selected access points (one entry per room
        that receives an AP).
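
    Notes
    -----
    A rough sketch (illustrative only) of the AP placement on a hypothetical
    6x6 grid of rooms, where ``x`` marks a room that receives an AP and
    ``.`` a room that does not::

        decimation=1   decimation=2   decimation=4   decimation=9
        x x x x x x    . x . x . x    . . . . . .    . . . . . .
        x x x x x x    x . x . x .    . x . x . x    . x . . x .
        x x x x x x    . x . x . x    . . . . . .    . . . . . .
        x x x x x x    x . x . x .    . x . x . x    . . . . . .
        x x x x x x    . x . x . x    . . . . . .    . x . . x .
        x x x x x x    x . x . x .    . x . x . x    . . . . . .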
"""
mask = np.zeros(room_positions.shape, dtype=bool)
if decimation == 1:
mask[:, :] = True
elif decimation == 2:
mask[1::2, ::2] = True
mask[::2, 1::2] = True
elif decimation == 4:
mask[1::2, 1::2] = True
elif decimation == 9:
mask[1::3, 1::3] = True
else:
raise ValueError('Invalid decimation value: {0}'.format(decimation))
ap_positions = room_positions[mask]
return ap_positions.flatten()
def simulate_for_a_given_ap_assoc(
pl, ap_assoc, wall_losses_dB, Pt, noise_var):
"""
Simulate and return the SINR for a given path loss and AP associations.
This is an internal function called inside
`perform_simulation_SINR_heatmap`
Parameters
----------
pl : 5D numpy float array
The path loss (in LINEAR SCALE) from each discrete position in each
room to each access point. Dimension: (n, n, d, d, a) where 'n' is
the number of rooms per dimension, 'd' is the number of discrete
        positions in one room (per dimension) and 'a' is the number of
access points.
ap_assoc : 4D numpy int array
The index of the access point that each discrete point in each room
is associated with. Dimension: (n, n, d, d)
wall_losses_dB : 5D numpy int array
The wall losses (in dB) from each discrete user in each room to
each access point. Dimension: (n, n, d, d, a)
Pt : float
Transmit power.
noise_var : float
Noise variance (power)
Returns
-------
sinr_array_dB : 4D numpy array
The SINR (in dB) of each discrete point of each room.
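
    Notes
    -----
    For each user associated with access point ``k`` the SINR is computed
    in linear scale (before the final conversion to dB) as::

        SINR = (Pt * WL[k] * PL[k])
               / (sum_{j != k} Pt * WL[j] * PL[j] + noise_var)

    where ``WL[j]`` and ``PL[j]`` are the linear-scale wall loss and path
    loss from that user to access point ``j``.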
"""
wall_losses = dB2Linear(-wall_losses_dB)
# Number of APs is the last dimension in the path loss array
num_aps = pl.shape[-1]
# Output variable
sinr_array = np.empty(ap_assoc.shape, dtype=float)
for ap_idx in range(num_aps):
# Mask of the users associated with the current access point
mask = (ap_assoc == ap_idx)
# # Mask of the users NOT associated with the current access point
# mask_n = np.logical_not(mask)
# Mask with all APs except the current one (that is, the
# interfering APs)
mask_i_aps = np.arange(num_aps) != ap_idx
# Each element in desired_power is the desired power of one user
# associated with the current access point
desired_power = Pt * wall_losses[mask, ap_idx] * pl[mask, ap_idx]
undesired_power = np.sum(
Pt * wall_losses[mask][:, mask_i_aps] * pl[mask][:, mask_i_aps],
axis=-1)
sinr_array[mask] = (desired_power / (undesired_power + noise_var))
return linear2dB(sinr_array)
def perform_simulation_SINR_heatmap(scenario_params, # pylint: disable=R0914
power_params):
"""
Perform the simulation.
"""
# xxxxxxxxxx Simulation Scenario Configuration xxxxxxxxxxxxxxxxxxxxxxxx
# The size of the side of each square room
side_length = scenario_params['side_length']
    # How much (in dB) is lost for each wall the signal has to pass
single_wall_loss_dB = scenario_params['single_wall_loss_dB']
# Square of 12 x 12 square rooms
num_rooms_per_side = scenario_params['num_rooms_per_side']
# Total number of rooms in the grid
num_rooms = num_rooms_per_side ** 2
# 1 means 1 ap every room. 2 means 1 ap every 2 rooms and so on. Valid
# values are: 1, 2, 4 and 9.
ap_decimation = scenario_params['ap_decimation']
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxx Simulation Power Configuration xxxxxxxxxxxxxxxxxxxxxxxxxxx
# Transmit power of each access point
Pt_dBm = power_params['Pt_dBm']
noise_power_dBm = power_params['noise_power_dBm']
Pt = dBm2Linear(Pt_dBm) # 20 dBm transmit power
noise_var = dBm2Linear(noise_power_dBm)
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxx Discretization of the possible positions xxxxxxxxxxxxxxxxx
num_discrete_positions_per_room = 15 # Number of discrete positions
step = 1. / (num_discrete_positions_per_room)
aux = np.linspace(
-(1. - step), (1. - step), num_discrete_positions_per_room)
aux = np.meshgrid(aux, aux, indexing='ij')
user_relative_positions = aux[1] + 1j * aux[0][::-1]
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxx Calculate the positions of all rooms xxxxxxxxxxxxxxxxxxxxx
room_positions = calc_room_positions_square(side_length, num_rooms)
room_positions.shape = (num_rooms_per_side, num_rooms_per_side)
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxx Create the path loss object xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
pl_3gpp_obj = pathloss.PathLoss3GPP1()
pl_free_space_obj = pathloss.PathLossFreeSpace()
pl_3gpp_obj.handle_small_distances_bool = True
pl_free_space_obj.handle_small_distances_bool = True
pl_metis_ps7_obj = pathloss.PathLossMetisPS7()
pl_metis_ps7_obj.handle_small_distances_bool = True
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxx Add one user in each discrete position of each room xxxxxxxxxx
user_relative_positions2 = user_relative_positions * side_length / 2.
user_positions = (room_positions[:, :, np.newaxis, np.newaxis] +
user_relative_positions2[np.newaxis, np.newaxis, :, :])
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxx AP Allocation xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    # Place the APs according to the chosen decimation (1 means one AP per room)
ap_positions = get_ap_positions(room_positions, ap_decimation)
num_aps = ap_positions.size
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxx Calculate distances: each user to each AP xxxxxxxxxxxxxxxx
    # Dimension: (room_row, room_col, user_row, user_col, num_APs)
dists_m = np.abs(
user_positions[:, :, :, :, np.newaxis]
- ap_positions.reshape([1, 1, 1, 1, -1]))
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxx Calculate AP association xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# Determine with which AP each user is associated with.
# Each user will associate with the CLOSEST access point.
ap_assoc = np.argmin(dists_m, axis=-1)
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxx Calculate wall losses xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    # We want to calculate the number of walls from each room to the rooms
# which have an access point.
# Dimension is (num_rooms, num_aps)
num_walls = calc_num_walls(side_length, room_positions, ap_positions)
# Reshape it to (num_rooms_per_side, num_rooms_per_side, 1, 1, num_aps)
num_walls_extended = num_walls.reshape(
[num_rooms_per_side, num_rooms_per_side, 1, 1, num_aps])
# And finally broadcast the (1, 1) dimensions to the number of users
# per room. This will make num_walls_extended have the same dimension
# as dists_m.
num_walls_extended, _ = np.broadcast_arrays(num_walls_extended, dists_m)
wall_losses_dB = num_walls_extended * single_wall_loss_dB
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxx Calculate the path losses xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    # The METIS PS7 path loss model requires distance values in meters,
    # while the others expect kilometers. All distances were calculated in
    # meters and, therefore, we divide them by 1000 for the 3GPP and free
    # space models.
pl_3gpp = pl_3gpp_obj.calc_path_loss(dists_m/1000.)
pl_free_space = pl_free_space_obj.calc_path_loss(dists_m/1000.)
pl_nothing = np.ones(
[num_rooms_per_side,
num_rooms_per_side,
num_discrete_positions_per_room,
num_discrete_positions_per_room, num_aps],
dtype=float)
# We need to know the number of walls the signal must pass to reach the
# receiver to calculate the path loss for the METIS PS7 model.
pl_metis_ps7 = pl_metis_ps7_obj.calc_path_loss(
dists_m,
num_walls=num_walls_extended)
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxx Calculate the SINRs for each path loss model xxxxxxxxxxxxx
sinr_array_pl_nothing_dB = simulate_for_a_given_ap_assoc(
pl_nothing, ap_assoc, wall_losses_dB, Pt, noise_var)
sinr_array_pl_3gpp_dB = simulate_for_a_given_ap_assoc(
pl_3gpp, ap_assoc, wall_losses_dB, Pt, noise_var)
sinr_array_pl_free_space_dB = simulate_for_a_given_ap_assoc(
pl_free_space, ap_assoc, wall_losses_dB, Pt, noise_var)
sinr_array_pl_metis_ps7_dB = simulate_for_a_given_ap_assoc(
pl_metis_ps7, ap_assoc, wall_losses_dB, Pt, noise_var)
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
out = (sinr_array_pl_nothing_dB,
sinr_array_pl_3gpp_dB,
sinr_array_pl_free_space_dB,
sinr_array_pl_metis_ps7_dB)
return out
if __name__ == '__main__':
scenario_params = {
'side_length': 10., # 10 meters side length
'single_wall_loss_dB': 5.,
'num_rooms_per_side': 12,
'ap_decimation': 1}
power_params = {
'Pt_dBm': 20., # 20 dBm transmit power
# Noise power for 25°C for a bandwidth of 5 MHz -> -106.87 dBm
'noise_power_dBm': calc_thermal_noise_power_dBm(25, 5e6)
}
# xxxxxxxxxx AP Decimation = 1 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
scenario_params['ap_decimation'] = 1
out1 = perform_simulation_SINR_heatmap(scenario_params, power_params)
(sinr_array_pl_nothing_dB_1,
sinr_array_pl_3gpp_dB_1,
sinr_array_pl_free_space_dB_1,
sinr_array_pl_metis_ps7_dB_1) = out1
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxx AP Decimation = 2 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
scenario_params['ap_decimation'] = 2
out2 = perform_simulation_SINR_heatmap(scenario_params, power_params)
(sinr_array_pl_nothing_dB_2,
sinr_array_pl_3gpp_dB_2,
sinr_array_pl_free_space_dB_2,
sinr_array_pl_metis_ps7_dB_2) = out2
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxx AP Decimation = 4 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
scenario_params['ap_decimation'] = 4
out4 = perform_simulation_SINR_heatmap(scenario_params, power_params)
(sinr_array_pl_nothing_dB_4,
sinr_array_pl_3gpp_dB_4,
sinr_array_pl_free_space_dB_4,
sinr_array_pl_metis_ps7_dB_4) = out4
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxx AP Decimation = 9 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
scenario_params['ap_decimation'] = 9
out9 = perform_simulation_SINR_heatmap(scenario_params, power_params)
(sinr_array_pl_nothing_dB_9,
sinr_array_pl_3gpp_dB_9,
sinr_array_pl_free_space_dB_9,
sinr_array_pl_metis_ps7_dB_9) = out9
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
min_sinr_ap_dec_1 = sinr_array_pl_metis_ps7_dB_1[3:-3, 3:-3, :, :].min()
mean_sinr_ap_dec_1 = sinr_array_pl_metis_ps7_dB_1[3:-3, 3:-3, :, :].mean()
max_sinr_ap_dec_1 = sinr_array_pl_metis_ps7_dB_1[3:-3, 3:-3, :, :].max()
min_sinr_ap_dec_2 = sinr_array_pl_metis_ps7_dB_2[3:-3, 3:-3, :, :].min()
mean_sinr_ap_dec_2 = sinr_array_pl_metis_ps7_dB_2[3:-3, 3:-3, :, :].mean()
max_sinr_ap_dec_2 = sinr_array_pl_metis_ps7_dB_2[3:-3, 3:-3, :, :].max()
min_sinr_ap_dec_4 = sinr_array_pl_metis_ps7_dB_4[3:-3, 3:-3, :, :].min()
mean_sinr_ap_dec_4 = sinr_array_pl_metis_ps7_dB_4[3:-3, 3:-3, :, :].mean()
max_sinr_ap_dec_4 = sinr_array_pl_metis_ps7_dB_4[3:-3, 3:-3, :, :].max()
min_sinr_ap_dec_9 = sinr_array_pl_metis_ps7_dB_9[3:-3, 3:-3, :, :].min()
mean_sinr_ap_dec_9 = sinr_array_pl_metis_ps7_dB_9[3:-3, 3:-3, :, :].mean()
max_sinr_ap_dec_9 = sinr_array_pl_metis_ps7_dB_9[3:-3, 3:-3, :, :].max()
print ("Min (AP Dec. 1): {0:.3f} ".format(min_sinr_ap_dec_1))
print ("Mean (AP Dec. 1): {0:.3f} ".format(mean_sinr_ap_dec_1))
print ("Max (AP Dec. 1): {0:.3f} ".format(max_sinr_ap_dec_1))
print ("Min (AP Dec. 2): {0:.3f} ".format(min_sinr_ap_dec_2))
print ("Mean (AP Dec. 2): {0:.3f} ".format(mean_sinr_ap_dec_2))
print ("Max (AP Dec. 2): {0:.3f} ".format(max_sinr_ap_dec_2))
print ("Min (AP Dec. 4): {0:.3f} ".format(min_sinr_ap_dec_4))
print ("Mean (AP Dec. 4): {0:.3f} ".format(mean_sinr_ap_dec_4))
print ("Max (AP Dec. 4): {0:.3f} ".format(max_sinr_ap_dec_4))
print ("Min (AP Dec. 9): {0:.3f} ".format(min_sinr_ap_dec_9))
print ("Mean (AP Dec. 9): {0:.3f} ".format(mean_sinr_ap_dec_9))
print ("Max (AP Dec. 9): {0:.3f} ".format(max_sinr_ap_dec_9))
| cc0-1.0 |
TheChymera/pyxrand | pyxrand.py | 1 | 7876 | #!/usr/bin/env python
from __future__ import division
__author__ = 'Horea Christian'
from os import path, listdir
from scipy import ndimage
from scipy.misc import toimage
from skimage.util.shape import view_as_windows
from skimage.util.montage import montage2d
import numpy as np
import matplotlib.cm as cm
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
by_pixel = False # True if you want to shuffle by-pixel, False if you want to shuffle by cluster.
#~ experiment_path = '~/src/faceRT/img/px4/' # path where image files are located
subdir = 'pixShuffle_series/'
cell_size_step = 4 # in what steps should the cell size increase [px] ?
cell_size_minimum = 26 # what's the minimum cell size / start cell size [px] ?
cell_size_increments = 1 # how many pictures do you want ?
max_randomness = 12 # type maximal re-mapping radius -- ONLY RELEVANT FOR by_pixel == True
randomness_steps = 6 # type desired number of randomness steps (i.e. the number of output files) -- ONLY RELEVANT FOR by_pixel == True
column_tolerance = 6 # the columns are the first step in ROI selection. This setting accounts for a slightly fuzzy background
row_tolerance = 3 # the rows are the second step in ROI selection. This setting accounts for a slightly fuzzy background; it is extra small because for small clusters equal-color lines may occur in the face region
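# Illustrative example (hypothetical settings): with cell_size_minimum=26,
# cell_size_step=4 and cell_size_increments=3, the cluster-shuffled branch
# below would produce images with cell sizes of 26, 30 and 34 px, since
# cell_size = cell_size_minimum + cell_size_step * cell_increment.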
try:
experiment_path
except NameError:
localpath = path.dirname(path.realpath(__file__)) + '/' + subdir
else: localpath = path.expanduser(experiment_path)
input_folder = localpath
for pic in listdir(input_folder):
if by_pixel:
print(pic)
randomness_step = int(max_randomness / randomness_steps)
def randomization_funct(output_coords,rdness):
return (output_coords[0] + np.random.randint(-rdness*randomness_step, rdness*randomness_step+1, (1, 1)), output_coords[1] + np.random.randint(-rdness*randomness_step, rdness*randomness_step+1, (1, 1)))
im = mpimg.imread(input_folder+pic)
for rdness in np.arange(randomness_steps)+1:
im = ndimage.geometric_transform(im, randomization_funct, mode= 'nearest', extra_arguments=(rdness,))
			toimage(im, cmin=0, cmax=255).save(input_folder+path.splitext(pic)[0]+'_px'+str("%03d" % (rdness*randomness_step))+'rand.jpg') # use this instead of imsave to avoid rescaling to maximal dynamic range
print('Done!')
else:
print(pic)
for cell_increment in np.arange(cell_size_increments):
cell_size = cell_size_minimum+cell_size_step*cell_increment
im = mpimg.imread(input_folder+pic)
try:
height, width = np.shape(im)
except ValueError:
print('The script currently only supports grayscale images - this is probably RGB. It will not be processed to clusters.')
continue
slice_coordinates = np.zeros(2)
slices = np.zeros((cell_size, cell_size))
# calculate subimage to make sure that pixels exceeding the optimal slice are distributed equally along x and y
nonzero_y = np.shape([line for line in im if len(np.unique(line)) >= column_tolerance])[0] # counts the number of lines with more than background values
leadingzeros_y = 0
for y in im:
if len(np.unique(y)) < column_tolerance: # counts the number of lines with less than background values
leadingzeros_y +=1
else:
break
rest_y_d = np.floor((cell_size-(nonzero_y % cell_size)) / 2) # pixels surplus after cluster placement within ROI (d for down)
rest_y_u = np.ceil((cell_size-(nonzero_y % cell_size)) / 2)
sub_im = im[leadingzeros_y-rest_y_u:leadingzeros_y+nonzero_y+rest_y_d,:]
if leadingzeros_y+nonzero_y+rest_y_d > np.shape(im)[0]:
				print('The ROI of this picture is positioned too far down, so that the last cluster areas exceed the image border. This picture will not be processed to clusters.')
continue
if leadingzeros_y-rest_y_u <=0:
print('This picture has a bad background (above the ROI). It will not be processed to clusters.')
continue
# end subimage
sub_im_rows = np.reshape(sub_im, (-1, cell_size, np.shape(sub_im)[1]))
row_start_stop_cells = np.zeros((np.shape(sub_im_rows)[0],3)) # variable containing the x position where the first cell starts and the last cell ends
all_squares = np.zeros((1,cell_size,cell_size)) # first zeroes frame (created just for the vstack to work at the first iteration)
break_parentloop = False # variable for continuing if an exclusion criterion inside the nestled loops is met.
for row_number, sub_im_row in enumerate(sub_im_rows):
nonzero_x_row = np.shape([line for line in sub_im_row.T if len(np.unique(line)) >= row_tolerance])[0] # counts the number of lines with more than background values
leadingzeros_x_row = 0
for x in sub_im_row.T:
if len(np.unique(x)) < row_tolerance: # counts the number of lines with less than background values
leadingzeros_x_row +=1
else:
break
rest_x_r = np.floor((cell_size-(nonzero_x_row % cell_size)) / 2) # pixels surplus after cluster placement within ROI
rest_x_l = np.ceil((cell_size-(nonzero_x_row % cell_size)) / 2)
if leadingzeros_x_row-rest_x_l <=0:
print('This picture has a bad background (left of the ROI). It will not be processed to clusters.')
break_parentloop = True
break
else:
sub_row = sub_im_row[:,leadingzeros_x_row-rest_x_l:leadingzeros_x_row+nonzero_x_row+rest_x_r]
squares = view_as_windows(sub_row, (cell_size, cell_size))
cell_squares = squares[:,::cell_size][0]
all_squares = np.vstack((all_squares, cell_squares))
row_start_stop_cells[row_number, 0] = leadingzeros_x_row-rest_x_l # start pos
row_start_stop_cells[row_number, 1] = np.shape(im)[1]-(leadingzeros_x_row+nonzero_x_row+rest_x_r) # stop pos (calculated from far end)
row_start_stop_cells[row_number, 2] = np.shape(cell_squares)[0] # cells number
if row_start_stop_cells[row_number, 1] < 0:
						print('The ROI of this picture is positioned too far left, so that the last cluster areas exceed the image border. This picture will not be processed to clusters.')
break_parentloop = True
elif row_start_stop_cells[row_number, 0] < 0:
						print('The ROI of this picture is positioned too far right, so that the last cluster areas exceed the image border. This picture will not be processed to clusters.')
break_parentloop = True
if break_parentloop:
continue
all_squares = all_squares[1:] # remove first zeroes frame (created just for the vstack to work at the first iteration)
all_squares = np.random.permutation(all_squares)
pad_value = np.mean(im[:cell_size,:cell_size]).astype(int)
reconstructed_im = np.ones((leadingzeros_y-rest_y_u,np.shape(im)[1])) * pad_value
scrambled_image = np.zeros((1, np.shape(im)[1]))
for row_number, sub_im_row in enumerate(sub_im_rows):
shuffled_squares = montage2d(all_squares[:row_start_stop_cells[row_number, 2]], grid_shape=(1,row_start_stop_cells[row_number, 2]))
all_squares = all_squares[row_start_stop_cells[row_number, 2]:]
padded_row = np.pad(shuffled_squares, ((0,0),(row_start_stop_cells[row_number,0],row_start_stop_cells[row_number,1])), 'constant' ,constant_values=pad_value)
scrambled_image = np.vstack((scrambled_image, padded_row))
scrambled_image = scrambled_image[1:]
scrambled_image = np.pad(scrambled_image, ((leadingzeros_y-rest_y_u, np.shape(im)[0]-(leadingzeros_y+nonzero_y+rest_y_d)),(0,0)), 'constant' ,constant_values=pad_value)
#~ imgplot = plt.imshow(scrambled_image, cmap = cm.Greys_r, interpolation='nearest')
#~ plt.show()
			toimage(scrambled_image, cmin=0, cmax=255).save(input_folder+path.splitext(pic)[0]+'_cell'+str("%03d" % (cell_size))+'rand.jpg') # use this instead of imsave to avoid rescaling to maximal dynamic range
print('Done!')
| gpl-3.0 |
musically-ut/statsmodels | statsmodels/sandbox/examples/ex_mixed_lls_re.py | 34 | 5393 | # -*- coding: utf-8 -*-
"""Example using OneWayMixed
Created on Sat Dec 03 10:15:55 2011
Author: Josef Perktold
This example constructs a linear model with individual specific random
effects, and uses OneWayMixed to estimate it.
This is a variation on ex_mixed_lls_0.py. Here we only have a single
individual specific constant, that is just a random effect without exogenous
regressors.
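
As a sketch, the data-generating process used below is, for unit i and
observation t:

    Y_it = X_it * beta + Z_it * gamma_i + e_it

with gamma_i ~ N(gamma, 0.2**2 * I) and e_it ~ N(0, 0.1**2). Here nx = 0
fixed-effect regressors and nz = 1 random-effect regressor (a random
intercept), so the model reduces to Y_it = gamma_i + e_it.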
"""
import numpy as np
from statsmodels.sandbox.panel.mixed import OneWayMixed, Unit
examples = ['ex1']
if 'ex1' in examples:
#np.random.seed(54321)
np.random.seed(978326)
nsubj = 2000
units = []
nobs_i = 4 #number of observations per unit, changed below
nx = 0 #number fixed effects
nz = 1 ##number random effects
beta = np.ones(nx)
gamma = 0.5 * np.ones(nz) #mean of random effect
gamma[0] = 0
gamma_re_true = []
for i in range(nsubj):
#create data for one unit
#random effect/coefficient
gamma_re = gamma + 0.2 * np.random.standard_normal(nz)
#store true parameter for checking
gamma_re_true.append(gamma_re)
#for testing unbalanced case, let's change nobs per unit
if i > nsubj//4:
nobs_i = 6
#generate exogenous variables
X = np.random.standard_normal((nobs_i, nx))
Z = np.random.standard_normal((nobs_i, nz-1))
Z = np.column_stack((np.ones(nobs_i), Z))
noise = 0.1 * np.random.randn(nobs_i) #sig_e = 0.1
#generate endogenous variable
Y = np.dot(X, beta) + np.dot(Z, gamma_re) + noise
#add random effect design matrix also to fixed effects to
#capture the mean
#this seems to be necessary to force mean of RE to zero !?
#(It's not required for estimation but interpretation of random
#effects covariance matrix changes - still need to check details.
X = np.hstack((X,Z))
#create units and append to list
unit = Unit(Y, X, Z)
units.append(unit)
m = OneWayMixed(units)
import time
t0 = time.time()
m.initialize()
res = m.fit(maxiter=100, rtol=1.0e-5, params_rtol=1e-6, params_atol=1e-6)
t1 = time.time()
print('time for initialize and fit', t1-t0)
print('number of iterations', m.iterations)
#print dir(m)
#print vars(m)
print('\nestimates for fixed effects')
print(m.a)
print(m.params)
bfixed_cov = m.cov_fixed()
print('beta fixed standard errors')
print(np.sqrt(np.diag(bfixed_cov)))
print(m.bse)
b_re = m.params_random_units
print('RE mean:', b_re.mean(0))
print('RE columns std', b_re.std(0))
print('np.cov(b_re, rowvar=0), sample statistic')
print(np.cov(b_re, rowvar=0))
print('std of above')
#need atleast_1d or diag raises exception
print(np.sqrt(np.diag(np.atleast_1d(np.cov(b_re, rowvar=0)))))
print('m.cov_random()')
print(m.cov_random())
print('std of above')
print(res.std_random())
print(np.sqrt(np.diag(m.cov_random())))
print('\n(non)convergence of llf')
print(m.history['llf'][-4:])
print('convergence of parameters')
#print np.diff(np.vstack(m.history[-4:])[:,1:],axis=0)
print(np.diff(np.vstack(m.history['params'][-4:]),axis=0))
print('convergence of D')
print(np.diff(np.array(m.history['D'][-4:]), axis=0))
#zdotb = np.array([np.dot(unit.Z, unit.b) for unit in m.units])
zb = np.array([(unit.Z * unit.b[None,:]).sum(0) for unit in m.units])
'''if Z is not included in X:
>>> np.dot(b_re.T, b_re)/100
array([[ 0.03270611, -0.00916051],
[-0.00916051, 0.26432783]])
>>> m.cov_random()
array([[ 0.0348722 , -0.00909159],
[-0.00909159, 0.26846254]])
>>> #note cov_random doesn't subtract mean!
'''
print('\nchecking the random effects distribution and prediction')
gamma_re_true = np.array(gamma_re_true)
print('mean of random effect true', gamma_re_true.mean(0))
print('mean from fixed effects ', m.params[-2:])
print('mean of estimated RE ', b_re.mean(0))
print()
absmean_true = np.abs(gamma_re_true).mean(0)
mape = ((m.params[-2:] + b_re) / gamma_re_true - 1).mean(0)*100
mean_abs_perc = np.abs((m.params[-2:] + b_re) - gamma_re_true).mean(0) \
/ absmean_true*100
median_abs_perc = np.median(np.abs((m.params[-2:] + b_re) - gamma_re_true), 0) \
/ absmean_true*100
rmse_perc = ((m.params[-2:] + b_re) - gamma_re_true).std(0) \
/ absmean_true*100
print('mape ', mape)
print('mean_abs_perc ', mean_abs_perc)
print('median_abs_perc', median_abs_perc)
print('rmse_perc (std)', rmse_perc)
from numpy.testing import assert_almost_equal
#assert is for n_units=100 in original example
#I changed random number generation, so this won't work anymore
#assert_almost_equal(rmse_perc, [ 34.14783884, 11.6031684 ], decimal=8)
#now returns res
print('llf', res.llf) #based on MLE, does not include constant
print('tvalues', res.tvalues)
print('pvalues', res.pvalues)
print(res.t_test([1]))
print('test mean of both random effects variables is zero')
print(res.f_test([[1]]))
plots = res.plot_random_univariate(bins=50)
#fig = res.plot_scatter_pairs(0, 1) #no pairs
import matplotlib.pyplot as plt
plt.show()
| bsd-3-clause |
OFAI/hub-toolbox-python3 | hub_toolbox/distances.py | 1 | 13154 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This file is part of the HUB TOOLBOX available at
https://github.com/OFAI/hub-toolbox-python3/
The HUB TOOLBOX is licensed under the terms of the GNU GPLv3.
(c) 2011-2018, Dominik Schnitzer, Roman Feldbauer
Austrian Research Institute for Artificial Intelligence (OFAI)
Contact: <[email protected]>
"""
import ctypes
from multiprocessing import Pool, cpu_count, RawArray
import numpy as np
from scipy.spatial.distance import cdist, pdist, squareform
try: # for scikit-learn >= 0.18
from sklearn.model_selection import StratifiedShuffleSplit
except ImportError: # lower scikit-learn versions
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.metrics.pairwise import pairwise_distances
from hub_toolbox.io import check_vector_matrix_shape_fits_labels
from hub_toolbox.htlogging import ConsoleLogging
__all__ = ['cosine_distance', 'euclidean_distance',
'lp_norm', 'sample_distance']
def cosine_distance(X):
"""Calculate the cosine distance between all pairs of vectors in `X`."""
xn = np.sqrt(np.sum(X**2, 1))
Y = X / xn[:, np.newaxis]
del xn
D = 1. - Y.dot(Y.T)
del Y
D[D < 0] = 0
D = np.triu(D, 1) + np.triu(D, 1).T
return D
def euclidean_distance(X):
"""Calculate the euclidean distances between all pairs of vectors in `X`.
    Consider using sklearn.metrics.pairwise.euclidean_distances for faster,
but less accurate distances (not necessarily symmetric, too)."""
return squareform(pdist(X, 'euclidean'))
def lp_norm(X:np.ndarray, Y:np.ndarray=None, p:float=None, n_jobs:int=1):
"""Calculate Minkowski distances with L^p norm.
Calculate distances between all pairs of vectors within `X`, if `Y` is None.
    Otherwise calculate distances between all vectors in `X` against
all vectors in `Y`. For example, this is useful if only distances from
test data to training data are required.
Parameters
----------
X : ndarray
Vector data (e.g. test set)
Y : ndarray, optional, default: None
Vector data (e.g. training set)
p : float, default: None
Minkowski norm
n_jobs : int, default: 1
Parallel computation with multiple processes. See the scikit-learn
        docs for more details.
Returns
-------
D : ndarray
Distance matrix based on Lp-norm
See also
--------
http://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise.pairwise_distances.html
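
    Examples
    --------
    A minimal usage sketch (hypothetical data)::

        import numpy as np
        from hub_toolbox.distances import lp_norm

        X = np.random.rand(50, 8)    # e.g. test vectors
        Y = np.random.rand(200, 8)   # e.g. training vectors
        D_xy = lp_norm(X, Y, p=3.)   # shape (50, 200), Minkowski p=3
        D_xx = lp_norm(X, p=2.)      # shape (50, 50), Euclidean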
"""
if p is None:
raise ValueError("Please define the `p` parameter for lp_norm().")
elif p == 1.: # Use efficient version for cityblock distances
return pairwise_distances(X=X, Y=Y, metric='l1',
n_jobs=n_jobs)
elif p == 2.: # Use efficient version for Euclidean distances
return pairwise_distances(X=X, Y=Y, metric='l2',
n_jobs=n_jobs)
else: # Use general, less efficient version for general Minkowski distances
return pairwise_distances(X=X, Y=Y, metric='minkowski',
n_jobs=n_jobs, **{'p' : p})
#===============================================================================
# #=============================================================================
# #
# # m_p dissimilarity
# #
# #=============================================================================
#===============================================================================
def _mp_load_shared_Y(Y_, n_bins_):
global Y, n_bins
Y = Y_
n_bins = n_bins_
def _mp_load_shared_data(X_, Y_, p_, n_bins_, R_bins_, R_bins_np_,
X_bins_, X_bins_np_, Y_bins_, Y_bins_np_, mp_, mp_np_):
global X, Y, n_bins, n_x, n_y, d, p
global X_bins, X_bins_np, Y_bins, Y_bins_np, R_bins, R_bins_np, mp, mp_np
X = X_
Y = Y_
n_bins = n_bins_
n_x, d = X.shape
n_y = Y.shape[0]
p = p_
R_bins = R_bins_
R_bins_np = R_bins_np_
X_bins = X_bins_
X_bins_np = X_bins_np_
Y_bins = Y_bins_
Y_bins_np = Y_bins_np_
mp = mp_
mp_np = mp_np_
def _mp_find_bin_edges(i):
return np.partition(Y[:, i], kth=kth)[kth]
def _mp_calc_histograms(i):
bins = _mp_find_bin_edges(i)
return np.histogram(Y[:, i], bins=bins)
def _mp_calc_histograms_n_bins(i):
return np.histogram(Y[:, i], bins=n_bins)
def _mp_create_r_bins(i):
hist, _ = histograms[i]
for b in range(n_bins):
R_bins_np[i, b, b:] = np.cumsum(hist[b:])
R_bins_np[i] += np.triu(R_bins_np[i], k=1).T
return
def _mp_estimate_r(i):
# Binning. Values outside the range are binned into the first/last bin
_, bin_edges = histograms[i]
bin_x = np.digitize(X[:, i], bins=bin_edges)
bin_x -= 1
np.clip(bin_x, 0, n_bins-1, out=bin_x)
bin_y = np.digitize(Y[:, i], bins=bin_edges)
bin_y -= 1
np.clip(bin_y, 0, n_bins-1, out=bin_y)
X_bins_np[i, :] = bin_x
Y_bins_np[i, :] = bin_y
return
def _mp_calc_mp_dissim(x):
mp_xy = np.zeros(n_y, dtype=float)
for i in range(d):
tmp = R_bins_np[i, X_bins_np[i, x], Y_bins_np[i, :]] / (n_x + n_y)
tmp **= p
mp_xy += tmp
mp_xy /= d
mp_xy **= (1. / p)
mp_np[x, :] = mp_xy
return
def mp_dissim(X:np.ndarray, Y:np.ndarray=None, p:float=2,
n_bins:int=0, bin_size:str='range', n_jobs:int=1, verbose:int=0):
""" Calculate m_p dissimilarity.
The data-dependent m_p dissimilarity measure considers the relative
positions of objects x and y with respect to the rest of the data
distribution in each dimension [1]_.
Parameters
----------
X : ndarray
Vector data (e.g. test set), shape (n_x, d)
Y : ndarray, optional, default: None
Vector data (e.g. training set), shape (n_y, d).
Number of features ``d`` must be equal in `X` and `Y`.
p : float, optional, default: 2
Parameter, similar to `p` in Minkowski norm
n_bins : int, optional, default: 0
Number of bins for probability mass estimation
bin_size : str, optional, default: 'range'
Strategy for binning. May be one of:
'range' ... create bins with uniform range length
'mass' ... create bins with approx. uniform mass
n_jobs : int, optional, default: 1
Parallel computation with multiple processes.
verbose : int, optional, default: 0
Increasing level of output
Returns
-------
D : ndarray, shape (X.shape[0], Y.shape[0])
m_p dissimilarity matrix
References
----------
.. [1] Aryal et al. (2017). Data-dependent dissimilarity measure: an
effective alternative to geometric distance measures.
Knowledge and Information Systems, Springer-Verlag London.
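
    Examples
    --------
    A minimal usage sketch (hypothetical data; assumes the module is
    importable as ``hub_toolbox.distances``)::

        import numpy as np
        from hub_toolbox.distances import mp_dissim

        X = np.random.rand(100, 10)   # e.g. test vectors
        Y = np.random.rand(500, 10)   # e.g. training vectors
        D = mp_dissim(X, Y, p=2., n_bins=10, bin_size='range', n_jobs=2)
        # D has shape (100, 500)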
"""
# Some preparation
n_x, d = X.shape
# All-against-all in X, or X against Y?
if Y is None:
Y = X
n_y, d_y = Y.shape
# X and Y must have same dimensionality
assert d == d_y
if n_jobs == -1:
n_jobs = cpu_count()
n_bins = int(n_bins)
if p == 0:
log = ConsoleLogging()
log.warning('Got mpDisSim parameter p=0. Changed to default '
'value p=2 instead, in order to avoid zero division.')
p = 2.
# RawArrays have no locks. Must take EXTREME CARE!!
R_bins = RawArray(ctypes.c_int32, d * n_bins * n_bins)
R_bins_np = np.frombuffer(R_bins, dtype=np.int32).reshape((d, n_bins, n_bins))
X_bins = RawArray(ctypes.c_int32, d * n_x)
X_bins_np = np.frombuffer(X_bins, dtype=np.int32).reshape((d, n_x))
Y_bins = RawArray(ctypes.c_int32, d * n_y)
Y_bins_np = np.frombuffer(Y_bins, dtype=np.int32).reshape((d, n_y))
mp = RawArray(ctypes.c_double, n_x * n_y)
mp_np = np.frombuffer(mp).reshape((n_x, n_y))
global histograms, kth
kth = np.arange(0, n_y)[0:n_y:int(n_y/n_bins)]
if kth[-1] != n_y - 1:
kth = np.append(kth, n_y-1)
if verbose:
print("Creating bins for estimating probability data mass.")
with Pool(processes=n_jobs,
initializer=_mp_load_shared_Y,
initargs=(Y, n_bins)) as pool:
if 'mass'.startswith(bin_size):
histograms = pool.map(func=_mp_calc_histograms,
iterable=range(d))
elif 'range'.startswith(bin_size):
histograms = pool.map(func=_mp_calc_histograms_n_bins,
iterable=range(d))
else:
raise ValueError("{}' is not a valid value for `bin_size`. "
"Please use 'range' or 'mass'.".format(bin_size))
# The second pool needs `histograms`
with Pool(processes=n_jobs,
initializer=_mp_load_shared_data,
initargs=(X, Y, p, n_bins, R_bins, R_bins_np, X_bins, X_bins_np,
Y_bins, Y_bins_np, mp, mp_np)) as pool:
pool.map(func=_mp_create_r_bins, iterable=range(d))
if verbose:
print("Estimating probability data mass in all regions R_i(x,y).")
pool.map(func=_mp_estimate_r, iterable=range(d))
if verbose:
print("Calculating m_p dissimilarity for all pairs x, y.")
pool.map(func=_mp_calc_mp_dissim, iterable=range(n_x))
if verbose:
print("Done.")
return mp_np
def sample_distance(X, y, sample_size, metric='euclidean', strategy='a',
random_state=None):
"""Calculate incomplete distance matrix.
Parameters
----------
X : ndarray
Input vector data.
y : ndarray
Input labels (used for stratified sampling).
sample_size : int or float
If float, must be between 0.0 and 1.0 and represent the proportion of
the dataset for which distances should be calculated to.
If int, represents the absolute number of sample distances.
NOTE: See also the notes to the return value `y_sample`!
metric : any scipy.spatial.distance.cdist metric (default: 'euclidean')
Metric used to calculate distances.
strategy : 'a', 'b' (default: 'a')
- 'a': Stratified sampling, for all points the distances to the
same points are chosen.
- 'b': Stratified sampling, for each point it is chosen independently,
to which other points distances are calculated.
NOTE: currently not implemented.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Returns
-------
D : ndarray
The ``n x s`` distance matrix, where ``n`` is the dataset size and
``s`` is the sample size.
y_sample : ndarray
The index array that determines, which column in `D` corresponds
to which data point.
NOTE: The size of `y_sample` may be slightly higher than defined by
`sample_size` in order to meet stratification requirements!
Thus, please always check the size in the downstream workflow.
Notes
-----
Only calculate distances to a fixed number/fraction of all ``n`` points.
These ``s`` points are sampled according to the chosen strategy (see above).
In other words, calculate the distance from all points to each point
in the sample to obtain a ``n x s`` distance matrix.
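
    Examples
    --------
    A minimal usage sketch (hypothetical data and labels)::

        import numpy as np
        from hub_toolbox.distances import sample_distance

        X = np.random.rand(1000, 20)
        y = np.random.randint(0, 5, 1000)    # class labels for stratification
        D, y_sample = sample_distance(X, y, sample_size=0.1,
                                      metric='euclidean', random_state=42)
        # D has shape (1000, y_sample.size); y_sample.size is roughly 100,
        # possibly slightly larger due to stratification (see note above).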
"""
check_vector_matrix_shape_fits_labels(X, y)
n = X.shape[0]
if not isinstance(sample_size, int):
sample_size = int(sample_size * n)
if strategy == 'a':
try: # scikit-learn == 0.18
sss = StratifiedShuffleSplit(n_splits=1, test_size=sample_size,
random_state=random_state)
_, y_sample = sss.split(X=X, y=y)
except ValueError: # scikit-learn >= 0.18.1
_, y_sample = next(sss.split(X=X, y=y))
except TypeError: # scikit-learn < 0.18
sss = StratifiedShuffleSplit(y=y, n_iter=1, test_size=sample_size,
random_state=random_state)
_, y_sample = next(iter(sss))
elif strategy == 'b':
raise NotImplementedError("Strategy 'b' is not yet implemented.")
#=======================================================================
# y_sample = np.zeros((n, sample_size))
# try: # scikit-learn >= 0.18
# for i in range(n):
# sss = StratifiedShuffleSplit(n_splits=1, test_size=sample_size)
# _, y_sample[i, :] = sss.split(X=y, y=y)
# except TypeError: # scikit-learn < 0.18
# for i in range(n):
# sss = StratifiedShuffleSplit(y=y, n_iter=1, test_size=sample_size)
# _, y_sample[i, :] = next(iter(sss))
# # TODO will need to adapt cdist call below...
#=======================================================================
else:
raise NotImplementedError("Strategy", strategy, "unknown.")
D = cdist(X, X[y_sample, :], metric=metric)
return D, y_sample
| gpl-3.0 |
duncanmmacleod/gwpy | gwpy/plot/tests/test_utils.py | 3 | 1615 | # -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2018-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Tests for `gwpy.plot.text`
"""
import itertools
from matplotlib import (
colors as mpl_colors,
)
from .. import utils as plot_utils
def test_color_cycle():
cyc = plot_utils.color_cycle()
assert isinstance(cyc, itertools.cycle)
assert next(cyc) == mpl_colors.to_hex("C0")
def test_color_cycle_arg():
cyc = plot_utils.color_cycle(['1', '2', '3'])
assert isinstance(cyc, itertools.cycle)
assert next(cyc) == '1'
assert next(cyc) == '2'
assert next(cyc) == '3'
assert next(cyc) == '1'
def test_marker_cycle():
cyc = plot_utils.marker_cycle()
assert isinstance(cyc, itertools.cycle)
assert next(cyc) == 'o'
def test_marker_cycle_arg():
cyc = plot_utils.marker_cycle(['1', '2', '3'])
assert isinstance(cyc, itertools.cycle)
assert next(cyc) == '1'
assert next(cyc) == '2'
assert next(cyc) == '3'
assert next(cyc) == '1'
| gpl-3.0 |
oscarbranson/latools | latools/filtering/classifier_obj.py | 1 | 8422 | import numpy as np
from sklearn import preprocessing
import sklearn.cluster as cl
from latools.helpers.stat_fns import nominal_values
class classifier(object):
def __init__(self, analytes, sort_by=0):
"""
Object to fit then apply a classifier.
Parameters
----------
analytes : str or array-like
            The analytes used by the clustering algorithm
Returns
-------
classifier object
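
        Examples
        --------
        A minimal usage sketch (hypothetical analyte names and data, where
        ``data_train`` and ``data_test`` are dicts mapping each analyte
        name to a 1D array of measurements)::

            c = classifier(analytes=['Al27', 'Mn55'], sort_by=0)
            c.fit(data_train, method='kmeans', n_clusters=2)
            clusters = c.predict(data_test)   # -2 marks non-finite samples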
"""
if isinstance(analytes, str):
self.analytes = [analytes]
else:
self.analytes = analytes
self.sort_by = sort_by
return
def format_data(self, data, scale=True):
"""
Function for converting a dict to an array suitable for sklearn.
Parameters
----------
data : dict
A dict of data, containing all elements of
`analytes` as items.
scale : bool
Whether or not to scale the data. Should always be
`True`, unless used by `classifier.fitting_data`
where a scaler hasn't been created yet.
Returns
-------
A data array suitable for use with `sklearn.cluster`.
"""
if len(self.analytes) == 1:
# if single analyte
d = nominal_values(data[self.analytes[0]])
ds = np.array(list(zip(d, np.zeros(len(d)))))
else:
# package multiple analytes
d = [nominal_values(data[a]) for a in self.analytes]
ds = np.vstack(d).T
# identify all nan values
finite = np.isfinite(ds).sum(1) == ds.shape[1]
# remember which values are sampled
sampled = np.arange(data[self.analytes[0]].size)[finite]
# remove all nan values
ds = ds[finite]
if scale:
ds = self.scaler.transform(ds)
return ds, sampled
def fitting_data(self, data):
"""
Function to format data for cluster fitting.
Parameters
----------
data : dict
A dict of data, containing all elements of
`analytes` as items.
Returns
-------
A data array for initial cluster fitting.
"""
ds_fit, _ = self.format_data(data, scale=False)
# define scaler
self.scaler = preprocessing.StandardScaler().fit(ds_fit)
# scale data and return
return self.scaler.transform(ds_fit)
def fit_kmeans(self, data, n_clusters, **kwargs):
"""
Fit KMeans clustering algorithm to data.
Parameters
----------
data : array-like
A dataset formatted by `classifier.fitting_data`.
n_clusters : int
The number of clusters in the data.
**kwargs
passed to `sklearn.cluster.KMeans`.
Returns
-------
Fitted `sklearn.cluster.KMeans` object.
"""
km = cl.KMeans(n_clusters=n_clusters, **kwargs)
km.fit(data)
return km
def fit_meanshift(self, data, bandwidth=None, bin_seeding=False, **kwargs):
"""
Fit MeanShift clustering algorithm to data.
Parameters
----------
data : array-like
A dataset formatted by `classifier.fitting_data`.
bandwidth : float
The bandwidth value used during clustering.
If none, determined automatically. Note:
            the data are scaled before clustering, so
this is not in the same units as the data.
bin_seeding : bool
Whether or not to use 'bin_seeding'. See
documentation for `sklearn.cluster.MeanShift`.
**kwargs
passed to `sklearn.cluster.MeanShift`.
Returns
-------
Fitted `sklearn.cluster.MeanShift` object.
"""
if bandwidth is None:
bandwidth = cl.estimate_bandwidth(data)
ms = cl.MeanShift(bandwidth=bandwidth, bin_seeding=bin_seeding)
ms.fit(data)
return ms
def fit(self, data, method='kmeans', **kwargs):
"""
fit classifiers from large dataset.
Parameters
----------
data : dict
A dict of data for clustering. Must contain
items with the same name as analytes used for
clustering.
method : str
A string defining the clustering method used. Can be:
* 'kmeans' : K-Means clustering algorithm
* 'meanshift' : Meanshift algorithm
n_clusters : int
            *K-Means only*. The number of clusters to identify
bandwidth : float
*Meanshift only.*
The bandwidth value used during clustering.
If none, determined automatically. Note:
            the data are scaled before clustering, so
this is not in the same units as the data.
bin_seeding : bool
*Meanshift only.*
Whether or not to use 'bin_seeding'. See
documentation for `sklearn.cluster.MeanShift`.
**kwargs :
passed to `sklearn.cluster.MeanShift`.
Returns
-------
        None. The fitted classifier is stored in ``self.classifier``.
"""
self.method = method
ds_fit = self.fitting_data(data)
mdict = {'kmeans': self.fit_kmeans,
'meanshift': self.fit_meanshift}
clust = mdict[method]
self.classifier = clust(data=ds_fit, **kwargs)
# sort cluster centers by value of first column, to avoid random variation.
c0 = self.classifier.cluster_centers_.T[self.sort_by]
self.classifier.cluster_centers_ = self.classifier.cluster_centers_[np.argsort(c0)]
# recalculate the labels, so it's consistent with cluster centers
self.classifier.labels_ = self.classifier.predict(ds_fit)
self.classifier.ulabels_ = np.unique(self.classifier.labels_)
return
def predict(self, data):
"""
Label new data with cluster identities.
Parameters
----------
data : dict
A data dict containing the same analytes used to
fit the classifier.
Returns
-------
array of clusters the same length as the data.
"""
size = data[self.analytes[0]].size
ds, sampled = self.format_data(data)
# predict clusters
cs = self.classifier.predict(ds)
# map clusters to original index
clusters = self.map_clusters(size, sampled, cs)
return clusters
def map_clusters(self, size, sampled, clusters):
"""
Translate cluster identity back to original data size.
Parameters
----------
size : int
size of original dataset
sampled : array-like
integer array describing location of finite values
in original data.
clusters : array-like
integer array of cluster identities
Returns
-------
list of cluster identities the same length as original
data. Where original data are non-finite, returns -2.
"""
ids = np.zeros(size, dtype=int)
ids[:] = -2
ids[sampled] = clusters
return ids
def sort_clusters(self, data, cs, sort_by):
"""
Sort clusters by the concentration of a particular analyte.
Parameters
----------
data : dict
A dataset containing sort_by as a key.
cs : array-like
An array of clusters, the same length as values of data.
sort_by : str
analyte to sort the clusters by
Returns
-------
array of clusters, sorted by mean value of sort_by analyte.
"""
# label the clusters according to their contents
sdat = data[sort_by]
means = []
nclusts = np.arange(cs.max() + 1)
for c in nclusts:
means.append(np.nanmean(sdat[cs == c]))
# create ranks
means = np.array(means)
rank = np.zeros(means.size)
rank[np.argsort(means)] = np.arange(means.size)
csn = cs.copy()
for c, o in zip(nclusts, rank):
csn[cs == c] = o
return csn
| mit |
idanivanov/catdtree | catdtree/base_decision_tree.py | 1 | 2736 | from sklearn.base import BaseEstimator
from . import TreeNode
# API Standards: http://scikit-learn.org/stable/developers/contributing.html#rolling-your-own-estimator
class BaseDecisionTree(BaseEstimator):
"""Base class representing a decision tree.
This class inherits the scikit-learn BaseEstimator class. However, it is
not equivalent to the scikit-learn BaseDecisionTree class. This class is
supposed to be a completely compatible scikit-learn Estimator.
"""
def __init__(self):
"""Construct the bese decision tree."""
self.tree = TreeNode(None, 'Root')
def _choose_best_split(self, X_part, y_part):
"""Choose the best split for single step of the tree construction.
This function needs to be overriden by the specific decision tree
algorithm being used.
Args:
* X_part: pandas.DataFrame. The data of the independent variables
which reach the current tree node.
* y_part: pandas.Series. The data of the dependent variable
regarding `X_part`.
Returns:
            An iterable of (condition_str, split_filter) tuples, one per
            branch of the split, or a falsy value (e.g. None) if the node
            should not be split further. For more info on the tuple
            elements see the docs of catdtree.TreeNode.__init__.
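
        Example:
            A hypothetical override for a binary split on a numeric feature
            'age' at threshold 40 (the feature name and threshold are made
            up) could build one filter per branch and return::

                def left(X_part, y_part):
                    mask = X_part['age'] <= 40
                    return X_part[mask], y_part[mask]

                def right(X_part, y_part):
                    mask = X_part['age'] > 40
                    return X_part[mask], y_part[mask]

                return [('age <= 40', left), ('age > 40', right)]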
"""
raise NotImplementedError('Override this method.')
def _split(self, X_part, y_part, tree_node):
"""Recursively construct the decision tree.
Args:
* X_part: pandas.DataFrame. The data of the independent variables
which reach the current `tree_node`.
* y_part: pandas.Series. The data of the dependent variable
regarding `X_part`.
* tree_node: catdtree.TreeNode. The current node where the split is
considered.
"""
best_split = self._choose_best_split(X_part, y_part)
if best_split:
for condition_str, split_filter in best_split:
X_part_branch, y_part_branch = split_filter(X_part, y_part)
tree_node_child = TreeNode(split_filter, condition_str)
tree_node.add_child(tree_node_child)
self._split(X_part_branch, y_part_branch, tree_node_child)
def fit(self, X, y):
"""Construct the decision tree over the given data.
Args:
* X: pandas.DataFrame. The data of the independent variables.
* y: pandas.Series. The data of the dependent variable regarding
`X`.
Returns:
self
"""
self._split(X, y, self.tree)
return self
def predict(self, X):
"""TODO."""
pass
def get_params(self):
"""TODO."""
pass
def set_params(self):
"""TODO."""
pass
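# A hypothetical subclass sketch (not part of this package) showing the
# _choose_best_split contract as _split consumes it: an iterable of
# (condition_str, split_filter) pairs, or a falsy value to stop recursion.
#
# class MeanSplitTree(BaseDecisionTree):
#     def _choose_best_split(self, X_part, y_part):
#         col = X_part.columns[0]
#         if X_part[col].nunique() < 2:
#             return None  # nothing left to split on
#         thr = X_part[col].mean()
#         below = lambda X, y: (X[X[col] <= thr], y[X[col] <= thr])
#         above = lambda X, y: (X[X[col] > thr], y[X[col] > thr])
#         return [('%s <= %.3g' % (col, thr), below),
#                 ('%s > %.3g' % (col, thr), above)]
#
# MeanSplitTree().fit(X, y) would then build a binary hierarchy of TreeNode
# objects rooted at self.tree.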
| mit |
ofgulban/scikit-image | doc/examples/features_detection/plot_corner.py | 34 | 1160 | """
================
Corner detection
================
Detect corner points using the Harris corner detector and determine subpixel
position of corners.
.. [1] http://en.wikipedia.org/wiki/Corner_detection
.. [2] http://en.wikipedia.org/wiki/Interest_point_detection
"""
from matplotlib import pyplot as plt
from skimage import data
from skimage.feature import corner_harris, corner_subpix, corner_peaks
from skimage.transform import warp, AffineTransform
from skimage.draw import ellipse
tform = AffineTransform(scale=(1.3, 1.1), rotation=1, shear=0.7,
translation=(210, 50))
image = warp(data.checkerboard(), tform.inverse, output_shape=(350, 350))
rr, cc = ellipse(310, 175, 10, 100)
image[rr, cc] = 1
image[180:230, 10:60] = 1
image[230:280, 60:110] = 1
coords = corner_peaks(corner_harris(image), min_distance=5)
coords_subpix = corner_subpix(image, coords, window_size=13)
fig, ax = plt.subplots()
ax.imshow(image, interpolation='nearest', cmap=plt.cm.gray)
ax.plot(coords[:, 1], coords[:, 0], '.b', markersize=3)
ax.plot(coords_subpix[:, 1], coords_subpix[:, 0], '+r', markersize=15)
ax.axis((0, 350, 350, 0))
plt.show()
| bsd-3-clause |
lancezlin/ml_template_py | lib/python2.7/site-packages/sklearn/linear_model/bayes.py | 50 | 16145 | """
Various bayesian regression
"""
from __future__ import print_function
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_X_y
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
"""Bayesian ridge regression
Fit a Bayesian ridge model and optimize the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300.
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
Default is 1.e-6
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : float
estimated precision of the weights.
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, tol=0.001, verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
See examples/linear_model/plot_bayesian_ridge.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
fit_intercept=True, normalize=False, copy_X=True,
verbose=False):
self.n_iter = n_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the model
Parameters
----------
X : numpy array of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples]
Target values
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
X, y, X_offset, y_offset, X_scale = self._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
n_samples, n_features = X.shape
# Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = 1.
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
U, S, Vh = linalg.svd(X, full_matrices=False)
eigen_vals_ = S ** 2
# Convergence loop of the bayesian ridge regression
for iter_ in range(self.n_iter):
# Compute mu and sigma
# sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
# coef_ = sigma_^-1 * XT * y
if n_samples > n_features:
coef_ = np.dot(Vh.T,
Vh / (eigen_vals_ + lambda_ / alpha_)[:, None])
coef_ = np.dot(coef_, XT_y)
if self.compute_score:
logdet_sigma_ = - np.sum(
np.log(lambda_ + alpha_ * eigen_vals_))
else:
coef_ = np.dot(X.T, np.dot(
U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
coef_ = np.dot(coef_, y)
if self.compute_score:
logdet_sigma_ = lambda_ * np.ones(n_features)
logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
logdet_sigma_ = - np.sum(np.log(logdet_sigma_))
# Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = (np.sum((alpha_ * eigen_vals_) /
(lambda_ + alpha_ * eigen_vals_)))
lambda_ = ((gamma_ + 2 * lambda_1) /
(np.sum(coef_ ** 2) + 2 * lambda_2))
alpha_ = ((n_samples - gamma_ + 2 * alpha_1) /
(rmse_ + 2 * alpha_2))
# Compute the objective function
if self.compute_score:
s = lambda_1 * log(lambda_) - lambda_2 * lambda_
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (n_features * log(lambda_) +
n_samples * log(alpha_) -
alpha_ * rmse_ -
(lambda_ * np.sum(coef_ ** 2)) -
logdet_sigma_ -
n_samples * log(2 * np.pi))
self.scores_.append(s)
# Check for convergence
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Convergence after ", str(iter_), " iterations")
break
coef_old_ = np.copy(coef_)
self.alpha_ = alpha_
self.lambda_ = lambda_
self.coef_ = coef_
self._set_intercept(X_offset, y_offset, X_scale)
return self
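# The update rules in the loop above follow the evidence-maximisation scheme:
# gamma = sum_i alpha * e_i / (lambda + alpha * e_i), with e_i the squared
# singular values of X (the effective number of parameters), then
# lambda <- (gamma + 2*lambda_1) / (||coef||^2 + 2*lambda_2) and
# alpha <- (n_samples - gamma + 2*alpha_1) / (rmse_ + 2*alpha_2),
# where rmse_ is, despite its name, the residual sum of squares.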
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
"""Bayesian ARD regression.
Fit the weights of a regression model, using an ARD prior. The weights of
the regression model are assumed to follow Gaussian distributions.
Also estimate the parameters lambda (precisions of the distributions of the
weights) and alpha (precision of the distribution of the noise).
The estimation is done by an iterative procedure (Evidence Maximization)
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6.
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter. Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter. Default is 1.e-6.
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False.
threshold_lambda : float, optional
threshold for removing (pruning) weights with high precision from
the computation. Default is 1.e+4.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
copy_X : boolean, optional, default True.
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
sigma_ : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.ARDRegression()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
--------
See examples/linear_model/plot_ard.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
copy_X=True, verbose=False):
self.n_iter = n_iter
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.threshold_lambda = threshold_lambda
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the ARDRegression model according to the given training data
and parameters.
Iterative procedure to maximize the evidence
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
n_samples, n_features = X.shape
coef_ = np.zeros(n_features)
X, y, X_offset, y_offset, X_scale = self._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
# Launch the convergence loop
keep_lambda = np.ones(n_features, dtype=bool)
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
verbose = self.verbose
# Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = np.ones(n_features)
self.scores_ = list()
coef_old_ = None
# Iterative procedure of ARDRegression
for iter_ in range(self.n_iter):
# Compute mu and sigma (using Woodbury matrix identity)
sigma_ = pinvh(np.eye(n_samples) / alpha_ +
np.dot(X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]),
X[:, keep_lambda].T))
sigma_ = np.dot(sigma_, X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]))
sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1]) *
X[:, keep_lambda].T, sigma_)
sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
coef_[keep_lambda] = alpha_ * np.dot(
sigma_, np.dot(X[:, keep_lambda].T, y))
# Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1) /
((coef_[keep_lambda]) ** 2 +
2. * lambda_2))
alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1) /
(rmse_ + 2. * alpha_2))
# Prune the weights with a precision over a threshold
keep_lambda = lambda_ < self.threshold_lambda
coef_[~keep_lambda] = 0
# Compute the objective function
if self.compute_score:
s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_) +
np.sum(np.log(lambda_)))
s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
self.scores_.append(s)
# Check for convergence
if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Converged after %s iterations" % iter_)
break
coef_old_ = np.copy(coef_)
self.coef_ = coef_
self.alpha_ = alpha_
self.sigma_ = sigma_
self.lambda_ = lambda_
self._set_intercept(X_offset, y_offset, X_scale)
return self
| mit |
datapythonista/pandas | web/pandas_web.py | 7 | 11764 | #!/usr/bin/env python3
"""
Simple static site generator for the pandas web.
pandas_web.py takes a directory as parameter, and copies all the files into the
target directory after converting markdown files into html and rendering both
markdown and html files with a context. The context is obtained by parsing
the file ``config.yml`` in the root of the source directory.
The file should contain:
```
main:
template_path: <path_to_the_jinja2_templates_directory>
base_template: <template_file_all_other_files_will_extend>
ignore:
- <list_of_files_in_the_source_that_will_not_be_copied>
github_repo_url: <organization/repo-name>
context_preprocessors:
- <list_of_functions_that_will_enrich_the_context_parsed_in_this_file>
markdown_extensions:
- <list_of_markdown_extensions_that_will_be_loaded>
```
The rest of the items in the file will be added directly to the context.
"""
import argparse
import datetime
import importlib
import operator
import os
import re
import shutil
import sys
import time
import typing
import feedparser
import jinja2
import markdown
import requests
import yaml
class Preprocessors:
"""
Built-in context preprocessors.
Context preprocessors are functions that receive the context used to
render the templates, and enriches it with additional information.
The original context is obtained by parsing ``config.yml``, and
anything else that is needed can be added with context preprocessors.
"""
@staticmethod
def navbar_add_info(context):
"""
Items in the main navigation bar can be direct links, or dropdowns with
subitems. This context preprocessor adds a boolean field
``has_subitems`` that tells which one of them every element is. It
also adds a ``slug`` field to be used as a CSS id.
"""
for i, item in enumerate(context["navbar"]):
context["navbar"][i] = dict(
item,
has_subitems=isinstance(item["target"], list),
slug=(item["name"].replace(" ", "-").lower()),
)
return context
@staticmethod
def blog_add_posts(context):
"""
Given the blog feed defined in the configuration yaml, this context
preprocessor fetches the posts in the feeds, and returns the relevant
information for them (sorted from newest to oldest).
"""
tag_expr = re.compile("<.*?>")
posts = []
# posts from the file system
if context["blog"]["posts_path"]:
posts_path = os.path.join(
context["source_path"], *context["blog"]["posts_path"].split("/")
)
for fname in os.listdir(posts_path):
if fname.startswith("index."):
continue
link = (
f"/{context['blog']['posts_path']}"
f"/{os.path.splitext(fname)[0]}.html"
)
md = markdown.Markdown(
extensions=context["main"]["markdown_extensions"]
)
with open(os.path.join(posts_path, fname)) as f:
html = md.convert(f.read())
title = md.Meta["title"][0]
summary = re.sub(tag_expr, "", html)
try:
body_position = summary.index(title) + len(title)
except ValueError:
raise ValueError(
f'Blog post "{fname}" should have a markdown header '
f'corresponding to its "Title" element "{title}"'
)
summary = " ".join(summary[body_position:].split(" ")[:30])
posts.append(
{
"title": title,
"author": context["blog"]["author"],
"published": datetime.datetime.strptime(
md.Meta["date"][0], "%Y-%m-%d"
),
"feed": context["blog"]["feed_name"],
"link": link,
"description": summary,
"summary": summary,
}
)
# posts from rss feeds
for feed_url in context["blog"]["feed"]:
feed_data = feedparser.parse(feed_url)
for entry in feed_data.entries:
published = datetime.datetime.fromtimestamp(
time.mktime(entry.published_parsed)
)
summary = re.sub(tag_expr, "", entry.summary)
posts.append(
{
"title": entry.title,
"author": entry.author,
"published": published,
"feed": feed_data["feed"]["title"],
"link": entry.link,
"description": entry.description,
"summary": summary,
}
)
posts.sort(key=operator.itemgetter("published"), reverse=True)
context["blog"]["posts"] = posts[: context["blog"]["num_posts"]]
return context
@staticmethod
def maintainers_add_info(context):
"""
Given the active maintainers defined in the yaml file, it fetches
the GitHub user information for them.
"""
context["maintainers"]["people"] = []
for user in context["maintainers"]["active"]:
resp = requests.get(f"https://api.github.com/users/{user}")
if context["ignore_io_errors"] and resp.status_code == 403:
return context
resp.raise_for_status()
context["maintainers"]["people"].append(resp.json())
return context
@staticmethod
def home_add_releases(context):
context["releases"] = []
github_repo_url = context["main"]["github_repo_url"]
resp = requests.get(f"https://api.github.com/repos/{github_repo_url}/releases")
if context["ignore_io_errors"] and resp.status_code == 403:
return context
resp.raise_for_status()
for release in resp.json():
if release["prerelease"]:
continue
published = datetime.datetime.strptime(
release["published_at"], "%Y-%m-%dT%H:%M:%SZ"
)
context["releases"].append(
{
"name": release["tag_name"].lstrip("v"),
"tag": release["tag_name"],
"published": published,
"url": (
release["assets"][0]["browser_download_url"]
if release["assets"]
else ""
),
}
)
return context
def get_callable(obj_as_str: str) -> object:
"""
Get a Python object from its string representation.
For example, for ``sys.stdout.write`` it would import the module ``sys``
and return the ``write`` function.
"""
components = obj_as_str.split(".")
attrs = []
while components:
try:
obj = importlib.import_module(".".join(components))
except ImportError:
attrs.insert(0, components.pop())
else:
break
if not obj:
raise ImportError(f'Could not import "{obj_as_str}"')
for attr in attrs:
obj = getattr(obj, attr)
return obj
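# Two illustrative resolutions (assuming the modules are importable):
# get_callable("os.path.join") imports the ``os.path`` module and returns its
# ``join`` function; get_callable("pandas_web.Preprocessors.navbar_add_info")
# stops importing at ``pandas_web`` and walks the remaining attribute chain.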
def get_context(config_fname: str, ignore_io_errors: bool, **kwargs):
"""
Load the config yaml as the base context, and enrich it with the
information added by the context preprocessors defined in the file.
"""
with open(config_fname) as f:
context = yaml.safe_load(f)
context["source_path"] = os.path.dirname(config_fname)
context["ignore_io_errors"] = ignore_io_errors
context.update(kwargs)
preprocessors = (
get_callable(context_prep)
for context_prep in context["main"]["context_preprocessors"]
)
for preprocessor in preprocessors:
context = preprocessor(context)
msg = f"{preprocessor.__name__} is missing the return statement"
assert context is not None, msg
return context
def get_source_files(source_path: str) -> typing.Generator[str, None, None]:
"""
Generate the list of files present in the source directory.
"""
for root, dirs, fnames in os.walk(source_path):
root = os.path.relpath(root, source_path)
for fname in fnames:
yield os.path.join(root, fname)
def extend_base_template(content: str, base_template: str) -> str:
"""
Wrap document to extend the base template, before it is rendered with
Jinja2.
"""
result = '{% extends "' + base_template + '" %}'
result += "{% block body %}"
result += content
result += "{% endblock %}"
return result
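# For instance, extend_base_template("<h1>Hello</h1>", "layout/base.html")
# produces the string
# '{% extends "layout/base.html" %}{% block body %}<h1>Hello</h1>{% endblock %}',
# which is then rendered by the Jinja2 environment built in main() (the
# template name here is only an illustration).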
def main(
source_path: str, target_path: str, base_url: str, ignore_io_errors: bool
) -> int:
"""
Copy every file in the source directory to the target directory.
For ``.md`` and ``.html`` files, render them with the context
before copying them. ``.md`` files are transformed to HTML.
"""
config_fname = os.path.join(source_path, "config.yml")
shutil.rmtree(target_path, ignore_errors=True)
os.makedirs(target_path, exist_ok=True)
sys.stderr.write("Generating context...\n")
context = get_context(config_fname, ignore_io_errors, base_url=base_url)
sys.stderr.write("Context generated\n")
templates_path = os.path.join(source_path, context["main"]["templates_path"])
jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(templates_path))
for fname in get_source_files(source_path):
if os.path.normpath(fname) in context["main"]["ignore"]:
continue
sys.stderr.write(f"Processing {fname}\n")
dirname = os.path.dirname(fname)
os.makedirs(os.path.join(target_path, dirname), exist_ok=True)
extension = os.path.splitext(fname)[-1]
if extension in (".html", ".md"):
with open(os.path.join(source_path, fname)) as f:
content = f.read()
if extension == ".md":
body = markdown.markdown(
content, extensions=context["main"]["markdown_extensions"]
)
content = extend_base_template(body, context["main"]["base_template"])
content = jinja_env.from_string(content).render(**context)
fname = os.path.splitext(fname)[0] + ".html"
with open(os.path.join(target_path, fname), "w") as f:
f.write(content)
else:
shutil.copy(
os.path.join(source_path, fname), os.path.join(target_path, dirname)
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Documentation builder.")
parser.add_argument(
"source_path", help="path to the source directory (must contain config.yml)"
)
parser.add_argument(
"--target-path", default="build", help="directory where to write the output"
)
parser.add_argument(
"--base-url", default="", help="base url where the website is served from"
)
parser.add_argument(
"--ignore-io-errors",
action="store_true",
help="do not fail if errors happen when fetching "
"data from http sources, and those fail "
"(mostly useful to allow github quota errors "
"when running the script locally)",
)
args = parser.parse_args()
sys.exit(
main(args.source_path, args.target_path, args.base_url, args.ignore_io_errors)
)
| bsd-3-clause |
h2educ/scikit-learn | sklearn/cross_decomposition/tests/test_pls.py | 215 | 11427 | import numpy as np
from sklearn.utils.testing import (assert_array_almost_equal,
assert_array_equal, assert_true, assert_raise_message)
from sklearn.datasets import load_linnerud
from sklearn.cross_decomposition import pls_
from nose.tools import assert_equal
def test_pls():
d = load_linnerud()
X = d.data
Y = d.target
# 1) Canonical (symmetric) PLS (PLS 2 blocks canonical mode A)
# ===========================================================
# Compare 2 algo.: nipals vs. svd
# ------------------------------
pls_bynipals = pls_.PLSCanonical(n_components=X.shape[1])
pls_bynipals.fit(X, Y)
pls_bysvd = pls_.PLSCanonical(algorithm="svd", n_components=X.shape[1])
pls_bysvd.fit(X, Y)
# check equalities of loading (up to the sign of the second column)
assert_array_almost_equal(
pls_bynipals.x_loadings_,
np.multiply(pls_bysvd.x_loadings_, np.array([1, -1, 1])), decimal=5,
err_msg="nipals and svd implementation lead to different x loadings")
assert_array_almost_equal(
pls_bynipals.y_loadings_,
np.multiply(pls_bysvd.y_loadings_, np.array([1, -1, 1])), decimal=5,
err_msg="nipals and svd implementation lead to different y loadings")
# Check PLS properties (with n_components=X.shape[1])
# ---------------------------------------------------
plsca = pls_.PLSCanonical(n_components=X.shape[1])
plsca.fit(X, Y)
T = plsca.x_scores_
P = plsca.x_loadings_
Wx = plsca.x_weights_
U = plsca.y_scores_
Q = plsca.y_loadings_
Wy = plsca.y_weights_
def check_ortho(M, err_msg):
K = np.dot(M.T, M)
assert_array_almost_equal(K, np.diag(np.diag(K)), err_msg=err_msg)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(Wx, "x weights are not orthogonal")
check_ortho(Wy, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(T, "x scores are not orthogonal")
check_ortho(U, "y scores are not orthogonal")
# Check X = TP' and Y = UQ' (with (p == q) components)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# center scale X, Y
Xc, Yc, x_mean, y_mean, x_std, y_std =\
pls_._center_scale_xy(X.copy(), Y.copy(), scale=True)
assert_array_almost_equal(Xc, np.dot(T, P.T), err_msg="X != TP'")
assert_array_almost_equal(Yc, np.dot(U, Q.T), err_msg="Y != UQ'")
# Check that rotations on training data lead to scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Xr = plsca.transform(X)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
Xr, Yr = plsca.transform(X, Y)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
assert_array_almost_equal(Yr, plsca.y_scores_,
err_msg="rotation on Y failed")
# "Non regression test" on canonical PLS
# --------------------------------------
# The results were checked against the R-package plspm
pls_ca = pls_.PLSCanonical(n_components=X.shape[1])
pls_ca.fit(X, Y)
x_weights = np.array(
[[-0.61330704, 0.25616119, -0.74715187],
[-0.74697144, 0.11930791, 0.65406368],
[-0.25668686, -0.95924297, -0.11817271]])
assert_array_almost_equal(pls_ca.x_weights_, x_weights)
x_rotations = np.array(
[[-0.61330704, 0.41591889, -0.62297525],
[-0.74697144, 0.31388326, 0.77368233],
[-0.25668686, -0.89237972, -0.24121788]])
assert_array_almost_equal(pls_ca.x_rotations_, x_rotations)
y_weights = np.array(
[[+0.58989127, 0.7890047, 0.1717553],
[+0.77134053, -0.61351791, 0.16920272],
[-0.23887670, -0.03267062, 0.97050016]])
assert_array_almost_equal(pls_ca.y_weights_, y_weights)
y_rotations = np.array(
[[+0.58989127, 0.7168115, 0.30665872],
[+0.77134053, -0.70791757, 0.19786539],
[-0.23887670, -0.00343595, 0.94162826]])
assert_array_almost_equal(pls_ca.y_rotations_, y_rotations)
# 2) Regression PLS (PLS2): "Non regression test"
# ===============================================
# The results were checked against the R-packages plspm, mixOmics and pls
pls_2 = pls_.PLSRegression(n_components=X.shape[1])
pls_2.fit(X, Y)
x_weights = np.array(
[[-0.61330704, -0.00443647, 0.78983213],
[-0.74697144, -0.32172099, -0.58183269],
[-0.25668686, 0.94682413, -0.19399983]])
assert_array_almost_equal(pls_2.x_weights_, x_weights)
x_loadings = np.array(
[[-0.61470416, -0.24574278, 0.78983213],
[-0.65625755, -0.14396183, -0.58183269],
[-0.51733059, 1.00609417, -0.19399983]])
assert_array_almost_equal(pls_2.x_loadings_, x_loadings)
y_weights = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
assert_array_almost_equal(pls_2.y_weights_, y_weights)
y_loadings = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
assert_array_almost_equal(pls_2.y_loadings_, y_loadings)
# 3) Another non-regression test of Canonical PLS on random dataset
# =================================================================
# The results were checked against the R-package plspm
n = 500
p_noise = 10
q_noise = 5
# 2 latents vars:
np.random.seed(11)
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X = np.concatenate(
(X, np.random.normal(size=p_noise * n).reshape(n, p_noise)), axis=1)
Y = np.concatenate(
(Y, np.random.normal(size=q_noise * n).reshape(n, q_noise)), axis=1)
np.random.seed(None)
pls_ca = pls_.PLSCanonical(n_components=3)
pls_ca.fit(X, Y)
x_weights = np.array(
[[0.65803719, 0.19197924, 0.21769083],
[0.7009113, 0.13303969, -0.15376699],
[0.13528197, -0.68636408, 0.13856546],
[0.16854574, -0.66788088, -0.12485304],
[-0.03232333, -0.04189855, 0.40690153],
[0.1148816, -0.09643158, 0.1613305],
[0.04792138, -0.02384992, 0.17175319],
[-0.06781, -0.01666137, -0.18556747],
[-0.00266945, -0.00160224, 0.11893098],
[-0.00849528, -0.07706095, 0.1570547],
[-0.00949471, -0.02964127, 0.34657036],
[-0.03572177, 0.0945091, 0.3414855],
[0.05584937, -0.02028961, -0.57682568],
[0.05744254, -0.01482333, -0.17431274]])
assert_array_almost_equal(pls_ca.x_weights_, x_weights)
x_loadings = np.array(
[[0.65649254, 0.1847647, 0.15270699],
[0.67554234, 0.15237508, -0.09182247],
[0.19219925, -0.67750975, 0.08673128],
[0.2133631, -0.67034809, -0.08835483],
[-0.03178912, -0.06668336, 0.43395268],
[0.15684588, -0.13350241, 0.20578984],
[0.03337736, -0.03807306, 0.09871553],
[-0.06199844, 0.01559854, -0.1881785],
[0.00406146, -0.00587025, 0.16413253],
[-0.00374239, -0.05848466, 0.19140336],
[0.00139214, -0.01033161, 0.32239136],
[-0.05292828, 0.0953533, 0.31916881],
[0.04031924, -0.01961045, -0.65174036],
[0.06172484, -0.06597366, -0.1244497]])
assert_array_almost_equal(pls_ca.x_loadings_, x_loadings)
y_weights = np.array(
[[0.66101097, 0.18672553, 0.22826092],
[0.69347861, 0.18463471, -0.23995597],
[0.14462724, -0.66504085, 0.17082434],
[0.22247955, -0.6932605, -0.09832993],
[0.07035859, 0.00714283, 0.67810124],
[0.07765351, -0.0105204, -0.44108074],
[-0.00917056, 0.04322147, 0.10062478],
[-0.01909512, 0.06182718, 0.28830475],
[0.01756709, 0.04797666, 0.32225745]])
assert_array_almost_equal(pls_ca.y_weights_, y_weights)
y_loadings = np.array(
[[0.68568625, 0.1674376, 0.0969508],
[0.68782064, 0.20375837, -0.1164448],
[0.11712173, -0.68046903, 0.12001505],
[0.17860457, -0.6798319, -0.05089681],
[0.06265739, -0.0277703, 0.74729584],
[0.0914178, 0.00403751, -0.5135078],
[-0.02196918, -0.01377169, 0.09564505],
[-0.03288952, 0.09039729, 0.31858973],
[0.04287624, 0.05254676, 0.27836841]])
assert_array_almost_equal(pls_ca.y_loadings_, y_loadings)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_weights_, "x weights are not orthogonal")
check_ortho(pls_ca.y_weights_, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_scores_, "x scores are not orthogonal")
check_ortho(pls_ca.y_scores_, "y scores are not orthogonal")
def test_PLSSVD():
# Let's check that PLSSVD doesn't return all possible components but just
# the specified number
d = load_linnerud()
X = d.data
Y = d.target
n_components = 2
for clf in [pls_.PLSSVD, pls_.PLSRegression, pls_.PLSCanonical]:
pls = clf(n_components=n_components)
pls.fit(X, Y)
assert_equal(n_components, pls.y_scores_.shape[1])
def test_univariate_pls_regression():
# Ensure 1d Y is correctly interpreted
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSRegression()
# Compare 1d to column vector
model1 = clf.fit(X, Y[:, 0]).coef_
model2 = clf.fit(X, Y[:, :1]).coef_
assert_array_almost_equal(model1, model2)
def test_predict_transform_copy():
# check that the "copy" keyword works
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSCanonical()
X_copy = X.copy()
Y_copy = Y.copy()
clf.fit(X, Y)
# check that results are identical with copy
assert_array_almost_equal(clf.predict(X), clf.predict(X.copy(), copy=False))
assert_array_almost_equal(clf.transform(X), clf.transform(X.copy(), copy=False))
# check also if passing Y
assert_array_almost_equal(clf.transform(X, Y),
clf.transform(X.copy(), Y.copy(), copy=False))
# check that copy doesn't destroy
# we do want to check exact equality here
assert_array_equal(X_copy, X)
assert_array_equal(Y_copy, Y)
# also check that mean wasn't zero before (to make sure we didn't touch it)
assert_true(np.all(X.mean(axis=0) != 0))
def test_scale():
d = load_linnerud()
X = d.data
Y = d.target
# causes X[:, -1].std() to be zero
X[:, -1] = 1.0
for clf in [pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.set_params(scale=True)
clf.fit(X, Y)
def test_pls_errors():
d = load_linnerud()
X = d.data
Y = d.target
for clf in [pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.n_components = 4
assert_raise_message(ValueError, "Invalid number of components", clf.fit, X, Y)
| bsd-3-clause |
zsloan/genenetwork2 | wqflask/maintenance/quantile_normalize.py | 3 | 4514 | import sys
sys.path.insert(0, './')
import MySQLdb
import urllib.parse
import numpy as np
import pandas as pd
from elasticsearch import Elasticsearch, TransportError
from elasticsearch.helpers import bulk
from flask import Flask, g, request
from wqflask import app
from utility.elasticsearch_tools import get_elasticsearch_connection
from utility.tools import ELASTICSEARCH_HOST, ELASTICSEARCH_PORT, SQL_URI
def parse_db_uri():
"""Converts a database URI to the db name, host name, user name, and password"""
parsed_uri = urllib.parse.urlparse(SQL_URI)
db_conn_info = dict(
db=parsed_uri.path[1:],
host=parsed_uri.hostname,
user=parsed_uri.username,
passwd=parsed_uri.password)
print(db_conn_info)
return db_conn_info
def create_dataframe(input_file):
with open(input_file) as f:
ncols = len(f.readline().split("\t"))
input_array = np.loadtxt(open(
input_file, "rb"), delimiter="\t", skiprows=1, usecols=list(range(1, ncols)))
return pd.DataFrame(input_array)
# This function taken from https://github.com/ShawnLYU/Quantile_Normalize
def quantileNormalize(df_input):
df = df_input.copy()
# compute rank
dic = {}
for col in df:
dic.update({col: sorted(df[col])})
sorted_df = pd.DataFrame(dic)
rank = sorted_df.mean(axis=1).tolist()
# sort
for col in df:
t = np.searchsorted(np.sort(df[col]), df[col])
df[col] = [rank[i] for i in t]
return df
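# A toy trace of quantileNormalize (illustrative numbers): for columns
# a = [5, 2, 3] and b = [4, 1, 4], the sorted columns are [2, 3, 5] and
# [1, 4, 4], their row-wise means give rank = [1.5, 3.5, 4.5], and each value
# is replaced by the rank at its position within its own sorted column:
# a -> [4.5, 1.5, 3.5], b -> [3.5, 1.5, 3.5] (ties take the first matching
# rank because np.searchsorted uses side='left').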
def set_data(dataset_name):
orig_file = "/home/zas1024/cfw_data/" + dataset_name + ".txt"
sample_list = []
with open(orig_file, 'r') as orig_fh, open('/home/zas1024/cfw_data/quant_norm.csv', 'r') as quant_fh:
for i, (line1, line2) in enumerate(zip(orig_fh, quant_fh)):
trait_dict = {}
sample_list = []
if i == 0:
sample_names = line1.split('\t')[1:]
else:
trait_name = line1.split('\t')[0]
for i, sample in enumerate(sample_names):
this_sample = {
"name": sample,
"value": line1.split('\t')[i + 1],
"qnorm": line2.split('\t')[i + 1]
}
sample_list.append(this_sample)
query = """SELECT Species.SpeciesName, InbredSet.InbredSetName, ProbeSetFreeze.FullName
FROM Species, InbredSet, ProbeSetFreeze, ProbeFreeze, ProbeSetXRef, ProbeSet
WHERE Species.Id = InbredSet.SpeciesId and
InbredSet.Id = ProbeFreeze.InbredSetId and
ProbeFreeze.Id = ProbeSetFreeze.ProbeFreezeId and
ProbeSetFreeze.Name = '%s' and
ProbeSetFreeze.Id = ProbeSetXRef.ProbeSetFreezeId and
ProbeSetXRef.ProbeSetId = ProbeSet.Id and
ProbeSet.Name = '%s'""" % (dataset_name, line1.split('\t')[0])
Cursor.execute(query)
result_info = Cursor.fetchone()
yield {
"_index": "traits",
"_type": "trait",
"_source": {
"name": trait_name,
"species": result_info[0],
"group": result_info[1],
"dataset": dataset_name,
"dataset_fullname": result_info[2],
"samples": sample_list,
"transform_types": "qnorm"
}
}
if __name__ == '__main__':
Conn = MySQLdb.Connect(**parse_db_uri())
Cursor = Conn.cursor()
# es = Elasticsearch([{
# "host": ELASTICSEARCH_HOST, "port": ELASTICSEARCH_PORT
# }], timeout=60) if (ELASTICSEARCH_HOST and ELASTICSEARCH_PORT) else None
es = get_elasticsearch_connection(for_user=False)
#input_filename = "/home/zas1024/cfw_data/" + sys.argv[1] + ".txt"
#input_df = create_dataframe(input_filename)
#output_df = quantileNormalize(input_df)
#output_df.to_csv('quant_norm.csv', sep='\t')
#out_filename = sys.argv[1][:-4] + '_quantnorm.txt'
success, _ = bulk(es, set_data(sys.argv[1]))
response = es.search(
index="traits", doc_type="trait", body={
"query": {"match": {"name": "ENSMUSG00000028982"}}
}
)
print(response)
| agpl-3.0 |
admcrae/tensorflow | tensorflow/examples/learn/hdf5_classification.py | 60 | 2190 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, h5 format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import cross_validation
from sklearn import metrics
import tensorflow as tf
import h5py # pylint: disable=g-bad-import-order
learn = tf.contrib.learn
def main(unused_argv):
# Load dataset.
iris = learn.datasets.load_dataset('iris')
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
# Note that we are saving and loading iris data in h5 format as a simple
# demonstration here.
h5f = h5py.File('/tmp/test_hdf5.h5', 'w')
h5f.create_dataset('X_train', data=x_train)
h5f.create_dataset('X_test', data=x_test)
h5f.create_dataset('y_train', data=y_train)
h5f.create_dataset('y_test', data=y_test)
h5f.close()
h5f = h5py.File('/tmp/test_hdf5.h5', 'r')
x_train = np.array(h5f['X_train'])
x_test = np.array(h5f['X_test'])
y_train = np.array(h5f['y_train'])
y_test = np.array(h5f['y_test'])
# Build 3 layer DNN with 10, 20, 10 units respectively.
feature_columns = learn.infer_real_valued_columns_from_input(x_train)
classifier = learn.DNNClassifier(
feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)
# Fit and predict.
classifier.fit(x_train, y_train, steps=200)
score = metrics.accuracy_score(y_test, classifier.predict(x_test))
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
IndraVikas/scikit-learn | sklearn/neighbors/tests/test_approximate.py | 142 | 18692 | """
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
def test_neighbors_accuracy_with_n_candidates():
# Checks whether accuracy increases as `n_candidates` increases.
n_candidates_values = np.array([.1, 50, 500])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, n_candidates in enumerate(n_candidates_values):
lshf = LSHForest(n_candidates=n_candidates)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
# Checks whether accuracy increases as `n_estimators` increases.
n_estimators = np.array([1, 10, 100])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_estimators.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, t in enumerate(n_estimators):
lshf = LSHForest(n_candidates=500, n_estimators=t)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
# Checks whether desired number of neighbors are returned.
# It is guaranteed to return the requested number of neighbors
# if `min_hash_match` is set to 0. Returned distances should be
# in ascending order.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
# Test unfitted estimator
assert_raises(ValueError, lshf.kneighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
return_distance=False)
# Desired number of neighbors should be returned.
assert_equal(neighbors.shape[1], n_neighbors)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.kneighbors(queries,
n_neighbors=1,
return_distance=True)
assert_equal(neighbors.shape[0], n_queries)
assert_equal(distances.shape[0], n_queries)
# Test only neighbors
neighbors = lshf.kneighbors(queries, n_neighbors=1,
return_distance=False)
assert_equal(neighbors.shape[0], n_queries)
# Test random point(not in the data set)
query = rng.randn(n_features)
lshf.kneighbors(query, n_neighbors=1,
return_distance=False)
# Test n_neighbors at initialization
neighbors = lshf.kneighbors(query, return_distance=False)
assert_equal(neighbors.shape[1], 5)
# Test `neighbors` has an integer dtype
assert_true(neighbors.dtype.kind == 'i',
msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
# Checks whether returned distances are less than `radius`.
# At least one point should be returned when the `radius` is set
# to the mean distance from the query point to the other points in
# the database.
# Moreover, this test compares the radius neighbors of LSHForest
# with the `sklearn.neighbors.NearestNeighbors`.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
# Test unfitted estimator
assert_raises(ValueError, lshf.radius_neighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
# Select a random point in the dataset as the query
query = X[rng.randint(0, n_samples)]
# At least one neighbor should be returned when the radius is the
# mean distance from the query to the points of the dataset.
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
neighbors = lshf.radius_neighbors(query, radius=mean_dist,
return_distance=False)
assert_equal(neighbors.shape, (1,))
assert_equal(neighbors.dtype, object)
assert_greater(neighbors[0].shape[0], 0)
# All distances to points in the results of the radius query should
# be less than mean_dist
distances, neighbors = lshf.radius_neighbors(query,
radius=mean_dist,
return_distance=True)
assert_array_less(distances[0], mean_dist)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.radius_neighbors(queries,
return_distance=True)
# dists and inds should not be 1D arrays or arrays of variable lengths
# hence the use of the object dtype.
assert_equal(distances.shape, (n_queries,))
assert_equal(distances.dtype, object)
assert_equal(neighbors.shape, (n_queries,))
assert_equal(neighbors.dtype, object)
# Compare with exact neighbor search
query = X[rng.randint(0, n_samples)]
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)
# Radius-based queries do not sort the result points and the order
# depends on the method, the random_state and the dataset order. Therefore
# we need to sort the results ourselves before performing any comparison.
sorted_dists_exact = np.sort(distances_exact[0])
sorted_dists_approx = np.sort(distances_approx[0])
# Distances to exact neighbors are less than or equal to approximate
# counterparts as the approximate radius query might have missed some
# closer neighbors.
assert_true(np.all(np.less_equal(sorted_dists_exact,
sorted_dists_approx)))
def test_radius_neighbors_boundary_handling():
X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
n_points = len(X)
# Build an exact nearest neighbors model as reference model to ensure
# consistency between exact and approximate methods
nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
# Build a LSHForest model with hyperparameter values that always guarantee
# exact results on this toy dataset.
lsfh = LSHForest(min_hash_match=0, n_candidates=n_points).fit(X)
# define a query aligned with the first axis
query = [1., 0.]
# Compute the exact cosine distances of the query to the four points of
# the dataset
dists = pairwise_distances(query, X, metric='cosine').ravel()
# The first point is almost aligned with the query (very small angle),
# the cosine distance should therefore be almost null:
assert_almost_equal(dists[0], 0, decimal=5)
# The second point form an angle of 45 degrees to the query vector
assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
# The third point is orthogonal from the query vector hence at a distance
# exactly one:
assert_almost_equal(dists[2], 1)
# The last point is almost colinear but with opposite sign to the query
# therefore it has a cosine 'distance' very close to the maximum possible
# value of 2.
assert_almost_equal(dists[3], 2, decimal=5)
# If we query with a radius of one, all the samples except the last sample
# should be included in the results. This means that the third sample
# is lying on the boundary of the radius query:
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)
assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])
# If we perform the same query with a slightly lower radius, the third
# point of the dataset that lay on the boundary of the previous query
# is now rejected:
eps = np.finfo(np.float64).eps
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)
assert_array_equal(np.sort(exact_idx[0]), [0, 1])
assert_array_equal(np.sort(approx_idx[0]), [0, 1])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
def test_distances():
# Checks whether returned neighbors are from closest to farthest.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)]
distances, neighbors = lshf.kneighbors(query,
n_neighbors=n_neighbors,
return_distance=True)
# Returned neighbors should be from closest to farthest, that is
# increasing distance values.
assert_true(np.all(np.diff(distances[0]) >= 0))
# Note: the radius_neighbors method does not guarantee the order of
# the results.
def test_fit():
# Checks whether `fit` method sets all attribute values correctly.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators)
lshf.fit(X)
# _input_array = X
assert_array_equal(X, lshf._fit_X)
# A hash function g(p) for each tree
assert_equal(n_estimators, len(lshf.hash_functions_))
# Hash length = 32
assert_equal(32, lshf.hash_functions_[0].components_.shape[0])
# Number of trees_ in the forest
assert_equal(n_estimators, len(lshf.trees_))
# Each tree has entries for every data point
assert_equal(n_samples, len(lshf.trees_[0]))
# Original indices after sorting the hashes
assert_equal(n_estimators, len(lshf.original_indices_))
# Each set of original indices in a tree has entries for every data point
assert_equal(n_samples, len(lshf.original_indices_[0]))
def test_partial_fit():
# Checks whether the inserted array is consistent with the fitted data.
# `partial_fit` method should set all attribute values correctly.
n_samples = 12
n_samples_partial_fit = 3
n_features = 2
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
X_partial_fit = rng.rand(n_samples_partial_fit, n_features)
lshf = LSHForest()
# Test unfitted estimator
lshf.partial_fit(X)
assert_array_equal(X, lshf._fit_X)
lshf.fit(X)
# Insert wrong dimension
assert_raises(ValueError, lshf.partial_fit,
np.random.randn(n_samples_partial_fit, n_features - 1))
lshf.partial_fit(X_partial_fit)
# size of _input_array = samples + 1 after insertion
assert_equal(lshf._fit_X.shape[0],
n_samples + n_samples_partial_fit)
# size of original_indices_[1] = samples + 1
assert_equal(len(lshf.original_indices_[0]),
n_samples + n_samples_partial_fit)
# size of trees_[1] = samples + 1
assert_equal(len(lshf.trees_[1]),
n_samples + n_samples_partial_fit)
def test_hash_functions():
# Checks randomness of hash functions.
# Variance and mean of each hash function (projection vector)
# should be different from flattened array of hash functions.
# If hash functions are not randomly built (seeded with
# same value), variances and means of all functions are equal.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators,
random_state=rng.randint(0, np.iinfo(np.int32).max))
lshf.fit(X)
hash_functions = []
for i in range(n_estimators):
hash_functions.append(lshf.hash_functions_[i].components_)
for i in range(n_estimators):
assert_not_equal(np.var(hash_functions),
np.var(lshf.hash_functions_[i].components_))
for i in range(n_estimators):
assert_not_equal(np.mean(hash_functions),
np.mean(lshf.hash_functions_[i].components_))
def test_candidates():
# Checks whether candidates are sufficient.
# This should handle the case when the number of candidates is 0.
# The user should be warned when the number of candidates is less than
# the requested number of neighbors.
X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
[6, 10, 2]], dtype=np.float32)
X_test = np.array([7, 10, 3], dtype=np.float32)
# For zero candidates
lshf = LSHForest(min_hash_match=32)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (3, 32))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=3)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)
assert_equal(distances.shape[1], 3)
# For candidates less than n_neighbors
lshf = LSHForest(min_hash_match=31)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (5, 31))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=5)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)
assert_equal(distances.shape[1], 5)
def test_graphs():
# Smoke tests for graph methods.
n_samples_sizes = [5, 10, 20]
n_features = 3
rng = np.random.RandomState(42)
for n_samples in n_samples_sizes:
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
lshf.fit(X)
kneighbors_graph = lshf.kneighbors_graph(X)
radius_neighbors_graph = lshf.radius_neighbors_graph(X)
assert_equal(kneighbors_graph.shape[0], n_samples)
assert_equal(kneighbors_graph.shape[1], n_samples)
assert_equal(radius_neighbors_graph.shape[0], n_samples)
assert_equal(radius_neighbors_graph.shape[1], n_samples)
def test_sparse_input():
# note: Fixed random state in sp.rand is not supported in older scipy.
# The test should succeed regardless.
X1 = sp.rand(50, 100)
X2 = sp.rand(10, 100)
forest_sparse = LSHForest(radius=1, random_state=0).fit(X1)
forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A)
d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)
assert_almost_equal(d_sparse, d_dense)
assert_almost_equal(i_sparse, i_dense)
d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
return_distance=True)
d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
return_distance=True)
assert_equal(d_sparse.shape, d_dense.shape)
for a, b in zip(d_sparse, d_dense):
assert_almost_equal(a, b)
for a, b in zip(i_sparse, i_dense):
assert_almost_equal(a, b)
| bsd-3-clause |
jayflo/scikit-learn | examples/plot_kernel_ridge_regression.py | 230 | 6222 | """
=============================================
Comparison of kernel ridge regression and SVR
=============================================
Both kernel ridge regression (KRR) and SVR learn a non-linear function by
employing the kernel trick, i.e., they learn a linear function in the space
induced by the respective kernel which corresponds to a non-linear function in
the original space. They differ in the loss functions (ridge versus
epsilon-insensitive loss). In contrast to SVR, fitting a KRR can be done in
closed-form and is typically faster for medium-sized datasets. On the other
hand, the learned model is non-sparse and thus slower than SVR at
prediction-time.
This example illustrates both methods on an artificial dataset, which
consists of a sinusoidal target function and strong noise added to every fifth
datapoint. The first figure compares the learned model of KRR and SVR when both
complexity/regularization and bandwidth of the RBF kernel are optimized using
grid-search. The learned functions are very similar; however, fitting KRR is
approx. seven times faster than fitting SVR (both with grid-search). However,
prediction of 100000 target values is more than three times faster with SVR
since it has learned a sparse model using only approx. 1/3 of the 100 training
datapoints as support vectors.
The next figure compares the time for fitting and prediction of KRR and SVR for
different sizes of the training set. Fitting KRR is faster than SVR for medium-
sized training sets (less than 1000 samples); however, for larger training sets
SVR scales better. With regard to prediction time, SVR is faster than
KRR for all sizes of the training set because of the learned sparse
solution. Note that the degree of sparsity and thus the prediction time depends
on the parameters epsilon and C of the SVR.
"""
# Authors: Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
from __future__ import division
import time
import numpy as np
from sklearn.svm import SVR
from sklearn.grid_search import GridSearchCV
from sklearn.learning_curve import learning_curve
from sklearn.kernel_ridge import KernelRidge
import matplotlib.pyplot as plt
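# Illustrative aside (not part of the original example): the docstring above
# notes that fitting KRR can be done in closed form. With kernel matrix K and
# regularization strength alpha, the dual coefficients solve the linear
# system (K + alpha * I) c = y, so a single linear solve replaces SVR's
# iterative optimization. The toy_* names below are illustrative only.
_toy_rng = np.random.RandomState(0)
toy_X = _toy_rng.rand(20, 1)
toy_y = np.sin(5 * toy_X).ravel()
toy_K = np.exp(-10.0 * (toy_X - toy_X.T) ** 2)  # RBF kernel with gamma=10
toy_coef = np.linalg.solve(toy_K + 0.1 * np.eye(20), toy_y)
toy_pred = toy_K.dot(toy_coef)  # in-sample KRR predictions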
rng = np.random.RandomState(0)
#############################################################################
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
X_plot = np.linspace(0, 5, 100000)[:, None]
#############################################################################
# Fit regression model
train_size = 100
svr = GridSearchCV(SVR(kernel='rbf', gamma=0.1), cv=5,
param_grid={"C": [1e0, 1e1, 1e2, 1e3],
"gamma": np.logspace(-2, 2, 5)})
kr = GridSearchCV(KernelRidge(kernel='rbf', gamma=0.1), cv=5,
param_grid={"alpha": [1e0, 0.1, 1e-2, 1e-3],
"gamma": np.logspace(-2, 2, 5)})
t0 = time.time()
svr.fit(X[:train_size], y[:train_size])
svr_fit = time.time() - t0
print("SVR complexity and bandwidth selected and model fitted in %.3f s"
% svr_fit)
t0 = time.time()
kr.fit(X[:train_size], y[:train_size])
kr_fit = time.time() - t0
print("KRR complexity and bandwidth selected and model fitted in %.3f s"
% kr_fit)
sv_ratio = svr.best_estimator_.support_.shape[0] / train_size
print("Support vector ratio: %.3f" % sv_ratio)
t0 = time.time()
y_svr = svr.predict(X_plot)
svr_predict = time.time() - t0
print("SVR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], svr_predict))
t0 = time.time()
y_kr = kr.predict(X_plot)
kr_predict = time.time() - t0
print("KRR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], kr_predict))
#############################################################################
# look at the results
sv_ind = svr.best_estimator_.support_
plt.scatter(X[sv_ind], y[sv_ind], c='r', s=50, label='SVR support vectors')
plt.scatter(X[:100], y[:100], c='k', label='data')
plt.hold('on')
plt.plot(X_plot, y_svr, c='r',
label='SVR (fit: %.3fs, predict: %.3fs)' % (svr_fit, svr_predict))
plt.plot(X_plot, y_kr, c='g',
label='KRR (fit: %.3fs, predict: %.3fs)' % (kr_fit, kr_predict))
plt.xlabel('data')
plt.ylabel('target')
plt.title('SVR versus Kernel Ridge')
plt.legend()
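# Illustrative aside (not part of the original example): as the docstring
# points out, SVR's sparsity, and hence its prediction cost, is governed by
# epsilon (and C). A quick check on the training set used above:
for toy_eps in (0.05, 0.5):
    toy_n_sv = SVR(kernel='rbf', C=1e1, gamma=0.1, epsilon=toy_eps).fit(
        X[:train_size], y[:train_size]).support_.shape[0]
    print("epsilon=%.2f -> %d support vectors out of %d training samples"
          % (toy_eps, toy_n_sv, train_size))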
# Visualize training and prediction time
plt.figure()
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
sizes = np.logspace(1, 4, 7).astype(int)
for name, estimator in {"KRR": KernelRidge(kernel='rbf', alpha=0.1,
gamma=10),
"SVR": SVR(kernel='rbf', C=1e1, gamma=10)}.items():
train_time = []
test_time = []
for train_test_size in sizes:
t0 = time.time()
estimator.fit(X[:train_test_size], y[:train_test_size])
train_time.append(time.time() - t0)
t0 = time.time()
estimator.predict(X_plot[:1000])
test_time.append(time.time() - t0)
plt.plot(sizes, train_time, 'o-', color="r" if name == "SVR" else "g",
label="%s (train)" % name)
plt.plot(sizes, test_time, 'o--', color="r" if name == "SVR" else "g",
label="%s (test)" % name)
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Train size")
plt.ylabel("Time (seconds)")
plt.title('Execution Time')
plt.legend(loc="best")
# Visualize learning curves
plt.figure()
svr = SVR(kernel='rbf', C=1e1, gamma=0.1)
kr = KernelRidge(kernel='rbf', alpha=0.1, gamma=0.1)
train_sizes, train_scores_svr, test_scores_svr = \
learning_curve(svr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
train_sizes_abs, train_scores_kr, test_scores_kr = \
learning_curve(kr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
plt.plot(train_sizes, test_scores_svr.mean(1), 'o-', color="r",
label="SVR")
plt.plot(train_sizes, test_scores_kr.mean(1), 'o-', color="g",
label="KRR")
plt.xlabel("Train size")
plt.ylabel("Mean Squared Error")
plt.title('Learning curves')
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
terkkila/scikit-learn | benchmarks/bench_plot_omp_lars.py | 266 | 4447 | """Benchmarks of orthogonal matching pursuit (:ref:`OMP`) versus least angle
regression (:ref:`least_angle_regression`)
The input data is mostly low rank but has a fat infinite tail.
"""
from __future__ import print_function
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path, orthogonal_mp
from sklearn.datasets.samples_generator import make_sparse_coded_signal
def compute_bench(samples_range, features_range):
it = 0
results = dict()
lars = np.empty((len(features_range), len(samples_range)))
lars_gram = lars.copy()
omp = lars.copy()
omp_gram = lars.copy()
max_it = len(samples_range) * len(features_range)
for i_s, n_samples in enumerate(samples_range):
for i_f, n_features in enumerate(features_range):
it += 1
            n_informative = n_features // 10
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
# dataset_kwargs = {
# 'n_train_samples': n_samples,
# 'n_test_samples': 2,
# 'n_features': n_features,
# 'n_informative': n_informative,
# 'effective_rank': min(n_samples, n_features) / 10,
# #'effective_rank': None,
# 'bias': 0.0,
# }
dataset_kwargs = {
'n_samples': 1,
'n_components': n_features,
'n_features': n_samples,
'n_nonzero_coefs': n_informative,
'random_state': 0
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
y, X, _ = make_sparse_coded_signal(**dataset_kwargs)
X = np.asfortranarray(X)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, max_iter=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
lars_gram[i_f, i_s] = delta
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, Gram=None, max_iter=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
lars[i_f, i_s] = delta
gc.collect()
print("benchmarking orthogonal_mp (with Gram):", end='')
sys.stdout.flush()
tstart = time()
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
omp_gram[i_f, i_s] = delta
gc.collect()
print("benchmarking orthogonal_mp (without Gram):", end='')
sys.stdout.flush()
tstart = time()
orthogonal_mp(X, y, precompute=False,
n_nonzero_coefs=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
omp[i_f, i_s] = delta
results['time(LARS) / time(OMP)\n (w/ Gram)'] = (lars_gram / omp_gram)
results['time(LARS) / time(OMP)\n (w/o Gram)'] = (lars / omp)
return results
if __name__ == '__main__':
    samples_range = np.linspace(1000, 5000, 5).astype(int)
    features_range = np.linspace(1000, 5000, 5).astype(int)
results = compute_bench(samples_range, features_range)
max_time = max(np.max(t) for t in results.values())
import pylab as pl
fig = pl.figure('scikit-learn OMP vs. LARS benchmark results')
    for i, (label, timings) in enumerate(sorted(results.items())):
        ax = fig.add_subplot(1, 2, i + 1)
vmax = max(1 - timings.min(), -1 + timings.max())
pl.matshow(timings, fignum=False, vmin=1 - vmax, vmax=1 + vmax)
        ax.set_xticklabels([''] + [str(s) for s in samples_range])
        ax.set_yticklabels([''] + [str(f) for f in features_range])
pl.xlabel('n_samples')
pl.ylabel('n_features')
pl.title(label)
pl.subplots_adjust(0.1, 0.08, 0.96, 0.98, 0.4, 0.63)
ax = pl.axes([0.1, 0.08, 0.8, 0.06])
pl.colorbar(cax=ax, orientation='horizontal')
pl.show()
| bsd-3-clause |
eg-zhang/scikit-learn | examples/linear_model/plot_sgd_comparison.py | 77 | 1820 | """
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import LogisticRegression
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier()),
("ASGD", SGDClassifier(average=True)),
("Perceptron", Perceptron()),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0)),
("SAG", LogisticRegression(solver='sag', tol=1e-1, C=1.e4 / X.shape[0]))
]
xx = 1. - np.array(heldout)
for name, clf in classifiers:
print("training %s" % name)
rng = np.random.RandomState(42)
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
| bsd-3-clause |
hmendozap/auto-sklearn | autosklearn/util/pipeline.py | 1 | 2440 | # -*- encoding: utf-8 -*-
from autosklearn.constants import *
from autosklearn.pipeline.classification import SimpleClassificationPipeline
from autosklearn.pipeline.regression import SimpleRegressionPipeline
__all__ = [
'get_configuration_space',
'get_class',
'get_model'
]
def get_configuration_space(info,
include_estimators=None,
include_preprocessors=None):
include = dict()
if include_preprocessors is not None:
include['preprocessor'] = include_preprocessors
if info['task'] in REGRESSION_TASKS:
if include_estimators is not None:
include['regressor'] = include_estimators
return _get_regression_configuration_space(info, include)
else:
if include_estimators is not None:
include['classifier'] = include_estimators
return _get_classification_configuration_space(info, include)
def _get_regression_configuration_space(info, include):
sparse = False
if info['is_sparse'] == 1:
sparse = True
configuration_space = SimpleRegressionPipeline. \
get_hyperparameter_search_space(include=include,
dataset_properties={'sparse': sparse})
return configuration_space
def _get_classification_configuration_space(info, include):
task_type = info['task']
multilabel = False
multiclass = False
sparse = False
if task_type == MULTILABEL_CLASSIFICATION:
multilabel = True
if task_type == REGRESSION:
raise NotImplementedError()
if task_type == MULTICLASS_CLASSIFICATION:
multiclass = True
if task_type == BINARY_CLASSIFICATION:
pass
if info['is_sparse'] == 1:
sparse = True
dataset_properties = {
'multilabel': multilabel,
'multiclass': multiclass,
'sparse': sparse
}
return SimpleClassificationPipeline.get_hyperparameter_search_space(
dataset_properties=dataset_properties,
include=include)
def get_model(configuration, seed):
if 'classifier' in configuration:
return SimpleClassificationPipeline(configuration, seed)
elif 'regressor' in configuration:
return SimpleRegressionPipeline(configuration, seed)
def get_class(info):
if info['task'] in REGRESSION_TASKS:
return SimpleRegressionPipeline
else:
return SimpleClassificationPipeline
| bsd-3-clause |
apark263/tensorflow | tensorflow/contrib/learn/python/learn/estimators/debug_test.py | 40 | 32402 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Debug estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import operator
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.layers.python.layers import feature_column_ops
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import debug
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
NUM_EXAMPLES = 100
N_CLASSES = 5 # Cardinality of multiclass labels.
LABEL_DIMENSION = 3 # Dimensionality of regression labels.
def _train_test_split(features_and_labels):
features, labels = features_and_labels
train_set = (features[:int(len(features) / 2)],
labels[:int(len(features) / 2)])
test_set = (features[int(len(features) / 2):],
labels[int(len(features) / 2):])
return train_set, test_set
def _input_fn_builder(features, labels):
def input_fn():
feature_dict = {'features': constant_op.constant(features)}
my_labels = labels
if my_labels is not None:
my_labels = constant_op.constant(my_labels)
return feature_dict, my_labels
return input_fn
class DebugClassifierTest(test.TestCase):
def setUp(self):
np.random.seed(100)
self.features = np.random.rand(NUM_EXAMPLES, 5)
self.labels = np.random.choice(
range(N_CLASSES), p=[0.1, 0.3, 0.4, 0.1, 0.1], size=NUM_EXAMPLES)
self.binary_labels = np.random.choice(
range(2), p=[0.2, 0.8], size=NUM_EXAMPLES)
self.binary_float_labels = np.random.choice(
range(2), p=[0.2, 0.8], size=NUM_EXAMPLES)
def testPredict(self):
"""Tests that DebugClassifier outputs the majority class."""
(train_features, train_labels), (test_features,
test_labels) = _train_test_split(
[self.features, self.labels])
majority_class, _ = max(
collections.Counter(train_labels).items(), key=operator.itemgetter(1))
expected_prediction = np.vstack(
[[majority_class] for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=N_CLASSES)
classifier.fit(
input_fn=_input_fn_builder(train_features, train_labels), steps=50)
pred = classifier.predict_classes(
input_fn=_input_fn_builder(test_features, None))
self.assertAllEqual(expected_prediction, np.vstack(pred))
def testPredictBinary(self):
"""Same as above for binary predictions."""
(train_features, train_labels), (test_features,
test_labels) = _train_test_split(
[self.features, self.binary_labels])
majority_class, _ = max(
collections.Counter(train_labels).items(), key=operator.itemgetter(1))
expected_prediction = np.vstack(
[[majority_class] for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(
input_fn=_input_fn_builder(train_features, train_labels), steps=50)
pred = classifier.predict_classes(
input_fn=_input_fn_builder(test_features, None))
self.assertAllEqual(expected_prediction, np.vstack(pred))
(train_features,
train_labels), (test_features, test_labels) = _train_test_split(
[self.features, self.binary_float_labels])
majority_class, _ = max(
collections.Counter(train_labels).items(), key=operator.itemgetter(1))
expected_prediction = np.vstack(
[[majority_class] for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(
input_fn=_input_fn_builder(train_features, train_labels), steps=50)
pred = classifier.predict_classes(
input_fn=_input_fn_builder(test_features, None))
self.assertAllEqual(expected_prediction, np.vstack(pred))
def testPredictProba(self):
"""Tests that DebugClassifier outputs observed class distribution."""
(train_features, train_labels), (test_features,
test_labels) = _train_test_split(
[self.features, self.labels])
class_distribution = np.zeros((1, N_CLASSES))
for label in train_labels:
class_distribution[0, label] += 1
class_distribution /= len(train_labels)
expected_prediction = np.vstack(
[class_distribution for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=N_CLASSES)
classifier.fit(
input_fn=_input_fn_builder(train_features, train_labels), steps=50)
pred = classifier.predict_proba(
input_fn=_input_fn_builder(test_features, None))
self.assertAllClose(expected_prediction, np.vstack(pred), atol=0.1)
def testPredictProbaBinary(self):
"""Same as above but for binary classification."""
(train_features, train_labels), (test_features,
test_labels) = _train_test_split(
[self.features, self.binary_labels])
class_distribution = np.zeros((1, 2))
for label in train_labels:
class_distribution[0, label] += 1
class_distribution /= len(train_labels)
expected_prediction = np.vstack(
[class_distribution for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(
input_fn=_input_fn_builder(train_features, train_labels), steps=50)
pred = classifier.predict_proba(
input_fn=_input_fn_builder(test_features, None))
self.assertAllClose(expected_prediction, np.vstack(pred), atol=0.1)
(train_features,
train_labels), (test_features, test_labels) = _train_test_split(
[self.features, self.binary_float_labels])
class_distribution = np.zeros((1, 2))
for label in train_labels:
class_distribution[0, int(label)] += 1
class_distribution /= len(train_labels)
expected_prediction = np.vstack(
[class_distribution for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(
input_fn=_input_fn_builder(train_features, train_labels), steps=50)
pred = classifier.predict_proba(
input_fn=_input_fn_builder(test_features, None))
self.assertAllClose(expected_prediction, np.vstack(pred), atol=0.1)
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=debug.DebugClassifier(n_classes=3),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def _assertInRange(self, expected_min, expected_max, actual):
self.assertLessEqual(expected_min, actual)
self.assertGreaterEqual(expected_max, actual)
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, debug.DebugClassifier)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
classifier = debug.DebugClassifier(
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
classifier.fit(input_fn=input_fn, steps=5)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testLogisticRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
classifier = debug.DebugClassifier(
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLogisticRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
classifier = debug.DebugClassifier(
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(x=train_x, y=train_y, steps=5)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def _assertBinaryPredictions(self, expected_len, predictions):
self.assertEqual(expected_len, len(predictions))
for prediction in predictions:
self.assertIn(prediction, (0, 1))
def _assertProbabilities(self, expected_batch_size, expected_n_classes,
probabilities):
self.assertEqual(expected_batch_size, len(probabilities))
for b in range(expected_batch_size):
self.assertEqual(expected_n_classes, len(probabilities[b]))
for i in range(expected_n_classes):
self._assertInRange(0.0, 1.0, probabilities[b][i])
def testLogisticRegression_TensorData(self):
"""Tests binary classification using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(classifier.predict_classes(input_fn=predict_input_fn))
self._assertBinaryPredictions(3, predictions)
def testLogisticRegression_FloatLabel(self):
"""Tests binary classification with float labels."""
def _input_fn_float_label(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[50], [20], [10]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(input_fn=_input_fn_float_label, steps=50)
predict_input_fn = functools.partial(_input_fn_float_label, num_epochs=1)
predictions = list(classifier.predict_classes(input_fn=predict_input_fn))
self._assertBinaryPredictions(3, predictions)
predictions_proba = list(
classifier.predict_proba(input_fn=predict_input_fn))
self._assertProbabilities(3, 2, predictions_proba)
def testMultiClass_MatrixData(self):
"""Tests multi-class classification using matrix data as input."""
classifier = debug.DebugClassifier(n_classes=3)
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=200)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testMultiClass_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [150] instead of [150, 1]."""
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
classifier = debug.DebugClassifier(n_classes=3)
classifier.fit(input_fn=_input_fn, steps=200)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClass_NpMatrixData(self):
"""Tests multi-class classification using numpy matrix data as input."""
iris = base.load_iris()
train_x = iris.data
train_y = iris.target
classifier = debug.DebugClassifier(n_classes=3)
classifier.fit(x=train_x, y=train_y, steps=200)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClass_StringLabel(self):
"""Tests multi-class classification with string labels."""
def _input_fn_train():
labels = constant_op.constant([['foo'], ['bar'], ['baz'], ['bar']])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
}
return features, labels
classifier = debug.DebugClassifier(
n_classes=3, label_keys=['foo', 'bar', 'baz'])
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
}
return features, labels
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
classifier = debug.DebugClassifier(
weight_column_name='w',
n_classes=2,
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = debug.DebugClassifier(weight_column_name='w')
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
labels = math_ops.to_float(labels)
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
labels = math_ops.cast(labels, predictions.dtype)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = debug.DebugClassifier(
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(
list(classifier.predict_classes(input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Test the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
model_dir = tempfile.mkdtemp()
classifier = debug.DebugClassifier(
model_dir=model_dir,
n_classes=3,
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions1 = classifier.predict_classes(input_fn=predict_input_fn)
del classifier
classifier2 = debug.DebugClassifier(
model_dir=model_dir,
n_classes=3,
config=run_config.RunConfig(tf_random_seed=1))
predictions2 = classifier2.predict_classes(input_fn=predict_input_fn)
self.assertEqual(list(predictions1), list(predictions2))
def testExport(self):
"""Tests export model for servo."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
feature_columns = [
feature_column.real_valued_column('age'),
feature_column.embedding_column(language, dimension=1)
]
classifier = debug.DebugClassifier(
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=input_fn, steps=5)
def default_input_fn(unused_estimator, examples):
return feature_column_ops.parse_feature_columns_from_examples(
examples, feature_columns)
export_dir = tempfile.mkdtemp()
classifier.export(export_dir, input_fn=default_input_fn)
class DebugRegressorTest(test.TestCase):
def setUp(self):
np.random.seed(100)
self.features = np.random.rand(NUM_EXAMPLES, 5)
self.targets = np.random.rand(NUM_EXAMPLES, LABEL_DIMENSION)
def testPredictScores(self):
"""Tests that DebugRegressor outputs the mean target."""
(train_features, train_labels), (test_features,
test_labels) = _train_test_split(
[self.features, self.targets])
mean_target = np.mean(train_labels, 0)
expected_prediction = np.vstack(
[mean_target for _ in range(test_labels.shape[0])])
classifier = debug.DebugRegressor(label_dimension=LABEL_DIMENSION)
classifier.fit(
input_fn=_input_fn_builder(train_features, train_labels), steps=50)
pred = classifier.predict_scores(
input_fn=_input_fn_builder(test_features, None))
self.assertAllClose(expected_prediction, np.vstack(pred), atol=0.1)
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=debug.DebugRegressor(),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, debug.DebugRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
regressor.fit(input_fn=input_fn, steps=200)
scores = regressor.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(x=train_x, y=train_y, steps=200)
scores = regressor.evaluate(x=train_x, y=train_y, steps=1)
self.assertIn('loss', scores)
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
}
return features, labels
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = debug.DebugRegressor(
weight_column_name='w', config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = debug.DebugRegressor(
weight_column_name='w', config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(
list(regressor.predict_scores(input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
model_dir = tempfile.mkdtemp()
regressor = debug.DebugRegressor(
model_dir=model_dir, config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor2 = debug.DebugRegressor(
model_dir=model_dir, config=run_config.RunConfig(tf_random_seed=1))
predictions2 = list(regressor2.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
if __name__ == '__main__':
test.main()
| apache-2.0 |
saiwing-yeung/scikit-learn | sklearn/cluster/birch.py | 15 | 22726 | # Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy import sparse
from math import sqrt
from ..metrics.pairwise import euclidean_distances
from ..base import TransformerMixin, ClusterMixin, BaseEstimator
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils.extmath import row_norms, safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..exceptions import NotFittedError
from .hierarchical import AgglomerativeClustering
def _iterate_sparse_X(X):
"""This little hack returns a densified row when iterating over a sparse
    matrix, instead of constructing a sparse matrix for every row, which is
    expensive.
"""
n_samples = X.shape[0]
X_indices = X.indices
X_data = X.data
X_indptr = X.indptr
for i in xrange(n_samples):
row = np.zeros(X.shape[1])
startptr, endptr = X_indptr[i], X_indptr[i + 1]
nonzero_indices = X_indices[startptr:endptr]
row[nonzero_indices] = X_data[startptr:endptr]
yield row
def _split_node(node, threshold, branching_factor):
"""The node has to be split if there is no place for a new subcluster
in the node.
1. Two empty nodes and two empty subclusters are initialized.
2. The pair of distant subclusters are found.
3. The properties of the empty subclusters and nodes are updated
       according to which of the two distant subclusters each existing
       subcluster is nearer to.
4. The two nodes are set as children to the two subclusters.
"""
new_subcluster1 = _CFSubcluster()
new_subcluster2 = _CFSubcluster()
new_node1 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_node2 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_subcluster1.child_ = new_node1
new_subcluster2.child_ = new_node2
if node.is_leaf:
if node.prev_leaf_ is not None:
node.prev_leaf_.next_leaf_ = new_node1
new_node1.prev_leaf_ = node.prev_leaf_
new_node1.next_leaf_ = new_node2
new_node2.prev_leaf_ = new_node1
new_node2.next_leaf_ = node.next_leaf_
if node.next_leaf_ is not None:
node.next_leaf_.prev_leaf_ = new_node2
dist = euclidean_distances(
node.centroids_, Y_norm_squared=node.squared_norm_, squared=True)
n_clusters = dist.shape[0]
farthest_idx = np.unravel_index(
dist.argmax(), (n_clusters, n_clusters))
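    # farthest_idx is a (row, col) index pair; fancy-indexing dist with it
    # pulls out the two rows of pairwise distances measured from each of the
    # two most distant subclusters, which are then compared elementwise to
    # assign every subcluster to the nearer of the two new nodes.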
node1_dist, node2_dist = dist[[farthest_idx]]
node1_closer = node1_dist < node2_dist
for idx, subcluster in enumerate(node.subclusters_):
if node1_closer[idx]:
new_node1.append_subcluster(subcluster)
new_subcluster1.update(subcluster)
else:
new_node2.append_subcluster(subcluster)
new_subcluster2.update(subcluster)
return new_subcluster1, new_subcluster2
class _CFNode(object):
"""Each node in a CFTree is called a CFNode.
The CFNode can have a maximum of branching_factor
number of CFSubclusters.
Parameters
----------
threshold : float
Threshold needed for a new subcluster to enter a CFSubcluster.
branching_factor : int
Maximum number of CF subclusters in each node.
is_leaf : bool
We need to know if the CFNode is a leaf or not, in order to
retrieve the final subclusters.
n_features : int
The number of features.
Attributes
----------
subclusters_ : array-like
list of subclusters for a particular CFNode.
    prev_leaf_ : _CFNode
        Pointer to the previous leaf node. Useful only if is_leaf is True.
    next_leaf_ : _CFNode
        Pointer to the next leaf node. Useful only if is_leaf is True.
init_centroids_ : ndarray, shape (branching_factor + 1, n_features)
manipulate ``init_centroids_`` throughout rather than centroids_ since
the centroids are just a view of the ``init_centroids_`` .
init_sq_norm_ : ndarray, shape (branching_factor + 1,)
manipulate init_sq_norm_ throughout. similar to ``init_centroids_``.
centroids_ : ndarray
view of ``init_centroids_``.
squared_norm_ : ndarray
view of ``init_sq_norm_``.
"""
def __init__(self, threshold, branching_factor, is_leaf, n_features):
self.threshold = threshold
self.branching_factor = branching_factor
self.is_leaf = is_leaf
self.n_features = n_features
# The list of subclusters, centroids and squared norms
# to manipulate throughout.
self.subclusters_ = []
self.init_centroids_ = np.zeros((branching_factor + 1, n_features))
self.init_sq_norm_ = np.zeros((branching_factor + 1))
self.squared_norm_ = []
self.prev_leaf_ = None
self.next_leaf_ = None
def append_subcluster(self, subcluster):
n_samples = len(self.subclusters_)
self.subclusters_.append(subcluster)
self.init_centroids_[n_samples] = subcluster.centroid_
self.init_sq_norm_[n_samples] = subcluster.sq_norm_
# Keep centroids and squared norm as views. In this way
# if we change init_centroids and init_sq_norm_, it is
# sufficient,
self.centroids_ = self.init_centroids_[:n_samples + 1, :]
self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]
def update_split_subclusters(self, subcluster,
new_subcluster1, new_subcluster2):
"""Remove a subcluster from a node and update it with the
split subclusters.
"""
ind = self.subclusters_.index(subcluster)
self.subclusters_[ind] = new_subcluster1
self.init_centroids_[ind] = new_subcluster1.centroid_
self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
self.append_subcluster(new_subcluster2)
def insert_cf_subcluster(self, subcluster):
"""Insert a new subcluster into the node."""
if not self.subclusters_:
self.append_subcluster(subcluster)
return False
threshold = self.threshold
branching_factor = self.branching_factor
# We need to find the closest subcluster among all the
# subclusters so that we can insert our new subcluster.
dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
dist_matrix *= -2.
dist_matrix += self.squared_norm_
closest_index = np.argmin(dist_matrix)
closest_subcluster = self.subclusters_[closest_index]
# If the subcluster has a child, we need a recursive strategy.
if closest_subcluster.child_ is not None:
split_child = closest_subcluster.child_.insert_cf_subcluster(
subcluster)
if not split_child:
# If it is determined that the child need not be split, we
# can just update the closest_subcluster
closest_subcluster.update(subcluster)
self.init_centroids_[closest_index] = \
self.subclusters_[closest_index].centroid_
self.init_sq_norm_[closest_index] = \
self.subclusters_[closest_index].sq_norm_
return False
# things not too good. we need to redistribute the subclusters in
# our child node, and add a new subcluster in the parent
# subcluster to accommodate the new child.
else:
new_subcluster1, new_subcluster2 = _split_node(
closest_subcluster.child_, threshold, branching_factor)
self.update_split_subclusters(
closest_subcluster, new_subcluster1, new_subcluster2)
if len(self.subclusters_) > self.branching_factor:
return True
return False
# good to go!
else:
merged = closest_subcluster.merge_subcluster(
subcluster, self.threshold)
if merged:
self.init_centroids_[closest_index] = \
closest_subcluster.centroid_
self.init_sq_norm_[closest_index] = \
closest_subcluster.sq_norm_
return False
# not close to any other subclusters, and we still
# have space, so add.
elif len(self.subclusters_) < self.branching_factor:
self.append_subcluster(subcluster)
return False
# We do not have enough space nor is it closer to an
# other subcluster. We need to split.
else:
self.append_subcluster(subcluster)
return True
class _CFSubcluster(object):
"""Each subcluster in a CFNode is called a CFSubcluster.
    A CFSubcluster can have a CFNode as its child.
Parameters
----------
linear_sum : ndarray, shape (n_features,), optional
Sample. This is kept optional to allow initialization of empty
subclusters.
Attributes
----------
n_samples_ : int
Number of samples that belong to each subcluster.
linear_sum_ : ndarray
Linear sum of all the samples in a subcluster. Prevents holding
all sample data in memory.
squared_sum_ : float
Sum of the squared l2 norms of all samples belonging to a subcluster.
centroid_ : ndarray
Centroid of the subcluster. Prevent recomputing of centroids when
``CFNode.centroids_`` is called.
child_ : _CFNode
Child Node of the subcluster. Once a given _CFNode is set as the child
of the _CFNode, it is set to ``self.child_``.
sq_norm_ : ndarray
Squared norm of the subcluster. Used to prevent recomputing when
pairwise minimum distances are computed.
"""
def __init__(self, linear_sum=None):
if linear_sum is None:
self.n_samples_ = 0
self.squared_sum_ = 0.0
self.linear_sum_ = 0
else:
self.n_samples_ = 1
self.centroid_ = self.linear_sum_ = linear_sum
self.squared_sum_ = self.sq_norm_ = np.dot(
self.linear_sum_, self.linear_sum_)
self.child_ = None
def update(self, subcluster):
self.n_samples_ += subcluster.n_samples_
self.linear_sum_ += subcluster.linear_sum_
self.squared_sum_ += subcluster.squared_sum_
self.centroid_ = self.linear_sum_ / self.n_samples_
self.sq_norm_ = np.dot(self.centroid_, self.centroid_)
def merge_subcluster(self, nominee_cluster, threshold):
"""Check if a cluster is worthy enough to be merged. If
yes then merge.
"""
new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
new_n = self.n_samples_ + nominee_cluster.n_samples_
new_centroid = (1 / new_n) * new_ls
new_norm = np.dot(new_centroid, new_centroid)
dot_product = (-2 * new_n) * new_norm
sq_radius = (new_ss + dot_product) / new_n + new_norm
if sq_radius <= threshold ** 2:
(self.n_samples_, self.linear_sum_, self.squared_sum_,
self.centroid_, self.sq_norm_) = \
new_n, new_ls, new_ss, new_centroid, new_norm
return True
return False
@property
def radius(self):
"""Return radius of the subcluster"""
dot_product = -2 * np.dot(self.linear_sum_, self.centroid_)
return sqrt(
((self.squared_sum_ + dot_product) / self.n_samples_) +
self.sq_norm_)
class Birch(BaseEstimator, TransformerMixin, ClusterMixin):
"""Implements the Birch clustering algorithm.
Every new sample is inserted into the root of the Clustering Feature
Tree. It is then clubbed together with the subcluster that has the
    centroid closest to the new sample. This is done recursively till it
    ends up at the leaf subcluster of the tree that has the closest centroid.
Read more in the :ref:`User Guide <birch>`.
Parameters
----------
threshold : float, default 0.5
The radius of the subcluster obtained by merging a new sample and the
        closest subcluster should be less than the threshold. Otherwise a new
subcluster is started.
branching_factor : int, default 50
        Maximum number of CF subclusters in each node. If a new sample enters
such that the number of subclusters exceed the branching_factor then
the node has to be split. The corresponding parent also has to be
split and if the number of subclusters in the parent is greater than
the branching factor, then it has to be split recursively.
n_clusters : int, instance of sklearn.cluster model, default 3
Number of clusters after the final clustering step, which treats the
subclusters from the leaves as new samples. If None, this final
clustering step is not performed and the subclusters are returned
as they are. If a model is provided, the model is fit treating
the subclusters as new samples and the initial data is mapped to the
label of the closest subcluster. If an int is provided, the model
fit is AgglomerativeClustering with n_clusters set to the int.
compute_labels : bool, default True
Whether or not to compute labels for each fit.
copy : bool, default True
Whether or not to make a copy of the given data. If set to False,
the initial data will be overwritten.
Attributes
----------
root_ : _CFNode
Root of the CFTree.
dummy_leaf_ : _CFNode
Start pointer to all the leaves.
subcluster_centers_ : ndarray,
Centroids of all subclusters read directly from the leaves.
subcluster_labels_ : ndarray,
Labels assigned to the centroids of the subclusters after
they are clustered globally.
labels_ : ndarray, shape (n_samples,)
Array of labels assigned to the input data.
if partial_fit is used instead of fit, they are assigned to the
last batch of data.
Examples
--------
>>> from sklearn.cluster import Birch
>>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
>>> brc = Birch(branching_factor=50, n_clusters=None, threshold=0.5,
... compute_labels=True)
>>> brc.fit(X)
Birch(branching_factor=50, compute_labels=True, copy=True, n_clusters=None,
threshold=0.5)
>>> brc.predict(X)
array([0, 0, 0, 1, 1, 1])
References
----------
    * Tian Zhang, Raghu Ramakrishnan, Miron Livny
BIRCH: An efficient data clustering method for large databases.
http://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf
* Roberto Perdisci
JBirch - Java implementation of BIRCH clustering algorithm
https://code.google.com/p/jbirch/
"""
def __init__(self, threshold=0.5, branching_factor=50, n_clusters=3,
compute_labels=True, copy=True):
self.threshold = threshold
self.branching_factor = branching_factor
self.n_clusters = n_clusters
self.compute_labels = compute_labels
self.copy = copy
def fit(self, X, y=None):
"""
Build a CF Tree for the input data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
"""
self.fit_, self.partial_fit_ = True, False
return self._fit(X)
def _fit(self, X):
X = check_array(X, accept_sparse='csr', copy=self.copy)
threshold = self.threshold
branching_factor = self.branching_factor
if branching_factor <= 1:
raise ValueError("Branching_factor should be greater than one.")
n_samples, n_features = X.shape
# If partial_fit is called for the first time or fit is called, we
# start a new tree.
partial_fit = getattr(self, 'partial_fit_')
has_root = getattr(self, 'root_', None)
if getattr(self, 'fit_') or (partial_fit and not has_root):
# The first root is the leaf. Manipulate this object throughout.
self.root_ = _CFNode(threshold, branching_factor, is_leaf=True,
n_features=n_features)
# To enable getting back subclusters.
self.dummy_leaf_ = _CFNode(threshold, branching_factor,
is_leaf=True, n_features=n_features)
self.dummy_leaf_.next_leaf_ = self.root_
self.root_.prev_leaf_ = self.dummy_leaf_
        # Cannot vectorize. Enough of a reason to use Cython.
if not sparse.issparse(X):
iter_func = iter
else:
iter_func = _iterate_sparse_X
for sample in iter_func(X):
subcluster = _CFSubcluster(linear_sum=sample)
split = self.root_.insert_cf_subcluster(subcluster)
if split:
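                # The root itself overflowed: split it and grow the tree one
                # level by creating a fresh non-leaf root whose only
                # subclusters are the two halves of the old root.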
new_subcluster1, new_subcluster2 = _split_node(
self.root_, threshold, branching_factor)
del self.root_
self.root_ = _CFNode(threshold, branching_factor,
is_leaf=False,
n_features=n_features)
self.root_.append_subcluster(new_subcluster1)
self.root_.append_subcluster(new_subcluster2)
centroids = np.concatenate([
leaf.centroids_ for leaf in self._get_leaves()])
self.subcluster_centers_ = centroids
self._global_clustering(X)
return self
def _get_leaves(self):
"""
Retrieve the leaves of the CF Node.
Returns
-------
leaves: array-like
List of the leaf nodes.
"""
leaf_ptr = self.dummy_leaf_.next_leaf_
leaves = []
while leaf_ptr is not None:
leaves.append(leaf_ptr)
leaf_ptr = leaf_ptr.next_leaf_
return leaves
def partial_fit(self, X=None, y=None):
"""
Online learning. Prevents rebuilding of CFTree from scratch.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features), None
Input data. If X is not provided, only the global clustering
step is done.
"""
self.partial_fit_, self.fit_ = True, False
if X is None:
# Perform just the final global clustering step.
self._global_clustering()
return self
else:
self._check_fit(X)
return self._fit(X)
def _check_fit(self, X):
is_fitted = hasattr(self, 'subcluster_centers_')
# Called by partial_fit, before fitting.
has_partial_fit = hasattr(self, 'partial_fit_')
# Should raise an error if one does not fit before predicting.
if not (is_fitted or has_partial_fit):
raise NotFittedError("Fit training data before predicting")
if is_fitted and X.shape[1] != self.subcluster_centers_.shape[1]:
raise ValueError(
"Training data and predicted data do "
"not have same number of features.")
def predict(self, X):
"""
Predict data using the ``centroids_`` of subclusters.
Avoid computation of the row norms of X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
labels: ndarray, shape(n_samples)
Labelled data.
"""
X = check_array(X, accept_sparse='csr')
self._check_fit(X)
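        # argmin_j ||x - c_j||^2 = argmin_j (||c_j||^2 - 2 * x . c_j): the
        # ||x||^2 term is constant for each row of X, so the row norms of X
        # never need to be computed, as promised in the docstring above.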
reduced_distance = safe_sparse_dot(X, self.subcluster_centers_.T)
reduced_distance *= -2
reduced_distance += self._subcluster_norms
return self.subcluster_labels_[np.argmin(reduced_distance, axis=1)]
def transform(self, X, y=None):
"""
Transform X into subcluster centroids dimension.
Each dimension represents the distance from the sample point to each
cluster centroid.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters)
Transformed data.
"""
check_is_fitted(self, 'subcluster_centers_')
return euclidean_distances(X, self.subcluster_centers_)
def _global_clustering(self, X=None):
"""
Global clustering for the subclusters obtained after fitting
"""
clusterer = self.n_clusters
centroids = self.subcluster_centers_
compute_labels = (X is not None) and self.compute_labels
# Preprocessing for the global clustering.
not_enough_centroids = False
if isinstance(clusterer, int):
clusterer = AgglomerativeClustering(
n_clusters=self.n_clusters)
# There is no need to perform the global clustering step.
if len(centroids) < self.n_clusters:
not_enough_centroids = True
elif (clusterer is not None and not
hasattr(clusterer, 'fit_predict')):
raise ValueError("n_clusters should be an instance of "
"ClusterMixin or an int")
# To use in predict to avoid recalculation.
self._subcluster_norms = row_norms(
self.subcluster_centers_, squared=True)
if clusterer is None or not_enough_centroids:
self.subcluster_labels_ = np.arange(len(centroids))
if not_enough_centroids:
warnings.warn(
"Number of subclusters found (%d) by Birch is less "
"than (%d). Decrease the threshold."
% (len(centroids), self.n_clusters))
else:
# The global clustering step that clusters the subclusters of
# the leaves. It assumes the centroids of the subclusters as
# samples and finds the final centroids.
self.subcluster_labels_ = clusterer.fit_predict(
self.subcluster_centers_)
if compute_labels:
self.labels_ = self.predict(X)
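

# --- Minimal usage sketch (illustrative only, not part of the estimator). ---
# Runnable as ``python -m sklearn.cluster.birch``; the three synthetic blobs
# below are an assumption made purely for demonstration.
if __name__ == "__main__":
    import numpy as np

    rng = np.random.RandomState(0)
    chunks = [rng.randn(200, 2) + offset for offset in (0., 6., 12.)]

    birch = Birch(threshold=0.5, n_clusters=3)
    for chunk in chunks:
        # Each call grows the existing CF-tree instead of rebuilding it.
        birch.partial_fit(chunk)
    # Calling partial_fit with no data only re-runs the global clustering.
    birch.partial_fit(None)
    print(birch.subcluster_centers_.shape)
    print(np.unique(birch.subcluster_labels_))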
| bsd-3-clause |
dalejung/pandas-battery | pandas_battery/target/frame.py | 1 | 1571 | from collections import OrderedDict
import itertools
import pandas as pd
import numpy as np
import pandas.util.testing as tm
from pandas_battery.tools.attrdict import attrdict
__all__ = ['frame_targets']
N = 10000
COLS = 5
FLAT_N = N * COLS
shape = (N, COLS)
data_types = OrderedDict()
data_types['int'] = range(N)
data_types['float'] = np.random.randn(N)
data_types['bool'] = np.random.randn(N) > 0
data_types['string'] = np.array([tm.rands(1) for x in range(FLAT_N)]).reshape(shape)
data_types['long_strings'] = np.array([tm.rands(30) for x in range(FLAT_N)]).reshape(shape)
indexes = OrderedDict()
indexes[''] = None
indexes['time'] = pd.date_range(start="2000", freq="D", periods=N)
indexes['period'] = pd.period_range(start="2000", freq="D", periods=N)
column_types = OrderedDict()
column_types[''] = None
column_types['strcol'] = [tm.rands(10) for x in range(COLS)]
target_args = itertools.product(data_types, indexes, column_types)
def maker(data, index, columns):
def _maker():
arr = np.array(data)
# repeat the data for each column
if arr.ndim == 1:
arr = np.repeat(arr.ravel(), COLS).reshape(shape)
return pd.DataFrame(arr, index=index, columns=columns)
return _maker
frame_targets = attrdict()
for args in target_args:
data_type, index_type, column_type = args
obj_name = '_'.join(bit for bit in list(args) + ['frame'] if bit)
data = data_types[data_type]
index = indexes[index_type]
columns = column_types[column_type]
frame_targets[obj_name] = maker(data, index, columns)
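

# Illustrative usage (an assumption about how these targets are consumed by a
# benchmark runner; the key follows the '<data>_<index>_<columns>_frame'
# naming built in the loop above).
if __name__ == "__main__":
    build_frame = frame_targets['int_time_strcol_frame']
    df = build_frame()
    print(df.shape)       # expected (N, COLS) == (10000, 5)
    print(df.index[:3])   # DatetimeIndex starting in 2000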
| mit |
roxyboy/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_scalability.py | 225 | 5719 | """
============================================
Scalability of Approximate Nearest Neighbors
============================================
This example studies the scalability profile of approximate 10-neighbors
queries using the LSHForest with ``n_estimators=20`` and ``n_candidates=200``
when varying the number of samples in the dataset.
The first plot demonstrates the relationship between query time and index size
of LSHForest. Query time is compared with the brute force method in exact
nearest neighbor search for the same index sizes. The brute force queries have
a very predictable linear scalability with the index size (full scan). The
LSHForest index has a sub-linear scalability profile but can be slower for
small datasets.
The second plot shows the speedup when using approximate queries vs brute force
exact queries. The speedup tends to increase with the dataset size but should
reach a plateau typically when doing queries on datasets with millions of
samples and a few hundred dimensions. Higher dimensional datasets tend to
benefit more from LSHForest indexing.
The break-even point (speedup = 1) depends on the dimensionality and structure
of the indexed data and the parameters of the LSHForest index.
The precision of approximate queries should decrease slowly with the dataset
size. The speed of the decrease depends mostly on the LSHForest parameters and
the dimensionality of the data.
"""
from __future__ import division
print(__doc__)
# Authors: Maheshakya Wijewardena <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
###############################################################################
import time
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Parameters of the study
n_samples_min = int(1e3)
n_samples_max = int(1e5)
n_features = 100
n_centers = 100
n_queries = 100
n_steps = 6
n_iter = 5
# Initialize the range of `n_samples`
n_samples_values = np.logspace(np.log10(n_samples_min),
np.log10(n_samples_max),
n_steps).astype(np.int)
# Generate some structured data
rng = np.random.RandomState(42)
all_data, _ = make_blobs(n_samples=n_samples_max + n_queries,
n_features=n_features, centers=n_centers, shuffle=True,
random_state=0)
queries = all_data[:n_queries]
index_data = all_data[n_queries:]
# Metrics to collect for the plots
average_times_exact = []
average_times_approx = []
std_times_approx = []
accuracies = []
std_accuracies = []
average_speedups = []
std_speedups = []
# Calculate the average query time
for n_samples in n_samples_values:
X = index_data[:n_samples]
    # Initialize LSHForest for 10-neighbors queries
lshf = LSHForest(n_estimators=20, n_candidates=200,
n_neighbors=10).fit(X)
nbrs = NearestNeighbors(algorithm='brute', metric='cosine',
n_neighbors=10).fit(X)
time_approx = []
time_exact = []
accuracy = []
for i in range(n_iter):
# pick one query at random to study query time variability in LSHForest
query = queries[rng.randint(0, n_queries)]
t0 = time.time()
exact_neighbors = nbrs.kneighbors(query, return_distance=False)
time_exact.append(time.time() - t0)
t0 = time.time()
approx_neighbors = lshf.kneighbors(query, return_distance=False)
time_approx.append(time.time() - t0)
accuracy.append(np.in1d(approx_neighbors, exact_neighbors).mean())
average_time_exact = np.mean(time_exact)
average_time_approx = np.mean(time_approx)
speedup = np.array(time_exact) / np.array(time_approx)
average_speedup = np.mean(speedup)
mean_accuracy = np.mean(accuracy)
std_accuracy = np.std(accuracy)
print("Index size: %d, exact: %0.3fs, LSHF: %0.3fs, speedup: %0.1f, "
"accuracy: %0.2f +/-%0.2f" %
(n_samples, average_time_exact, average_time_approx, average_speedup,
mean_accuracy, std_accuracy))
accuracies.append(mean_accuracy)
std_accuracies.append(std_accuracy)
average_times_exact.append(average_time_exact)
average_times_approx.append(average_time_approx)
std_times_approx.append(np.std(time_approx))
average_speedups.append(average_speedup)
std_speedups.append(np.std(speedup))
# Plot average query time against n_samples
plt.figure()
plt.errorbar(n_samples_values, average_times_approx, yerr=std_times_approx,
fmt='o-', c='r', label='LSHForest')
plt.plot(n_samples_values, average_times_exact, c='b',
label="NearestNeighbors(algorithm='brute', metric='cosine')")
plt.legend(loc='upper left', fontsize='small')
plt.ylim(0, None)
plt.ylabel("Average query time in seconds")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Impact of index size on response time for first "
"nearest neighbors queries")
# Plot average query speedup versus index size
plt.figure()
plt.errorbar(n_samples_values, average_speedups, yerr=std_speedups,
fmt='o-', c='r')
plt.ylim(0, None)
plt.ylabel("Average speedup")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Speedup of the approximate NN queries vs brute force")
# Plot average precision versus index size
plt.figure()
plt.errorbar(n_samples_values, accuracies, std_accuracies, fmt='o-', c='c')
plt.ylim(0, 1.1)
plt.ylabel("precision@10")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("precision of 10-nearest-neighbors queries with index size")
plt.show()
| bsd-3-clause |
endlessm/chromium-browser | third_party/catapult/third_party/google-endpoints/future/utils/__init__.py | 36 | 20238 | """
A selection of cross-compatible functions for Python 2 and 3.
This module exports useful functions for 2/3 compatible code:
* bind_method: binds functions to classes
* ``native_str_to_bytes`` and ``bytes_to_native_str``
* ``native_str``: always equal to the native platform string object (because
this may be shadowed by imports from future.builtins)
* lists: lrange(), lmap(), lzip(), lfilter()
* iterable method compatibility:
- iteritems, iterkeys, itervalues
- viewitems, viewkeys, viewvalues
These use the original method if available, otherwise they use items,
keys, values.
* types:
* text_type: unicode in Python 2, str in Python 3
    * binary_type: str in Python 2, bytes in Python 3
* string_types: basestring in Python 2, str in Python 3
* bchr(c):
Take an integer and make a 1-character byte string
* bord(c)
Take the result of indexing on a byte string and make an integer
* tobytes(s)
Take a text string, a byte string, or a sequence of characters taken
from a byte string, and make a byte string.
* raise_from()
* raise_with_traceback()
This module also defines these decorators:
* ``python_2_unicode_compatible``
* ``with_metaclass``
* ``implements_iterator``
Some of the functions in this module come from the following sources:
* Jinja2 (BSD licensed: see
https://github.com/mitsuhiko/jinja2/blob/master/LICENSE)
* Pandas compatibility module pandas.compat
* six.py by Benjamin Peterson
* Django
"""
import types
import sys
import numbers
import functools
import copy
import inspect
PY3 = sys.version_info[0] == 3
PY2 = sys.version_info[0] == 2
PY26 = sys.version_info[0:2] == (2, 6)
PY27 = sys.version_info[0:2] == (2, 7)
PYPY = hasattr(sys, 'pypy_translation_info')
def python_2_unicode_compatible(cls):
"""
A decorator that defines __unicode__ and __str__ methods under Python
2. Under Python 3, this decorator is a no-op.
To support Python 2 and 3 with a single code base, define a __str__
method returning unicode text and apply this decorator to the class, like
this::
>>> from future.utils import python_2_unicode_compatible
>>> @python_2_unicode_compatible
... class MyClass(object):
... def __str__(self):
... return u'Unicode string: \u5b54\u5b50'
>>> a = MyClass()
Then, after this import:
>>> from future.builtins import str
the following is ``True`` on both Python 3 and 2::
>>> str(a) == a.encode('utf-8').decode('utf-8')
True
and, on a Unicode-enabled terminal with the right fonts, these both print the
Chinese characters for Confucius::
>>> print(a)
>>> print(str(a))
The implementation comes from django.utils.encoding.
"""
if not PY3:
cls.__unicode__ = cls.__str__
cls.__str__ = lambda self: self.__unicode__().encode('utf-8')
return cls
def with_metaclass(meta, *bases):
"""
Function from jinja2/_compat.py. License: BSD.
Use it like this::
class BaseForm(object):
pass
class FormType(type):
pass
class Form(with_metaclass(FormType, BaseForm)):
pass
This requires a bit of explanation: the basic idea is to make a
dummy metaclass for one level of class instantiation that replaces
itself with the actual metaclass. Because of internal type checks
we also need to make sure that we downgrade the custom metaclass
for one level to something closer to type (that's why __call__ and
    __init__ come back from type etc.).
This has the advantage over six.with_metaclass of not introducing
dummy classes into the final MRO.
"""
class metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return metaclass('temporary_class', None, {})
# Definitions from pandas.compat and six.py follow:
if PY3:
def bchr(s):
return bytes([s])
def bstr(s):
if isinstance(s, str):
return bytes(s, 'latin-1')
else:
return bytes(s)
def bord(s):
return s
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
else:
# Python 2
def bchr(s):
return chr(s)
def bstr(s):
return str(s)
def bord(s):
return ord(s)
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
###
if PY3:
def tobytes(s):
if isinstance(s, bytes):
return s
else:
if isinstance(s, str):
return s.encode('latin-1')
else:
return bytes(s)
else:
# Python 2
def tobytes(s):
if isinstance(s, unicode):
return s.encode('latin-1')
else:
return ''.join(s)
tobytes.__doc__ = """
    Encodes to latin-1 (whose first 128 characters are the same as
    ASCII).
"""
if PY3:
def native_str_to_bytes(s, encoding='utf-8'):
return s.encode(encoding)
def bytes_to_native_str(b, encoding='utf-8'):
return b.decode(encoding)
def text_to_native_str(t, encoding=None):
return t
else:
# Python 2
def native_str_to_bytes(s, encoding=None):
from future.types import newbytes # to avoid a circular import
return newbytes(s)
def bytes_to_native_str(b, encoding=None):
return native(b)
def text_to_native_str(t, encoding='ascii'):
"""
Use this to create a Py2 native string when "from __future__ import
unicode_literals" is in effect.
"""
return unicode(t).encode(encoding)
native_str_to_bytes.__doc__ = """
On Py3, returns an encoded string.
On Py2, returns a newbytes type, ignoring the ``encoding`` argument.
"""
if PY3:
# list-producing versions of the major Python iterating functions
def lrange(*args, **kwargs):
return list(range(*args, **kwargs))
def lzip(*args, **kwargs):
return list(zip(*args, **kwargs))
def lmap(*args, **kwargs):
return list(map(*args, **kwargs))
def lfilter(*args, **kwargs):
return list(filter(*args, **kwargs))
else:
import __builtin__
# Python 2-builtin ranges produce lists
lrange = __builtin__.range
lzip = __builtin__.zip
lmap = __builtin__.map
lfilter = __builtin__.filter
def isidentifier(s, dotted=False):
'''
A function equivalent to the str.isidentifier method on Py3
'''
if dotted:
return all(isidentifier(a) for a in s.split('.'))
if PY3:
return s.isidentifier()
else:
import re
_name_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*$")
return bool(_name_re.match(s))
def viewitems(obj, **kwargs):
"""
Function for iterating over dictionary items with the same set-like
behaviour on Py2.7 as on Py3.
Passes kwargs to method."""
func = getattr(obj, "viewitems", None)
if not func:
func = obj.items
return func(**kwargs)
def viewkeys(obj, **kwargs):
"""
Function for iterating over dictionary keys with the same set-like
behaviour on Py2.7 as on Py3.
Passes kwargs to method."""
func = getattr(obj, "viewkeys", None)
if not func:
func = obj.keys
return func(**kwargs)
def viewvalues(obj, **kwargs):
"""
Function for iterating over dictionary values with the same set-like
behaviour on Py2.7 as on Py3.
Passes kwargs to method."""
func = getattr(obj, "viewvalues", None)
if not func:
func = obj.values
return func(**kwargs)
def iteritems(obj, **kwargs):
"""Use this only if compatibility with Python versions before 2.7 is
required. Otherwise, prefer viewitems().
"""
func = getattr(obj, "iteritems", None)
if not func:
func = obj.items
return func(**kwargs)
def iterkeys(obj, **kwargs):
"""Use this only if compatibility with Python versions before 2.7 is
required. Otherwise, prefer viewkeys().
"""
func = getattr(obj, "iterkeys", None)
if not func:
func = obj.keys
return func(**kwargs)
def itervalues(obj, **kwargs):
"""Use this only if compatibility with Python versions before 2.7 is
required. Otherwise, prefer viewvalues().
"""
func = getattr(obj, "itervalues", None)
if not func:
func = obj.values
return func(**kwargs)
def bind_method(cls, name, func):
"""Bind a method to class, python 2 and python 3 compatible.
Parameters
----------
cls : type
class to receive bound method
name : basestring
name of method on class instance
func : function
function to be bound as method
Returns
-------
None
"""
# only python 2 has an issue with bound/unbound methods
if not PY3:
setattr(cls, name, types.MethodType(func, None, cls))
else:
setattr(cls, name, func)
def getexception():
return sys.exc_info()[1]
def _get_caller_globals_and_locals():
"""
Returns the globals and locals of the calling frame.
Is there an alternative to frame hacking here?
"""
caller_frame = inspect.stack()[2]
myglobals = caller_frame[0].f_globals
mylocals = caller_frame[0].f_locals
return myglobals, mylocals
def _repr_strip(mystring):
"""
Returns the string without any initial or final quotes.
"""
r = repr(mystring)
if r.startswith("'") and r.endswith("'"):
return r[1:-1]
else:
return r
if PY3:
def raise_from(exc, cause):
"""
Equivalent to:
raise EXCEPTION from CAUSE
on Python 3. (See PEP 3134).
"""
myglobals, mylocals = _get_caller_globals_and_locals()
# We pass the exception and cause along with other globals
# when we exec():
myglobals = myglobals.copy()
myglobals['__python_future_raise_from_exc'] = exc
myglobals['__python_future_raise_from_cause'] = cause
execstr = "raise __python_future_raise_from_exc from __python_future_raise_from_cause"
exec(execstr, myglobals, mylocals)
def raise_(tp, value=None, tb=None):
"""
A function that matches the Python 2.x ``raise`` statement. This
allows re-raising exceptions with the cls value and traceback on
Python 2 and 3.
"""
if value is not None and isinstance(tp, Exception):
raise TypeError("instance exception may not have a separate value")
if value is not None:
exc = tp(value)
else:
exc = tp
if exc.__traceback__ is not tb:
raise exc.with_traceback(tb)
raise exc
def raise_with_traceback(exc, traceback=Ellipsis):
if traceback == Ellipsis:
_, _, traceback = sys.exc_info()
raise exc.with_traceback(traceback)
else:
def raise_from(exc, cause):
"""
Equivalent to:
raise EXCEPTION from CAUSE
on Python 3. (See PEP 3134).
"""
# Is either arg an exception class (e.g. IndexError) rather than
# instance (e.g. IndexError('my message here')? If so, pass the
# name of the class undisturbed through to "raise ... from ...".
if isinstance(exc, type) and issubclass(exc, Exception):
e = exc()
# exc = exc.__name__
# execstr = "e = " + _repr_strip(exc) + "()"
# myglobals, mylocals = _get_caller_globals_and_locals()
# exec(execstr, myglobals, mylocals)
else:
e = exc
e.__suppress_context__ = False
if isinstance(cause, type) and issubclass(cause, Exception):
e.__cause__ = cause()
e.__suppress_context__ = True
elif cause is None:
e.__cause__ = None
e.__suppress_context__ = True
elif isinstance(cause, BaseException):
e.__cause__ = cause
e.__suppress_context__ = True
else:
raise TypeError("exception causes must derive from BaseException")
e.__context__ = sys.exc_info()[1]
raise e
exec('''
def raise_(tp, value=None, tb=None):
raise tp, value, tb
def raise_with_traceback(exc, traceback=Ellipsis):
if traceback == Ellipsis:
_, _, traceback = sys.exc_info()
raise exc, None, traceback
'''.strip())
raise_with_traceback.__doc__ = (
"""Raise exception with existing traceback.
If traceback is not passed, uses sys.exc_info() to get traceback."""
)
# Deprecated alias for backward compatibility with ``future`` versions < 0.11:
reraise = raise_
def implements_iterator(cls):
'''
From jinja2/_compat.py. License: BSD.
Use as a decorator like this::
@implements_iterator
class UppercasingIterator(object):
def __init__(self, iterable):
self._iter = iter(iterable)
def __iter__(self):
return self
def __next__(self):
return next(self._iter).upper()
'''
if PY3:
return cls
else:
cls.next = cls.__next__
del cls.__next__
return cls
if PY3:
get_next = lambda x: x.next
else:
get_next = lambda x: x.__next__
def encode_filename(filename):
if PY3:
return filename
else:
if isinstance(filename, unicode):
return filename.encode('utf-8')
return filename
def is_new_style(cls):
"""
Python 2.7 has both new-style and old-style classes. Old-style classes can
be pesky in some circumstances, such as when using inheritance. Use this
function to test for whether a class is new-style. (Python 3 only has
new-style classes.)
"""
return hasattr(cls, '__class__') and ('__dict__' in dir(cls)
or hasattr(cls, '__slots__'))
# The native platform string and bytes types. Useful because ``str`` and
# ``bytes`` are redefined on Py2 by ``from future.builtins import *``.
native_str = str
native_bytes = bytes
def istext(obj):
"""
Deprecated. Use::
>>> isinstance(obj, str)
after this import:
>>> from future.builtins import str
"""
return isinstance(obj, type(u''))
def isbytes(obj):
"""
Deprecated. Use::
>>> isinstance(obj, bytes)
after this import:
>>> from future.builtins import bytes
"""
return isinstance(obj, type(b''))
def isnewbytes(obj):
"""
Equivalent to the result of ``isinstance(obj, newbytes)`` were
``__instancecheck__`` not overridden on the newbytes subclass. In
    other words, is it REALLY a newbytes instance, not a Py2 native str
object?
"""
# TODO: generalize this so that it works with subclasses of newbytes
# Import is here to avoid circular imports:
from future.types.newbytes import newbytes
return type(obj) == newbytes
def isint(obj):
"""
Deprecated. Tests whether an object is a Py3 ``int`` or either a Py2 ``int`` or
``long``.
Instead of using this function, you can use:
>>> from future.builtins import int
>>> isinstance(obj, int)
The following idiom is equivalent:
>>> from numbers import Integral
>>> isinstance(obj, Integral)
"""
return isinstance(obj, numbers.Integral)
def native(obj):
"""
On Py3, this is a no-op: native(obj) -> obj
On Py2, returns the corresponding native Py2 types that are
superclasses for backported objects from Py3:
>>> from builtins import str, bytes, int
>>> native(str(u'ABC'))
u'ABC'
>>> type(native(str(u'ABC')))
unicode
>>> native(bytes(b'ABC'))
b'ABC'
>>> type(native(bytes(b'ABC')))
bytes
>>> native(int(10**20))
100000000000000000000L
>>> type(native(int(10**20)))
long
Existing native types on Py2 will be returned unchanged:
>>> type(native(u'ABC'))
unicode
"""
if hasattr(obj, '__native__'):
return obj.__native__()
else:
return obj
# Implementation of exec_ is from ``six``:
if PY3:
import builtins
exec_ = getattr(builtins, "exec")
else:
def exec_(code, globs=None, locs=None):
"""Execute code in a namespace."""
if globs is None:
frame = sys._getframe(1)
globs = frame.f_globals
if locs is None:
locs = frame.f_locals
del frame
elif locs is None:
locs = globs
exec("""exec code in globs, locs""")
# Defined here for backward compatibility:
def old_div(a, b):
"""
DEPRECATED: import ``old_div`` from ``past.utils`` instead.
Equivalent to ``a / b`` on Python 2 without ``from __future__ import
division``.
TODO: generalize this to other objects (like arrays etc.)
"""
if isinstance(a, numbers.Integral) and isinstance(b, numbers.Integral):
return a // b
else:
return a / b
def as_native_str(encoding='utf-8'):
'''
A decorator to turn a function or method call that returns text, i.e.
unicode, into one that returns a native platform str.
Use it as a decorator like this::
from __future__ import unicode_literals
class MyClass(object):
@as_native_str(encoding='ascii')
def __repr__(self):
return next(self._iter).upper()
'''
if PY3:
return lambda f: f
else:
def encoder(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
return f(*args, **kwargs).encode(encoding=encoding)
return wrapper
return encoder
# listvalues and listitems definitions from Nick Coghlan's (withdrawn)
# PEP 496:
try:
dict.iteritems
except AttributeError:
# Python 3
def listvalues(d):
return list(d.values())
def listitems(d):
return list(d.items())
else:
# Python 2
def listvalues(d):
return d.values()
def listitems(d):
return d.items()
if PY3:
def ensure_new_type(obj):
return obj
else:
def ensure_new_type(obj):
from future.types.newbytes import newbytes
from future.types.newstr import newstr
from future.types.newint import newint
from future.types.newdict import newdict
native_type = type(native(obj))
# Upcast only if the type is already a native (non-future) type
if issubclass(native_type, type(obj)):
# Upcast
if native_type == str: # i.e. Py2 8-bit str
return newbytes(obj)
elif native_type == unicode:
return newstr(obj)
elif native_type == int:
return newint(obj)
elif native_type == long:
return newint(obj)
elif native_type == dict:
return newdict(obj)
else:
return obj
else:
# Already a new type
assert type(obj) in [newbytes, newstr]
return obj
__all__ = ['PY2', 'PY26', 'PY3', 'PYPY',
'as_native_str', 'bind_method', 'bord', 'bstr',
'bytes_to_native_str', 'encode_filename', 'ensure_new_type',
'exec_', 'get_next', 'getexception', 'implements_iterator',
'is_new_style', 'isbytes', 'isidentifier', 'isint',
'isnewbytes', 'istext', 'iteritems', 'iterkeys', 'itervalues',
'lfilter', 'listitems', 'listvalues', 'lmap', 'lrange',
'lzip', 'native', 'native_bytes', 'native_str',
'native_str_to_bytes', 'old_div',
'python_2_unicode_compatible', 'raise_',
'raise_with_traceback', 'reraise', 'text_to_native_str',
'tobytes', 'viewitems', 'viewkeys', 'viewvalues',
'with_metaclass'
]
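

# --- Minimal usage sketch (illustrative only; not part of the public API). ---
# Demonstrates ``with_metaclass`` and ``raise_from`` defined above; the class
# names below are arbitrary examples.
if __name__ == "__main__":
    class Meta(type):
        pass

    class Base(object):
        pass

    class Combined(with_metaclass(Meta, Base)):
        pass

    # The dummy 'temporary_class' never ends up in the MRO.
    assert type(Combined) is Meta and Combined.__mro__[1] is Base

    try:
        try:
            {}['missing']
        except KeyError as err:
            raise_from(ValueError('lookup failed'), err)
    except ValueError as err:
        print('cause:', type(err.__cause__).__name__)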
| bsd-3-clause |
EliotBryant/ShadDetector | shadDetector_testing/Gradient Based Methods/shadowmask.py | 1 | 8175 | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 28 16:29:56 2017
@author: Eliot
shadowmask.py
"""
def kerneldirection(kernel, direction):
'''
Inputs:
kernel: standard kernel operating in y-plane top to bottom
direction: tuple specifying direction
(1,0) = x-direction, left to right
(-1,0) = x-direction, right to left
(0,1) = y-direction, top to bottom
(0,-1) = y-direction, bottom to top
## TO ADD: DIAGONALS
(1,1) = diagonal, top left to bottom right
(1,-1) = diagonal, bottom left to top right
(-1,1) = diagonal, top right to bottom left
(-1,-1) = diagonal, bottom right to top left
'''
# ensure ksize is square
multiplier = 1
(kH, kW) = kernel.shape[:2]
assert(kH == kW)
    # define negative multiplier for directionality
if direction[0] == -1 or direction[1] == -1:
multiplier = -1
else:
multiplier = 1
transposed = multiplier * np.transpose(kernel, direction)
return transposed
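# Illustrative behaviour (comment-only sketch; a 3x3 Sobel-y kernel is assumed):
#   sobel3 = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]])
#   kerneldirection(sobel3, (0, 1))   -> unchanged (gradient top to bottom)
#   kerneldirection(sobel3, (1, 0))   -> transposed (gradient left to right)
#   kerneldirection(sobel3, (0, -1))  -> negated original (bottom to top)
#   kerneldirection(sobel3, (-1, 0))  -> negated transpose (right to left)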
def trimcontours(contours, sizethreshold):
'''
function to reduce the list of contours and remove noise from contours by
thresholding out contours below a given size
'''
cont_output = []
for conts in contours:
area = cv2.contourArea(conts, False)
if area > sizethreshold:
cont_output.append(conts)
return cont_output
# 0. import necessary libraries
import os
import numpy as np
import cv2
import matplotlib.pylab as plt
from skimage import morphology, exposure, filters, img_as_ubyte, img_as_float
# 1. Read in input image
'''
Day2Run2 imageset
# change to medianblur directory
'''
thisfilepath = os.path.dirname(__file__)
loaddirpath = os.path.abspath(os.path.join(thisfilepath, "test_files/Input Images/adaptive_medianblur_gauss"))
savedirpath = os.path.abspath(os.path.join(thisfilepath, "test_files/shadowmasks"))
'''
Initial Run imageset
'''
#loaddirpath = os.path.abspath(os.path.join(thisfilepath, "../../data/Initial_Run/imgs_1_MEDBLUR"))
#savedirpath = os.path.abspath(os.path.join(thisfilepath, "../../data/Initial_Run/imgs_2_SHADMASK"))
sobel5 = np.array(([-2, -3, -4, -3, -2],
[-1, -2, -3, -2, -1],
[0, 0, 0, 0, 0],
[1, 2, 3, 2, 1],
[2, 3, 4, 3, 2]), dtype="float") #/2.0
directions = ((1,0),(0,1),(-1,0),(0,-1))
for imgs in os.listdir(loaddirpath):
colour = cv2.imread(loaddirpath + "/" + imgs)
c_cop = colour.copy()
image = colour[...,0]
print(imgs)
binary_combined = np.zeros_like(image, dtype=np.uint8)
nom_combined = np.zeros_like(image, dtype=np.uint8)
#
# specify kernel and the 4 directions to iterate through
for direction in directions:
grad = kerneldirection(sobel5, direction) #3
gradientimage = cv2.filter2D(image, -1, grad)
med = cv2.medianBlur(gradientimage, 11)
gradientimage = cv2.GaussianBlur(med, (13,13), 0)
# 8. Threshold out high gradients (non-shadow gradients)
ret, binarythresh = cv2.threshold(gradientimage, 55, 255, cv2.THRESH_BINARY) #alternative method
binary_combined = cv2.bitwise_or(binary_combined, binarythresh)
nom_combined = cv2.bitwise_or(nom_combined, gradientimage)
cv2.imwrite("nomcombined.png", nom_combined)
# find candidate shadow skeleton lines
bin_skimd = img_as_float(binary_combined)
skel_shad = morphology.skeletonize(bin_skimd)
skel_shad = exposure.rescale_intensity(skel_shad.astype(np.uint8), in_range=(0,1), out_range=(0,255))
# eroded binary combined to find reflection regions
eroded = cv2.erode(binary_combined, np.ones((3,3), dtype="int"), iterations=6)
#create border for reflection regions
imH, imW = eroded.shape[0], eroded.shape[1]
thick = 2
cv2.rectangle(eroded,(-1+thick, -1+thick),(imW-thick, imH-thick),255,thickness=thick)
#try it on skeletonized version of eroded
skimd = img_as_float(eroded)
skel = morphology.skeletonize(skimd)
skel = exposure.rescale_intensity(skel.astype(np.uint8), in_range=(0,1), out_range=(0,255))
# invert eroded before finding contours on reflection erosion
erodedinverted = np.bitwise_not(eroded)
(cimage2, contr2, heir2) = cv2.findContours(erodedinverted.copy(),cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
heir2 = heir2[0] # get the actual inner heirarchy
# create blank binary image for drawing on
binary = np.zeros_like(cimage2)
# find reflection regions and draw on binary via hierarchy and area threshold
for component in zip(contr2, heir2):
currentContour = component[0]
currentHierarchy = component[1]
currentContourArea = cv2.contourArea(currentContour)
# print("Current Contour Area = ", currentContourArea)
if currentContourArea < 150000:
if currentHierarchy[3] < 2:
# print(currentHierarchy)
cv2.drawContours(binary, currentContour ,-1, (255), 19)
# cv2.imshow("Reflection Regions", binary)
# if cv2.waitKey(0) & 0xff == 27:
# cv2.destroyAllWindows()
# bitwise_not of reflection image
invoo = np.bitwise_not(binary)
# remove skeleton shadow lines via bitwise_and with bitwise_not of reflection contours
skelplusinvoo = np.bitwise_and(skel_shad, invoo)
# cv2.imshow("Removed Reflections from Skeleton Shadow Candidates", skelplusinvoo)
# if cv2.waitKey(0) & 0xff == 27:
# cv2.destroyAllWindows()
# dilate remaining shadow regions and apply a border
dilate = cv2.dilate(skelplusinvoo, (np.ones((5,5),np.uint8))*255, iterations = 5)
thick = 8
cv2.rectangle(dilate,(-1+thick, -1+thick),(imW-thick, imH-thick),255,thickness=thick*2)
#
cv2.imshow("Dilated Shadow Regions for Finding Contours on", dilate)
if cv2.waitKey(0) & 0xff == 27:
cv2.destroyAllWindows()
## Second contour set for shadow regions
(cimage3, contr3, heir3) = cv2.findContours(dilate.copy(),cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
heir3 = heir3[0] # get the actual inner heirarchy
# create Colour binary image:
colbin3 = np.stack((erodedinverted, erodedinverted, erodedinverted), axis=2)
colbin33 = np.copy(colbin3)
# create blank binary image for drawing on
binary3 = np.zeros_like(cimage2)
# calculate Otsu threshold for thresholding regions as shadow or not
ret, imagetruncated = cv2.threshold(image.copy(), 170, 255, cv2.THRESH_TRUNC) # truncating histogram
ret, imagetruncated = cv2.threshold(imagetruncated.copy(), 100, 255, cv2.THRESH_TOZERO) #truncating histogram
imagetruncated[imagetruncated==0] = 110
truncated_float = img_as_float(imagetruncated)
threshold_global_otsu = filters.threshold_otsu(truncated_float)
threshold_global_otsu = int((threshold_global_otsu*255) - 19)
print("Threshold Global Otsu Value = ", threshold_global_otsu) #using global otsu
# draw contours of shadow regions using 3 criteria: hierarchy, area and meanvalue
for component in zip(contr3, heir3):
mask = np.zeros_like(cimage2)
currentContour = component[0]
currentHierarchy = component[1]
currentContourArea = cv2.contourArea(currentContour)
cv2.fillConvexPoly(mask, currentContour, 255)
(mean_val,___, ___, ___) = cv2.mean(image,mask = mask)
#print("Current Contour Area = ", currentContourArea)
#print("Current Area Mean Value =", mean_val)
if currentContourArea < 150000:
if currentContourArea > 1200:
if currentHierarchy[3] < 3:
if mean_val < threshold_global_otsu:
print(currentHierarchy)
cv2.drawContours(binary3, currentContour ,-1, (255), 3)
cv2.fillConvexPoly(binary3, currentContour, 255)
# cv2.imshow("Applied 3 Contour Criteria and Fill Regions", binary3)
# if cv2.waitKey(0) & 0xff == 27:
# cv2.destroyAllWindows()
# cv2.imwrite(savedirpath + "/" + imgs[:3] + "shadowmask.png", binary3)
cv2.imwrite(savedirpath + "/" + imgs[:5] + "shadowmask.png", binary3) # for InitialRun dataset | gpl-3.0 |
CellModels/tyssue | tyssue/generation/from_voronoi.py | 2 | 4406 | import pandas as pd
import numpy as np
from ..config.geometry import planar_spec, bulk_spec, flat_sheet
from .utils import make_df
"""
Generate datasets and epithelia from Voronoi tessellations
-------------------------
"""
def from_3d_voronoi(voro):
"""Creates 3D (bulk geometry) datasets from a Voronoï tessalation
Parameters
----------
voro: a :class:`scipy.spatial.Voronoi` object
Returns
-------
datasets: dict
datasets suitable for :class:`Epithelium` implementation
Notes
-----
It is important to reset the index of the created epithelium after creation
Example
-------
cells = hexa_grid3d(3, 3, 3)
datasets = from_3d_voronoi(Voronoi(cells))
bulk = Epithelium('bulk', datasets)
bulk.reset_topo()
bulk.reset_index(order=True)
bulk.sanitize()
"""
specs3d = bulk_spec()
el_idx = []
n_single_faces = len(voro.ridge_vertices)
for f_idx, (rv, rp) in enumerate(zip(voro.ridge_vertices, voro.ridge_points)):
if -1 in rv:
continue
face_verts = voro.vertices[rv]
f_center = face_verts.mean(axis=0)
c0 = voro.points[rp[0]]
ctof = f_center - c0
for rv0, rv1 in zip(rv, np.roll(rv, 1, axis=0)):
fv0 = voro.vertices[rv0]
fv1 = voro.vertices[rv1]
edge_v = fv1 - fv0
fto0 = fv0 - f_center
normal = np.cross(fto0, edge_v)
dotp = np.dot(ctof, normal)
if np.sign(dotp) > 0:
el_idx.append([rv0, rv1, f_idx, rp[0]])
el_idx.append([rv1, rv0, f_idx + n_single_faces, rp[1]])
else:
el_idx.append([rv1, rv0, f_idx, rp[0]])
el_idx.append([rv0, rv1, f_idx + n_single_faces, rp[1]])
el_idx = np.array(el_idx)
coords = ["x", "y", "z"]
edge_idx = pd.Index(range(el_idx.shape[0]), name="edge")
edge_df = make_df(edge_idx, specs3d["edge"])
for i, elem in enumerate(["srce", "trgt", "face", "cell"]):
edge_df[elem] = el_idx[:, i]
vert_idx = pd.Index(range(voro.vertices.shape[0]), name="vert")
vert_df = make_df(vert_idx, specs3d["vert"])
vert_df[coords] = voro.vertices
included_verts = edge_df["srce"].unique()
vert_df = vert_df.loc[included_verts].copy()
cell_idx = pd.Index(range(voro.points.shape[0]), name="cell")
cell_df = make_df(cell_idx, specs3d["cell"])
cell_df[coords] = voro.points
included_cells = edge_df["cell"].unique()
cell_df = cell_df.loc[included_cells].copy()
included_faces = edge_df["face"].unique()
face_df = make_df(included_faces, specs3d["face"])
edge_df.sort_values(by="cell", inplace=True)
datasets = {"vert": vert_df, "edge": edge_df, "face": face_df, "cell": cell_df}
return datasets
def from_2d_voronoi(voro, specs=None):
"""Creates 2D (sheet geometry) datasets from a Voronoï tessalation
Parameters
----------
voro: a :class:`scipy.spatial.Voronoi` object
Returns
-------
datasets: dict
datasets suitable for :class:`Epithelium` implementation
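
    Example
    -------
    A sketch mirroring :func:`from_3d_voronoi` (it assumes a ``hexa_grid2d``
    helper is available from this package)::

        cells = hexa_grid2d(6, 4, 1, 1)
        datasets = from_2d_voronoi(Voronoi(cells))
        sheet = Epithelium('sheet', datasets)
        sheet.reset_index(order=True)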
"""
if specs is None:
specs = planar_spec()
el_idx = []
for rv, rp in zip(voro.ridge_vertices, voro.ridge_points):
if -1 in rv:
continue
f_center = voro.points[rp[0]]
for rv0, rv1 in zip(rv, np.roll(rv, 1, axis=0)):
fv0 = voro.vertices[rv0]
fv1 = voro.vertices[rv1]
edge_v = fv1 - fv0
fto0 = fv0 - f_center
normal = np.cross(fto0, edge_v)
if np.sign(normal) > 0:
el_idx.append([rv0, rv1, rp[0]])
else:
el_idx.append([rv0, rv1, rp[1]])
el_idx = np.array(el_idx)
coords = ["x", "y"]
edge_idx = pd.Index(range(el_idx.shape[0]), name="edge")
edge_df = make_df(edge_idx, specs["edge"])
for i, elem in enumerate(["srce", "trgt", "face"]):
edge_df[elem] = el_idx[:, i]
vert_idx = pd.Index(range(voro.vertices.shape[0]), name="vert")
vert_df = make_df(vert_idx, specs["vert"])
vert_df[coords] = voro.vertices
face_idx = pd.Index(range(voro.points.shape[0]), name="face")
face_df = make_df(face_idx, specs["face"])
face_df[coords] = voro.points
datasets = {"vert": vert_df, "edge": edge_df, "face": face_df}
return datasets
| gpl-2.0 |
mohanprasath/Course-Work | data_analysis/uh_data_analysis_with_python/hy-data-analysis-with-python-spring-2020/part04-e12_cyclists/test/test_cyclists.py | 1 | 2031 | #!/usr/bin/env python3
import unittest
from unittest.mock import patch, MagicMock
import numpy as np
import pandas as pd
from tmc import points
from tmc.utils import load, get_out, patch_helper, spy_decorator
module_name="src.cyclists"
cyclists = load(module_name, "cyclists")
ph = patch_helper(module_name)
@points('p04-12.1')
class Cyclists(unittest.TestCase):
def test_shape(self):
df = cyclists()
self.assertEqual(df.shape, (37128, 21), msg="Incorrect shape!")
def test_columns(self):
df = cyclists()
cols=[
"Baana",
"Viikintie",
"Ratapihantie",
"Lauttasaaren silta pohjoispuoli",
"Pitkäsilta länsipuoli",
"Pitkäsilta itäpuoli",
"Heperian puisto/Ooppera",
"Munkkiniemi silta pohjoispuoli",
"Munkkiniemen silta eteläpuoli",
"Merikannontie",
"Lauttasaaren silta eteläpuoli",
"Käpylä, Pohjoisbaana",
"Kuusisaarentie",
"Kulosaaren silta po. ",
"Kulosaaren silta et.",
"Kaivokatu",
"Kaisaniemi/Eläintarhanlahti",
"Huopalahti (asema)",
"Eteläesplanadi",
"Auroransilta",
"Päivämäärä"]
np.testing.assert_array_equal(df.columns, cols[::-1], err_msg="Incorrect column names!")
def test_called(self):
method = spy_decorator(pd.core.frame.DataFrame.dropna, "dropna")
with patch.object(pd.core.frame.DataFrame, "dropna", new=method):
df = cyclists()
method.mock.assert_called()
self.assertEqual(method.mock.call_count, 2,
msg="Expected dropna method to be called twice!")
for args, kwargs in method.mock.call_args_list:
self.assertEqual(kwargs["how"], "all",
msg="Expected parameter 'all' to parameter 'how'!")
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
laiy/Database_Project | third_party/nltk/tokenize/texttiling.py | 1 | 16605 | # Natural Language Toolkit: TextTiling
#
# Copyright (C) 2001-2015 NLTK Project
# Author: George Boutsioukis
#
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
import re
import math
try:
import numpy
except ImportError:
pass
from nltk.tokenize.api import TokenizerI
BLOCK_COMPARISON, VOCABULARY_INTRODUCTION = 0, 1
LC, HC = 0, 1
DEFAULT_SMOOTHING = [0]
class TextTilingTokenizer(TokenizerI):
"""Tokenize a document into topical sections using the TextTiling algorithm.
This algorithm detects subtopic shifts based on the analysis of lexical
co-occurrence patterns.
The process starts by tokenizing the text into pseudosentences of
a fixed size w. Then, depending on the method used, similarity
scores are assigned at sentence gaps. The algorithm proceeds by
detecting the peak differences between these scores and marking
them as boundaries. The boundaries are normalized to the closest
paragraph break and the segmented text is returned.
:param w: Pseudosentence size
:type w: int
:param k: Size (in sentences) of the block used in the block comparison method
:type k: int
:param similarity_method: The method used for determining similarity scores:
`BLOCK_COMPARISON` (default) or `VOCABULARY_INTRODUCTION`.
:type similarity_method: constant
:param stopwords: A list of stopwords that are filtered out (defaults to NLTK's stopwords corpus)
:type stopwords: list(str)
:param smoothing_method: The method used for smoothing the score plot:
`DEFAULT_SMOOTHING` (default)
:type smoothing_method: constant
:param smoothing_width: The width of the window used by the smoothing method
:type smoothing_width: int
:param smoothing_rounds: The number of smoothing passes
:type smoothing_rounds: int
:param cutoff_policy: The policy used to determine the number of boundaries:
`HC` (default) or `LC`
:type cutoff_policy: constant
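
    Example (a usage sketch; the Brown corpus sample mirrors the ``demo``
    function at the bottom of this module)::

        from nltk.corpus import brown
        tt = TextTilingTokenizer(demo_mode=False)
        segments = tt.tokenize(brown.raw()[:10000])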
"""
def __init__(self,
w=20,
k=10,
similarity_method=BLOCK_COMPARISON,
stopwords=None,
smoothing_method=DEFAULT_SMOOTHING,
smoothing_width=2,
smoothing_rounds=1,
cutoff_policy=HC,
demo_mode=False):
if stopwords is None:
from nltk.corpus import stopwords
stopwords = stopwords.words('english')
self.__dict__.update(locals())
del self.__dict__['self']
def tokenize(self, text):
"""Return a tokenized copy of *text*, where each "token" represents
a separate topic."""
lowercase_text = text.lower()
paragraph_breaks = self._mark_paragraph_breaks(text)
text_length = len(lowercase_text)
# Tokenization step starts here
# Remove punctuation
nopunct_text = ''.join(c for c in lowercase_text
if re.match("[a-z\-\' \n\t]", c))
nopunct_par_breaks = self._mark_paragraph_breaks(nopunct_text)
tokseqs = self._divide_to_tokensequences(nopunct_text)
# The morphological stemming step mentioned in the TextTile
# paper is not implemented. A comment in the original C
# implementation states that it offers no benefit to the
# process. It might be interesting to test the existing
# stemmers though.
#words = _stem_words(words)
# Filter stopwords
for ts in tokseqs:
ts.wrdindex_list = [wi for wi in ts.wrdindex_list
if wi[0] not in self.stopwords]
token_table = self._create_token_table(tokseqs, nopunct_par_breaks)
# End of the Tokenization step
# Lexical score determination
if self.similarity_method == BLOCK_COMPARISON:
gap_scores = self._block_comparison(tokseqs, token_table)
elif self.similarity_method == VOCABULARY_INTRODUCTION:
raise NotImplementedError("Vocabulary introduction not implemented")
if self.smoothing_method == DEFAULT_SMOOTHING:
smooth_scores = self._smooth_scores(gap_scores)
# End of Lexical score Determination
# Boundary identification
depth_scores = self._depth_scores(smooth_scores)
segment_boundaries = self._identify_boundaries(depth_scores)
normalized_boundaries = self._normalize_boundaries(text,
segment_boundaries,
paragraph_breaks)
# End of Boundary Identification
segmented_text = []
prevb = 0
for b in normalized_boundaries:
if b == 0:
continue
segmented_text.append(text[prevb:b])
prevb = b
if prevb < text_length: # append any text that may be remaining
segmented_text.append(text[prevb:])
if not segmented_text:
segmented_text = [text]
if self.demo_mode:
return gap_scores, smooth_scores, depth_scores, segment_boundaries
return segmented_text
def _block_comparison(self, tokseqs, token_table):
"Implements the block comparison method"
def blk_frq(tok, block):
ts_occs = filter(lambda o: o[0] in block,
token_table[tok].ts_occurences)
freq = sum([tsocc[1] for tsocc in ts_occs])
return freq
gap_scores = []
numgaps = len(tokseqs)-1
for curr_gap in range(numgaps):
score_dividend, score_divisor_b1, score_divisor_b2 = 0.0, 0.0, 0.0
score = 0.0
#adjust window size for boundary conditions
if curr_gap < self.k-1:
window_size = curr_gap + 1
elif curr_gap > numgaps-self.k:
window_size = numgaps - curr_gap
else:
window_size = self.k
b1 = [ts.index
for ts in tokseqs[curr_gap-window_size+1 : curr_gap+1]]
b2 = [ts.index
for ts in tokseqs[curr_gap+1 : curr_gap+window_size+1]]
for t in token_table:
score_dividend += blk_frq(t, b1)*blk_frq(t, b2)
score_divisor_b1 += blk_frq(t, b1)**2
score_divisor_b2 += blk_frq(t, b2)**2
try:
score = score_dividend/math.sqrt(score_divisor_b1*
score_divisor_b2)
except ZeroDivisionError:
pass # score += 0.0
gap_scores.append(score)
return gap_scores
def _smooth_scores(self, gap_scores):
"Wraps the smooth function from the SciPy Cookbook"
return list(smooth(numpy.array(gap_scores[:]),
window_len = self.smoothing_width+1))
def _mark_paragraph_breaks(self, text):
"""Identifies indented text or line breaks as the beginning of
paragraphs"""
MIN_PARAGRAPH = 100
pattern = re.compile("[ \t\r\f\v]*\n[ \t\r\f\v]*\n[ \t\r\f\v]*")
matches = pattern.finditer(text)
last_break = 0
pbreaks = [0]
for pb in matches:
if pb.start()-last_break < MIN_PARAGRAPH:
continue
else:
pbreaks.append(pb.start())
last_break = pb.start()
return pbreaks
def _divide_to_tokensequences(self, text):
"Divides the text into pseudosentences of fixed size"
w = self.w
wrdindex_list = []
matches = re.finditer("\w+", text)
for match in matches:
wrdindex_list.append((match.group(), match.start()))
        return [TokenSequence(i // w, wrdindex_list[i:i+w])
for i in range(0, len(wrdindex_list), w)]
def _create_token_table(self, token_sequences, par_breaks):
"Creates a table of TokenTableFields"
token_table = {}
current_par = 0
current_tok_seq = 0
pb_iter = par_breaks.__iter__()
current_par_break = next(pb_iter)
if current_par_break == 0:
try:
current_par_break = next(pb_iter) #skip break at 0
except StopIteration:
raise ValueError(
"No paragraph breaks were found(text too short perhaps?)"
)
for ts in token_sequences:
for word, index in ts.wrdindex_list:
try:
while index > current_par_break:
current_par_break = next(pb_iter)
current_par += 1
except StopIteration:
#hit bottom
pass
if word in token_table:
token_table[word].total_count += 1
if token_table[word].last_par != current_par:
token_table[word].last_par = current_par
token_table[word].par_count += 1
if token_table[word].last_tok_seq != current_tok_seq:
token_table[word].last_tok_seq = current_tok_seq
token_table[word]\
.ts_occurences.append([current_tok_seq,1])
else:
token_table[word].ts_occurences[-1][1] += 1
else: #new word
token_table[word] = TokenTableField(first_pos=index,
ts_occurences= \
[[current_tok_seq,1]],
total_count=1,
par_count=1,
last_par=current_par,
last_tok_seq= \
current_tok_seq)
current_tok_seq += 1
return token_table
def _identify_boundaries(self, depth_scores):
"""Identifies boundaries at the peaks of similarity score
differences"""
boundaries = [0 for x in depth_scores]
avg = sum(depth_scores)/len(depth_scores)
stdev = numpy.std(depth_scores)
#SB: what is the purpose of this conditional?
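        # NOTE: both branches below currently compute the same cutoff; in the
        # original TextTiling formulation the liberal (LC) policy presumably
        # uses the lower cutoff avg - stdev rather than avg - stdev / 2.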
if self.cutoff_policy == LC:
cutoff = avg-stdev/2.0
else:
cutoff = avg-stdev/2.0
depth_tuples = sorted(zip(depth_scores, range(len(depth_scores))))
depth_tuples.reverse()
        hp = list(filter(lambda x: x[0] > cutoff, depth_tuples))  # materialise: iterated twice below
for dt in hp:
boundaries[dt[1]] = 1
for dt2 in hp: #undo if there is a boundary close already
if dt[1] != dt2[1] and abs(dt2[1]-dt[1]) < 4 \
and boundaries[dt2[1]] == 1:
boundaries[dt[1]] = 0
return boundaries
def _depth_scores(self, scores):
"""Calculates the depth of each gap, i.e. the average difference
between the left and right peaks and the gap's score"""
depth_scores = [0 for x in scores]
        # clip boundaries: this relies on the rule of thumb (my thumb)
#that a section shouldn't be smaller than at least 2
#pseudosentences for small texts and around 5 for larger ones.
        clip = min(max(len(scores) // 10, 2), 5)  # integer so it can be used as a slice index
index = clip
for gapscore in scores[clip:-clip]:
lpeak = gapscore
for score in scores[index::-1]:
if score >= lpeak:
lpeak = score
else:
break
rpeak = gapscore
for score in scores[index:]:
if score >= rpeak:
rpeak = score
else:
break
depth_scores[index] = lpeak + rpeak - 2 * gapscore
index += 1
return depth_scores
def _normalize_boundaries(self, text, boundaries, paragraph_breaks):
"""Normalize the boundaries identified to the original text's
paragraph breaks"""
norm_boundaries = []
char_count, word_count, gaps_seen = 0, 0, 0
seen_word = False
for char in text:
char_count += 1
if char in " \t\n" and seen_word:
seen_word = False
word_count += 1
if char not in " \t\n" and not seen_word:
seen_word=True
if gaps_seen < len(boundaries) and word_count > \
(max(gaps_seen*self.w, self.w)):
if boundaries[gaps_seen] == 1:
#find closest paragraph break
best_fit = len(text)
for br in paragraph_breaks:
if best_fit > abs(br-char_count):
best_fit = abs(br-char_count)
bestbr = br
else:
break
if bestbr not in norm_boundaries: #avoid duplicates
norm_boundaries.append(bestbr)
gaps_seen += 1
return norm_boundaries
class TokenTableField(object):
"""A field in the token table holding parameters for each token,
used later in the process"""
def __init__(self,
first_pos,
ts_occurences,
total_count=1,
par_count=1,
last_par=0,
last_tok_seq=None):
self.__dict__.update(locals())
del self.__dict__['self']
class TokenSequence(object):
"A token list with its original length and its index"
def __init__(self,
index,
wrdindex_list,
original_length=None):
original_length=original_length or len(wrdindex_list)
self.__dict__.update(locals())
del self.__dict__['self']
#Pasted from the SciPy cookbook: http://www.scipy.org/Cookbook/SignalSmooth
def smooth(x,window_len=11,window='flat'):
"""smooth the data using a window with requested size.
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal
(with the window size) in both ends so that transient parts are minimized
in the beginning and end part of the output signal.
:param x: the input signal
:param window_len: the dimension of the smoothing window; should be an odd integer
:param window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
flat window will produce a moving average smoothing.
:return: the smoothed signal
example::
t=linspace(-2,2,0.1)
x=sin(t)+randn(len(t))*0.1
y=smooth(x)
:see also: numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve,
scipy.signal.lfilter
TODO: the window parameter could be the window itself if an array instead of a string
"""
if x.ndim != 1:
raise ValueError("smooth only accepts 1 dimension arrays.")
if x.size < window_len:
raise ValueError("Input vector needs to be bigger than window size.")
if window_len < 3:
return x
    if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
        raise ValueError("Window must be one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
s=numpy.r_[2*x[0]-x[window_len:1:-1],x,2*x[-1]-x[-1:-window_len:-1]]
#print(len(s))
if window == 'flat': #moving average
w = numpy.ones(window_len,'d')
else:
        w = getattr(numpy, window)(window_len)
y = numpy.convolve(w/w.sum(), s, mode='same')
return y[window_len-1:-window_len+1]
def demo(text=None):
from nltk.corpus import brown
from matplotlib import pylab
tt = TextTilingTokenizer(demo_mode=True)
if text is None: text = brown.raw()[:10000]
s, ss, d, b = tt.tokenize(text)
pylab.xlabel("Sentence Gap index")
pylab.ylabel("Gap Scores")
pylab.plot(range(len(s)), s, label="Gap Scores")
pylab.plot(range(len(ss)), ss, label="Smoothed Gap scores")
pylab.plot(range(len(d)), d, label="Depth scores")
pylab.stem(range(len(b)), b)
pylab.legend()
pylab.show()
if __name__ == "__main__":
import doctest
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
| apache-2.0 |
michigraber/scikit-learn | sklearn/feature_selection/tests/test_chi2.py | 221 | 2398 | """
Tests for chi2, currently the only feature selection function designed
specifically to work with sparse matrices.
"""
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
import scipy.stats
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.feature_selection.univariate_selection import _chisquare
from nose.tools import assert_raises
from numpy.testing import assert_equal, assert_array_almost_equal
# Feature 0 is highly informative for class 1;
# feature 1 is the same everywhere;
# feature 2 is a bit informative for class 2.
X = [[2, 1, 2],
[9, 1, 1],
[6, 1, 2],
[0, 1, 2]]
y = [0, 1, 2, 2]
def mkchi2(k):
"""Make k-best chi2 selector"""
return SelectKBest(chi2, k=k)
def test_chi2():
# Test Chi2 feature extraction
    chi2 = mkchi2(k=1).fit(X, y)
assert_equal(chi2.get_support(indices=True), [0])
assert_equal(chi2.transform(X), np.array(X)[:, [0]])
chi2 = mkchi2(k=2).fit(X, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xsp = csr_matrix(X, dtype=np.float)
chi2 = mkchi2(k=2).fit(Xsp, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xtrans = chi2.transform(Xsp)
assert_equal(Xtrans.shape, [Xsp.shape[0], 2])
# == doesn't work on scipy.sparse matrices
Xtrans = Xtrans.toarray()
Xtrans2 = mkchi2(k=2).fit_transform(Xsp, y).toarray()
assert_equal(Xtrans, Xtrans2)
def test_chi2_coo():
# Check that chi2 works with a COO matrix
# (as returned by CountVectorizer, DictVectorizer)
Xcoo = coo_matrix(X)
mkchi2(k=2).fit_transform(Xcoo, y)
# if we got here without an exception, we're safe
def test_chi2_negative():
# Check for proper error on negative numbers in the input X.
X, y = [[0, 1], [-1e-20, 1]], [0, 1]
for X in (X, np.array(X), csr_matrix(X)):
assert_raises(ValueError, chi2, X, y)
def test_chisquare():
# Test replacement for scipy.stats.chisquare against the original.
obs = np.array([[2., 2.],
[1., 1.]])
exp = np.array([[1.5, 1.5],
[1.5, 1.5]])
# call SciPy first because our version overwrites obs
chi_scp, p_scp = scipy.stats.chisquare(obs, exp)
chi_our, p_our = _chisquare(obs, exp)
assert_array_almost_equal(chi_scp, chi_our)
assert_array_almost_equal(p_scp, p_our)
| bsd-3-clause |
IndicoDataSolutions/ClusterRSS | cluster/clustering/clusterer.py | 1 | 1546 | from sklearn.cluster import DBSCAN
import numpy as np
from ..errors import ClusterError
class DBScanClusterer(object):
def __init__(self, feature_vectors, algorithm="brute", metric="cosine", **kwargs):
self.feature_vectors = feature_vectors
self.kwargs = kwargs
if not feature_vectors.shape[0]:
raise ClusterError('empty results')
kwargs.update({
"algorithm": algorithm,
"metric": metric
})
def get_clusters(self, eps_range=[0.001, 0.002, 0.003, 0.0035, 0.004, 0.0045, 0.005, 0.0055, 0.006, 0.007, 0.008, 0.009, 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, .1]):
best_num_clusters = 0
best_fitted_response, best_cluster = 0, None
for epsilon in eps_range:
clusterer = DBSCAN(eps=epsilon, **self.kwargs)
fitted_response = clusterer.fit_predict(self.feature_vectors)
# Check if there are more clusters
num_unique_responses = len(set(fitted_response))
if num_unique_responses > best_num_clusters:
best_num_clusters = num_unique_responses
best_cluster = clusterer
best_fitted_response = fitted_response
if not best_cluster.components_.shape[0]:
            # Use an ndarray so the .tolist() call below works in this branch too.
            similarities = np.ones(self.feature_vectors.shape[0])
else:
similarities = np.max(self.feature_vectors.dot(best_cluster.components_.T), axis = 1)
return best_fitted_response, similarities.tolist()
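

# --- Minimal usage sketch (illustrative only; run e.g. via ``python -m`` so
# the relative import above resolves). The synthetic unit-norm vectors below
# are an assumption standing in for real document feature vectors. ---
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    # Two tight bundles of directions -> two cosine-separable clusters.
    base = np.eye(2)[rng.randint(0, 2, size=60)]
    vectors = base + 0.01 * rng.randn(60, 2)
    vectors /= np.linalg.norm(vectors, axis=1, keepdims=True)

    labels, similarities = DBScanClusterer(vectors).get_clusters()
    print(sorted(set(labels)), len(similarities))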
| mit |
vdrhtc/Measurement-automation | One_sweep.py | 1 | 2666 | import time
import numpy as np
import matplotlib.cm
import matplotlib.colors
from Drivers.Yokogawa_GS200 import Yokogawa_GS210
from Drivers.Agilent_PNA_L import Agilent_PNA_L
from time import sleep
import datetime
import matplotlib.pyplot as plt
from matplotlib.colors import BoundaryNorm
from matplotlib.ticker import MaxNLocator
import numpy as np
import os
import sys
mypna = Agilent_PNA_L("name_pna","PNA-L1")
mypna._visainstrument.write("CALC:PAR:DEL:ALL")
mypna._visainstrument.write("CALC:PAR:DEF:EXT 'CH1_S21_1',S21")
mypna._visainstrument.write("CALC:PAR:SEL 'CH1_S21_1'")
myyoko = Yokogawa_GS210("GPIB0::5::INSTR")
#myyoko.set_current_limits(-10e-3,10e-3)
myyoko.set_status(1)
sleep(0.01)
dt = datetime.datetime.now().strftime("%d.%m.%Y_%H-%M-%S_")
fname0 = "Data/Bolgar/A2_2/2Q/resonance_2_82_GHz/{0}".format(dt)
electrical_delay = 55e-9
power = int(sys.argv[1])#-55 #Power in dBm
if_bw = np.round(float(sys.argv[2]),1) #bandwidth (in Hz)
start_freq = float(sys.argv[3])
stop_freq = float(sys.argv[4])
step_freq = (stop_freq-start_freq)/1000
P = int((stop_freq-start_freq)/step_freq+1)  # number of frequency points
current = float(sys.argv[5])/1000
pars = "{:g}-{:g}GHz_{:g}mA_{:}dBm_bw{:}Hz".format(start_freq/1e9,stop_freq/1e9,current*1e3,power,if_bw)
fname = (fname0+pars+"/data")
#print(fname)
if not os.path.exists(fname0+pars):
os.makedirs(fname0+pars)
overhead_time = 31.4e-3
exp_start_time = datetime.datetime.now()
exp_duration_calc = (P/if_bw + overhead_time)/3600
print("Start_time: ", exp_start_time.ctime(),"\n","expected_duration: {0} ".format(exp_duration_calc),"hours\n")
freq = np.linspace(start_freq, stop_freq, P)
#Presetting the bias in the coil to initial bias of the sweep****************
# myyoko.set_appropriate_range(max(abs(bias)))
curstepabs = 20e-6
#****************************************
mypna.set_power(power)
mypna.set_nop(P)
mypna.set_xlim(start_freq,stop_freq)
mypna.set_bandwidth(if_bw)
mypna.set_electrical_delay(electrical_delay)
myyoko.set_current(current)
# sleep(0.02)
mypna.prepare_for_stb()
mypna.sweep_single()
mypna.wait_for_stb()
data = mypna.get_tracedata("RealImag")
sdata = data[0]+1j*data[1]
phase_data = np.angle(sdata)
amp_data = np.abs(sdata)
np.savez(fname,
P=P,
if_bw=if_bw,
power=power,
freq=freq,
sdata=sdata,
current=current,
start_freq=start_freq,
stop_freq=stop_freq
)
plt.clf()
axx=plt.subplot(111)
axx.grid(True)
axx.set_title("Amp")
axx.plot(freq,amp_data, linewidth=1) #
#plt.show()
plt.savefig(fname0+pars+'/image.jpg')
exp_stop_time = datetime.datetime.now()
#print(exp_stop_time-exp_start_time, 'executed.')
| gpl-3.0 |
akionakamura/scikit-learn | examples/linear_model/plot_multi_task_lasso_support.py | 249 | 2211 | #!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso allows fitting multiple regression problems
jointly while enforcing the selected features to be the same across
tasks. This example simulates sequential measurements: each task
is a time instant, and the relevant features vary in amplitude
over time while remaining the same across tasks. The multi-task lasso
imposes that features selected at one time point are selected for all
time points. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
###############################################################################
# Plot support and time series
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.spy(coef_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'Lasso')
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
feature_to_plot = 0
plt.figure()
plt.plot(coef[:, feature_to_plot], 'k', label='Ground truth')
plt.plot(coef_lasso_[:, feature_to_plot], 'g', label='Lasso')
plt.plot(coef_multi_task_lasso_[:, feature_to_plot],
'r', label='MultiTaskLasso')
plt.legend(loc='upper center')
plt.axis('tight')
plt.ylim([-1.1, 1.1])
plt.show()
| bsd-3-clause |
andaag/scikit-learn | examples/decomposition/plot_faces_decomposition.py | 204 | 4452 | """
============================
Faces dataset decompositions
============================
This example applies different unsupervised matrix decomposition
(dimension reduction) methods from the module
:py:mod:`sklearn.decomposition` to the :ref:`olivetti_faces` dataset
(see the documentation chapter :ref:`decompositions`).
"""
print(__doc__)
# Authors: Vlad Niculae, Alexandre Gramfort
# License: BSD 3 clause
import logging
from time import time
from numpy.random import RandomState
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.cluster import MiniBatchKMeans
from sklearn import decomposition
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
n_row, n_col = 2, 3
n_components = n_row * n_col
image_shape = (64, 64)
rng = RandomState(0)
###############################################################################
# Load faces data
dataset = fetch_olivetti_faces(shuffle=True, random_state=rng)
faces = dataset.data
n_samples, n_features = faces.shape
# global centering
faces_centered = faces - faces.mean(axis=0)
# local centering
faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)
print("Dataset consists of %d faces" % n_samples)
###############################################################################
def plot_gallery(title, images, n_col=n_col, n_row=n_row):
plt.figure(figsize=(2. * n_col, 2.26 * n_row))
plt.suptitle(title, size=16)
for i, comp in enumerate(images):
plt.subplot(n_row, n_col, i + 1)
vmax = max(comp.max(), -comp.min())
plt.imshow(comp.reshape(image_shape), cmap=plt.cm.gray,
interpolation='nearest',
vmin=-vmax, vmax=vmax)
plt.xticks(())
plt.yticks(())
plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)
###############################################################################
# List of the different estimators, whether to center and transpose the
# problem, and whether the transformer uses the clustering API.
estimators = [
('Eigenfaces - RandomizedPCA',
decomposition.RandomizedPCA(n_components=n_components, whiten=True),
True),
('Non-negative components - NMF',
decomposition.NMF(n_components=n_components, init='nndsvda', beta=5.0,
tol=5e-3, sparseness='components'),
False),
('Independent components - FastICA',
decomposition.FastICA(n_components=n_components, whiten=True),
True),
('Sparse comp. - MiniBatchSparsePCA',
decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8,
n_iter=100, batch_size=3,
random_state=rng),
True),
('MiniBatchDictionaryLearning',
decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
n_iter=50, batch_size=3,
random_state=rng),
True),
('Cluster centers - MiniBatchKMeans',
MiniBatchKMeans(n_clusters=n_components, tol=1e-3, batch_size=20,
max_iter=50, random_state=rng),
True),
('Factor Analysis components - FA',
decomposition.FactorAnalysis(n_components=n_components, max_iter=2),
True),
]
###############################################################################
# Plot a sample of the input data
plot_gallery("First centered Olivetti faces", faces_centered[:n_components])
###############################################################################
# Do the estimation and plot it
for name, estimator, center in estimators:
print("Extracting the top %d %s..." % (n_components, name))
t0 = time()
data = faces
if center:
data = faces_centered
estimator.fit(data)
train_time = (time() - t0)
print("done in %0.3fs" % train_time)
if hasattr(estimator, 'cluster_centers_'):
components_ = estimator.cluster_centers_
else:
components_ = estimator.components_
if hasattr(estimator, 'noise_variance_'):
plot_gallery("Pixelwise variance",
estimator.noise_variance_.reshape(1, -1), n_col=1,
n_row=1)
plot_gallery('%s - Train time %.1fs' % (name, train_time),
components_[:n_components])
plt.show()
| bsd-3-clause |
marl/massage | tests/resynth/test_util.py | 1 | 4507 | import pretty_midi
from unittest import TestCase
from massage.resynth.util import *
import mock
import sys
sys.modules['fluidsynth'] = mock.Mock()
TEST_VOICING_FILE = os.path.join(
os.path.dirname(__file__), '../data/chord_voicings.json')
TEST_MIDI_FILE = os.path.join(
os.path.dirname(__file__), '../data/test_midi.mid')
TEST_PICK_SF_MOCK_Y = os.path.join(
os.path.dirname(__file__), '../data/test_pick_sf_mock_y.npz')
TEST_FPATH = os.path.join(
os.path.dirname(__file__), '../data/acoustic_guitar.wav')
class TestUtil(TestCase):
def test_compute_avg_mfcc_zero(self):
y = np.zeros(1000)
avg_mfcc = compute_avg_mfcc(y=y, sr=44100)
target_mfcc = np.zeros(39)
self.assertTrue(np.allclose(avg_mfcc, target_mfcc))
def test_compute_avg_mfcc_fpath(self):
avg_mfcc = compute_avg_mfcc(TEST_FPATH)
self.assertEqual(avg_mfcc.shape[0], 39)
def test_onset_offset(self):
fs = 44100
noise = np.random.random(2205) - 0.5 # 50 ms of noise
silence = np.zeros(2205)
y = np.concatenate((silence, noise, silence, noise, silence))
target_on_t = np.array([0.05, 0.15])
target_off_t = np.array([0.10, 0.20])
on_t, off_t, on_s = onset_offset(y=y, sr=fs)
on_close = np.abs(target_on_t - on_t) < 0.03 # 30 ms slack
off_close = np.abs(target_off_t - off_t) < 0.03
self.assertTrue(np.logical_and(on_close, off_close).all())
def test_compute_envelope(self):
noise = np.random.random(220500) - 0.5 # 5 s of noise
silence = np.zeros(220500)
y = np.concatenate((silence, noise, silence, noise, silence))
env = compute_envelope(y)
print(y.shape)
# import matplotlib.pyplot as plt
# plt.plot(env/ np.max(env))
# plt.plot(y)
# plt.show()
self.assertEqual(len(env), 220500*5)
def test_get_energy_envelope(self):
y = np.zeros((2, 100000))
env = get_energy_envelope(y)
self.assertEqual(np.sum(env), 0)
self.assertEqual(env.shape, y.shape)
def test_get_energy_envelope_mono(self):
y = np.zeros(100000)
env = get_energy_envelope(y)
self.assertEqual(np.sum(env), 0)
self.assertEqual(env.shape, (1,len(y)))
@mock.patch.object(pretty_midi.PrettyMIDI, 'fluidsynth', autospec=True)
def test_pick_sf(self, mock_fluidsynth):
# synthesize something different with the sf, and try to match
mock_y = np.load(TEST_PICK_SF_MOCK_Y)['arr_0']
mock_fluidsynth.return_value = mock_y
midi_data = pretty_midi.PrettyMIDI(TEST_MIDI_FILE)
test_sf_path = os.path.join(
os.path.dirname(__file__), '../data/28MBGM.sf2')
fs = 44100
y = midi_data.fluidsynth(sf2_path=test_sf_path, fs=fs)
# np.savez('test_pick_sf_mock_y', y)
sf_path, program = pick_sf(y, fs, 'acoustic guitar')
sf_base = os.path.basename(sf_path)
self.assertEqual(program, 25)
self.assertIsInstance(sf_base, str)
# the following test should work, but doesn't... right now the sf
# picked out is 'chorium.sf2' as opposed to 28MBGM
# self.assertEqual(sf_base, '28MBGM.sf2')
def test_pick_sf2(self):
passed = False
y = np.zeros(100)
fs = 44100
try:
out = pick_sf(y, fs, 'not a instrument')
except ValueError:
passed = True
self.assertTrue(passed)
def test_amplitude_to_velocity(self):
energies = [-1, 0, 0.5, 1]
velocities = amplitude_to_velocity(energies)
self.assertListEqual(list(velocities), [60, 90, 105, 120])
def test_midi_to_jams(self):
midi_data = pretty_midi.PrettyMIDI(TEST_MIDI_FILE)
jam = midi_to_jams(midi_data)
jam_len = len(jam.annotations[0].data)
midi_len = len(midi_data.instruments[0].notes)
self.assertEqual(jam_len, midi_len)
def test_voicing_dist(self):
v1 = [1, 3, 5, 7, 9]
v2 = [1, 3, 5, 7, 10]
self.assertEqual(voicing_dist(v1, v2), 0.2)
def test_get_all_voicings(self):
voicing_dict = get_all_voicings(TEST_VOICING_FILE)
self.assertEqual(
voicing_dict['G:maj'][0], [43, 47, 50, 55, 59])
def test_choose_voicing(self):
voicing_dict = get_all_voicings(TEST_VOICING_FILE)
voicing = choose_voicing('A#:maj', voicing_dict, [43, 47, 50, 55, 59])
self.assertIsNotNone(voicing[0])
| mit |
ddempsey/PyFEHM | fpost.py | 1 | 128021 | """For reading FEHM output files."""
"""
Copyright 2013.
Los Alamos National Security, LLC.
This material was produced under U.S. Government contract DE-AC52-06NA25396 for
Los Alamos National Laboratory (LANL), which is operated by Los Alamos National
Security, LLC for the U.S. Department of Energy. The U.S. Government has rights
to use, reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR LOS
ALAMOS NATIONAL SECURITY, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES
ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is modified to produce
derivative works, such modified software should be clearly marked, so as not to
confuse it with the version available from LANL.
Additionally, this library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your option)
any later version. Accordingly, this library is distributed in the hope that it
will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General
Public License for more details.
"""
import numpy as np
import os
try:
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import axes3d
from matplotlib import cm
import matplotlib
except ImportError:
pass
from copy import copy,deepcopy
from ftool import *
import platform
from fdflt import*
dflt = fdflt()
import pyvtk as pv
WINDOWS = platform.system()=='Windows'
if WINDOWS: copyStr = 'copy'; delStr = 'del'
else: copyStr = 'cp'; delStr = 'rm'
if True: # output variable dictionaries defined in here, indented for code collapse
cont_var_names_avs=dict([
('X coordinate (m)','x'),
('Y coordinate (m)','y'),
('Z coordinate (m)','z'),
('node','n'),
('Liquid Pressure (MPa)','P'),
('Vapor Pressure (MPa)','P_vap'),
('Capillary Pressure (MPa)','P_cap'),
('Saturation','saturation'),
('Temperature (deg C)','T'),
('Porosity','por'),
('X Permeability (log m**2)','perm_x'),
('Y Permeability (log m**2)','perm_y'),
('Z Permeability (log m**2)','perm_z'),
('X displacement (m)','disp_x'),
('Y displacement (m)','disp_y'),
('Z displacement (m)','disp_z'),
('X stress (MPa)','strs_xx'),
('Y stress (MPa)','strs_yy'),
('Z stress (MPa)','strs_zz'),
('XY stress (MPa)','strs_xy'),
('XZ stress (MPa)','strs_xz'),
('YZ stress (MPa)','strs_yz'),
('Youngs Mod (MPa)','E'),
('Excess Shear (MPa)','tau_ex'),
('Shear Angle (deg)','phi_dil'),
('Zone','zone'),
('Liquid Density (kg/m**3)','density'),
('Vapor Density (kg/m**3)','density_vap'),
('Source (kg/s)','flow'),
('Liquid Flux (kg/s)','flux'),
('Vapor Flux (kg/s)','flux_vap'),
('Volume Strain','strain'),
('Vapor X Volume Flux (m3/[m2 s])','flux_x_vap'),
('Vapor Y Volume Flux (m3/[m2 s])','flux_y_vap'),
('Vapor Z Volume Flux (m3/[m2 s])','flux_z_vap'),
('Liquid X Volume Flux (m3/[m2 s])','flux_x'),
('Liquid Y Volume Flux (m3/[m2 s])','flux_y'),
('Liquid Z Volume Flux (m3/[m2 s])','flux_z'),
])
cont_var_names_tec=dict([
('X coordinate (m)','x'),
('Y coordinate (m)','y'),
('Z coordinate (m)','z'),
('X Coordinate (m)','x'),
('Y Coordinate (m)','y'),
('Z Coordinate (m)','z'),
('node','n'),
('Node','n'),
('Liquid Pressure (MPa)','P'),
('Vapor Pressure (MPa)','P_vap'),
('Capillary Pressure (MPa)','P_cap'),
('Saturation','saturation'),
('Water Saturation','water'),
('Super-Critical/Liquid CO2 Saturation','co2_sc_liquid'),
('Gaseous CO2 Saturation','co2_gas'),
('Dissolved CO2 Mass Fraction','co2_aq'),
('CO2 Phase State','co2_phase'),
('CO2 Gas Density (kg/m**3)','density_co2_gas'),
('CO2 Liquid Density (kg/m**3)','density_co2_sc_liquid'),
('Temperature (<sup>o</sup>C)','T'),
('Temperature (deg C)','T'),
('Porosity','por'),
('X Permeability (log m**2)','perm_x'),
('Y Permeability (log m**2)','perm_y'),
('Z Permeability (log m**2)','perm_z'),
('X displacement (m)','disp_x'),
('Y displacement (m)','disp_y'),
('Z displacement (m)','disp_z'),
('X stress (MPa)','strs_xx'),
('Y stress (MPa)','strs_yy'),
('Z stress (MPa)','strs_zz'),
('XY stress (MPa)','strs_xy'),
('XZ stress (MPa)','strs_xz'),
('YZ stress (MPa)','strs_yz'),
('Youngs Mod (MPa)','E'),
('Excess Shear (MPa)','tau_ex'),
('Shear Angle (deg)','phi_dil'),
('Zone','zone'),
('Liquid Density (kg/m**3)','density'),
('Vapor Density (kg/m**3)','density_vap'),
('Source (kg/s)','flow'),
('Liquid Flux (kg/s)','flux'),
('Vapor Flux (kg/s)','flux_vap'),
('Volume Strain','strain'),
('Vapor X Volume Flux (m3/[m2 s])','flux_x_vap'),
('Vapor Y Volume Flux (m3/[m2 s])','flux_y_vap'),
('Vapor Z Volume Flux (m3/[m2 s])','flux_z_vap'),
('Liquid X Volume Flux (m3/[m2 s])','flux_x'),
('Liquid Y Volume Flux (m3/[m2 s])','flux_y'),
('Liquid Z Volume Flux (m3/[m2 s])','flux_z'),
])
cont_var_names_surf=dict([
('X coordinate (m)','x'),
('X Coordinate (m)','x'),
('X (m)','x'),
('Y coordinate (m)','y'),
('Y Coordinate (m)','y'),
('Y (m)','y'),
('Z coordinate (m)','z'),
('Z Coordinate (m)','z'),
('Z (m)','z'),
('node','n'),
('Node','n'),
('Liquid Pressure (MPa)','P'),
('Vapor Pressure (MPa)','P_vap'),
('Capillary Pressure (MPa)','P_cap'),
('Saturation','saturation'),
('Temperature (deg C)','T'),
('Porosity','por'),
('X Permeability (log m**2)','perm_x'),
('Y Permeability (log m**2)','perm_y'),
('Z Permeability (log m**2)','perm_z'),
('X displacement (m)','disp_x'),
('Y displacement (m)','disp_y'),
('Z displacement (m)','disp_z'),
('X stress (MPa)','strs_xx'),
('Y stress (MPa)','strs_yy'),
('Z stress (MPa)','strs_zz'),
('XY stress (MPa)','strs_xy'),
('XZ stress (MPa)','strs_xz'),
('YZ stress (MPa)','strs_yz'),
('Youngs Mod (MPa)','E'),
('Excess Shear (MPa)','tau_ex'),
('Shear Angle (deg)','phi_dil'),
('Zone','zone'),
('Liquid Density (kg/m**3)','density'),
('Vapor Density (kg/m**3)','density_vap'),
('Source (kg/s)','flow'),
('Liquid Flux (kg/s)','flux'),
('Vapor Flux (kg/s)','flux_vap'),
('Volume Strain','strain'),
('Vapor X Volume Flux (m3/[m2 s])','flux_x_vap'),
('Vapor Y Volume Flux (m3/[m2 s])','flux_y_vap'),
('Vapor Z Volume Flux (m3/[m2 s])','flux_z_vap'),
('Liquid X Volume Flux (m3/[m2 s])','flux_x'),
('Liquid Y Volume Flux (m3/[m2 s])','flux_y'),
('Liquid Z Volume Flux (m3/[m2 s])','flux_z'),
('Water Saturation','saturation'),
('Super-Critical/Liquid CO2 Saturation','co2_liquid'),
('Gaseous CO2 Saturation','co2_gas'),
('Dissolved CO2 Mass Fraction','co2_aq'),
('CO2 Phase State','co2_phase'),
('Aqueous_Species_001','Caq001'),
('Aqueous_Species_002','Caq002'),
('Aqueous_Species_003','Caq003'),
('Aqueous_Species_004','Caq004'),
('Aqueous_Species_005','Caq005'),
('Aqueous_Species_006','Caq006'),
('Aqueous_Species_007','Caq007'),
('Aqueous_Species_008','Caq008'),
('Aqueous_Species_009','Caq009'),
('Aqueous_Species_010','Caq010'),
('Aqueous_Species_011','Caq011'),
('Aqueous_Species_012','Caq012'),
('Aqueous_Species_013','Caq013'),
('Aqueous_Species_014','Caq014'),
('Aqueous_Species_015','Caq015'),
('Aqueous_Species_016','Caq016'),
('Aqueous_Species_017','Caq017'),
('Aqueous_Species_018','Caq018'),
('Aqueous_Species_019','Caq019'),
('Aqueous_Species_020','Caq020'),
])
hist_var_names=dict([
('denAIR','density_air'),
('disx','disp_x'),
('disy','disp_y'),
('disz','disp_z'),
('enth','enthalpy'),
('glob','global'),
('humd','humidity'),
('satr','saturation'),
('strain','strain'),
('strx','strs_xx'),
('stry','strs_yy'),
('strz','strs_zz'),
('strxy','strs_xy'),
('strxz','strs_xz'),
('stryz','strs_yz'),
('wcon','water_content'),
('denWAT','density'),
('flow','flow'),
('visAIR','viscosity_air'),
('visWAT','viscosity'),
('wt','water_table'),
('presCAP','P_cap'),
('presVAP','P_vap'),
('presWAT','P'),
('presCO2','P_co2'),
('temp','T'),
('co2md','massfrac_co2_aq'),
('co2mf','massfrac_co2_free'),
('co2mt','mass_co2'),
('co2sg','saturation_co2g'),
('co2sl','saturation_co2l'),
])
flxz_water_names = [
'water_source',
'water_sink',
'water_net',
'water_boundary',]
flxz_vapor_names = [
'vapor_source',
'vapor_sink',
'vapor_net',
'vapor_boundary',]
flxz_co2_names = [
'co2_source',
'co2_sink',
'co2_in',
'co2_out',
'co2_boundary',
'co2_sourceG',
'co2_sinkG',
'co2_inG',
'co2_outG']
class fcontour(object): # Reading and plotting methods associated with contour output data.
'''Contour output information object.
'''
def __init__(self,filename=None,latest=False,first=False,nearest=None):
if not isinstance(filename,list):
self._filename=os_path(filename)
self._silent = dflt.silent
self._times=[]
self._format = ''
self._data={}
self._material = {}
self._material_properties = []
self._row=None
self._variables=[]
self._user_variables = []
self.key_name=[]
self._keyrows={}
self.column_name=[]
self.num_columns=0
self._x = []
self._y = []
self._z = []
self._xmin = None
self._ymin = None
self._zmin = None
self._xmax = None
self._ymax = None
self._zmax = None
self._latest = latest
self._first = first
self._nearest = nearest
if isinstance(self._nearest,(float,int)): self._nearest = [self._nearest]
self._nkeys=1
if filename is not None: self.read(filename,self._latest,self._first,self._nearest)
def __getitem__(self,key):
if key in self.times:
return self._data[key]
elif np.min(abs(self.times-key)/self.times)<.01:
ind = np.argmin(abs(self.times-key))
return self._data[self.times[ind]]
else: return None
def read(self,filename,latest=False,first=False,nearest=[]): # read contents of file
'''Read in FEHM contour output information.
:param filename: File name for output data, can include wildcards to define multiple output files.
:type filename: str
:param latest: Boolean indicating PyFEHM should read the latest entry in a wildcard search.
:type latest: bool
:param first: Boolean indicating PyFEHM should read the first entry in a wildcard search.
:type first: bool
:param nearest: Read in the file with date closest to the day supplied. List input will parse multiple output files.
:type nearest: fl64,list
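A minimal usage sketch (the file pattern below is hypothetical and assumes
scalar node output with temperature was written)::

    cont = fcontour('run1.*_sca_node.dat', latest=True)  # most recent output only
    print(cont.times)                     # available output times
    T = cont[cont.times[-1]]['T']         # temperature at the last output time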
'''
from glob import glob
if isinstance(filename,list):
files = filename
else:
filename = os_path(filename)
files=glob(filename)
if len(files)==0:
pyfehm_print('ERROR: '+filename+' not found',self._silent)
return
# decision-making
mat_file = None
multi_type = None
# are there multiple file types? e.g., _con_ and _sca_?
# is there a material properties file? e.g., 'mat_nodes'?
file_types = []
for file in files:
if '_sca_node' in file and 'sca' not in file_types: file_types.append('sca')
if '_vec_node' in file and 'vec' not in file_types: file_types.append('vec')
if '_con_node' in file and 'con' not in file_types: file_types.append('con')
if '_hf_node' in file and 'hf' not in file_types: file_types.append('hf')
if 'mat_node' in file: mat_file = file
if self._nearest or latest or first:
files = list(filter(os.path.isfile, glob(filename)))
if mat_file: files.remove(mat_file)
files.sort(key=lambda x: os.path.getmtime(x))
files2 = []
# retrieve first created and same time in group
if first:
files2.append(files[0])
for file_type in file_types:
tag = '_'+file_type+'_node'
if tag in files2[-1]:
prefix = files2[-1].split(tag)[0]
break
for file in files:
if file.startswith(prefix) and tag not in file: files2.append(file)
# retrieve files nearest in time to given (and same time in group)
if self._nearest:
ts = []
for file in files:
file = file.split('_node')[0]
file = file.split('_sca')[0]
file = file.split('_con')[0]
file = file.split('_vec')[0]
file = file.split('_hf')[0]
file = file.split('_days')[0]
file = file.split('.')
file = file[-2:]
ts.append(float('.'.join(file)))
ts = np.unique(ts)
for near in self._nearest:
tsi = min(enumerate(ts), key=lambda x: abs(x[1]-near))[0]
files2.append(files[tsi])
for file_type in file_types:
tag = '_'+file_type+'_node'
if tag in files2[-1]:
prefix = files2[-1].split(tag)[0]
break
for file in files:
if file.startswith(prefix) and tag not in file: files2.append(file)
# retrieve last created and same time in group
if latest:
files2.append(files[-1])
for file_type in file_types:
tag = '_'+file_type+'_node'
if tag in files2[-1]:
prefix = files2[-1].split(tag)[0]
break
for file in files:
if file.startswith(prefix) and tag not in file: files2.append(file)
# removes duplicates
files = []
for file in files2:
if file not in files: files.append(file)
# group files into their types
FILES = []
for file_type in file_types:
tag = '_'+file_type+'_node'
FILES.append(sort_tec_files([file for file in files if tag in file]))
FILES = np.array(FILES)
# determine headers for 'tec' output
for i in range(FILES.shape[1]):
if not self._variables:
files = FILES[:,i]
headers = []
for file in sort_tec_files(files):
fp = open(file,'rU')
headers.append(fp.readline())
fp.close()
firstFile = self._detect_format(headers)
if self._format=='tec' and firstFile:
headers = []
for file in sort_tec_files(files):
fp = open(file,'rU')
fp.readline()
headers.append(fp.readline())
fp.close()
self._setup_headers_tec(headers)
# read in output data
for i in range(FILES.shape[1]):
files = FILES[:,i]
# Skip -1 file if present
if '-1' in files[0]: continue
for file in sort_tec_files(files): pyfehm_print(file,self._silent)
if not self._variables:
headers = []
for file in sort_tec_files(files):
fp = open(file,'rU')
headers.append(fp.readline())
fp.close()
self._detect_format(headers)
#if self._format=='tec': self._setup_headers_tec(headers)
if self._format=='avs': self._setup_headers_avs(headers,files)
elif self._format=='avsx': self._setup_headers_avsx(headers)
elif self._format=='surf': self._setup_headers_surf(headers)
else: pyfehm_print('ERROR: Unrecognised format',self._silent);return
self.num_columns = len(self.variables)+1
if self.format == 'tec': self._read_data_tec(files,mat_file)
elif self.format == 'surf': self._read_data_surf(files,mat_file)
elif self.format == 'avs': self._read_data_avs(files,mat_file)
elif self.format == 'avsx': self._read_data_avsx(files,mat_file)
# assemble grid information
if 'x' in self.variables:
self._x = np.unique(self[self.times[0]]['x'])
self._xmin,self._xmax = np.min(self.x), np.max(self.x)
if 'y' in self.variables:
self._y = np.unique(self[self.times[0]]['y'])
self._ymin,self._ymax = np.min(self.y), np.max(self.y)
if 'z' in self.variables:
self._z = np.unique(self[self.times[0]]['z'])
self._zmin,self._zmax = np.min(self.z), np.max(self.z)
if dflt.parental_cont:
print('')
print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
print('WARNING:')
print('')
print('Contour data is indexed using the Pythonic convention in which the first index is 0. FEHM node numbering convention begins at 1.')
print('')
print('THEREFORE, to get the correct contour value for a particular node, you need to pass the node index MINUS 1. Using node index to access contour data will return incorrect values.')
print('')
print('For example:')
print('>>> node10 = dat.grid.node[10]')
print('>>> c = fcontour(\'*.csv\')')
print('>>> T_node10 = c[c.times[-1]][\'T\'][node10.index - 1]')
print(' or')
print('>>> T_node10 = c[c.times[-1]][\'T\'][9]')
print('will return the correct value for node 10.')
print('')
print('Do not turn off this message unless you understand how to correctly access nodal values from contour data.')
print('To turn off this message, open the environment file \'fdflt.py\' and set self.parental_cont = False')
print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
print('')
def _detect_format(self,headers):
if headers[0].startswith('TITLE ='): # check for TEC output
self._format = 'tec'
if headers[0].startswith('ZONE '): # check for TEC output
self._format = 'tec'
return False
elif headers[0].startswith('node, '): # check for SURF output
self._format = 'surf'
elif headers[0].startswith('nodes at '): # check for AVSX output
self._format = 'avsx'
elif headers[0].split()[0].isdigit(): # check for AVS output
self._format = 'avs'
return True
def _setup_headers_avsx(self,headers): # headers for the AVSX output format
self._variables.append('n')
for header in headers:
header = header.strip().split(' : ')
for key in header[1:]:
if key in list(cont_var_names_avs.keys()):
var = cont_var_names_avs[key]
else: var = key
self._variables.append(var)
def _read_data_avsx(self,files,mat_file): # read data in AVSX format
datas = []
for file in sorted(files): # use alphabetical sorting
fp = open(file,'rU')
header = fp.readline()
if file == sorted(files)[0]:
header = header.split('nodes at ')[1]
header = header.split('days')[0]
time = float(header)*24*2600
self._times.append(time)
lns = fp.readlines()
fp.close()
datas.append(np.array([[float(d) for d in ln.strip().split(':')] for ln in lns]))
data = np.concatenate(datas,1)
self._data[time] = dict([(var,data[:,icol]) for icol,var in enumerate(self.variables)])
if mat_file and not self._material_properties:
fp = open(mat_file,'rU')
header = fp.readline()
self._material_properties = header.split(':')[1:]
lns = fp.readlines()
fp.close()
data = np.array([[float(d) for d in ln.strip().split(':')[1:]] for ln in lns])
self._material= dict([(var,data[:,icol]) for icol,var in enumerate(self._material_properties)])
def _setup_headers_avs(self,headers,files): # headers for the AVS output format
for header,file in zip(headers,files):
lns_num = int(header.strip().split()[0])
fp = open(file)
lns = [fp.readline() for i in range(lns_num+1)][1:]
fp.close()
self._variables.append('n')
for ln in lns:
varname = ln.strip().split(',')[0]
if varname in list(cont_var_names_avs.keys()):
var = cont_var_names_avs[varname]
else: var = varname
if var not in self._variables: self._variables.append(var)
def _read_data_avs(self,files,mat_file): # read data in AVS format
datas = []
for file in sorted(files):
first = (file == sorted(files)[0])
fp = open(file,'rU')
lns = fp.readlines()
lns = lns[int(float(lns[0].split()[0]))+1:]
if first:
file = file.split('_node')[0]
file = file.split('_sca')[0]
file = file.split('_con')[0]
file = file.split('_vec')[0]
file = file.split('_hf')[0]
file = file.split('_days')[0]
file = file.split('.')
file = [fl for fl in file if fl.isdigit() or 'E-' in fl]
time = float('.'.join(file))
self._times.append(time)
if first:
datas.append(np.array([[float0(d) for d in ln.strip().split()] for ln in lns]))
else:
datas.append(np.array([[float0(d) for d in ln.strip().split()[4:]] for ln in lns]))
data = np.concatenate(datas,1)
self._data[time] = dict([(var,data[:,icol]) for icol,var in enumerate(self.variables)])
def _setup_headers_surf(self,headers): # headers for the SURF output format
for header in headers:
header = header.strip().split(', ')
for key in header:
varname = key.split('"')[0]
varname = varname.strip()
if varname in list(cont_var_names_surf.keys()):
var = cont_var_names_surf[varname]
else: var = varname
if var not in self._variables: self._variables.append(var)
def _read_data_surf(self,files,mat_file): # read data in SURF format
datas = []
for file in sorted(files):
first = (file == sorted(files)[0])
fp = open(file,'rU')
lni = file.split('.',1)[1]
if first:
file = file.split('_node')[0]
file = file.split('_sca')[0]
file = file.split('_con')[0]
file = file.split('_vec')[0]
file = file.split('_hf')[0]
file = file.split('_days')[0]
file = file.split('.')
file = [fl for fl in file if fl.isdigit() or 'E-' in fl]
time = float('.'.join(file))
self._times.append(time)
lni=fp.readline()
lns = fp.readlines()
fp.close()
if first:
datas.append(np.array([[float0(d) for d in ln.strip().split(',')] for ln in lns]))
else:
datas.append(np.array([[float0(d) for d in ln.strip().split(',')[4:]] for ln in lns]))
data = np.concatenate(datas,1)
self._data[time] = dict([(var,data[:,icol]) for icol,var in enumerate(self.variables)])
if mat_file and not self._material_properties:
fp = open(mat_file,'rU')
header = fp.readline()
for mat_prop in header.split(',')[1:]:
if 'specific heat' not in mat_prop:
self._material_properties.append(mat_prop.strip())
lns = fp.readlines()
fp.close()
data = np.array([[float(d) for d in ln.strip().split(',')[1:]] for ln in lns])
self._material= dict([(var,data[:,icol]) for icol,var in enumerate(self._material_properties)])
def _setup_headers_tec(self,headers): # headers for the TEC output format
for header in headers:
header = header.split(' "')
for key in header[1:]:
varname = key.split('"')[0].strip()
if varname in list(cont_var_names_tec.keys()):
var = cont_var_names_tec[varname]
else: var = varname
if var not in self._variables: self._variables.append(var)
def _read_data_tec(self,files,mat_file): # read data in TEC format
datas = []
for file in sorted(files):
first = (file == sorted(files)[0])
fp = open(file,'rU')
ln = fp.readline()
has_xyz = False
while not ln.startswith('ZONE'):
ln = fp.readline()
has_xyz = True
if first:
lni = ln.split('"')[1]
time = lni.split('days')[0].strip()
time = float(time.split()[-1].strip())
try:
if time<self._times[0]: return
except: pass
self._times.append(time)
nds = None
if 'N =' in ln:
nds = int(ln.split('N = ')[-1].strip().split(',')[0].strip())
lns = fp.readlines()
fp.close()
if nds: lns = lns[:nds] # truncate to remove connectivity information
if has_xyz:
if first:
datas.append(np.array([[float0(d) for d in ln.strip().split()] for ln in lns]))
else:
datas.append(np.array([[float0(d) for d in ln.strip().split()[4:]] for ln in lns]))
else:
if first:
datas.append(np.array([[float0(d) for d in ln.strip().split()] for ln in lns]))
else:
datas.append(np.array([[float0(d) for d in ln.strip().split()[1:]] for ln in lns]))
data = np.concatenate(datas,1)
if data.shape[1]< len(self.variables): # insert xyz data from previous read
data2 = []
j = 0
for var in self.variables:
if var == 'x':
data2.append(self._data[self._times[0]]['x'])
elif var == 'y':
data2.append(self._data[self._times[0]]['y'])
elif var == 'z':
data2.append(self._data[self._times[0]]['z'])
elif var == 'zone':
data2.append(self._data[self._times[0]]['zone'])
else:
data2.append(data[:,j]); j +=1
data = np.transpose(np.array(data2))
self._data[time] = dict([(var,data[:,icol]) for icol,var in enumerate(self.variables)])
if mat_file and not self._material_properties:
fp = open(mat_file,'rU')
fp.readline()
header = fp.readline()
for mat_prop in header.split(' "')[5:]:
self._material_properties.append(mat_prop.split('"')[0].strip())
lns = fp.readlines()
if lns[0].startswith('ZONE'): lns = lns[1:]
fp.close()
if nds: lns = lns[:nds] # truncate to remove connectivity information
data = np.array([[float(d) for d in ln.strip().split()[4:]] for ln in lns[:-1]])
self._material= dict([(var,data[:,icol]) for icol,var in enumerate(self._material_properties)])
def _check_inputs(self,variable, time, slice): # assesses whether sufficient input information for slice plot
if not variable:
s = ['ERROR: no plot variable specified.']
s.append('Options are')
for var in self.variables: s.append(var)
s = '\n'.join(s)
pyfehm_print(s,self._silent)
return True
if time==None:
s = ['ERROR: no plot time specified.']
s.append('Options are')
for time in self.times: s.append(time)
s = '\n'.join(s)
pyfehm_print(s,self._silent)
return True
if not slice:
s = ['Error: slice orientation undefined.']
s.append('Options are')
s.append('[\'x\',float] - slice parallel to y-axis at x=float')
s.append('[\'y\',float] - slice parallel to x-axis at y=float')
s.append('[\'theta\',float] - angle measured anti-clockwise from +x')
s.append('[[float,float],[float,float]] - point to point')
s = '\n'.join(s)
pyfehm_print(s,self._silent)
return True
return False
def new_variable(self,name,time,data):
'''Creates a new variable, which is some combination of the available variables.
:param name: Name for the variable.
:type name: str
:param time: Time key which the variable should be associated with. Must be one of the existing keys, i.e., an item in fcontour.times.
:type time: fl64
:param data: Variable data, most likely some combination of the available parameters, e.g., pressure*temperature, pressure[t=10] - pressure[t=5]
:type data: lst[fl64]
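A minimal usage sketch (assumes an fcontour instance ``cont`` with pressure
output)::

    t = cont.times[-1]
    # overpressure relative to the first output time
    cont.new_variable('dP', t, cont[t]['P'] - cont[cont.times[0]]['P'])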
'''
if time not in self.times:
pyfehm_print('ERROR: supplied time must correspond to an existing time in fcontour.times',self._silent)
return
if name in self.variables:
pyfehm_print('ERROR: there is already a variable called \''+name+'\', please choose a different name',self._silent)
return
self._data[time][name] = data
if name not in self._user_variables:
self._user_variables.append(name)
def slice(self, variable, slice, divisions, time=None, method='nearest'):
'''Returns mesh data for a specified slice orientation from 3-D contour output data.
:param variable: Output data variable, for example 'P' = pressure. Alternatively, variable can be a five element list, first element 'cfs', remaining elements fault azimuth (relative to x), dip, friction coefficient and cohesion. Will return coulomb failure stress.
:type variable: str
:param time: Time for which output data is requested. Can be supplied via ``fcontour.times`` list. Default is most recently available data.
:type time: fl64
:param slice: List specifying orientation of output slice, e.g., ['x',200.] is a vertical slice at ``x = 200``, ['z',-500.] is a horizontal slice at ``z = -500.``, [point1, point2] is a fixed limit vertical or horizontal domain corresponding to the bounding box defined by point1 and point2.
:type slice: lst[str,fl64]
:param divisions: Resolution to supply mesh data.
:type divisions: [int,int]
:param method: Method of interpolation, options are 'nearest', 'linear'.
:type method: str:
:returns: X, Y, Z, valsI -- mesh x, y and z coordinates of the slice, and the interpolated values of the requested variable on that mesh.
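A minimal usage sketch (assumes 3-D output with pressure in an fcontour
instance ``cont``)::

    # pressure on a vertical slice at x = 200 m, interpolated to a 50 x 50 mesh
    X, Y, Z, P = cont.slice('P', slice=['x', 200.], divisions=[50, 50])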
'''
if time==None:
if np.min(self.times)<0: time = self.times[0]
else: time = self.times[-1]
from scipy.interpolate import griddata
delta = False
if isinstance(time,list) or isinstance(time,np.ndarray):
if len(time)>1:
time0 = np.min(time)
time = np.max(time)
delta=True
dat = self[time]
# check to see if cfs plot requested
cfs = False
if isinstance(variable,list):
if variable[0] in ['cfs','CFS']: cfs = True
if not cfs:
if delta: dat0 = self[time0]
if isinstance(slice[0],str):
if slice[0].startswith('y'):
xmin = np.min(dat['x']);xmax = np.max(dat['x'])
ymin = np.min(dat['z']);ymax = np.max(dat['z'])
if slice[1] is None:
points = np.transpose(np.array([dat['x'],dat['z'],np.ones((1,len(dat['z'])))[0]]))
slice[1] = 1
else:
points = np.transpose(np.array([dat['x'],dat['z'],dat['y']]))
elif slice[0].startswith('x'):
xmin = np.min(dat['y']);xmax = np.max(dat['y'])
ymin = np.min(dat['z']);ymax = np.max(dat['z'])
if slice[1] is None:
points = np.transpose(np.array([dat['y'],dat['z'],np.ones((1,len(dat['z'])))[0]]))
slice[1] = 1
else:
points = np.transpose(np.array([dat['y'],dat['z'],dat['x']]))
elif slice[0].startswith('z'):
xmin = np.min(dat['x']);xmax = np.max(dat['x'])
ymin = np.min(dat['y']);ymax = np.max(dat['y'])
if slice[1] is None:
points = np.transpose(np.array([dat['x'],dat['y'],np.ones((1,len(dat['y'])))[0]]))
slice[1] = 1
else:
points = np.transpose(np.array([dat['x'],dat['y'],dat['z']]))
elif slice[0].startswith('theta'):
pyfehm_print('ERROR: theta slicing not supported yet',self._silent)
return
xrange = np.linspace(xmin,xmax,divisions[0])
yrange = np.linspace(ymin,ymax,divisions[1])
X,Y = np.meshgrid(xrange,yrange)
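# constant third coordinate for every interpolation point: the
# (X+c)/(X+c) term is simply an array of ones, scaled by the slice position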
Z = (X+np.sqrt(1.757))/(X+np.sqrt(1.757))*slice[1]
pointsI = np.transpose(np.reshape((X,Y,Z),(3,X.size)))
vals = np.transpose(np.array(dat[variable]))
valsI = griddata(points,vals,pointsI,method=method)
valsI = np.reshape(valsI,(X.shape[0],X.shape[1]))
if delta:
vals = np.transpose(np.array(dat0[variable]))
valsI0 = griddata(points,vals,pointsI,method=method)
valsI0 = np.reshape(valsI0,(X.shape[0],X.shape[1]))
valsI = valsI - valsI0
elif isinstance(slice[0],list):
# check if horizontal or vertical slice
dx,dy,dz = abs(slice[0][0]-slice[1][0]),abs(slice[0][1]-slice[1][1]),abs(slice[0][2]-slice[1][2])
if 100*dz<dx and 100*dz<dy: #horizontal
xmin,xmax = np.min([slice[0][0],slice[1][0]]),np.max([slice[0][0],slice[1][0]])
ymin,ymax = np.min([slice[0][1],slice[1][1]]),np.max([slice[0][1],slice[1][1]])
xrange = np.linspace(xmin,xmax,divisions[0])
yrange = np.linspace(ymin,ymax,divisions[1])
X,Y = np.meshgrid(xrange,yrange)
Z = (X+np.sqrt(1.757))/(X+np.sqrt(1.757))*(slice[0][2]+slice[1][2])/2
else: #vertical
xmin,xmax = 0,np.sqrt((slice[0][0]-slice[1][0])**2+(slice[0][1]-slice[1][1])**2)
ymin,ymax = np.min([slice[0][2],slice[1][2]]),np.max([slice[0][2],slice[1][2]])
xrange = np.linspace(xmin,xmax,divisions[0])
yrange = np.linspace(ymin,ymax,divisions[1])
X,Z = np.meshgrid(xrange,yrange)
Y = X/xmax*abs(slice[0][1]-slice[1][1]) + slice[0][1]
X = X/xmax*abs(slice[0][0]-slice[1][0]) + slice[0][0]
points = np.transpose(np.array([dat['x'],dat['y'],dat['z']]))
pointsI = np.transpose(np.reshape((X,Y,Z),(3,X.size)))
vals = np.transpose(np.array(dat[variable]))
valsI = griddata(points,vals,pointsI,method=method)
valsI = np.reshape(valsI,(X.shape[0],X.shape[1]))
if delta:
vals = np.transpose(np.array(dat0[variable]))
valsI0 = griddata(points,vals,pointsI,method=method)
valsI0 = np.reshape(valsI0,(X.shape[0],X.shape[1]))
valsI = valsI - valsI0
else:
if delta: time0 = time[0]; time = time[-1]
X,Y,Z,sxx = self.slice('strs_xx', slice, divisions, time, method)
X,Y,Z,syy = self.slice('strs_yy', slice, divisions, time, method)
X,Y,Z,szz = self.slice('strs_zz', slice, divisions, time, method)
X,Y,Z,sxy = self.slice('strs_xy', slice, divisions, time, method)
X,Y,Z,sxz = self.slice('strs_xz', slice, divisions, time, method)
X,Y,Z,syz = self.slice('strs_yz', slice, divisions, time, method)
X,Y,Z,sp = self.slice('P', slice, divisions, time, method)
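# Coulomb failure stress on the requested plane: build the unit normal nhat
# from the supplied azimuth and dip, resolve the stress tensor into normal
# (sig) and shear (tau) components on that plane, then evaluate
# CFS = tau - mu*(sig - P) - cohesion.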
dip = variable[2]/180.*math.pi
azi = variable[1]/180.*math.pi+3.14159/2.
nhat = np.array([np.cos(azi)*np.sin(dip),np.sin(azi)*np.sin(dip),np.cos(dip)])
mu = variable[3]
cohesion = variable[4]
px = sxx*nhat[0]+sxy*nhat[1]+sxz*nhat[2]
py = sxy*nhat[0]+syy*nhat[1]+syz*nhat[2]
pz = sxz*nhat[0]+syz*nhat[1]+szz*nhat[2]
sig = px*nhat[0]+py*nhat[1]+pz*nhat[2]
tau = np.sqrt(px**2+py**2+pz**2 - sig**2)
valsI = tau - mu*(sig-sp) - cohesion
if delta:
X,Y,Z,sxx = self.slice('strs_xx', slice, divisions, time0, method)
X,Y,Z,syy = self.slice('strs_yy', slice, divisions, time0, method)
X,Y,Z,szz = self.slice('strs_zz', slice, divisions, time0, method)
X,Y,Z,sxy = self.slice('strs_xy', slice, divisions, time0, method)
X,Y,Z,sxz = self.slice('strs_xz', slice, divisions, time0, method)
X,Y,Z,syz = self.slice('strs_yz', slice, divisions, time0, method)
X,Y,Z,sp = self.slice('P', slice, divisions, time0, method)
px = sxx*nhat[0]+sxy*nhat[1]+sxz*nhat[2]
py = sxy*nhat[0]+syy*nhat[1]+syz*nhat[2]
pz = sxz*nhat[0]+syz*nhat[1]+szz*nhat[2]
sig = px*nhat[0]+py*nhat[1]+pz*nhat[2]
tau = np.sqrt(px**2+py**2+pz**2 - sig**2)
valsI = valsI - (tau - mu*(sig-sp) - cohesion)
return X, Y, Z, valsI
def slice_plot_line(self,variable=None,time=None,slice='',divisions=[20,20],labels=False, label_size=10.,levels=10,xlims=[],
ylims=[],colors='k',linestyle='-',save='', xlabel='x / m',ylabel='y / m',title='', font_size='medium', method='nearest',
equal_axes=True):
'''Returns a line plot of contour data. Invokes the ``slice()`` method to interpolate slice data for plotting.
:param variable: Output data variable, for example 'P' = pressure.
:type variable: str
:param time: Time for which output data is requested. Can be supplied via ``fcontour.times`` list. Default is most recently available data. If a list of two times is passed, the difference between the first and last is plotted.
:type time: fl64
:param slice: List specifying orientation of output slice, e.g., ['x',200.] is a vertical slice at ``x = 200``, ['z',-500.] is a horizontal slice at ``z = -500.``, [point1, point2] is a fixed limit vertical or horizontal domain corresponding to the bounding box defined by point1 and point2.
:type slice: lst[str,fl64]
:param divisions: Resolution to supply mesh data.
:type divisions: [int,int]
:param method: Method of interpolation, options are 'nearest', 'linear'.
:type method: str
:param labels: Specify whether labels should be added to contour plot.
:type labels: bool
:param label_size: Specify text size of labels on contour plot, either as an integer or string, e.g., 10, 'small', 'x-large'.
:type label_size: str, int
:param levels: Contour levels to plot. Can specify specific levels in list form, or a single integer indicating automatic assignment of levels.
:type levels: lst[fl64], int
:param xlims: Plot limits on x-axis.
:type xlims: [fl64, fl64]
:param ylims: Plot limits on y-axis.
:type ylims: [fl64, fl64]
:param linestyle: Style of contour lines, e.g., 'k-' = solid black line, 'r:' red dotted line.
:type linestyle: str
:param save: Name to save plot. Format specified by extension (default .png if none given). Supported extensions: .png, .eps, .pdf.
:type save: str
:param xlabel: Label on x-axis.
:type xlabel: str
:param ylabel: Label on y-axis.
:type ylabel: str
:param title: Plot title.
:type title: str
:param font_size: Specify text size, either as an integer or string, e.g., 10, 'small', 'x-large'.
:type font_size: str, int
:param equal_axes: Specify equal scales on axes.
:type equal_axes: bool
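A minimal usage sketch (assumes an fcontour instance ``cont``; the save name
is arbitrary)::

    cont.slice_plot_line(variable='P', slice=['z', -500.], divisions=[50, 50],
        labels=True, save='P_contours.png')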
'''
save = os_path(save)
# at this stage, only structured grids are supported
if time==None: time = self.times[-1]
delta = False
if isinstance(time,list) or isinstance(time,np.ndarray):
if len(time)>1:
time0 = np.min(time)
time = np.max(time)
delta=True
return_flag = self._check_inputs(variable,time,slice)
if return_flag: return
# gather plot data
X, Y, Z, valsI = self.slice(variable=variable, time=time, slice=slice, divisions=divisions, method=method)
if delta:
X, Y, Z, valsIi = self.slice(variable=variable, time=time0, slice=slice, divisions=divisions, method=method)
valsI = valsI - valsIi
plt.clf()
plt.figure(figsize=[8,8])
ax = plt.axes([0.15,0.15,0.75,0.75])
if xlims: ax.set_xlim(xlims)
if ylims: ax.set_ylim(ylims)
if equal_axes: ax.set_aspect('equal', 'datalim')
CS = plt.contour(X,Y,valsI,levels,colors=colors,linestyles=linestyle)
if labels: plt.clabel(CS,inline=1,fontsize=label_size)
if xlabel: plt.xlabel(xlabel,size=font_size)
if ylabel: plt.ylabel(ylabel,size=font_size)
if title: plt.title(title,size=font_size)
for t in ax.get_xticklabels():
t.set_fontsize(font_size)
for t in ax.get_yticklabels():
t.set_fontsize(font_size)
extension, save_fname, pdf = save_name(save,variable=variable,time=time)
plt.savefig(save_fname, dpi=100, facecolor='w', edgecolor='w',orientation='portrait',
format=extension,transparent=True, bbox_inches=None, pad_inches=0.1)
if pdf:
os.system('epstopdf ' + save_fname)
os.remove(save_fname)
def slice_plot(self,variable=None,time=None,slice='',divisions=[20,20],levels=10,cbar=False,xlims=[],
ylims=[],colors='k',linestyle='-',save='', xlabel='x / m',ylabel='y / m',title='', font_size='medium', method='nearest',
equal_axes=True,mesh_lines = None,perm_contrasts=None, scale = 1.):
'''Returns a filled plot of contour data. Invokes the ``slice()`` method to interpolate slice data for plotting.
:param variable: Output data variable, for example 'P' = pressure.
:type variable: str
:param time: Time for which output data is requested. Can be supplied via ``fcontour.times`` list. Default is most recently available data. If a list of two times is passed, the difference between the first and last is plotted.
:type time: fl64
:param slice: List specifying orientation of output slice, e.g., ['x',200.] is a vertical slice at ``x = 200``, ['z',-500.] is a horizontal slice at ``z = -500.``, [point1, point2] is a fixed limit vertical or horizontal domain corresponding to the bounding box defined by point1 and point2.
:type slice: lst[str,fl64]
:param divisions: Resolution to supply mesh data.
:type divisions: [int,int]
:param method: Method of interpolation, options are 'nearest', 'linear'.
:type method: str
:param levels: Contour levels to plot. Can specify specific levels in list form, or a single integer indicating automatic assignment of levels.
:type levels: lst[fl64], int
:param cbar: Add colour bar to plot.
:type cbar: bool
:param xlims: Plot limits on x-axis.
:type xlims: [fl64, fl64]
:param ylims: Plot limits on y-axis.
:type ylims: [fl64, fl64]
:param colors: Specify colour string for contour levels.
:type colors: lst[str]
:param linestyle: Style of contour lines, e.g., 'k-' = solid black line, 'r:' red dotted line.
:type linestyle: str
:param save: Name to save plot. Format specified by extension (default .png if none given). Supported extensions: .png, .eps, .pdf.
:type save: str
:param xlabel: Label on x-axis.
:type xlabel: str
:param ylabel: Label on y-axis.
:type ylabel: str
:param title: Plot title.
:type title: str
:param font_size: Specify text size, either as an integer or string, e.g., 10, 'small', 'x-large'.
:type font_size: str, int
:param equal_axes: Specify equal scales on axes.
:type equal_axes: bool
:param mesh_lines: Superimpose mesh on the plot (line intersections correspond to node positions) according to specified linestyle, e.g., 'k:' is a dotted black line.
:type mesh_lines: str
:param perm_contrasts: Superimpose permeability contours on the plot according to specified linestyle, e.g., 'k:' is a dotted black line. A gradient method is used to pick out sharp changes in permeability.
:type perm_contrasts: str
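A minimal usage sketch (assumes an fcontour instance ``cont``; the save name
is arbitrary)::

    cont.slice_plot(variable='T', slice=['z', -500.], divisions=[100, 100],
        levels=10, cbar=True, save='T_slice.png')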
'''
if save:
save = os_path(save)
# at this stage, only structured grids are supported
if time==None:
if np.min(self.times)<0: time = self.times[0]
else: time = self.times[-1]
delta = False
if isinstance(time,list) or isinstance(time,np.ndarray):
if len(time)>1:
time0 = np.min(time)
time = np.max(time)
delta=True
# if data not available for one coordinate, assume 2-D simulation, adjust slice accordingly
if 'x' not in self.variables: slice = ['x',None]
if 'y' not in self.variables: slice = ['y',None]
if 'z' not in self.variables: slice = ['z',None]
return_flag = self._check_inputs(variable=variable,time=time,slice=slice)
if return_flag: return
# gather plot data
X, Y, Z, valsI = self.slice(variable=variable, time=time, slice=slice, divisions=divisions, method=method)
if delta:
X, Y, Z, valsIi = self.slice(variable=variable, time=time0, slice=slice, divisions=divisions, method=method)
valsI = valsI - valsIi
if isinstance(slice[0],list):
# check if horizontal or vertical slice
dx,dy,dz = abs(slice[0][0]-slice[1][0]),abs(slice[0][1]-slice[1][1]),abs(slice[0][2]-slice[1][2])
if not(100*dz<dx and 100*dz<dy):
if dx < dy:
X = Y
Y = Z
plt.clf()
plt.figure(figsize=[8,8])
ax = plt.axes([0.15,0.15,0.75,0.75])
if xlims: ax.set_xlim(xlims)
if ylims: ax.set_ylim(ylims)
if equal_axes: ax.set_aspect('equal', 'datalim')
if not isinstance(scale,list):
CS = plt.contourf(X,Y,valsI*scale,levels)
elif len(scale) == 2:
CS = plt.contourf(X,Y,valsI*scale[0]+scale[1],levels)
if xlabel: plt.xlabel(xlabel,size=font_size)
if ylabel: plt.ylabel(ylabel,size=font_size)
if title: plt.title(title,size=font_size)
if cbar:
cbar=plt.colorbar(CS)
for t in cbar.ax.get_yticklabels():
t.set_fontsize(font_size)
for t in ax.get_xticklabels():
t.set_fontsize(font_size)
for t in ax.get_yticklabels():
t.set_fontsize(font_size)
if perm_contrasts:
if 'perm_x' not in self.variables:
pyfehm_print('WARNING: No permeability data to construct unit boundaries.',self._silent)
else:
X, Y, Z, k = self.slice(variable='perm_z', time=time, slice=slice, divisions=divisions, method=method)
# calculate derivatives in X and Y directions
dkdX = np.diff(k,1,0)#/np.diff(Y,1,0)
dkdX = (dkdX[1:,1:-1]+dkdX[:-1,1:-1])/2
dkdY = np.diff(k,1,1)#/np.diff(X,1,1)
dkdY = (dkdY[1:-1,1:]+dkdY[1:-1,:-1])/2
dk = (abs((dkdX+dkdY)/2)>0.2)*1.
col = 'k'; ln = '-'
for let in perm_contrasts:
if let in ['k','r','g','b','m','c','y','w']: col = let
if let in ['-','--','-.',':']: ln = let
CS = plt.contour(X[1:-1,1:-1],Y[1:-1,1:-1],dk,[0.99999999999],colors=col,linestyles=ln)
xlims = ax.get_xlim()
ylims = ax.get_ylim()
if mesh_lines:
# add grid lines
ax.set_xlim(xlims[0],xlims[1])
ax.set_ylim(ylims[0],ylims[1])
if slice[0] == 'z':
for t in np.unique(self[self.times[0]]['x']):
ax.plot([t,t],[ylims[0],ylims[1]],mesh_lines,zorder=100)
for t in np.unique(self[self.times[0]]['y']):
ax.plot([xlims[0],xlims[1]],[t,t],mesh_lines,zorder=100)
elif slice[0] == 'x':
for t in np.unique(self[self.times[0]]['y']):
ax.plot([t,t],[ylims[0],ylims[1]],mesh_lines,zorder=100)
for t in np.unique(self[self.times[0]]['z']):
ax.plot([xlims[0],xlims[1]],[t,t],mesh_lines,zorder=100)
elif slice[0] == 'y':
for t in np.unique(self[self.times[0]]['x']):
ax.plot([t,t],[ylims[0],ylims[1]],mesh_lines,zorder=100)
for t in np.unique(self[self.times[0]]['z']):
ax.plot([xlims[0],xlims[1]],[t,t],mesh_lines,zorder=100)
if save:
extension, save_fname, pdf = save_name(save=save,variable=variable,time=time)
plt.savefig(save_fname, dpi=400, facecolor='w', edgecolor='w',orientation='portrait',
format=extension,transparent=True, bbox_inches=None, pad_inches=0.1)
if pdf:
os.system('epstopdf ' + save_fname)
os.remove(save_fname)
else:
plt.show()
def profile(self, variable, profile, time=None, divisions=30, method='nearest'):
'''Return variable data along the specified line in 3-D space. If only two points are supplied,
the profile is assumed to be a straight line between them.
:param variable: Output data variable, for example 'P' = pressure. Can specify multiple variables with a list.
:type variable: str, lst[str]
:param time: Time for which output data is requested. Can be supplied via ``fcontour.times`` list. Default is most recently available data.
:type time: fl64
:param profile: Three column array with each row corresponding to a point in the profile.
:type profile: ndarray
:param divisions: Number of points in profile. Only relevant if straight line profile being constructed from two points.
:type divisions: int
:param method: Interpolation method, options are 'nearest' (default) and 'linear'.
:type method: str
:returns: Multi-column array. Columns are in order x, y and z coordinates of profile, followed by requested variables.
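A minimal usage sketch (assumes an fcontour instance ``cont`` with temperature
and pressure output)::

    # T and P along a straight line between two points, sampled at 50 locations
    out = cont.profile(['T', 'P'], profile=[[0., 0., -1000.], [1000., 0., -1000.]],
        divisions=50)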
'''
if isinstance(profile,list): profile = np.array(profile)
if divisions: divisions = int(divisions)
if time==None: time = self.times[-1]
from scipy.interpolate import griddata
if not isinstance(variable,list): variable = [variable,]
dat = self[time]
points = np.transpose(np.array([dat['x'],dat['y'],dat['z']]))
if profile.shape[0]==2:
# construct line profile
xrange = np.linspace(profile[0][0],profile[1][0],divisions)
yrange = np.linspace(profile[0][1],profile[1][1],divisions)
zrange = np.linspace(profile[0][2],profile[1][2],divisions)
profile = np.transpose(np.array([xrange,yrange,zrange]))
outpoints = [list(profile[:,0]),list(profile[:,1]),list(profile[:,2])]
for var in variable:
vals = np.transpose(np.array(dat[var]))
valsI = griddata(points,vals,profile,method=method)
outpoints.append(list(valsI))
return np.array(outpoints).transpose()
def profile_plot(self,variable=None,time=None, profile=[],divisions = 30,xlims=[],ylims=[],
color='k',marker='x-',save='',xlabel='distance / m',ylabel='',title='',font_size='medium',method='nearest',
verticalPlot=False,elevationPlot=False):
'''Return a plot of the given variable along a specified profile. If the profile comprises two points,
these are interpreted as the start and end points of a straight line profile.
:param variable: Output data variable, for example 'P' = pressure. Can specify multiple variables with a list.
:type variable: str, lst[str]
:param time: Time for which output data is requested. Can be supplied via ``fcontour.times`` list. Default is most recently available data. If a list of two times is passed, the difference between the first and last is plotted.
:type time: fl64
:param profile: Three column array with each row corresponding to a point in the profile.
:type profile: ndarray
:param divisions: Number of points in profile. Only relevant if straight line profile being constructed from two points.
:type divisions: int
:param method: Interpolation method, options are 'nearest' (default) and 'linear'.
:type method: str
:param xlims: Plot limits on x-axis.
:type xlims: [fl64, fl64]
:param ylims: Plot limits on y-axis.
:type ylims: [fl64, fl64]
:param color: Colour of profile.
:type color: str
:param marker: Style of line, e.g., 'x-' = solid line with crosses, 'o:' dotted line with circles.
:type marker: str
:param save: Name to save plot. Format specified by extension (default .png if none given). Supported extensions: .png, .eps, .pdf.
:type save: str
:param xlabel: Label on x-axis.
:type xlabel: str
:param ylabel: Label on y-axis.
:type ylabel: str
:param title: Plot title.
:type title: str
:param font_size: Specify text size, either as an integer or string, e.g., 10, 'small', 'x-large'.
:type font_size: str, int
:param verticalPlot: Flag to plot variable against profile distance on the y-axis.
:type verticalPlot: bool
:param elevationPlot: Flag to plot variable against elevation on the y-axis.
:type elevationPlot: bool
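A minimal usage sketch (assumes an fcontour instance ``cont``; the save name
is arbitrary)::

    cont.profile_plot('T', profile=[[0., 0., -1000.], [1000., 0., -1000.]],
        divisions=50, ylabel='temperature / degC', save='T_profile.png')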
'''
save = os_path(save)
if time==None: time = self.times[-1]
delta = False
if isinstance(time,list) or isinstance(time,np.ndarray):
if len(time)>1:
time0 = np.min(time)
time = np.max(time)
delta=True
if not variable:
s = ['ERROR: no plot variable specified.']
s.append('Options are')
for var in self.variables: s.append(var)
s = '\n'.join(s)
pyfehm_print(s,self._silent)
return True
if not ylabel: ylabel = variable
plt.clf()
plt.figure(figsize=[8,8])
ax = plt.axes([0.15,0.15,0.75,0.75])
outpts = self.profile(variable=variable,profile=profile,time=time,divisions=divisions,method=method)
if delta:
outptsI = self.profile(variable=variable,profile=profile,time=time0,divisions=divisions,method=method)
outpts[:,3] = outpts[:,3] - outptsI[:,3]
x0,y0,z0 = outpts[0,:3]
x = np.sqrt((outpts[:,0]-x0)**2+(outpts[:,1]-y0)**2+(outpts[:,2]-z0)**2)
y = outpts[:,3]
if verticalPlot:
temp = x; x = y; y = temp
temp = xlabel; xlabel = ylabel; ylabel = temp
temp = xlims; xlims = ylims; ylims = temp
if elevationPlot:
x = outpts[:,3]
y = outpts[:,2]
temp = xlabel; xlabel = ylabel; ylabel = temp
temp = xlims; xlims = ylims; ylims = temp
plt.plot(x,y,marker,color=color)
if xlims: ax.set_xlim(xlims)
if ylims: ax.set_ylim(ylims)
if xlabel: plt.xlabel(xlabel,size=font_size)
if ylabel: plt.ylabel(ylabel,size=font_size)
if title: plt.title(title,size=font_size)
for t in ax.get_xticklabels():
t.set_fontsize(font_size)
for t in ax.get_yticklabels():
t.set_fontsize(font_size)
extension, save_fname, pdf = save_name(save,variable=variable,time=time)
plt.savefig(save_fname, dpi=100, facecolor='w', edgecolor='w',orientation='portrait',
format=extension,transparent=True, bbox_inches=None, pad_inches=0.1)
if pdf:
os.system('epstopdf ' + save_fname)
os.remove(save_fname)
def cutaway_plot(self,variable=None,time=None,divisions=[20,20,20],levels=10,cbar=False,angle=[45,45],xlims=[],method='nearest',
ylims=[],zlims=[],colors='k',linestyle='-',save='', xlabel='x / m', ylabel='y / m', zlabel='z / m', title='',
font_size='medium',equal_axes=True,grid_lines=None):
'''Returns a filled plot of contour data on each of 3 planes in a cutaway plot. Invokes the ``slice()`` method to interpolate slice data for plotting.
:param variable: Output data variable, for example 'P' = pressure.
:type variable: str
:param time: Time for which output data is requested. Can be supplied via ``fcontour.times`` list. Default is most recently available data. If a list of two times is passed, the difference between the first and last is plotted.
:type time: fl64
:param divisions: Resolution to supply mesh data in [x,y,z] coordinates.
:type divisions: [int,int,int]
:param levels: Contour levels to plot. Can specify specific levels in list form, or a single integer indicating automatic assignment of levels.
:type levels: lst[fl64], int
:param cbar: Include colour bar.
:type cbar: bool
:param angle: View angle of zone. First number is tilt angle in degrees, second number is azimuth. Alternatively, if angle is 'x', 'y', 'z', view is aligned along the corresponding axis.
:type angle: [fl64,fl64], str
:param method: Method of interpolation, options are 'nearest', 'linear'.
:type method: str
:param xlims: Plot limits on x-axis.
:type xlims: [fl64, fl64]
:param ylims: Plot limits on y-axis.
:type ylims: [fl64, fl64]
:param zlims: Plot limits on z-axis.
:type zlims: [fl64, fl64]
:param colors: Specify colour string for contour levels.
:type colors: lst[str]
:param linestyle: Style of contour lines, e.g., 'k-' = solid black line, 'r:' red dotted line.
:type linestyle: str
		:param save: Name to save plot. Format specified by extension (default .png if none given). Supported extensions: .png, .eps, .pdf.
:type save: str
:param xlabel: Label on x-axis.
:type xlabel: str
:param ylabel: Label on y-axis.
:type ylabel: str
:param zlabel: Label on z-axis.
:type zlabel: str
:param title: Plot title.
:type title: str
:param font_size: Specify text size, either as an integer or string, e.g., 10, 'small', 'x-large'.
:type font_size: str, int
:param equal_axes: Force plotting with equal aspect ratios for all axes.
:type equal_axes: bool
:param grid_lines: Extend tick lines across plot according to specified linestyle, e.g., 'k:' is a dotted black line.
		:type grid_lines: str
'''
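		# Usage sketch (hedged): 'cont' is an assumed fcontour instance; the variable,
		# contour levels and file name are illustrative only.
		#   cont.cutaway_plot(variable='P', time=cont.times[-1], divisions=[30, 30, 30],
		#       levels=list(np.linspace(1., 10., 10)), cbar=True, angle=[30, 60], save='P_cutaway.png')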
save = os_path(save)
# check inputs
		if time is None: time = self.times[-1]
delta = False
if isinstance(time,list) or isinstance(time,np.ndarray):
if len(time)>1:
time0 = np.min(time)
time = np.max(time)
delta=True
return_flag = self._check_inputs(variable=variable,time=time,slice=slice)
if return_flag: return
# set up axes
fig = plt.figure(figsize=[11.7,8.275])
ax = plt.axes(projection='3d')
ax.set_aspect('equal', 'datalim')
# make axes equal
if 'x' not in self.variables or 'y' not in self.variables or 'z' not in self.variables:
pyfehm_print('ERROR: No xyz data, skipping',self._silent)
return
xmin,xmax = np.min(self[time]['x']),np.max(self[time]['x'])
ymin,ymax = np.min(self[time]['y']),np.max(self[time]['y'])
zmin,zmax = np.min(self[time]['z']),np.max(self[time]['z'])
if equal_axes:
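			# Workaround to approximate equal aspect ratios in mplot3d: plot invisible
			# (white) points at the corners of a bounding cube centred on the data so
			# that autoscaling treats all three axes equally.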
MAX = np.max([xmax-xmin,ymax-ymin,zmax-zmin])/2
C = np.array([xmin+xmax,ymin+ymax,zmin+zmax])/2
for direction in (-1, 1):
for point in np.diag(direction * MAX * np.array([1,1,1])):
ax.plot([point[0]+C[0]], [point[1]+C[1]], [point[2]+C[2]], 'w')
if not xlims: xlims = [xmin,xmax]
if not ylims: ylims = [ymin,ymax]
if not zlims: zlims = [zmin,zmax]
# set view angle
ax.view_init(angle[0],angle[1])
ax.set_xlabel(xlabel,size=font_size)
ax.set_ylabel(ylabel,size=font_size)
ax.set_zlabel(zlabel,size=font_size)
plt.title(title+'\n\n\n\n',size=font_size)
scale = 1e6
levels = [l/scale for l in levels]
X, Y, Z, valsI = self.slice(variable, [[xlims[0],ylims[0],zlims[0]],[xlims[1],ylims[1],zlims[0]]], [divisions[0],divisions[1]], time, method)
if delta:
X, Y, Z, valsIi = self.slice(variable, [[xlims[0],ylims[0],zlims[0]],[xlims[1],ylims[1],zlims[0]]], [divisions[0],divisions[1]], time0, method)
valsI = valsI - valsIi
cset = ax.contourf(X, Y, valsI/scale, zdir='z', offset=zlims[0], cmap=cm.coolwarm,levels=levels)
X, Y, Z, valsI = self.slice(variable, [[xlims[0],ylims[0],zlims[0]],[xlims[0],ylims[1],zlims[1]]], [divisions[1],divisions[2]], time, method)
if delta:
X, Y, Z, valsIi = self.slice(variable, [[xlims[0],ylims[0],zlims[0]],[xlims[0],ylims[1],zlims[1]]], [divisions[1],divisions[2]], time0, method)
valsI = valsI - valsIi
cset = ax.contourf(valsI/scale, Y, Z, zdir='x', offset=xlims[0], cmap=cm.coolwarm,levels=levels)
X, Y, Z, valsI = self.slice(variable, [[xlims[0],ylims[0],zlims[0]],[xlims[1],ylims[0],zlims[1]]], [divisions[0],divisions[2]], time, method)
if delta:
X, Y, Z, valsIi = self.slice(variable, [[xlims[0],ylims[0],zlims[0]],[xlims[1],ylims[0],zlims[1]]], [divisions[0],divisions[2]], time0, method)
valsI = valsI - valsIi
cset = ax.contourf(X, valsI/scale, Z, zdir='y', offset=ylims[0], cmap=cm.coolwarm,levels=levels)
if cbar:
cbar=plt.colorbar(cset)
tick_labels = [str(float(t*scale)) for t in levels]
cbar.locator = matplotlib.ticker.FixedLocator(levels)
cbar.formatter = matplotlib.ticker.FixedFormatter(tick_labels)
cbar.update_ticks()
if grid_lines:
# add grid lines
ax.set_xlim(xlims[0],xlims[1])
ax.set_ylim(ylims[0],ylims[1])
ax.set_zlim(zlims[0],zlims[1])
xticks = ax.get_xticks()
yticks = ax.get_yticks()
zticks = ax.get_zticks()
off = 0.
for t in xticks:
ax.plot([t,t],[ylims[0],ylims[1]],[zlims[0]+off,zlims[0]+off],grid_lines,zorder=100)
ax.plot([t,t],[ylims[0]+off,ylims[0]+off],[zlims[0],zlims[1]],grid_lines,zorder=100)
for t in yticks:
ax.plot([xlims[0],xlims[1]],[t,t],[zlims[0]+off,zlims[0]+off],grid_lines,zorder=100)
ax.plot([xlims[0]+off,xlims[0]+off],[t,t],[zlims[0],zlims[1]],grid_lines,zorder=100)
for t in zticks:
ax.plot([xlims[0],xlims[1]],[ylims[0]+off,ylims[0]+off],[t,t],grid_lines,zorder=100)
ax.plot([xlims[0]+off,xlims[0]+off],[ylims[0],ylims[1]],[t,t],grid_lines,zorder=100)
for t in ax.get_yticklabels():
t.set_fontsize(font_size)
for t in ax.get_xticklabels():
t.set_fontsize(font_size)
for t in ax.get_zticklabels():
t.set_fontsize(font_size)
ax.set_xlim(xlims)
ax.set_ylim(ylims)
ax.set_zlim(zlims)
extension, save_fname, pdf = save_name(save=save,variable=variable,time=time)
plt.savefig(save_fname, dpi=100, facecolor='w', edgecolor='w',orientation='portrait',
format=extension,transparent=True, bbox_inches=None, pad_inches=0.1)
def node(self,node,time=None,variable=None):
'''Returns all information for a specific node.
		If neither time nor variable is specified, a dictionary of time series is returned with variables as the dictionary keys.
		If only time is specified, a dictionary of variable values at that time is returned, with variables as dictionary keys.
		If only variable is specified, a time series vector is returned for that variable.
		If both time and variable are specified, a single value is returned, corresponding to the variable value at that time, at that node.
		:param node: Node index for which variable information is required.
		:type node: int
		:param time: Time at which variable information is required. If not specified, all times are returned.
		:type time: fl64
		:param variable: Variable for which information is requested. If not specified, all variables are returned.
		:type variable: str
'''
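		# Usage sketch (hedged): 'cont' is an assumed fcontour instance and node 10 is illustrative.
		#   T_series = cont.node(10, variable='T')                       # temperature time series at node 10
		#   T_final = cont.node(10, time=cont.times[-1], variable='T')   # single value at the last output time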
if 'n' not in self.variables:
pyfehm_print('Node information not available',self._silent)
return
nd = np.where(self[self.times[0]]['n']==node)[0][0]
if time is None and variable is None:
ks = copy(self.variables); ks.remove('n')
outdat = dict([(k,[]) for k in ks])
for t in self.times:
dat = self[t]
for k in list(outdat.keys()):
outdat[k].append(dat[k][nd])
elif time is None:
if variable not in self.variables:
pyfehm_print('ERROR: no variable by that name',self._silent)
return
outdat = []
for t in self.times:
dat = self[t]
outdat.append(dat[variable][nd])
outdat = np.array(outdat)
elif variable is None:
ks = copy(self.variables); ks.remove('n')
outdat = dict([(k,self[time][k][nd]) for k in ks])
else:
outdat = self[time][variable][nd]
return outdat
def paraview(self, grid, stor = None, exe = dflt.paraview_path,filename = 'temp.vtk',show=None,diff = False,zscale = 1., time_derivatives = False):
""" Launches an instance of Paraview that displays the contour object.
:param grid: Path to grid file associated with FEHM simulation that produced the contour output.
:type grid: str
		:param stor: Path to stor file associated with FEHM simulation that produced the contour output.
:type stor: str
:param exe: Path to Paraview executable.
:type exe: str
:param filename: Name of VTK file to be output.
:type filename: str
:param show: Variable to show when Paraview starts up (default = first available variable in contour object).
:type show: str
:param diff: Flag to request PyFEHM to also plot differences of contour variables (from initial state) with time.
:type diff: bool
:param zscale: Factor by which to scale z-axis. Useful for visualising laterally extensive flow systems.
:type zscale: fl64
:param time_derivatives: Calculate new fields for time derivatives of contour data. For precision reasons, derivatives are calculated with units of 'per day'.
:type time_derivatives: bool
"""
from fdata import fdata
dat = fdata()
dat.grid.read(grid,storfilename=stor)
if show is None:
for var in self.variables:
if var not in ['x','y','z','n']: break
show = var
dat.paraview(exe=exe,filename=filename,contour=self,show=show,diff=diff,zscale=zscale,time_derivatives=time_derivatives)
def _get_variables(self): return self._variables
variables = property(_get_variables)#: (*lst[str]*) List of variables for which output data are available.
def _get_user_variables(self): return self._user_variables
user_variables = property(_get_user_variables) #: (*lst[str]*) List of user-defined variables for which output data are available.
def _get_format(self): return self._format
format = property(_get_format) #: (*str*) Format of output file, options are 'tec', 'surf', 'avs' and 'avsx'.
def _get_filename(self): return self._filename
filename = property(_get_filename) #: (*str*) Name of FEHM contour output file. Wildcards can be used to define multiple input files.
def _get_times(self): return np.sort(self._times)
times = property(_get_times) #: (*lst[fl64]*) List of times (in seconds) for which output data are available.
def _get_material_properties(self): return self._material_properties
def _set_material_properties(self,value): self._material_properties = value
material_properties = property(_get_material_properties, _set_material_properties) #: (*lst[str]*) List of material properties, keys for the material attribute.
def _get_material(self): return self._material
def _set_material(self,value): self._material = value
material = property(_get_material, _set_material) #: (*dict[str]*) Dictionary of material properties, keyed by property name, items indexed by node_number - 1. This attribute is empty if no material property file supplied.
def _get_x(self): return self._x
def _set_x(self,value): self._x = value
x = property(_get_x, _set_x) #: (*lst[fl64]*) Unique list of nodal x-coordinates for grid.
def _get_y(self): return self._y
def _set_y(self,value): self._y = value
y = property(_get_y, _set_y) #: (*lst[fl64]*) Unique list of nodal y-coordinates for grid.
def _get_z(self): return self._z
def _set_z(self,value): self._z = value
z = property(_get_z, _set_z) #: (*lst[fl64]*) Unique list of nodal z-coordinates for grid.
def _get_xmin(self): return self._xmin
def _set_xmin(self,value): self._xmin = value
xmin = property(_get_xmin, _set_xmin) #: (*fl64*) Minimum nodal x-coordinate for grid.
def _get_xmax(self): return self._xmax
def _set_xmax(self,value): self._xmax = value
xmax = property(_get_xmax, _set_xmax) #: (*fl64*) Maximum nodal x-coordinate for grid.
def _get_ymin(self): return self._ymin
def _set_ymin(self,value): self._ymin = value
ymin = property(_get_ymin, _set_ymin) #: (*fl64*) Minimum nodal y-coordinate for grid.
def _get_ymax(self): return self._ymax
def _set_ymax(self,value): self._ymax = value
ymax = property(_get_ymax, _set_ymax) #: (*fl64*) Maximum nodal y-coordinate for grid.
def _get_zmin(self): return self._zmin
def _set_zmin(self,value): self._zmin = value
zmin = property(_get_zmin, _set_zmin) #: (*fl64*) Minimum nodal z-coordinate for grid.
def _get_zmax(self): return self._zmax
def _set_zmax(self,value): self._zmax = value
zmax = property(_get_zmax, _set_zmax) #: (*fl64*) Maximum nodal z-coordinate for grid.
def _get_information(self):
print('FEHM contour output - format '+self._format)
print(' call format: fcontour[time][variable][node_index-1]')
prntStr = ' times ('+str(len(self.times))+'): '
for time in self.times: prntStr += str(time)+', '
print(prntStr[:-2]+' days')
prntStr = ' variables: '
for var in self.variables: prntStr += str(var)+', '
for var in self.user_variables: prntStr += str(var)+', '
print(prntStr)
what = property(_get_information) #:(*str*) Print out information about the fcontour object.
class fhistory(object): # Reading and plotting methods associated with history output data.
'''History output information object.
'''
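	# Usage sketch (hedged): the history file name, variable and node index are illustrative.
	#   hist = fhistory('run_temp_his.dat')
	#   T = hist['T'][hist.nodes[0]]   # temperature time series at the first recorded node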
def __init__(self,filename=None,verbose=True):
self._filename=None
self._silent = dflt.silent
self._format = ''
self._times=[]
self._verbose = verbose
self._data={}
self._row=None
self._nodes=[]
self._zones = []
self._variables=[]
self._user_variables = []
self._keyrows={}
self.column_name=[]
self.num_columns=0
self._nkeys=1
filename = os_path(filename)
if filename: self._filename=filename; self.read(filename)
def __getitem__(self,key):
if key in self.variables or key in self.user_variables:
return self._data[key]
else: return None
def __repr__(self):
retStr = 'History output for variables '
for var in self.variables:
retStr += var+', '
retStr = retStr[:-2] + ' at '
if len(self.nodes)>10:
retStr += str(len(self.nodes)) + ' nodes.'
else:
if len(self.nodes)==1:
retStr += 'node '
else:
retStr += 'nodes '
for nd in self.nodes:
retStr += str(nd) + ', '
retStr = retStr[:-2] + '.'
return retStr
def read(self,filename): # read contents of file
'''Read in FEHM history output information.
:param filename: File name for output data, can include wildcards to define multiple output files.
:type filename: str
'''
from glob import glob
import re
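		# escape literal square brackets in the supplied filename so that glob treats
		# them as ordinary characters rather than character classes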
glob_pattern = re.sub(r'\[','[[]',filename)
glob_pattern = re.sub(r'(?<!\[)\]','[]]', glob_pattern)
files=glob(glob_pattern)
configured=False
for i,fname in enumerate(files):
if self._verbose:
pyfehm_print(fname,self._silent)
self._file=open(fname,'rU')
header=self._file.readline()
if header.strip()=='': continue # empty file
self._detect_format(header)
if self.format=='tec':
header=self._file.readline()
if header.strip()=='': continue # empty file
i = 0; sum_file = False
while not header.startswith('variables'):
header=self._file.readline()
i = i+1
if i==10: sum_file=True; break
if sum_file: continue
self._setup_headers_tec(header)
elif self.format=='surf':
self._setup_headers_surf(header)
elif self.format=='default':
header=self._file.readline()
header=self._file.readline()
if header.strip()=='': continue # empty file
i = 0; sum_file = False
while not header.startswith('Time '):
header=self._file.readline()
i = i+1
if i==10: sum_file=True; break
if sum_file: continue
self._setup_headers_default(header)
else: pyfehm_print('Unrecognised format',self._silent);return
if not configured:
self.num_columns = len(self.nodes)+1
if self.num_columns>0: configured=True
if self.format=='tec':
self._read_data_tec(fname.split('_')[-2])
elif self.format=='surf':
self._read_data_surf(fname.split('_')[-2])
elif self.format=='default':
self._read_data_default(fname.split('_')[-1].split('.')[0])
self._file.close()
def _detect_format(self,header):
if header.startswith('TITLE'):
self._format = 'tec'
elif header.startswith('Time '):
self._format = 'surf'
else:
self._format = 'default'
def _setup_headers_tec(self,header):
header=header.split('" "Node')
if self.nodes: return
for key in header[1:-1]: self._nodes.append(int(key))
self._nodes.append(int(header[-1].split('"')[0]))
def _setup_headers_surf(self,header):
header=header.split(', Node')
if self.nodes: return
for key in header[1:]: self._nodes.append(int(key))
def _setup_headers_default(self,header):
header=header.split(' Node')
if self.nodes: return
for key in header[1:]: self._nodes.append(int(key))
def _read_data_tec(self,var_key):
self._variables.append(hist_var_names[var_key])
lns = self._file.readlines()
i = 0
while lns[i].startswith('text'): i+=1
data = []
for ln in lns[i:]: data.append([float(d) for d in ln.strip().split()])
data = np.array(data)
if data[-1,0]<data[-2,0]: data = data[:-1,:]
self._times = np.array(data[:,0])
self._data[hist_var_names[var_key]] = dict([(node,data[:,icol+1]) for icol,node in enumerate(self.nodes)])
def _read_data_surf(self,var_key):
self._variables.append(hist_var_names[var_key])
lns = self._file.readlines()
data = []
for ln in lns: data.append([float(d) for d in ln.strip().split(',')])
data = np.array(data)
if data[-1,0]<data[-2,0]: data = data[:-1,:]
self._times = np.array(data[:,0])
self._data[hist_var_names[var_key]] = dict([(node,data[:,icol+1]) for icol,node in enumerate(self.nodes)])
def _read_data_default(self,var_key):
self._variables.append(hist_var_names[var_key])
lns = self._file.readlines()
data = []
for ln in lns: data.append([float(d) for d in ln.strip().split()])
data = np.array(data)
if data[-1,0]<data[-2,0]: data = data[:-1,:]
self._times = np.array(data[:,0])
self._data[hist_var_names[var_key]] = dict([(node,data[:,icol+1]) for icol,node in enumerate(self.nodes)])
def time_plot(self, variable=None, node=0, t_lim=[],var_lim=[],marker='x-',color='k',save='',xlabel='',ylabel='',
title='',font_size='medium',scale=1.,scale_t=1.): # produce a time plot
'''Generate and save a time series plot of the history data.
:param variable: Variable to plot.
:type variable: str
:param node: Node number to plot.
:type node: int
:param t_lim: Time limits on x axis.
:type t_lim: lst[fl64,fl64]
:param var_lim: Variable limits on y axis.
:type var_lim: lst[fl64,fl64]
:param marker: String denoting marker and linetype, e.g., ':s', 'o--'. Default is 'x-' (solid line with crosses).
:type marker: str
:param color: String denoting colour. Default is 'k' (black).
:type color: str
:param save: Name to save plot.
:type save: str
:param xlabel: Label on x axis.
:type xlabel: str
:param ylabel: Label on y axis.
:type ylabel: str
:param title: Title of plot.
:type title: str
:param font_size: Font size for axis labels.
:type font_size: str
:param scale: If a single number is given, then the output variable will be multiplied by this number. If a two element list is supplied then the output variable will be transformed according to y = scale[0]*x+scale[1]. Useful for transforming between coordinate systems.
:type scale: fl64
:param scale_t: As for scale but applied to the time axis.
:type scale_t: fl64
'''
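		# Usage sketch (hedged): 'hist' is an assumed fhistory instance; the variable, node
		# and scaling values are illustrative.
		#   hist.time_plot(variable='P', node=hist.nodes[0], scale_t=1/365.25,
		#       xlabel='time / years', ylabel='P / MPa', save='P_history.png')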
save = os_path(save)
if not node: pyfehm_print('ERROR: no plot node specified.',self._silent); return
if not variable:
s = ['ERROR: no plot variable specified.']
s.append('Options are')
for var in self.variables: s.append(var)
s = '\n'.join(s)
pyfehm_print(s,self._silent)
return True
plt.clf()
plt.figure(figsize=[8,8])
ax = plt.axes([0.15,0.15,0.75,0.75])
if not isinstance(scale,list):
if not isinstance(scale_t,list):
plt.plot(self.times*scale_t,self[variable][node]*scale,marker)
elif len(scale_t) == 2:
plt.plot(self.times*scale_t[0]+scale_t[1],self[variable][node]*scale,marker)
elif len(scale) == 2:
if not isinstance(scale_t,list):
plt.plot(self.times*scale_t,self[variable][node]*scale[0]+scale[1],marker)
elif len(scale_t) == 2:
plt.plot(self.times*scale_t[0]+scale_t[1],self[variable][node]*scale[0]+scale[1],marker)
if t_lim: ax.set_xlim(t_lim)
if var_lim: ax.set_ylim(var_lim)
if xlabel: plt.xlabel(xlabel,size=font_size)
if ylabel: plt.ylabel(ylabel,size=font_size)
if title: plt.title(title,size=font_size)
for t in ax.get_xticklabels():
t.set_fontsize(font_size)
for t in ax.get_yticklabels():
t.set_fontsize(font_size)
extension, save_fname, pdf = save_name(save,variable=variable,node=node)
plt.savefig(save_fname, dpi=100, facecolor='w', edgecolor='w',orientation='portrait',
format=extension,transparent=True, bbox_inches=None, pad_inches=0.1)
if pdf:
os.system('epstopdf ' + save_fname)
os.remove(save_fname)
def new_variable(self,name,node,data):
'''Creates a new variable, which is some combination of the available variables.
:param name: Name for the variable.
:type name: str
		:param node: Node key with which the variable should be associated. Must be one of the existing keys, i.e., an item in fhistory.nodes.
		:type node: int
:param data: Variable data, most likely some combination of the available parameters, e.g., pressure*temperature, pressure[t=10] - pressure[t=5]
:type data: lst[fl64]
'''
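		# Usage sketch (hedged): defines a pressure-change history at one node; names are illustrative.
		#   nd = hist.nodes[0]
		#   hist.new_variable('dP', nd, hist['P'][nd] - hist['P'][nd][0])
		#   hist.time_plot(variable='dP', node=nd, save='dP_history.png')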
if node not in self.nodes:
pyfehm_print('ERROR: supplied node must correspond to an existing node in fhistory.nodes',self._silent)
return
		if name not in self._user_variables:
			self._data.update({name:dict([(nd,None) for nd in self.nodes])})
			self._user_variables.append(name)
self._data[name][node] = data
def _get_variables(self): return self._variables
variables = property(_get_variables)#: (*lst[str]*) List of variables for which output data are available.
def _get_user_variables(self): return self._user_variables
def _set_user_variables(self,value): self._user_variables = value
user_variables = property(_get_user_variables, _set_user_variables) #: (*lst[str]*) List of user-defined variables for which output data are available.
def _get_format(self): return self._format
format = property(_get_format) #: (*str*) Format of output file, options are 'tec', 'surf', 'avs' and 'avsx'.
def _get_filename(self): return self._filename
filename = property(_get_filename) #: (*str*) Name of FEHM contour output file. Wildcards can be used to define multiple input files.
def _get_times(self): return np.sort(self._times)
times = property(_get_times) #: (*lst[fl64]*) List of times (in seconds) for which output data are available.
def _get_nodes(self): return self._nodes
nodes = property(_get_nodes) #: (*lst[fl64]*) List of node indices for which output data are available.
def _get_information(self):
print('FEHM history output - format '+self._format)
print(' call format: fhistory[variable][node][time_index]')
prntStr = ' nodes: '
for nd in self.nodes: prntStr += str(nd)+', '
print(prntStr)
prntStr = ' times ('+str(len(self.times))+'): '
for time in self.times: prntStr += str(time)+', '
print(prntStr[:-2]+' days')
prntStr = ' variables: '
for var in self.variables: prntStr += str(var)+', '
print(prntStr)
what = property(_get_information) #:(*str*) Print out information about the fhistory object.
class fzoneflux(fhistory): # Derived class of fhistory, for zoneflux output
'''Zone flux history output information object.
'''
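	# Usage sketch (hedged): the zone flux file name and zone index are illustrative only.
	#   zflx = fzoneflux('run_flxz.his')
	#   src = zflx['water_source'][zflx.zones[0]]   # water source history for the first zone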
# __slots__ = ['_filename','_times','_verbose','_data','_row','_zones','_variables','_keyrows','column_name','num_columns','_nkeys']
def __init__(self,filename=None,verbose=True):
super(fzoneflux,self).__init__(filename, verbose)
self._filename=None
self._times=[]
self._verbose = verbose
self._data={}
self._row=None
self._zones=[]
self._variables=[]
self._keyrows={}
self.column_name=[]
self.num_columns=0
self._nkeys=1
if filename: self._filename=filename; self.read(filename)
def _setup_headers_tec(self,header):
'placeholder'
def _read_data_tec(self,var_key):
zn = int(var_key[-5:])
if var_key.startswith('c'):
if zn not in self._zones: self._zones.append(zn)
if 'co2_source' not in self._variables:
self._variables += flxz_co2_names
for var in flxz_co2_names: self._data[var] = {}
lns = self._file.readlines()
i = 0
while lns[i].startswith('text'): i+=1
data = []
for ln in lns[i:]: data.append([float(d) for d in ln.strip().split()])
data = np.array(data)
if data[-1,0]<data[-2,0]: data = data[:-1,:]
self._times = np.array(data[:,0])
for j,var_key in enumerate(flxz_co2_names):
self._data[var_key].update(dict([(zn,data[:,j+1])]))
elif var_key.startswith('w'):
if zn not in self._zones: self._zones.append(zn)
if 'water_source' not in self._variables:
self._variables += flxz_water_names
for var in flxz_water_names: self._data[var] = {}
lns = self._file.readlines()
i = 0
while lns[i].startswith('text'): i+=1
data = []
for ln in lns[i:]: data.append([float(d) for d in ln.strip().split()])
data = np.array(data)
if data[-1,0]<data[-2,0]: data = data[:-1,:]
self._times = np.array(data[:,0])
for j,var_key in enumerate(flxz_water_names):
self._data[var_key].update(dict([(zn,data[:,j+1])]))
elif var_key.startswith('v'):
if zn not in self._zones: self._zones.append(zn)
if 'vapor_source' not in self._variables:
self._variables += flxz_vapor_names
for var in flxz_vapor_names: self._data[var] = {}
lns = self._file.readlines()
i = 0
while lns[i].startswith('text'): i+=1
data = []
for ln in lns[i:]: data.append([float(d) for d in ln.strip().split()])
data = np.array(data)
if data[-1,0]<data[-2,0]: data = data[:-1,:]
self._times = np.array(data[:,0])
for j,var_key in enumerate(flxz_vapor_names):
self._data[var_key].update(dict([(zn,data[:,j+1])]))
def _read_data_surf(self,var_key):
self._variables.append(hist_var_names[var_key])
lns = self._file.readlines()
data = []
		for ln in lns: data.append([float(d) for d in ln.strip().split(',')])
data = np.array(data)
if data[-1,0]<data[-2,0]: data = data[:-1,:]
self._times = np.array(data[:,0])
self._data[hist_var_names[var_key]] = dict([(node,data[:,icol+1]) for icol,node in enumerate(self.nodes)])
def _read_data_default(self,var_key):
self._variables.append(hist_var_names[var_key])
lns = self._file.readlines()
data = []
		for ln in lns: data.append([float(d) for d in ln.strip().split()])
data = np.array(data)
if data[-1,0]<data[-2,0]: data = data[:-1,:]
self._times = np.array(data[:,0])
self._data[hist_var_names[var_key]] = dict([(node,data[:,icol+1]) for icol,node in enumerate(self.nodes)])
def _get_zones(self): return self._zones
def _set_zones(self,value): self._zones = value
zones = property(_get_zones, _set_zones) #: (*lst[int]*) List of zone indices for which output data are available.
class fnodeflux(object): # Reading and plotting methods associated with internode flux files.
'''Internode flux information.
Can read either water or CO2 internode flux files.
The fnodeflux object is indexed first by node pair - represented as a tuple of node indices - and then
by either the string 'liquid' or 'vapor'. Data values are in time order, as given in the 'times' attribute.
'''
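	# Usage sketch (hedged): the file name is illustrative.
	#   flx = fnodeflux('run.internode_fluxes.out')
	#   liq = flx[flx.nodepairs[0]]['liquid']   # liquid flux time series for the first node pair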
def __init__(self,filename=None):
self._filename = filename
self._silent = dflt.silent
self._nodepairs = []
self._times = []
self._timesteps = []
self._data = {}
if self._filename: self.read(self._filename)
def __getitem__(self,key):
if key in self.nodepairs:
return self._data[key]
else: return None
def read(self,filename):
		'''Read in FEHM internode flux information.
:param filename: File name for output data, can include wildcards to define multiple output files.
:type filename: str
'''
if not os.path.isfile(filename):
pyfehm_print('ERROR: cannot find file at '+filename,self._silent)
return
fp = open(filename)
lns = fp.readlines()
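		# Assumed file layout (hedged): the file is a sequence of blocks of N+1 lines,
		# each beginning with a header line whose tokens include the number of node
		# pairs N, the timestep number and the time, followed by one line per node pair
		# whose 3rd and 4th columns hold the vapour and liquid fluxes. The node pairs
		# themselves are taken from the first block.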
N = int(lns[0].split()[1])
		data = np.zeros((N,len(lns)//(N+1),2)) # temporary data storage structure
for ln in lns[1:N+1]:
ln = ln.split()
self._nodepairs.append((int(float(ln[0])),int(float(ln[1])))) # append node pair
		for i in range(len(lns)//(N+1)):
ln = lns[(N+1)*i:(N+1)*(i+1)]
nums = ln[0].split()
self._timesteps.append(float(nums[2]))
self._times.append(float(nums[3]))
for j,lni in enumerate(ln[1:]):
lnis = lni.split()
data[j,i,0] = float(lnis[2])
data[j,i,1] = float(lnis[3])
for i,nodepair in enumerate(self.nodepairs):
self._data[nodepair] = dict([(var,data[i,:,icol]) for icol,var in enumerate(['vapor','liquid'])])
def _get_filename(self): return self._filename
def _set_filename(self,value): self._filename = value
filename = property(_get_filename, _set_filename) #: (*str*) filename target for internode flux file.
def _get_timesteps(self): return np.sort(self._timesteps)
def _set_timesteps(self,value): self._timesteps = value
timesteps = property(_get_timesteps, _set_timesteps) #: (*lst*) timestep for which node flux information is reported.
def _get_times(self): return np.sort(self._times)
def _set_times(self,value): self._times = value
times = property(_get_times, _set_times) #: (*lst*) times for which node flux information is reported.
def _get_nodepairs(self): return self._nodepairs
def _set_nodepairs(self,value): self._nodepairs = value
nodepairs = property(_get_nodepairs, _set_nodepairs) #: (*lst*) node pairs for which node flux information is available. Each node pair is represented as a two item tuple of node indices.
class ftracer(fhistory): # Derived class of fhistory, for tracer output
'''Tracer history output information object.
'''
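	# Usage sketch (hedged): file names are illustrative.
	#   trc = ftracer('run.trc', output_filename='run.out')
	#   C1 = trc['Caq001'][trc.nodes[0]]   # concentration history of species 1 at the first node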
	def __init__(self,filename=None,output_filename=None):
super(ftracer,self).__init__(filename, output_filename)
self._filename=filename
self._output_filename = output_filename
self._times=[]
self._data={}
self._row=None
self._nodes=[]
self._variables=[]
self._keyrows={}
self.column_name=[]
self.num_columns=0
self._nkeys=1
if filename: self.read(filename)
if output_filename: self.read_output(output_filename)
def read(self,filename):
with open(filename,'r') as fp:
lns = fp.readlines()
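			# Assumed layout of the tracer history file (hedged): the third line gives the
			# number of nodes, followed by one line per node index, then a line giving the
			# number of species; thereafter each timestep contributes 2*Nsps lines, pairing
			# a time line with a concentration line for each species.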
Nnds = int(lns[2].strip()) # number of nodes
for i in range(Nnds): # indices of nodes
self.nodes.append(int(lns[3+i].strip().split()[0]))
Nsps = int(lns[3+Nnds].strip().split()[0]) # number of species
Nt = int(len(lns[4+Nnds:])/(2*Nsps)) # number of timesteps
ts = []
Cs = [[] for j in range(Nsps)]
for i in range(Nt):
ts.append(float(lns[2*Nsps*i+4+Nnds].split()[0].strip()))
for j in range(Nsps):
Cs[j].append([float(v) for v in lns[2*Nsps*i+2*j+4+Nnds+1].strip().split()])
for j in range(Nsps):
v = 'Caq{:03d}'.format(j+1)
self._variables.append(v)
C = np.array(Cs[j])
self._data[v] = dict([(node,C[:,icol]) for icol,node in enumerate(self.nodes)])
self._times = np.array(ts)
def read_output(self, output_filename):
with open(output_filename,'r') as fp:
lns = fp.readlines()
Nsps = len(self._variables)
Cs = [[] for j in range(Nsps)]
qs = []
ts = []
dts = []
for i,ln in enumerate(lns):
if ln.strip().startswith('Timing Information'):
t = float(lns[i+2].split()[1])
dt = float(lns[i+2].split()[2])*3600*24
if ln.strip().startswith('Nodal Information (Water)'):
q = [float(lns[i+3+j].split()[5]) for j in range(len(self.nodes))]
if ln.strip().startswith('Solute output information'):
ts.append(t)
dts.append(dt)
qs.append(q)
ind = int(ln.strip().split()[-1])
Cs[ind-1].append([float(lns[i+3+j].split()[4]) for j in range(len(self.nodes))])
qs = np.array(qs)
for j in range(Nsps):
v = 'Caq{:03d}_src'.format(j+1)
self._variables.append(v)
C = np.array(Cs[j])
self._data[v] = dict([(node,np.interp(self.times, ts, C[:,icol].T/qs[:,icol])) for icol,node in enumerate(self.nodes)])
class fptrk(fhistory): # Derived class of fhistory, for particle tracking output
	'''Particle tracking history output information object.
'''
def __init__(self,filename=None,verbose=True):
super(fptrk,self).__init__(filename, verbose)
self._filename=None
self._silent = dflt.silent
self._times=[]
self._verbose = verbose
self._data={}
self._row=None
self._nodes=[0]
self._variables=[]
self._keyrows={}
self.column_name=[]
self.num_columns=0
self._nkeys=1
if filename: self._filename=filename; self.read(filename)
def read(self,filename): # read contents of file
'''Read in FEHM particle tracking output information. Index by variable name.
:param filename: File name for output data, can include wildcards to define multiple output files.
:type filename: str
'''
from glob import glob
import re
glob_pattern = re.sub(r'\[','[[]',filename)
glob_pattern = re.sub(r'(?<!\[)\]','[]]', glob_pattern)
files=glob(glob_pattern)
configured=False
for i,fname in enumerate(files):
if self._verbose:
pyfehm_print(fname,self._silent)
self._file=open(fname,'rU')
header=self._file.readline()
if header.strip()=='': continue # empty file
header=self._file.readline()
self._setup_headers_default(header)
self._read_data_default()
self._file.close()
def _setup_headers_default(self,header):
header=header.strip().split('"')[3:-1]
header = [h for h in header if h != ' ']
for var in header: self._variables.append(var)
def _read_data_default(self):
lns = self._file.readlines()
data = []
for ln in lns: data.append([float(d) for d in ln.strip().split()])
data = np.array(data)
if data[-1,0]<data[-2,0]: data = data[:-1,:]
self._times = np.array(data[:,0])
self._data = dict([(var,data[:,icol+1]) for icol,var in enumerate(self.variables)])
class multi_pdf(object):
'''Tool for making a single pdf document from multiple eps files.'''
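	# Usage sketch (hedged): the eps file names are illustrative; assembling the pdf
	# relies on Ghostscript being available via the combineString command (default 'gswin64').
	#   mpdf = multi_pdf(save='report.pdf')
	#   mpdf.add('T_profile.eps')
	#   mpdf.add('P_cutaway.eps')
	#   mpdf.make()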
def __init__(self,combineString = 'gswin64',
save='multi_plot.pdf',files = [],delete_files = True):
self.combineString = combineString
self._silent = dflt.silent
self._save = os_path(save)
self._delete_files = delete_files
self._assign_files(files)
def _assign_files(self,files):
if files == []: self._files = {}
if isinstance(files,list):
self._files = dict([(i+1,file) for i,file in enumerate(files)])
elif isinstance(files,dict):
ks = list(files.keys())
for k in ks:
if not isinstance(k,int):pyfehm_print('ERROR: Dictionary keys must be integers.',self._silent);return
self._files = files
elif isinstance(files,str):
self._files = dict(((1,files),))
def add(self,filename,pagenum=None):
		'''Add a new page to the document. If a page number is specified, the new page replaces the one currently at that position.
Otherwise it will be appended to the end of the document.
:param filename: Name of .eps file to be added.
:type filename: str
:param pagenum: Page number of file to be added.
:type pagenum: int
'''
if len(filename.split('.'))==1: filename += '.eps'
if not os.path.isfile(filename): print('WARNING: '+filename+' not found.'); return
		if not filename.endswith('.eps'): print('WARNING: only EPS format is supported.')
if pagenum and pagenum in list(self.files.keys()):
print('WARNING: Replacing '+self.files[pagenum])
self.files[pagenum] = filename
else:
if not pagenum: pagenum = self._pagemax+1
self._files.update(dict(((pagenum,filename),)))
def insert(self,filename,pagenum):
'''Insert a new page at the given page number.
:param filename: Name of .eps file to be inserted.
:type filename: str
:param pagenum: Page number of file to be inserted.
:type pagenum: int
'''
if len(filename.split('.'))==1: filename += '.eps'
		if not os.path.isfile(filename): print('WARNING: '+filename+' not found.'); return
		if not filename.endswith('.eps'): print('WARNING: only EPS format is supported.')
if pagenum > self._pagemax: self.add(filename); return
ks = list(self._files.keys())
self._files = dict([(k,self._files[k]) for k in ks if k < pagenum]+
[(pagenum,filename)]+[(k+1,self._files[k]) for k in ks if k >= pagenum])
def make(self):
'''Construct the pdf.'''
cs = self.combineString + ' -dBATCH -dNOPAUSE -sDEVICE=pdfwrite -sOutputFile='+self.save
for i in np.sort(list(self.files.keys())):
if not self.files[i].endswith('.eps'): print('WARNING: Cannot combine '+self.files[i]+'. EPS format required. Skipping...'); continue
if len(self.files[i].split()) != 1:
cs += ' "'+self.files[i]+'"'
else:
cs += ' '+self.files[i]
os.system(cs)
		# optionally clean up the individual eps files once the pdf has been built
		if self._delete_files:
			for i in np.sort(list(self.files.keys())):
				if len(self.files[i].split()) != 1:
					os.system(delStr+' "'+self.files[i]+'"')
				else:
					os.system(delStr+' '+self.files[i])
def _get_combineString(self): return self._combineString
def _set_combineString(self,value): self._combineString = value
	combineString = property(_get_combineString, _set_combineString) #: (*str*) Command line command, with options, used to generate a pdf from multiple eps files. See manual for further instructions.
def _get_files(self): return self._files
files = property(_get_files) #: (*lst[str]*) List of eps files to be assembled into pdf.
def _get_pagemax(self):
ks = list(self._files.keys())
for k in ks:
if not isinstance(k,int): print('ERROR: Non integer dictionary key'); return
if len(ks) == 0: return 0
return np.max(ks)
_pagemax = property(_get_pagemax)
def _get_save(self): return self._save
def _set_save(self,value): self._save = value
save = property(_get_save, _set_save) #: (*str*) Name of the final pdf to output.
"""Classes for VTK output."""
class fStructuredGrid(pv.StructuredGrid):
def __init__(self,dimensions,points):
pv.StructuredGrid.__init__(self,dimensions,points)
def to_string(self, time = None, format='ascii'):
t = self.get_datatype(self.points)
ret = ['DATASET STRUCTURED_GRID']
# include time information
if time is not None:
ret.append('FIELD FieldData 2')
ret.append('TIME 1 1 double')
ret.append('%8.7f'%time)
ret.append('CYCLE 1 1 int')
ret.append('123')
ret.append('DIMENSIONS %s %s %s'%self.dimensions)
ret.append('POINTS %s %s'%(self.get_size(),t))
ret.append(self.seq_to_string(self.points,format,t))
return '\n'.join(ret)
class fUnstructuredGrid(pv.UnstructuredGrid):
def __init__(self,points,vertex=[],poly_vertex=[],line=[],poly_line=[],
triangle=[],triangle_strip=[],polygon=[],pixel=[],
quad=[],tetra=[],voxel=[],hexahedron=[],wedge=[],pyramid=[]):
pv.UnstructuredGrid.__init__(self,points,vertex=vertex,poly_vertex=poly_vertex,line=line,poly_line=poly_line,
triangle=triangle,triangle_strip=triangle_strip,polygon=polygon,pixel=pixel,
quad=quad,tetra=tetra,voxel=voxel,hexahedron=hexahedron,wedge=wedge,pyramid=pyramid)
def to_string(self,time = None,format='ascii'):
t = self.get_datatype(self.points)
ret = ['DATASET UNSTRUCTURED_GRID']
# include time information
if time is not None:
ret.append('FIELD FieldData 2')
ret.append('TIME 1 1 double')
ret.append('%8.7f'%time)
ret.append('CYCLE 1 1 int')
ret.append('123')
ret.append('POINTS %s %s'%(self.get_size(),t))
ret.append(self.seq_to_string(self.points,format,t))
tps = []
r = []
sz = 0
for k in list(self._vtk_cell_types_map.keys()):
kv = getattr(self,k)
if kv==[] or kv[0]==[]: continue
s = self.seq_to_string([[len(v)]+list(v) for v in kv],format,'int')
			r.append(s)
for v in kv:
tps.append(self._vtk_cell_types_map[k])
sz += len(v)+1
sep = (format=='ascii' and '\n') or (format=='binary' and '')
r = sep.join(r)
ret += ['CELLS %s %s'%(len(tps),sz),
r,
'CELL_TYPES %s'%(len(tps)),
self.seq_to_string(tps,format,'int')]
return '\n'.join(ret)
class fVtkData(pv.VtkData):
def __init__(self,*args,**kws):
pv.VtkData.__init__(self,*args,**kws)
self.times = []
self.material = pv.PointData()
self.contour = {}
def to_string(self, time=None, format = 'ascii',material=False):
ret = ['# vtk DataFile Version 2.0',
self.header,
format.upper(),
self.structure.to_string(time=time,format=format)
]
if self.cell_data.data:
ret.append(self.cell_data.to_string(format=format))
if material:
ret.append(self.material.to_string(format=format))
else:
if self.contour[time].data:
ret.append(self.contour[time].to_string(format=format))
return '\n'.join(ret)
def tofile(self, filename, format = 'ascii'):
"""Save VTK data to file.
"""
written_files = []
if not pv.common.is_string(filename):
raise TypeError('argument filename must be string but got %s'%(type(filename)))
if format not in ['ascii','binary']:
raise TypeError('argument format must be ascii | binary')
filename = filename.strip()
if not filename:
raise ValueError('filename must be non-empty string')
if filename[-4:]!='.vtk':
filename += '.vtk'
# first write material properties file
filename_int = ''.join(filename[:-4]+'_mat.vtk')
f = open(filename_int,'wb')
f.write(self.to_string(format=format,material=True).encode('utf-8'))
f.close()
written_files.append(filename_int)
# write contour output file
times = np.sort(list(self.contour.keys()))
for i,time in enumerate(times):
if len(times)>1:
filename_int = ''.join(filename[:-4]+'.%04i'%i+'.vtk')
else:
filename_int = filename
#print 'Creating file',`filename`
f = open(filename_int,'wb')
f.write(self.to_string(time=time,format=format).encode('utf-8'))
f.close()
written_files.append(filename_int)
return written_files
def tofilewell(self,filename,format = 'ascii'):
"""Save VTK data to file.
"""
written_files = []
if not pv.common.is_string(filename):
raise TypeError('argument filename must be string but got %s'%(type(filename)))
if format not in ['ascii','binary']:
raise TypeError('argument format must be ascii | binary')
filename = filename.strip()
# first write material properties file
f = open(filename,'wb')
f.write(self.to_string(format=format,material=True).encode('utf-8'))
f.close()
class fvtk(object):
def __init__(self,parent,filename,contour,diff,zscale,spatial_derivatives,time_derivatives):
self.parent = parent
self.path = fpath(parent = self)
self.path.filename = filename
self.data = None
self.csv = None
self.contour = contour
self.variables = []
self.materials = []
self.zones = []
self.diff = diff
self.spatial_derivatives = spatial_derivatives
self.time_derivatives = time_derivatives
self.zscale = zscale
self.wells = None
def __getstate__(self):
return dict((k, getattr(self, k)) for k in self.__slots__)
def __setstate__(self, data_dict):
for (name, value) in data_dict.items():
setattr(self, name, value)
def assemble(self):
"""Assemble all information in pyvtk objects."""
self.assemble_grid() # add grid information
self.assemble_zones() # add zone information
		self.assemble_properties() # add material property data (permeability, porosity, etc.)
if self.contour != None: # add contour data
self.assemble_contour()
def assemble_grid(self):
"""Assemble grid information in pyvtk objects."""
# node positions, connectivity information
nds = np.array([nd.position for nd in self.parent.grid.nodelist])
if self.zscale != 1.:
zmin = np.min(nds[:,2])
nds[:,2] = (nds[:,2]-zmin)*self.zscale+zmin
if isinstance(self.parent.grid.elemlist[0],list):
cns = [[nd-1 for nd in el] for el in self.parent.grid.elemlist]
else:
cns = [[nd.index-1 for nd in el.nodes] for el in self.parent.grid.elemlist]
# make grid
if len(cns[0]) == 3:
self.data = fVtkData(fUnstructuredGrid(nds,triangle=cns),'PyFEHM VTK model output')
elif len(cns[0]) == 4:
self.data = fVtkData(fUnstructuredGrid(nds,tetra=cns),'PyFEHM VTK model output')
elif len(cns[0]) == 8:
self.data = fVtkData(fUnstructuredGrid(nds,hexahedron=cns),'PyFEHM VTK model output')
else:
print("ERROR: Number of connections in connectivity not recognized: "+str(len(cns[0])))
return
# grid information
dat = np.array([nd.position for nd in self.parent.grid.nodelist])
nds = np.array([nd.index for nd in self.parent.grid.nodelist])
self.data.material.append(pv.Scalars(nds,name='n',lookup_table='default'))
self.data.material.append(pv.Scalars(dat[:,0],name='x',lookup_table='default'))
self.data.material.append(pv.Scalars(dat[:,1],name='y',lookup_table='default'))
self.data.material.append(pv.Scalars(dat[:,2],name='z',lookup_table='default'))
self.x_lim = [np.min(dat[:,0]),np.max(dat[:,0])]
self.y_lim = [np.min(dat[:,1]),np.max(dat[:,1])]
self.z_lim = [np.min(dat[:,2]),np.max(dat[:,2])]
self.n_lim = [1,len(self.parent.grid.nodelist)]
def assemble_zones(self):
"""Assemble zone information in pyvtk objects."""
# zones will be considered material properties as they only need to appear once
N = len(self.parent.grid.nodelist)
nds = np.zeros((1,N))[0]
self.parent.zonelist.sort(key=lambda x: x.index)
for zn in self.parent.zonelist:
if zn.index == 0: continue
name = 'zone%04i'%zn.index
if zn.name: name += '_'+zn.name.replace(' ','_')
self.zones.append(name)
zn_nds = copy(nds)
for nd in zn.nodelist: zn_nds[nd.index-1] = 1
self.data.material.append(
pv.Scalars(zn_nds,
name=name,
lookup_table='default'))
def assemble_properties(self):
"""Assemble material properties in pyvtk objects."""
# permeabilities
perms = np.array([nd.permeability for nd in self.parent.grid.nodelist])
if not all(v is None for v in perms):
if np.mean(perms)>0.: perms = np.log10(perms)
self.add_material('perm_x',perms[:,0])
self.add_material('perm_y',perms[:,1])
self.add_material('perm_z',perms[:,2])
else:
blank = [-1.e30 for nd in self.parent.grid.nodelist]
self.add_material('perm_x',blank)
self.add_material('perm_y',blank)
self.add_material('perm_z',blank)
props = np.array([[nd.density, nd.porosity, nd.specific_heat, nd.youngs_modulus,nd.poissons_ratio,nd.thermal_expansion,nd.pressure_coupling] for nd in self.parent.grid.nodelist])
names = ['density','porosity','specific_heat','youngs_modulus','poissons_ratio','thermal_expansion','pressure_coupling']
for name, column in zip(names,props.T):
self.add_material(name,column)
def add_material(self,name,data):
if all(v is None for v in data): return # if all None, no data to include
data = np.array([dt if dt is not None else -1.e30 for dt in data]) # check for None, replace with -1.e30
self.data.material.append(pv.Scalars(data,name=name,lookup_table='default'))
self.materials.append(name)
self.__setattr__(name+'_lim',[np.min(data),np.max(data)])
def assemble_contour(self):
"""Assemble contour output in pyvtk objects."""
self.data.contour = dict([(time,pv.PointData()) for time in self.contour.times])
if self.diff: time0 = self.contour.times[0]
for time in self.contour.times:
do_lims = (time == self.contour.times[-1])
for var in self.contour.variables+self.contour.user_variables:
# skip conditions
if var in self.contour.variables:
if time != self.contour.times[0] and var in ['x','y','z','n']: continue
else:
if var not in list(self.contour[time].keys()): continue
if self.diff:
if var not in list(self.contour[time0].keys()): continue
# field for contour variable
if var not in self.variables: self.variables.append(var)
self.data.contour[time].append(pv.Scalars(self.contour[time][var],name=var,lookup_table='default'))
if var in ['x','y','z','n']: continue
if do_lims: self.__setattr__(var+'_lim',[np.min(self.contour[time][var]),np.max(self.contour[time][var])])
# differences from initial value
if self.diff:
self.data.contour[time].append(pv.Scalars(self.contour[time][var]-self.contour[time0][var],name='diff_'+var,lookup_table='default'))
# time derivatives
if self.time_derivatives:
# find position, determines type of differencing
ind = np.where(time==self.contour.times)[0][0]
if ind == 0:
# forward difference
dt = self.contour.times[1]-time
f0 = self.contour[time][var]
f1 = self.contour[self.contour.times[1]][var]
dat = (f1-f0)/dt
elif ind == (len(self.contour.times)-1):
# backward difference
dt = time-self.contour.times[-2]
f0 = self.contour[self.contour.times[-2]][var]
f1 = self.contour[time][var]
dat = (f1-f0)/dt
else:
# central difference
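							# Non-uniform central difference used below:
							#   df/dt ~ -dt2/(dt1*(dt1+dt2))*f0 + (dt2-dt1)/(dt1*dt2)*f1 + dt1/(dt2*(dt1+dt2))*f2
							# which reduces to (f2 - f0)/(2*dt) when dt1 == dt2 == dt.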
dt1 = time - self.contour.times[ind-1]
dt2 = self.contour.times[ind+1] - time
f0 = self.contour[self.contour.times[ind-1]][var]
f1 = self.contour[time][var]
f2 = self.contour[self.contour.times[ind+1]][var]
dat = -dt2/(dt1*(dt1+dt2))*f0 + (dt2-dt1)/(dt1*dt2)*f1 + dt1/(dt2*(dt1+dt2))*f2
self.data.contour[time].append(pv.Scalars(dat,name='d_'+var+'_dt',lookup_table='default'))
if 'flux_x' in self.contour.variables and 'flux_y' in self.contour.variables and 'flux_z' in self.contour.variables:
flux = [(self.contour[time]['flux_x'][i],self.contour[time]['flux_y'][i],self.contour[time]['flux_z'][i]) for i in range(len(self.contour[time]['flux_x']))]
self.data.contour[time].append(pv.Vectors(flux,name='flux'))
if 'flux_x_vap' in self.contour.variables and 'flux_y_vap' in self.contour.variables and 'flux_z_vap' in self.contour.variables:
flux = [(self.contour[time]['flux_x_vap'][i],self.contour[time]['flux_y_vap'][i],self.contour[time]['flux_z_vap'][i]) for i in range(len(self.contour[time]['flux_x_vap']))]
self.data.contour[time].append(pv.Vectors(flux,name='flux_vap'))
def write(self):
"""Call to write out vtk files."""
if self.parent.work_dir: wd = self.parent.work_dir
else: wd = self.parent._path.absolute_to_file
if wd is None: wd = ''
else: wd += os.sep
fls = self.data.tofile(wd+self.path.filename)
# save file names for later use
self.material_file = fls[0]
self.contour_files = []
if len(fls)>1:
self.contour_files = fls[1:]
return fls
def write_wells(self,wells):
"""Receives a dictionary of well track objects, creates the corresponding vtk grid.
"""
nds = np.array([nd.position for nd in self.parent.grid.nodelist])
zmin = np.min(nds[:,2])
for k in list(wells.keys()):
well = wells[k]
if isinstance(well,np.ndarray):
nds = well
cns = [[i,i+1] for i in range(np.shape(nds)[0]-1)]
grid = fVtkData(fUnstructuredGrid(nds,line=cns),'Well track: %s'%k)
grid.material.append(pv.Scalars(np.ones((1,len(nds[:,2])))[0],name='T',lookup_table='default'))
else:
nds = np.array([[well.location[0],well.location[1],(z-zmin)*self.zscale+zmin] for z in well.data[:,0]])
cns = [[i,i+1] for i in range(np.shape(nds)[0]-1)]
grid = fVtkData(fUnstructuredGrid(nds,line=cns),'RAGE well track: %s'%well.name)
grid.material.append(pv.Scalars(well.data[:,1] ,name='T',lookup_table='default'))
filename=k+'_wells.vtk'
grid.tofilewell(filename)
self.wells = list(wells.keys())
def initial_display(self,show):
"""Determines what variable should be initially displayed."""
mat_vars = ['n','x','y','z','perm_x','perm_y','perm_z','porosity','density','cond_x','cond_y','cond_z']
if self.contour:
cont_vars = self.contour.variables
# convert k* format to perm_*
if show == 'kx': show = 'perm_x'
elif show == 'ky': show = 'perm_y'
elif show == 'kz': show = 'perm_z'
# check for unspecified coordinate in potentially anisotropic properties
if show in ['permeability','perm']:
print('NOTE: plotting z-component of permeability, for other components specify show=\'perm_x\', etc.')
show = 'perm_z'
		if show in ['conductivity','cond']:
print('NOTE: plotting z-component of conductivity, for other components specify show=\'cond_x\', etc.')
show = 'cond_z'
# check if material property or contour output requested for display
if show in mat_vars:
self.initial_show = 'material'
self.default_material_property = show
self.default_material_lims = self.__getattribute__(show+'_lim')
if self.contour:
# get default contour variable to display
for var in self.contour.variables:
if var not in ['x','y','z','n']: break
self.default_contour_variable = var
self.default_contour_lims = self.__getattribute__(var+'_lim')
elif show in cont_vars:
self.initial_show = 'contour'
self.default_contour_variable = show
self.default_contour_lims = self.__getattribute__(show+'_lim')
self.default_material_property = 'perm_x' # default
self.default_material_lims = self.__getattribute__('perm_x_lim')
else:
print('ERROR: requested property or variable does not exist, available options are...')
print('Material properties:')
for mat in mat_vars:
print(' - '+mat)
print('Contour output variables:')
for var in cont_vars:
print(' - '+var)
print('')
def startup_script(self,nodes):
x0,x1 = self.parent.grid.xmin, self.parent.grid.xmax
y0,y1 = self.parent.grid.ymin, self.parent.grid.ymax
z0,z1 = self.parent.grid.zmin, self.parent.grid.zmax
z1 = self.zscale*(z1-z0)+z0
xm,ym,zm = (x0+x1)/2., (y0+y1)/2., (z0+z1)/2.
xr,yr,zr = (x1-x0), (y1-y0), (z1-z0)
dflt_mat = '\''+self.default_material_property+'\''
mat_lim = self.default_material_lims
f = open('pyfehm_paraview_startup.py','w')
contour_files=[file for file in self.contour_files]
################################### load paraview modules ######################################
lns = [
'try: paraview.simple',
'except: from paraview.simple import *',
'paraview.simple._DisableFirstRenderCameraReset()',
'',
]
################################### load material properties ###################################
lns += ['mat_prop = LegacyVTKReader( FileNames=[']
file = self.material_file.replace('\\','/')
lns += ['\''+file+'\',']
lns += ['] )']
lns += ['RenameSource("model", mat_prop)']
################################### initial property display ###################################
lns += [
'rv = GetRenderView()',
'dr = Show()',
'dr.ScalarOpacityUnitDistance = 1.7320508075688779',
'dr.EdgeColor = [0.0, 0.0, 0.5]',
'',
'rv.CenterOfRotation = [%10.5f, %10.5f, %10.5f]'%(xm,ym,zm),
'',
'rv.CameraViewUp = [-0.4, -0.11, 0.92]',
'rv.CameraPosition = [%10.5f, %10.5f, %10.5f]'%(xm+2.5*xr,ym+1.5*yr,zm+1.5*zr),
'rv.CameraFocalPoint = [%10.5f, %10.5f, %10.5f]'%(xm,ym,zm),
'',
'mr = GetDisplayProperties(mat_prop)',
'mr.Representation = \'Surface With Edges\'',
'',
'lt = GetLookupTableForArray( '+dflt_mat+', 1, RGBPoints=[%4.2f, 0.23, 0.299, 0.754, %4.2f, 0.706, 0.016, 0.15], VectorMode=\'Magnitude\', NanColor=[0.25, 0.0, 0.0], ColorSpace=\'Diverging\', ScalarRangeInitialized=1.0 )'%tuple(mat_lim),
'',
'pf = CreatePiecewiseFunction( Points=[%4.2f, 0.0, 0.5, 0.0, %4.2f, 1.0, 0.5, 0.0] )'%tuple(mat_lim),
'',
'mr.ScalarOpacityFunction = pf',
'mr.ColorArrayName = (\'POINT_DATA\', '+dflt_mat+')',
'mr.LookupTable = lt',
'',
'lt.ScalarOpacityFunction = pf',
'',
'ScalarBarWidgetRepresentation1 = CreateScalarBar( Title='+dflt_mat+', LabelFontSize=12, Enabled=1, TitleFontSize=12 )',
'GetRenderView().Representations.append(ScalarBarWidgetRepresentation1)',
'',
'lt = GetLookupTableForArray('+dflt_mat+', 1 )',
'',
'ScalarBarWidgetRepresentation1.LookupTable = lt',
'',
]
################################### load in nodes as glyphs ###################################
ndRadius = np.min([con.distance for con in self.parent.grid.connlist])/10.
ndRadius = ndRadius*self.zscale
if nodes:
lns += [
'AnimationScene1 = GetAnimationScene()',
'AnimationScene1.AnimationTime = 0.0',
'rv.ViewTime = 0.0',
'source = FindSource("model")',
'SetActiveSource(source)',
'',
'G = Glyph( GlyphType="Arrow", GlyphTransform="Transform2" )',
'G.GlyphTransform = "Transform2"',
'G.GlyphType = "Sphere"',
'G.RandomMode = 0',
'G.ScaleMode = \'off\'',
'G.MaskPoints = 0',
'G.GlyphType.Radius = %10.5f'%ndRadius,
'',
'RenameSource("nodes", G)',
'',
'rv = GetRenderView()',
'mr = GetDisplayProperties(source)',
'dr = Show()',
'dr.ColorArrayName = (\'POINT_DATA\', \'n\')',
'dr.ScaleFactor = 1.1',
'dr.SelectionPointFieldDataArrayName = "nodes"',
'dr.EdgeColor = [0.0, 0.0, 0.5000076295109483]',
'dr.ColorArrayName = (\'POINT_DATA\', \'\')',
'dr.DiffuseColor = [0.,0.,0.]',
'dr.Visibility = 0',
]
################################### load in zones as glyphs ###################################
colors = [
[1.,1.,0.],
[1.,0.,1.],
[0.,1.,1.],
[1.,1.,0.5],
[1.,0.5,1.],
[0.5,1.,1.],
[1.,1.,0.25],
[1.,0.25,1.],
[0.25,1.,1.],
[1.,1.,0.75],
[1.,0.75,1.],
[0.75,1.,1.],
[0.5,1.,0.5],
[1.,0.5,0.5],
[0.5,0.5,1.],
[0.5,0.75,0.5],
[0.75,0.5,0.5],
[0.5,0.5,0.75],
[0.5,0.25,0.5],
[0.25,0.5,0.5],
[0.5,0.5,0.25],
[0.75,1.,0.75],
[1.,0.75,0.75],
[0.75,0.75,1.],
[0.75,0.5,0.75],
[0.5,0.75,0.75],
[0.75,0.75,0.5],
[0.75,0.25,0.75],
[0.25,0.75,0.75],
[0.75,0.75,0.25],
[0.25,1.,0.25],
[1.,0.25,0.25],
[0.25,0.25,1.],
[0.25,0.75,0.25],
[0.75,0.25,0.25],
[0.25,0.25,0.75],
[0.25,0.5,0.25],
[0.5,0.25,0.25],
[0.25,0.25,0.5],
]
zones = []; cols = []
for zone,color in zip(self.zones,colors):
if self.show_zones == 'user':
if ('XMIN' in zone) or ('XMAX' in zone) or ('YMIN' in zone) or ('YMAX' in zone) or ('ZMIN' in zone) or ('ZMAX' in zone): continue
elif self.show_zones == 'none': continue
elif isinstance(self.show_zones,list):
if zone not in ['zone%04i_%s'%(zn.index,zn.name) for zn in self.show_zones]: continue
zones.append(zone)
cols.append(color)
lns += ['cols = [']
for col in cols:
lns += ['[%3.2f,%3.2f,%3.2f],'%tuple(col)]
lns += [']']
lns += ['zones = [']
for zone in zones:
lns += ['\''+zone+'\',']
lns += [']']
lns += ['for zone,col in zip(zones,cols):',
'\tAnimationScene1 = GetAnimationScene()',
'\tAnimationScene1.AnimationTime = 0.0',
'\trv.ViewTime = 0.0',
'\tsource = FindSource("model")',
'\tSetActiveSource(source)',
'\t',
'\tG = Glyph( GlyphType="Arrow", GlyphTransform="Transform2" )',
'\tG.GlyphTransform = "Transform2"',
'\tG.Scalars = [\'POINTS\', zone]',
'\tG.ScaleMode = \'scalar\'',
'\tG.GlyphType = "Sphere"',
'\tG.RandomMode = 0',
'\tG.MaskPoints = 0',
'\t',
'\tG.GlyphType.Radius = %10.5f'%(2*ndRadius),
'\t',
'\tRenameSource(zone, G)',
'\t',
'\trv = GetRenderView()',
'\tmr = GetDisplayProperties(source)',
'\tdr = Show()',
'\tdr.ColorArrayName = (\'POINT_DATA\', \'n\')',
'\tdr.ScaleFactor = 1.1',
'\tdr.SelectionPointFieldDataArrayName = zone',
'\tdr.EdgeColor = [0.0, 0.0, 0.5000076295109483]',
'\tdr.Opacity = 0.5',
'\t',
'\tlt = GetLookupTableForArray(zone, 1, RGBPoints=[0.0, 0.23, 0.299, 0.754, 0.5, 0.865, 0.865, 0.865, 1.0]+col, VectorMode=\'Magnitude\', NanColor=[0.25, 0.0, 0.0], ColorSpace=\'Diverging\', ScalarRangeInitialized=1.0 )',
'\t',
'\tpf = CreatePiecewiseFunction( Points=[0.0, 0.0, 0.5, 0.0, 1.0, 1.0, 0.5, 0.0] )',
'\t',
'\tdr.ColorArrayName = (\'POINT_DATA\', zone)',
'\tdr.LookupTable = lt',
'\tdr.Visibility = 0',
'\t',
'\tlt.ScalarOpacityFunction = pf',
]
################################### load in contour output ###################################
if len(contour_files)>0:
lns += ['contour_output = LegacyVTKReader( FileNames=[']
for file in contour_files:
file = file.replace('\\','/')
lns += ['\''+file+'\',']
lns += ['] )']
lns += ['RenameSource("contour_output", contour_output)']
################################### set up initial visualisation ###################################
dflt_cont = '\''+self.default_contour_variable+'\''
cont_lim = self.default_contour_lims
viewTime = len(self.contour.times)-1
lns += [
'lt = GetLookupTableForArray('+dflt_cont+', 1, RGBPoints=[%10.5f, 0.23, 0.299, 0.754, %10.5f, 0.706, 0.016, 0.15], VectorMode=\'Magnitude\', NanColor=[0.25, 0.0, 0.0], ColorSpace=\'Diverging\', ScalarRangeInitialized=1.0 )'%tuple(cont_lim),
'',
'pf = CreatePiecewiseFunction( Points=[%10.5f, 0.0, 0.5, 0.0, %10.5f, 1.0, 0.5, 0.0] )'%tuple(cont_lim),
'',
'dr = Show() #dr = DataRepresentation1',
'dr.Representation = \'Surface With Edges\'',
'dr.EdgeColor = [0.15, 0.15, 0.15]',
'dr.ScalarOpacityFunction = pf',
'dr.ColorArrayName = (\'POINT_DATA\', '+dflt_cont+')',
'dr.ScalarOpacityUnitDistance = 1.7320508075688779',
'dr.LookupTable = lt',
'',
'rv.ViewTime = %4i'%viewTime,
'',
'ScalarBarWidgetRepresentation1 = CreateScalarBar( Title='+dflt_cont+', LabelFontSize=12, Enabled=1, LookupTable=lt, TitleFontSize=12 )',
'GetRenderView().Representations.append(ScalarBarWidgetRepresentation1)',
'',
]
if len(contour_files)>0:
lns+= [
'model = FindSource("model")',
'model_rep = GetDisplayProperties(model)',
'contour_output = FindSource("contour_output")',
'cont_rep = GetDisplayProperties(contour_output)',
]
if self.initial_show == 'material':
lns+=[
'model_rep.Visibility = 1',
'cont_rep.Visibility = 0 ',
]
elif self.initial_show == 'contour':
lns += [
'model_rep.Visibility = 0',
'cont_rep.Visibility = 1',
]
if self.csv is not None:
################################### load in history output ###################################
lns += ['xyview = CreateXYPlotView()']
lns += ['xyview.BottomAxisRange = [0.0, 5.0]']
lns += ['xyview.TopAxisRange = [0.0, 6.66]']
lns += ['xyview.ViewTime = 0.0']
lns += ['xyview.LeftAxisRange = [0.0, 10.0]']
lns += ['xyview.RightAxisRange = [0.0, 6.66]']
lns += ['']
if os.path.isfile(self.csv.filename):
lns += ['hout = CSVReader( FileName=[r\''+self.csv.filename+'\'] )']
lns += ['RenameSource("history_output",hout)']
fp = open(self.csv.filename)
ln = fp.readline().rstrip()
fp.close()
headers = [lni for lni in ln.split(',')]
# put variables in order to account for diff or time derivatives
vars = []
for variable in self.csv.history.variables:
vars.append(variable)
if self.csv.diff: vars.append('diff_'+variable)
#if self.time_derivatives: vars.append('d'+variable+'_dt')
for i,variable in enumerate(vars):
plot_title = variable+'_history'
lns += []
lns += [plot_title+' = PlotData()']
lns += ['RenameSource("'+plot_title+'",'+plot_title+')']
lns += ['mr = Show()']
lns += ['mr = GetDisplayProperties('+plot_title+')']
lns += ['mr.XArrayName = \'time\'']
lns += ['mr.UseIndexForXAxis = 0']
lns += ['mr.SeriesColor = [\'time\', \'0\', \'0\', \'0\']']
lns += ['mr.AttributeType = \'Row Data\'']
switch_off = [header for header in headers if not header.strip().startswith(variable+':')]
ln = 'mr.SeriesVisibility = [\'vtkOriginalIndices\', \'0\', \'time\', \'0\''
for header in switch_off:
ln+=', \''+header+'\',\'0\''
#lns += ['mr.SeriesVisibility = [\'vtkOriginalIndices\', \'0\', \'time\', \'0\']']
lns += [ln+']']
if i != (len(vars)-1):
lns += ['mr.Visibility = 0']
lns += ['']
lns += ['AnimationScene1.ViewModules = [ RenderView1, SpreadSheetView1, XYChartView1 ]']
lns += ['Render()']
######################################## load in well ########################################
if self.wells is not None:
lns += ['model_rep.Representation = \'Outline\'']
for well in self.wells:
lns += ['%s=LegacyVTKReader(FileNames=[r\'%s\'])'%(well,os.getcwd()+os.sep+well+'_wells.vtk')]
lns += ['RenameSource("%s", %s)'%(well,well)]
lns += ['SetActiveSource(%s)'%well]
lns += ['dr = Show()']
lns += ['dr.ScaleFactor = 1366.97490234375']
lns += ['dr.SelectionCellFieldDataArrayName = \'Name\'']
lns += ['mr = GetDisplayProperties(%s)'%well]
lns += ['mr.LineWidth=4.0']
f.writelines('\n'.join(lns))
f.close()
def _get_filename(self): return self.path.absolute_to_file+os.sep+self.path.filename
filename = property(_get_filename) #: (**)
class fcsv(object):
def __init__(self,parent,filename,history,diff,time_derivatives):
self.parent = parent
self.path = fpath(parent = self)
self.path.filename = filename
self.data = None
self.history = history
self.diff = diff
self.time_derivatives = time_derivatives
if diff:
self.assemble_diff()
if time_derivatives:
self.assemble_time_derivatives()
	def assemble_diff(self):
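		# For every history variable X this creates a companion variable 'diff_X'
		# holding, at each node, the change in X relative to its first output time.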
for variable in self.history.variables:
for node in self.history.nodes:
self.history.new_variable('diff_'+variable,node,self.history[variable][node]-self.history[variable][node][0])
def assemble_time_derivatives(self):
return
#for variable in self.history.variables:
# for node in self.history.nodes:
# data = self.history[variable][node]
# time = self.history.times
# self.history.new_variable('d'+variable+'_dt',node,dt)
#
# ind = np.where(time==self.contour.times)[0][0]
# if ind == 0:
# # forward difference
# dt = self.contour.times[1]-time
# f0 = self.contour[time][var]
# f1 = self.contour[self.contour.times[1]][var]
# dat = (f1-f0)/dt
# elif ind == (len(self.contour.times)-1):
# # backward difference
# dt = time-self.contour.times[-2]
# f0 = self.contour[self.contour.times[-2]][var]
# f1 = self.contour[time][var]
# dat = (f1-f0)/dt
# else:
# # central difference
# dt1 = time - self.contour.times[ind-1]
# dt2 = self.contour.times[ind+1] - time
# f0 = self.contour[self.contour.times[ind-1]][var]
# f1 = self.contour[time][var]
# f2 = self.contour[self.contour.times[ind+1]][var]
# dat = -dt2/(dt1*(dt1+dt2))*f0 + (dt2-dt1)/(dt1*dt2)*f1 + dt1/(dt2*(dt1+dt2))*f2
# self.data.contour[time].append(pv.Scalars(dat,name='d_'+var+'_dt',lookup_table='default'))
def write(self):
"""Call to write out csv files."""
if self.parent.work_dir: wd = self.parent.work_dir
else: wd = self.parent._path.absolute_to_file
if wd is None: wd = ''
else: wd += os.sep
# write one large .csv file for all variables, nodes
from string import join
fp = open(wd+self.path.filename,'w')
self.filename = wd+self.path.filename
# write headers
ln = '%16s,'%('time')
vars = []
for variable in self.history.variables:
vars.append(variable)
if self.diff: vars.append('diff_'+variable)
#if self.time_derivatives: vars.append('d'+variable+'_dt')
for variable in vars:
for node in self.history.nodes:
var = variable
#if len(var)>6: var = var[:6]
ln += '%16s,'%(var+': nd '+str(node))
fp.write(ln[:-1]+'\n')
# write row for each time
for i,time in enumerate(self.history.times): # each row is one time output
ln = '%16.8e,'%time
for variable in vars:
for node in self.history.nodes: # each column is one node
ln += '%16.8e,'%self.history[variable][node][i]
ln = ln[:-1]+'\n'
fp.write(ln)
fp.close()
def fdiff( in1, in2, format='diff', times=[], variables=[], components=[], nodes=[]):
'''Take the difference of two fpost objects
:param in1: First fpost object
	:type in1: fpost object (fcontour)
	:param in2: Second fpost object
	:type in2: fpost object (fcontour)
:param format: Format of diff: diff->in1-in2 relative->(in1-in2)/abs(in2) percent->100*abs((in1-in2)/in2)
:type format: str
:param times: Times to diff
:type times: lst(fl64)
:param variables: Variables to diff
:type variables: lst(str)
:param components: Components to diff (foutput objects)
:type components: lst(str)
:returns: fpost object of same type as in1 and in2
'''
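	# Illustrative usage (c1 and c2 are hypothetical fcontour objects read from
	# two comparable runs; 'T' and 'P' are hypothetical output variables):
	#     dc = fdiff(c1, c2, format='relative', variables=['T', 'P'])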
# Copy in1 and in2 in case they get modified below
in1 = deepcopy(in1)
in2 = deepcopy(in2)
if type(in1) is not type(in2):
print("ERROR: fpost objects are not of the same type: "+str(type(in1))+" and "+str(type(in2)))
return
if isinstance(in1, fcontour) or isinstance(in1, fhistory) or 'foutput' in str(in1.__class__):
		# Find common times
t = np.intersect1d(in1.times,in2.times)
if len(t) == 0:
print("ERROR: fpost object times do not have any matching values")
return
if len(times) > 0:
times = np.intersect1d(times,t)
if len(times) == 0:
print("ERROR: provided times are not coincident with fpost object times")
return
else:
times = t
if isinstance(in1, fcontour):
# Find common variables
v = np.intersect1d(in1.variables,in2.variables)
if len(v) == 0:
print("ERROR: fcontour object variables do not have any matching values")
return
if len(variables) > 0:
variables = np.intersect1d(variables,v)
if len(variables) == 0:
print("ERROR: provided variables are not coincident with fcontour object variables")
return
else:
variables = v
out = deepcopy(in1)
out._times = times
out._variables = variables
out._data = {}
for t in times:
			if format == 'diff':
out._data[t] = dict([(v,in1[t][v] - in2[t][v]) for v in variables])
			elif format == 'relative':
out._data[t] = dict([(v,(in1[t][v] - in2[t][v])/np.abs(in2[t][v])) for v in variables])
			elif format == 'percent':
out._data[t] = dict([(v,100*np.abs((in1[t][v] - in2[t][v])/in2[t][v])) for v in variables])
return out
#Takes the difference of two fhistory objects.
elif isinstance(in1, fhistory):
# Find common variables
v = np.intersect1d(in1.variables, in2.variables)
if len(v) == 0:
print("ERROR: fhistory object variables do not have any matching values")
return
if len(variables) > 0:
variables = np.intersect1d(variables,v)
if len(variables) == 0:
print("ERROR: provided variables are not coincident with fhistory object variables")
return
else:
variables = v
#Find common nodes.
n = np.intersect1d(in1.nodes, in2.nodes)
if len(n) == 0:
print("ERROR: fhistory object nodes do not have any matching values")
return
if len(nodes) > 0:
nodes = np.intersect1d(nodes,n)
if len(nodes) == 0:
print("ERROR: provided nodes are not coincident with fhistory object nodes")
return
else:
nodes = n
#Set up the out object.
out = deepcopy(in1)
out._times = times
out._variables = variables
out._nodes = nodes
out._data = {}
#Find the difference at each time index for a variable and node.
for v in variables:
for n in nodes:
i = 0
diff = []
while i < len(times):
					if format == 'diff':
#Quick fix to handle ptrk files.
if isinstance(in1, fptrk):
diff.append(in1[v][n]-in2[v][n])
else:
diff.append(in1[v][n][i]-in2[v][n][i])
					elif format == 'relative':
						diff.append((in1[v][n][i] - in2[v][n][i])/np.abs(in2[v][n][i]))
					elif format == 'percent':
						diff.append(100*np.abs((in1[v][n][i] - in2[v][n][i])/in2[v][n][i]))
i = i + 1
if isinstance(in1, fptrk):
out._data[v] = np.array(diff)
else:
out._data[v] = dict([(n, diff)])
#Return the difference.
return out
elif 'foutput' in str(in1.__class__):
# Find common components
c = np.intersect1d(in1.components,in2.components)
if len(c) == 0:
print("ERROR: foutput object components do not have any matching values")
return
if len(components) > 0:
components = np.intersect1d(components,c)
if len(components) == 0:
print("ERROR: provided components are not coincident with foutput object components")
return
else:
components = c
out = deepcopy(in1)
out._times = times
out._node = {}
for tp in ['water','gas','tracer1','tracer2']:
out._node[tp] = None
for cp in components:
			if format == 'diff':
if len(variables):
out._node[cp] = dict([(n,dict([(v,np.array(in1._node[cp][n][v]) - np.array(in2._node[cp][n][v])) for v in list(in1._node[cp][n].keys()) if v in variables])) for n in in1.nodes])
else:
out._node[cp] = dict([(n,dict([(v,np.array(in1._node[cp][n][v]) - np.array(in2._node[cp][n][v])) for v in list(in1._node[cp][n].keys())])) for n in in1.nodes])
			elif format == 'relative':
if len(variables):
out._node[cp] = dict([(n,dict([(v,(np.array(in1._node[cp][n][v]) - np.array(in2._node[cp][n][v]))/np.abs(in2._node[cp][n][v])) for v in list(in1._node[cp][n].keys()) if v in variables])) for n in in1.nodes])
else:
out._node[cp] = dict([(n,dict([(v,(np.array(in1._node[cp][n][v]) - np.array(in2._node[cp][n][v]))/np.abs(in2._node[cp][n][v])) for v in list(in1._node[cp][n].keys())])) for n in in1.nodes])
			elif format == 'percent':
if len(variables):
out._node[cp] = dict([(n,dict([(v,100*np.abs((np.array(in1._node[cp][n][v]) - np.array(in2._node[cp][n][v]))/in2._node[cp][n][v])) for v in list(in1._node[cp][n].keys()) if v in variables])) for n in in1.nodes])
else:
out._node[cp] = dict([(n,dict([(v,100*np.abs((np.array(in1._node[cp][n][v]) - np.array(in2._node[cp][n][v]))/in2._node[cp][n][v])) for v in list(in1._node[cp][n].keys())])) for n in in1.nodes])
return out
def sort_tec_files(files):
# sort first by number, then by type
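	# e.g. (illustrative) 'run.5000.0_days_sca_node.dat' is parsed to time 5000.0
	# and the files are returned in order of increasing time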
for file in files:
if not file.endswith('.dat'): return files
paths = [os.sep.join(file.split(os.sep)[:-1]) for file in files]
files = [file.split(os.sep)[-1] for file in files]
times = []
for file in files:
for type in ['_days_sca_node','_days_vec_node','_days_hf_node','_days_con_node','_sca_node','_vec_node','_hf_node','_con_node']:
if type in file:
times.append(float('.'.join(file.split(type)[0].split('.')[1:])))
break
times = sorted(enumerate(times), key=lambda x: x[1])
paths = [paths[ind] for ind,time in times]
files = [files[ind] for ind,time in times]
return [path+os.sep+file if path else file for path,file in zip(paths,files)]
| lgpl-2.1 |
neale/CS-program | 434-MachineLearning/assignment4/mdp.py | 1 | 6171 | import sys
import operator
import numpy as np
import matplotlib.pyplot as plt
import itertools, functools
import re
""" Grid Layout
grid[0][0] = num_states
grid[0][1] = num_actions
"""
def load_data():
path = sys.argv[1]
with open(path, 'rb') as f:
#with open('./rl_testdata.csv', 'rb') as f:
train = f.readlines()
train = [line.strip('\n') for line in train]
train = [re.sub(r'[^\x00-\x7f]',r'', line) for line in train]
train[0] = [int(a) for a in train[0].split(' ')]
num_states, num_actions = train[0]
lines = num_actions * num_states + num_actions
grid = []
for i in range(1, lines+(num_actions-1)):
        if (i-1) % (num_states+1) != 0:
grid.append([float(n) for n in train[i].split(' ')[::4]])
train[i] = [float(n) for n in train[i].split(' ')[::4]]
actions = []
for i in range(num_actions):
actions.append(grid[(i*num_states):((1+i)*num_states)])
train = np.array(train)
return train, actions
class MDP(object):
def __init__(self, grid, gamma, actions):
self.grid = grid
self.gamma = gamma
self.num_states, self.num_actions = grid[0]
self.actions = actions
self.rewards = grid[-1]
self.Utility = [x for x in self.rewards]
self.Path = [0]
self.print_attrs()
self.delta = ((1*10**-10)*((1-self.gamma)**2))/(2*(self.gamma**2))
def print_attrs(self):
print "number of states: {}\n".format(self.num_states)
print "number of possible actions: {}\n".format(self.num_actions)
print "rewards per state: {}\n".format(self.rewards)
def Reward(self, state):
return self.rewards[state]
def T(self, state, action, next_state):
""" returns probability of going to state X from state Y """
return self.actions[action][state][next_state]
""" Value Iteration algorithm:
U1(state) = Reward(state)
        Ui+1(state) = Reward(state) + gamma*max over actions( sum over next states( T(state, action, next_state)*Ui(next_state) ) )
        computes the utility of each state by considering, for every action, all next states
"""
def util(self, state):
p_actions = []
max_p, sum_p = 0, 0
for action in range(self.num_actions):
sum_p = 0
p_actions = []
for next_state in range(self.num_states):
p_actions.append((self.T(state, action, next_state), action, next_state))
for p in p_actions:
sum_p += p[0] * self.Utility[p[2]]
            if (sum_p > max_p) or (max_p == 0):
max_p = sum_p
if self.timesteps > 0:
return max_p + self.Reward(state)
else:
return self.gamma*max_p + self.Reward(state)
""" Q iterates through the algorithm until the utility update is less than delta
as the utility of each state is updated, the difference between the old and the
new utility functions can be taken, this is compared against the delta equation
"""
def Q(self) :
finite = 1
max_state = 1
if finite == 0:
# fill in Utility for each
# for infinite horizon
while max_state > self.delta:
max_state = 0
new_util = [0]*self.num_states
next_prob = []
for state in range(self.num_states):
state_util = self.util(state)
if state_util is not None:
max_state = max(max_state, abs(self.Utility[state] - state_util))
new_util[state] = state_util
self.Utility = new_util
else:
# for finite horizon
utilities, policies = [], []
for it in range(10):
for s in range(it):
new_util = [0]*self.num_states
next_prob = []
for state in range(self.num_states):
state_util = self.util(state)
if state_util is not None:
max_state = max(max_state, abs(self.Utility[state] - state_util))
new_util[state] = state_util
self.Utility = new_util
utilities.append(self.Utility)
policies.append(self.policy())
return utilities, policies
return self.Utility
""" finds the best policy based on the current utility function
        for each state, returns the action with the highest expected utility over next states
"""
def policy(self):
proto_policy = []
def argmax(state):
res = {}
for action in range(self.num_actions):
res[action] = 0
self.p_states = []
for next_state in range(self.num_states):
self.p_states.append((self.T(state, action, next_state), action, next_state))
for p in self.p_states:
res[action] += p[0] * self.Utility[p[2]]
return (max(res.items(), key=operator.itemgetter(1))[0] if res else None)
for state in range(self.num_states):
proto_policy.append(argmax(state))
return proto_policy
if __name__ == '__main__':
gamma = 0.1
grid, actions = load_data()
mdp = MDP(grid, gamma, actions)
finite = True
if finite == False:
Utility = mdp.Q()
Policy = mdp.policy()
U = ["%.5f" % v for v in Utility]
P = ["%.5f" % v for v in Policy]
print "**************************************\nEnd Policy: {}\nEnd Value function: {}\n**************************************".format(P, U)
else:
Utility, Policy = mdp.Q()
for i in range(10):
U = ["%.5f" % v for v in Utility[i]]
P = ["%.5f" % v for v in Policy[i]]
print "***********************************"
print "Utility for state {} : {}".format(i, U)
print "Policy for state {} : {}\n**************************************".format(i, P)
| unlicense |
JizhouZhang/SDR | gr-filter/examples/fir_filter_fff.py | 47 | 4014 | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, filter
from gnuradio import analog
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
class example_fir_filter_fff(gr.top_block):
def __init__(self, N, fs, bw, tw, atten, D):
gr.top_block.__init__(self)
self._nsamps = N
self._fs = fs
self._bw = bw
self._tw = tw
self._at = atten
self._decim = D
taps = filter.firdes.low_pass_2(1, self._fs, self._bw, self._tw, self._at)
print "Num. Taps: ", len(taps)
self.src = analog.noise_source_f(analog.GR_GAUSSIAN, 1)
self.head = blocks.head(gr.sizeof_float, self._nsamps)
self.filt0 = filter.fir_filter_fff(self._decim, taps)
self.vsnk_src = blocks.vector_sink_f()
self.vsnk_out = blocks.vector_sink_f()
self.connect(self.src, self.head, self.vsnk_src)
self.connect(self.head, self.filt0, self.vsnk_out)
def main():
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=10000,
help="Number of samples to process [default=%default]")
parser.add_option("-s", "--samplerate", type="eng_float", default=8000,
help="System sample rate [default=%default]")
parser.add_option("-B", "--bandwidth", type="eng_float", default=1000,
help="Filter bandwidth [default=%default]")
parser.add_option("-T", "--transition", type="eng_float", default=100,
help="Transition band [default=%default]")
parser.add_option("-A", "--attenuation", type="eng_float", default=80,
help="Stopband attenuation [default=%default]")
parser.add_option("-D", "--decimation", type="int", default=1,
help="Decmation factor [default=%default]")
(options, args) = parser.parse_args ()
put = example_fir_filter_fff(options.nsamples,
options.samplerate,
options.bandwidth,
options.transition,
options.attenuation,
options.decimation)
put.run()
data_src = scipy.array(put.vsnk_src.data())
data_snk = scipy.array(put.vsnk_out.data())
# Plot the signals PSDs
nfft = 1024
f1 = pylab.figure(1, figsize=(12,10))
s1 = f1.add_subplot(1,1,1)
s1.psd(data_src, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
s1.psd(data_snk, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
f2 = pylab.figure(2, figsize=(12,10))
s2 = f2.add_subplot(1,1,1)
s2.plot(data_src)
s2.plot(data_snk.real, 'g')
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
CforED/Machine-Learning | sklearn/neural_network/rbm.py | 46 | 12303 | """Restricted Boltzmann Machine
"""
# Authors: Yann N. Dauphin <[email protected]>
# Vlad Niculae
# Gabriel Synnaeve
# Lars Buitinck
# License: BSD 3 clause
import time
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator
from ..base import TransformerMixin
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils import check_random_state
from ..utils import gen_even_slices
from ..utils import issparse
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import log_logistic
from ..utils.fixes import expit # logistic function
from ..utils.validation import check_is_fitted
class BernoulliRBM(BaseEstimator, TransformerMixin):
"""Bernoulli Restricted Boltzmann Machine (RBM).
A Restricted Boltzmann Machine with binary visible units and
binary hidden units. Parameters are estimated using Stochastic Maximum
Likelihood (SML), also known as Persistent Contrastive Divergence (PCD)
[2].
The time complexity of this implementation is ``O(d ** 2)`` assuming
d ~ n_features ~ n_components.
Read more in the :ref:`User Guide <rbm>`.
Parameters
----------
n_components : int, optional
Number of binary hidden units.
learning_rate : float, optional
The learning rate for weight updates. It is *highly* recommended
to tune this hyper-parameter. Reasonable values are in the
10**[0., -3.] range.
batch_size : int, optional
Number of examples per minibatch.
n_iter : int, optional
Number of iterations/sweeps over the training dataset to perform
during training.
verbose : int, optional
The verbosity level. The default, zero, means silent mode.
random_state : integer or numpy.RandomState, optional
A random number generator instance to define the state of the
random permutations generator. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
intercept_hidden_ : array-like, shape (n_components,)
Biases of the hidden units.
intercept_visible_ : array-like, shape (n_features,)
Biases of the visible units.
components_ : array-like, shape (n_components, n_features)
        Weight matrix, where n_features is the number of
visible units and n_components is the number of hidden units.
Examples
--------
>>> import numpy as np
>>> from sklearn.neural_network import BernoulliRBM
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = BernoulliRBM(n_components=2)
>>> model.fit(X)
BernoulliRBM(batch_size=10, learning_rate=0.1, n_components=2, n_iter=10,
random_state=None, verbose=0)
References
----------
[1] Hinton, G. E., Osindero, S. and Teh, Y. A fast learning algorithm for
deep belief nets. Neural Computation 18, pp 1527-1554.
http://www.cs.toronto.edu/~hinton/absps/fastnc.pdf
[2] Tieleman, T. Training Restricted Boltzmann Machines using
Approximations to the Likelihood Gradient. International Conference
on Machine Learning (ICML) 2008
"""
def __init__(self, n_components=256, learning_rate=0.1, batch_size=10,
n_iter=10, verbose=0, random_state=None):
self.n_components = n_components
self.learning_rate = learning_rate
self.batch_size = batch_size
self.n_iter = n_iter
self.verbose = verbose
self.random_state = random_state
def transform(self, X):
"""Compute the hidden layer activation probabilities, P(h=1|v=X).
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
The data to be transformed.
Returns
-------
h : array, shape (n_samples, n_components)
Latent representations of the data.
"""
check_is_fitted(self, "components_")
X = check_array(X, accept_sparse='csr', dtype=np.float64)
return self._mean_hiddens(X)
def _mean_hiddens(self, v):
"""Computes the probabilities P(h=1|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
h : array-like, shape (n_samples, n_components)
Corresponding mean field values for the hidden layer.
"""
p = safe_sparse_dot(v, self.components_.T)
p += self.intercept_hidden_
return expit(p, out=p)
def _sample_hiddens(self, v, rng):
"""Sample from the distribution P(h|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer.
"""
p = self._mean_hiddens(v)
return (rng.random_sample(size=p.shape) < p)
def _sample_visibles(self, h, rng):
"""Sample from the distribution P(v|h).
Parameters
----------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
"""
p = np.dot(h, self.components_)
p += self.intercept_visible_
expit(p, out=p)
return (rng.random_sample(size=p.shape) < p)
def _free_energy(self, v):
"""Computes the free energy F(v) = - log sum_h exp(-E(v,h)).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
free_energy : array-like, shape (n_samples,)
The value of the free energy.
"""
return (- safe_sparse_dot(v, self.intercept_visible_)
- np.logaddexp(0, safe_sparse_dot(v, self.components_.T)
+ self.intercept_hidden_).sum(axis=1))
def gibbs(self, v):
"""Perform one Gibbs sampling step.
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to start from.
Returns
-------
v_new : array-like, shape (n_samples, n_features)
Values of the visible layer after one Gibbs step.
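
        Examples
        --------
        A short Gibbs chain started from a training sample (illustrative;
        ``model`` and ``X`` as in the class-level example above)::

            v = X[:1]
            for _ in range(100):
                v = model.gibbs(v)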
"""
check_is_fitted(self, "components_")
if not hasattr(self, "random_state_"):
self.random_state_ = check_random_state(self.random_state)
h_ = self._sample_hiddens(v, self.random_state_)
v_ = self._sample_visibles(h_, self.random_state_)
return v_
def partial_fit(self, X, y=None):
"""Fit the model to the data X which should contain a partial
segment of the data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
if not hasattr(self, 'components_'):
self.components_ = np.asarray(
self.random_state_.normal(
0,
0.01,
(self.n_components, X.shape[1])
),
order='fortran')
if not hasattr(self, 'intercept_hidden_'):
self.intercept_hidden_ = np.zeros(self.n_components, )
if not hasattr(self, 'intercept_visible_'):
self.intercept_visible_ = np.zeros(X.shape[1], )
if not hasattr(self, 'h_samples_'):
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
self._fit(X, self.random_state_)
def _fit(self, v_pos, rng):
"""Inner fit for one mini-batch.
Adjust the parameters to maximize the likelihood of v using
Stochastic Maximum Likelihood (SML).
Parameters
----------
v_pos : array-like, shape (n_samples, n_features)
The data to use for training.
rng : RandomState
Random number generator to use for sampling.
"""
h_pos = self._mean_hiddens(v_pos)
v_neg = self._sample_visibles(self.h_samples_, rng)
h_neg = self._mean_hiddens(v_neg)
lr = float(self.learning_rate) / v_pos.shape[0]
update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T
update -= np.dot(h_neg.T, v_neg)
self.components_ += lr * update
self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
self.intercept_visible_ += lr * (np.asarray(
v_pos.sum(axis=0)).squeeze() -
v_neg.sum(axis=0))
h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0 # sample binomial
self.h_samples_ = np.floor(h_neg, h_neg)
def score_samples(self, X):
"""Compute the pseudo-likelihood of X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Values of the visible layer. Must be all-boolean (not checked).
Returns
-------
pseudo_likelihood : array-like, shape (n_samples,)
Value of the pseudo-likelihood (proxy for likelihood).
Notes
-----
This method is not deterministic: it computes a quantity called the
free energy on X, then on a randomly corrupted version of X, and
returns the log of the logistic function of the difference.
"""
check_is_fitted(self, "components_")
v = check_array(X, accept_sparse='csr')
rng = check_random_state(self.random_state)
# Randomly corrupt one feature in each sample in v.
ind = (np.arange(v.shape[0]),
rng.randint(0, v.shape[1], v.shape[0]))
if issparse(v):
data = -2 * v[ind] + 1
v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape)
else:
v_ = v.copy()
v_[ind] = 1 - v_[ind]
fe = self._free_energy(v)
fe_ = self._free_energy(v_)
return v.shape[1] * log_logistic(fe_ - fe)
def fit(self, X, y=None):
"""Fit the model to the data X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
n_samples = X.shape[0]
rng = check_random_state(self.random_state)
self.components_ = np.asarray(
rng.normal(0, 0.01, (self.n_components, X.shape[1])),
order='fortran')
self.intercept_hidden_ = np.zeros(self.n_components, )
self.intercept_visible_ = np.zeros(X.shape[1], )
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
batch_slices = list(gen_even_slices(n_batches * self.batch_size,
n_batches, n_samples))
verbose = self.verbose
begin = time.time()
for iteration in xrange(1, self.n_iter + 1):
for batch_slice in batch_slices:
self._fit(X[batch_slice], rng)
if verbose:
end = time.time()
print("[%s] Iteration %d, pseudo-likelihood = %.2f,"
" time = %.2fs"
% (type(self).__name__, iteration,
self.score_samples(X).mean(), end - begin))
begin = end
return self
| bsd-3-clause |
ldirer/scikit-learn | sklearn/neural_network/tests/test_mlp.py | 20 | 22194 | """
Testing for Multi-layer Perceptron module (sklearn.neural_network)
"""
# Author: Issam H. Laradji
# License: BSD 3 clause
import sys
import warnings
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_equal
from sklearn.datasets import load_digits, load_boston, load_iris
from sklearn.datasets import make_regression, make_multilabel_classification
from sklearn.exceptions import ConvergenceWarning
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.metrics import roc_auc_score
from sklearn.neural_network import MLPClassifier
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from scipy.sparse import csr_matrix
from sklearn.utils.testing import (assert_raises, assert_greater, assert_equal,
assert_false, ignore_warnings)
from sklearn.utils.testing import assert_raise_message
np.seterr(all='warn')
ACTIVATION_TYPES = ["identity", "logistic", "tanh", "relu"]
digits_dataset_multi = load_digits(n_class=3)
X_digits_multi = MinMaxScaler().fit_transform(digits_dataset_multi.data[:200])
y_digits_multi = digits_dataset_multi.target[:200]
digits_dataset_binary = load_digits(n_class=2)
X_digits_binary = MinMaxScaler().fit_transform(
digits_dataset_binary.data[:200])
y_digits_binary = digits_dataset_binary.target[:200]
classification_datasets = [(X_digits_multi, y_digits_multi),
(X_digits_binary, y_digits_binary)]
boston = load_boston()
Xboston = StandardScaler().fit_transform(boston.data)[: 200]
yboston = boston.target[:200]
iris = load_iris()
X_iris = iris.data
y_iris = iris.target
def test_alpha():
# Test that larger alpha yields weights closer to zero
X = X_digits_binary[:100]
y = y_digits_binary[:100]
alpha_vectors = []
alpha_values = np.arange(2)
absolute_sum = lambda x: np.sum(np.abs(x))
for alpha in alpha_values:
mlp = MLPClassifier(hidden_layer_sizes=10, alpha=alpha, random_state=1)
with ignore_warnings(category=ConvergenceWarning):
mlp.fit(X, y)
alpha_vectors.append(np.array([absolute_sum(mlp.coefs_[0]),
absolute_sum(mlp.coefs_[1])]))
for i in range(len(alpha_values) - 1):
assert (alpha_vectors[i] > alpha_vectors[i + 1]).all()
def test_fit():
# Test that the algorithm solution is equal to a worked out example.
X = np.array([[0.6, 0.8, 0.7]])
y = np.array([0])
mlp = MLPClassifier(solver='sgd', learning_rate_init=0.1, alpha=0.1,
activation='logistic', random_state=1, max_iter=1,
hidden_layer_sizes=2, momentum=0)
# set weights
mlp.coefs_ = [0] * 2
mlp.intercepts_ = [0] * 2
mlp.n_outputs_ = 1
mlp.coefs_[0] = np.array([[0.1, 0.2], [0.3, 0.1], [0.5, 0]])
mlp.coefs_[1] = np.array([[0.1], [0.2]])
mlp.intercepts_[0] = np.array([0.1, 0.1])
mlp.intercepts_[1] = np.array([1.0])
mlp._coef_grads = [] * 2
mlp._intercept_grads = [] * 2
# Initialize parameters
mlp.n_iter_ = 0
mlp.learning_rate_ = 0.1
# Compute the number of layers
mlp.n_layers_ = 3
# Pre-allocate gradient matrices
mlp._coef_grads = [0] * (mlp.n_layers_ - 1)
mlp._intercept_grads = [0] * (mlp.n_layers_ - 1)
mlp.out_activation_ = 'logistic'
mlp.t_ = 0
mlp.best_loss_ = np.inf
mlp.loss_curve_ = []
mlp._no_improvement_count = 0
mlp._intercept_velocity = [np.zeros_like(intercepts) for
intercepts in
mlp.intercepts_]
mlp._coef_velocity = [np.zeros_like(coefs) for coefs in
mlp.coefs_]
mlp.partial_fit(X, y, classes=[0, 1])
# Manually worked out example
# h1 = g(X1 * W_i1 + b11) = g(0.6 * 0.1 + 0.8 * 0.3 + 0.7 * 0.5 + 0.1)
# = 0.679178699175393
# h2 = g(X2 * W_i2 + b12) = g(0.6 * 0.2 + 0.8 * 0.1 + 0.7 * 0 + 0.1)
# = 0.574442516811659
# o1 = g(h * W2 + b21) = g(0.679 * 0.1 + 0.574 * 0.2 + 1)
# = 0.7654329236196236
# d21 = -(0 - 0.765) = 0.765
# d11 = (1 - 0.679) * 0.679 * 0.765 * 0.1 = 0.01667
# d12 = (1 - 0.574) * 0.574 * 0.765 * 0.2 = 0.0374
# W1grad11 = X1 * d11 + alpha * W11 = 0.6 * 0.01667 + 0.1 * 0.1 = 0.0200
    # W1grad12 = X1 * d12 + alpha * W12 = 0.6 * 0.0374 + 0.1 * 0.2 = 0.04244
# W1grad21 = X2 * d11 + alpha * W13 = 0.8 * 0.01667 + 0.1 * 0.3 = 0.043336
# W1grad22 = X2 * d12 + alpha * W14 = 0.8 * 0.0374 + 0.1 * 0.1 = 0.03992
# W1grad31 = X3 * d11 + alpha * W15 = 0.6 * 0.01667 + 0.1 * 0.5 = 0.060002
# W1grad32 = X3 * d12 + alpha * W16 = 0.6 * 0.0374 + 0.1 * 0 = 0.02244
# W2grad1 = h1 * d21 + alpha * W21 = 0.679 * 0.765 + 0.1 * 0.1 = 0.5294
# W2grad2 = h2 * d21 + alpha * W22 = 0.574 * 0.765 + 0.1 * 0.2 = 0.45911
# b1grad1 = d11 = 0.01667
# b1grad2 = d12 = 0.0374
# b2grad = d21 = 0.765
# W1 = W1 - eta * [W1grad11, .., W1grad32] = [[0.1, 0.2], [0.3, 0.1],
# [0.5, 0]] - 0.1 * [[0.0200, 0.04244], [0.043336, 0.03992],
# [0.060002, 0.02244]] = [[0.098, 0.195756], [0.2956664,
# 0.096008], [0.4939998, -0.002244]]
# W2 = W2 - eta * [W2grad1, W2grad2] = [[0.1], [0.2]] - 0.1 *
# [[0.5294], [0.45911]] = [[0.04706], [0.154089]]
# b1 = b1 - eta * [b1grad1, b1grad2] = 0.1 - 0.1 * [0.01667, 0.0374]
# = [0.098333, 0.09626]
# b2 = b2 - eta * b2grad = 1.0 - 0.1 * 0.765 = 0.9235
assert_almost_equal(mlp.coefs_[0], np.array([[0.098, 0.195756],
[0.2956664, 0.096008],
[0.4939998, -0.002244]]),
decimal=3)
assert_almost_equal(mlp.coefs_[1], np.array([[0.04706], [0.154089]]),
decimal=3)
assert_almost_equal(mlp.intercepts_[0],
np.array([0.098333, 0.09626]), decimal=3)
assert_almost_equal(mlp.intercepts_[1], np.array(0.9235), decimal=3)
# Testing output
# h1 = g(X1 * W_i1 + b11) = g(0.6 * 0.098 + 0.8 * 0.2956664 +
# 0.7 * 0.4939998 + 0.098333) = 0.677
# h2 = g(X2 * W_i2 + b12) = g(0.6 * 0.195756 + 0.8 * 0.096008 +
# 0.7 * -0.002244 + 0.09626) = 0.572
# o1 = h * W2 + b21 = 0.677 * 0.04706 +
# 0.572 * 0.154089 + 0.9235 = 1.043
# prob = sigmoid(o1) = 0.739
assert_almost_equal(mlp.predict_proba(X)[0, 1], 0.739, decimal=3)
def test_gradient():
# Test gradient.
# This makes sure that the activation functions and their derivatives
# are correct. The numerical and analytical computation of the gradient
# should be close.
for n_labels in [2, 3]:
n_samples = 5
n_features = 10
X = np.random.random((n_samples, n_features))
y = 1 + np.mod(np.arange(n_samples) + 1, n_labels)
Y = LabelBinarizer().fit_transform(y)
for activation in ACTIVATION_TYPES:
mlp = MLPClassifier(activation=activation, hidden_layer_sizes=10,
solver='lbfgs', alpha=1e-5,
learning_rate_init=0.2, max_iter=1,
random_state=1)
mlp.fit(X, y)
theta = np.hstack([l.ravel() for l in mlp.coefs_ +
mlp.intercepts_])
layer_units = ([X.shape[1]] + [mlp.hidden_layer_sizes] +
[mlp.n_outputs_])
activations = []
deltas = []
coef_grads = []
intercept_grads = []
activations.append(X)
for i in range(mlp.n_layers_ - 1):
activations.append(np.empty((X.shape[0],
layer_units[i + 1])))
deltas.append(np.empty((X.shape[0],
layer_units[i + 1])))
fan_in = layer_units[i]
fan_out = layer_units[i + 1]
coef_grads.append(np.empty((fan_in, fan_out)))
intercept_grads.append(np.empty(fan_out))
# analytically compute the gradients
def loss_grad_fun(t):
return mlp._loss_grad_lbfgs(t, X, Y, activations, deltas,
coef_grads, intercept_grads)
[value, grad] = loss_grad_fun(theta)
numgrad = np.zeros(np.size(theta))
n = np.size(theta, 0)
E = np.eye(n)
epsilon = 1e-5
# numerically compute the gradients
for i in range(n):
dtheta = E[:, i] * epsilon
numgrad[i] = ((loss_grad_fun(theta + dtheta)[0] -
loss_grad_fun(theta - dtheta)[0]) /
(epsilon * 2.0))
assert_almost_equal(numgrad, grad)
def test_lbfgs_classification():
# Test lbfgs on classification.
# It should achieve a score higher than 0.95 for the binary and multi-class
# versions of the digits dataset.
for X, y in classification_datasets:
X_train = X[:150]
y_train = y[:150]
X_test = X[150:]
expected_shape_dtype = (X_test.shape[0], y_train.dtype.kind)
for activation in ACTIVATION_TYPES:
mlp = MLPClassifier(solver='lbfgs', hidden_layer_sizes=50,
max_iter=150, shuffle=True, random_state=1,
activation=activation)
mlp.fit(X_train, y_train)
y_predict = mlp.predict(X_test)
assert_greater(mlp.score(X_train, y_train), 0.95)
assert_equal((y_predict.shape[0], y_predict.dtype.kind),
expected_shape_dtype)
def test_lbfgs_regression():
# Test lbfgs on the boston dataset, a regression problems.
X = Xboston
y = yboston
for activation in ACTIVATION_TYPES:
mlp = MLPRegressor(solver='lbfgs', hidden_layer_sizes=50,
max_iter=150, shuffle=True, random_state=1,
activation=activation)
mlp.fit(X, y)
if activation == 'identity':
assert_greater(mlp.score(X, y), 0.84)
else:
# Non linear models perform much better than linear bottleneck:
assert_greater(mlp.score(X, y), 0.95)
def test_learning_rate_warmstart():
    # Tests that warm_start reuses past solutions.
X = [[3, 2], [1, 6], [5, 6], [-2, -4]]
y = [1, 1, 1, 0]
for learning_rate in ["invscaling", "constant"]:
mlp = MLPClassifier(solver='sgd', hidden_layer_sizes=4,
learning_rate=learning_rate, max_iter=1,
power_t=0.25, warm_start=True)
with ignore_warnings(category=ConvergenceWarning):
mlp.fit(X, y)
prev_eta = mlp._optimizer.learning_rate
mlp.fit(X, y)
post_eta = mlp._optimizer.learning_rate
if learning_rate == 'constant':
assert_equal(prev_eta, post_eta)
elif learning_rate == 'invscaling':
assert_equal(mlp.learning_rate_init / pow(8 + 1, mlp.power_t),
post_eta)
def test_multilabel_classification():
# Test that multi-label classification works as expected.
# test fit method
X, y = make_multilabel_classification(n_samples=50, random_state=0,
return_indicator=True)
mlp = MLPClassifier(solver='lbfgs', hidden_layer_sizes=50, alpha=1e-5,
max_iter=150, random_state=0, activation='logistic',
learning_rate_init=0.2)
mlp.fit(X, y)
assert_equal(mlp.score(X, y), 1)
# test partial fit method
mlp = MLPClassifier(solver='sgd', hidden_layer_sizes=50, max_iter=150,
random_state=0, activation='logistic', alpha=1e-5,
learning_rate_init=0.2)
for i in range(100):
mlp.partial_fit(X, y, classes=[0, 1, 2, 3, 4])
assert_greater(mlp.score(X, y), 0.9)
def test_multioutput_regression():
# Test that multi-output regression works as expected
X, y = make_regression(n_samples=200, n_targets=5)
mlp = MLPRegressor(solver='lbfgs', hidden_layer_sizes=50, max_iter=200,
random_state=1)
mlp.fit(X, y)
assert_greater(mlp.score(X, y), 0.9)
def test_partial_fit_classes_error():
# Tests that passing different classes to partial_fit raises an error
X = [[3, 2]]
y = [0]
clf = MLPClassifier(solver='sgd')
clf.partial_fit(X, y, classes=[0, 1])
assert_raises(ValueError, clf.partial_fit, X, y, classes=[1, 2])
def test_partial_fit_classification():
# Test partial_fit on classification.
# `partial_fit` should yield the same results as 'fit' for binary and
# multi-class classification.
for X, y in classification_datasets:
X = X
y = y
mlp = MLPClassifier(solver='sgd', max_iter=100, random_state=1,
tol=0, alpha=1e-5, learning_rate_init=0.2)
with ignore_warnings(category=ConvergenceWarning):
mlp.fit(X, y)
pred1 = mlp.predict(X)
mlp = MLPClassifier(solver='sgd', random_state=1, alpha=1e-5,
learning_rate_init=0.2)
for i in range(100):
mlp.partial_fit(X, y, classes=np.unique(y))
pred2 = mlp.predict(X)
assert_array_equal(pred1, pred2)
assert_greater(mlp.score(X, y), 0.95)
def test_partial_fit_unseen_classes():
# Non regression test for bug 6994
# Tests for labeling errors in partial fit
clf = MLPClassifier(random_state=0)
clf.partial_fit([[1], [2], [3]], ["a", "b", "c"],
classes=["a", "b", "c", "d"])
clf.partial_fit([[4]], ["d"])
assert_greater(clf.score([[1], [2], [3], [4]], ["a", "b", "c", "d"]), 0)
def test_partial_fit_regression():
# Test partial_fit on regression.
# `partial_fit` should yield the same results as 'fit' for regression.
X = Xboston
y = yboston
for momentum in [0, .9]:
mlp = MLPRegressor(solver='sgd', max_iter=100, activation='relu',
random_state=1, learning_rate_init=0.01,
batch_size=X.shape[0], momentum=momentum)
with warnings.catch_warnings(record=True):
# catch convergence warning
mlp.fit(X, y)
pred1 = mlp.predict(X)
mlp = MLPRegressor(solver='sgd', activation='relu',
learning_rate_init=0.01, random_state=1,
batch_size=X.shape[0], momentum=momentum)
for i in range(100):
mlp.partial_fit(X, y)
pred2 = mlp.predict(X)
assert_almost_equal(pred1, pred2, decimal=2)
score = mlp.score(X, y)
assert_greater(score, 0.75)
def test_partial_fit_errors():
# Test partial_fit error handling.
X = [[3, 2], [1, 6]]
y = [1, 0]
# no classes passed
assert_raises(ValueError,
MLPClassifier(solver='sgd').partial_fit, X, y, classes=[2])
# lbfgs doesn't support partial_fit
assert_false(hasattr(MLPClassifier(solver='lbfgs'), 'partial_fit'))
def test_params_errors():
# Test that invalid parameters raise value error
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier
assert_raises(ValueError, clf(hidden_layer_sizes=-1).fit, X, y)
assert_raises(ValueError, clf(max_iter=-1).fit, X, y)
assert_raises(ValueError, clf(shuffle='true').fit, X, y)
assert_raises(ValueError, clf(alpha=-1).fit, X, y)
assert_raises(ValueError, clf(learning_rate_init=-1).fit, X, y)
assert_raises(ValueError, clf(momentum=2).fit, X, y)
assert_raises(ValueError, clf(momentum=-0.5).fit, X, y)
assert_raises(ValueError, clf(nesterovs_momentum='invalid').fit, X, y)
assert_raises(ValueError, clf(early_stopping='invalid').fit, X, y)
assert_raises(ValueError, clf(validation_fraction=1).fit, X, y)
assert_raises(ValueError, clf(validation_fraction=-0.5).fit, X, y)
assert_raises(ValueError, clf(beta_1=1).fit, X, y)
assert_raises(ValueError, clf(beta_1=-0.5).fit, X, y)
assert_raises(ValueError, clf(beta_2=1).fit, X, y)
assert_raises(ValueError, clf(beta_2=-0.5).fit, X, y)
assert_raises(ValueError, clf(epsilon=-0.5).fit, X, y)
assert_raises(ValueError, clf(solver='hadoken').fit, X, y)
assert_raises(ValueError, clf(learning_rate='converge').fit, X, y)
assert_raises(ValueError, clf(activation='cloak').fit, X, y)
def test_predict_proba_binary():
# Test that predict_proba works as expected for binary class.
X = X_digits_binary[:50]
y = y_digits_binary[:50]
clf = MLPClassifier(hidden_layer_sizes=5)
with ignore_warnings(category=ConvergenceWarning):
clf.fit(X, y)
y_proba = clf.predict_proba(X)
y_log_proba = clf.predict_log_proba(X)
(n_samples, n_classes) = y.shape[0], 2
proba_max = y_proba.argmax(axis=1)
proba_log_max = y_log_proba.argmax(axis=1)
assert_equal(y_proba.shape, (n_samples, n_classes))
assert_array_equal(proba_max, proba_log_max)
assert_array_equal(y_log_proba, np.log(y_proba))
assert_equal(roc_auc_score(y, y_proba[:, 1]), 1.0)
def test_predict_proba_multiclass():
# Test that predict_proba works as expected for multi class.
X = X_digits_multi[:10]
y = y_digits_multi[:10]
clf = MLPClassifier(hidden_layer_sizes=5)
with ignore_warnings(category=ConvergenceWarning):
clf.fit(X, y)
y_proba = clf.predict_proba(X)
y_log_proba = clf.predict_log_proba(X)
(n_samples, n_classes) = y.shape[0], np.unique(y).size
proba_max = y_proba.argmax(axis=1)
proba_log_max = y_log_proba.argmax(axis=1)
assert_equal(y_proba.shape, (n_samples, n_classes))
assert_array_equal(proba_max, proba_log_max)
assert_array_equal(y_log_proba, np.log(y_proba))
def test_predict_proba_multilabel():
# Test that predict_proba works as expected for multilabel.
# Multilabel should not use softmax which makes probabilities sum to 1
X, Y = make_multilabel_classification(n_samples=50, random_state=0,
return_indicator=True)
n_samples, n_classes = Y.shape
clf = MLPClassifier(solver='lbfgs', hidden_layer_sizes=30,
random_state=0)
clf.fit(X, Y)
y_proba = clf.predict_proba(X)
assert_equal(y_proba.shape, (n_samples, n_classes))
assert_array_equal(y_proba > 0.5, Y)
y_log_proba = clf.predict_log_proba(X)
proba_max = y_proba.argmax(axis=1)
proba_log_max = y_log_proba.argmax(axis=1)
assert_greater((y_proba.sum(1) - 1).dot(y_proba.sum(1) - 1), 1e-10)
assert_array_equal(proba_max, proba_log_max)
assert_array_equal(y_log_proba, np.log(y_proba))
def test_sparse_matrices():
# Test that sparse and dense input matrices output the same results.
X = X_digits_binary[:50]
y = y_digits_binary[:50]
X_sparse = csr_matrix(X)
mlp = MLPClassifier(solver='lbfgs', hidden_layer_sizes=15,
random_state=1)
mlp.fit(X, y)
pred1 = mlp.predict(X)
mlp.fit(X_sparse, y)
pred2 = mlp.predict(X_sparse)
assert_almost_equal(pred1, pred2)
pred1 = mlp.predict(X)
pred2 = mlp.predict(X_sparse)
assert_array_equal(pred1, pred2)
def test_tolerance():
# Test tolerance.
# It should force the solver to exit the loop when it converges.
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier(tol=0.5, max_iter=3000, solver='sgd')
clf.fit(X, y)
assert_greater(clf.max_iter, clf.n_iter_)
def test_verbose_sgd():
# Test verbose.
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier(solver='sgd', max_iter=2, verbose=10,
hidden_layer_sizes=2)
old_stdout = sys.stdout
sys.stdout = output = StringIO()
with ignore_warnings(category=ConvergenceWarning):
clf.fit(X, y)
clf.partial_fit(X, y)
sys.stdout = old_stdout
assert 'Iteration' in output.getvalue()
def test_early_stopping():
X = X_digits_binary[:100]
y = y_digits_binary[:100]
tol = 0.2
clf = MLPClassifier(tol=tol, max_iter=3000, solver='sgd',
early_stopping=True)
clf.fit(X, y)
assert_greater(clf.max_iter, clf.n_iter_)
valid_scores = clf.validation_scores_
best_valid_score = clf.best_validation_score_
assert_equal(max(valid_scores), best_valid_score)
assert_greater(best_valid_score + tol, valid_scores[-2])
assert_greater(best_valid_score + tol, valid_scores[-1])
def test_adaptive_learning_rate():
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier(tol=0.5, max_iter=3000, solver='sgd',
learning_rate='adaptive')
clf.fit(X, y)
assert_greater(clf.max_iter, clf.n_iter_)
assert_greater(1e-6, clf._optimizer.learning_rate)
@ignore_warnings(category=RuntimeWarning)
def test_warm_start():
X = X_iris
y = y_iris
y_2classes = np.array([0] * 75 + [1] * 75)
y_3classes = np.array([0] * 40 + [1] * 40 + [2] * 70)
y_3classes_alt = np.array([0] * 50 + [1] * 50 + [3] * 50)
y_4classes = np.array([0] * 37 + [1] * 37 + [2] * 38 + [3] * 38)
y_5classes = np.array([0] * 30 + [1] * 30 + [2] * 30 + [3] * 30 + [4] * 30)
# No error raised
clf = MLPClassifier(hidden_layer_sizes=2, solver='lbfgs',
warm_start=True).fit(X, y)
clf.fit(X, y)
clf.fit(X, y_3classes)
for y_i in (y_2classes, y_3classes_alt, y_4classes, y_5classes):
clf = MLPClassifier(hidden_layer_sizes=2, solver='lbfgs',
warm_start=True).fit(X, y)
message = ('warm_start can only be used where `y` has the same '
'classes as in the previous call to fit.'
' Previously got [0 1 2], `y` has %s' % np.unique(y_i))
assert_raise_message(ValueError, message, clf.fit, X, y_i)
| bsd-3-clause |
keras-team/keras-io | examples/vision/pointnet.py | 1 | 8587 | """
Title: Point cloud classification with PointNet
Author: [David Griffiths](https://dgriffiths3.github.io)
Date created: 2020/05/25
Last modified: 2020/05/26
Description: Implementation of PointNet for ModelNet10 classification.
"""
"""
# Point cloud classification
"""
"""
## Introduction
Classification, detection and segmentation of unordered 3D point sets, i.e. point clouds,
is a core problem in computer vision. This example implements the seminal point cloud
deep learning paper [PointNet (Qi et al., 2017)](https://arxiv.org/abs/1612.00593). For a
detailed introduction to PointNet see [this blog
post](https://medium.com/@luis_gonzales/an-in-depth-look-at-pointnet-111d7efdaa1a).
"""
"""
## Setup
If using colab first install trimesh with `!pip install trimesh`.
"""
import os
import glob
import trimesh
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from matplotlib import pyplot as plt
tf.random.set_seed(1234)
"""
## Load dataset
We use the ModelNet10 model dataset, the smaller 10 class version of the ModelNet40
dataset. First download the data:
"""
DATA_DIR = tf.keras.utils.get_file(
"modelnet.zip",
"http://3dvision.princeton.edu/projects/2014/3DShapeNets/ModelNet10.zip",
extract=True,
)
DATA_DIR = os.path.join(os.path.dirname(DATA_DIR), "ModelNet10")
"""
We can use the `trimesh` package to read and visualize the `.off` mesh files.
"""
mesh = trimesh.load(os.path.join(DATA_DIR, "chair/train/chair_0001.off"))
mesh.show()
"""
To convert a mesh file to a point cloud we first need to sample points on the mesh
surface. `.sample()` performs a uniform random sampling. Here we sample at 2048 locations
and visualize in `matplotlib`.
"""
points = mesh.sample(2048)
fig = plt.figure(figsize=(5, 5))
ax = fig.add_subplot(111, projection="3d")
ax.scatter(points[:, 0], points[:, 1], points[:, 2])
ax.set_axis_off()
plt.show()
"""
To generate a `tf.data.Dataset()` we need to first parse through the ModelNet data
folders. Each mesh is loaded and sampled into a point cloud before being added to a
standard python list and converted to a `numpy` array. We also store the current
enumerate index value as the object label and use a dictionary to recall this later.
"""
def parse_dataset(num_points=2048):
train_points = []
train_labels = []
test_points = []
test_labels = []
class_map = {}
folders = glob.glob(os.path.join(DATA_DIR, "[!README]*"))
for i, folder in enumerate(folders):
print("processing class: {}".format(os.path.basename(folder)))
# store folder name with ID so we can retrieve later
class_map[i] = folder.split("/")[-1]
# gather all files
train_files = glob.glob(os.path.join(folder, "train/*"))
test_files = glob.glob(os.path.join(folder, "test/*"))
for f in train_files:
train_points.append(trimesh.load(f).sample(num_points))
train_labels.append(i)
for f in test_files:
test_points.append(trimesh.load(f).sample(num_points))
test_labels.append(i)
return (
np.array(train_points),
np.array(test_points),
np.array(train_labels),
np.array(test_labels),
class_map,
)
"""
Set the number of points to sample and the batch size, then parse the dataset. This can take
~5 minutes to complete.
"""
NUM_POINTS = 2048
NUM_CLASSES = 10
BATCH_SIZE = 32
train_points, test_points, train_labels, test_labels, CLASS_MAP = parse_dataset(
NUM_POINTS
)
"""
Our data can now be read into a `tf.data.Dataset()` object. We set the shuffle buffer
size to the entire size of the dataset as prior to this the data is ordered by class.
Data augmentation is important when working with point cloud data. We create an
augmentation function to jitter and shuffle the train dataset.
"""
def augment(points, label):
# jitter points
points += tf.random.uniform(points.shape, -0.005, 0.005, dtype=tf.float64)
# shuffle points
points = tf.random.shuffle(points)
return points, label
train_dataset = tf.data.Dataset.from_tensor_slices((train_points, train_labels))
test_dataset = tf.data.Dataset.from_tensor_slices((test_points, test_labels))
train_dataset = train_dataset.shuffle(len(train_points)).map(augment).batch(BATCH_SIZE)
test_dataset = test_dataset.shuffle(len(test_points)).batch(BATCH_SIZE)
"""
### Build a model
Each convolution and fully-connected layer (with the exception of the end layers) consists of
Convolution / Dense -> Batch Normalization -> ReLU Activation.
"""
def conv_bn(x, filters):
x = layers.Conv1D(filters, kernel_size=1, padding="valid")(x)
x = layers.BatchNormalization(momentum=0.0)(x)
return layers.Activation("relu")(x)
def dense_bn(x, filters):
x = layers.Dense(filters)(x)
x = layers.BatchNormalization(momentum=0.0)(x)
return layers.Activation("relu")(x)
"""
PointNet consists of two core components: the primary MLP network and the transformer
net (T-net). The T-net aims to learn an affine transformation matrix by its own mini
network. The T-net is used twice. The first time to transform the input features (n, 3)
into a canonical representation. The second is an affine transformation for alignment in
feature space (n, 32). As per the original paper we constrain the transformation to be
close to an orthogonal matrix (i.e. ||X*X^T - I|| = 0).
"""
class OrthogonalRegularizer(keras.regularizers.Regularizer):
def __init__(self, num_features, l2reg=0.001):
self.num_features = num_features
self.l2reg = l2reg
self.eye = tf.eye(num_features)
def __call__(self, x):
x = tf.reshape(x, (-1, self.num_features, self.num_features))
xxt = tf.tensordot(x, x, axes=(2, 2))
xxt = tf.reshape(xxt, (-1, self.num_features, self.num_features))
return tf.reduce_sum(self.l2reg * tf.square(xxt - self.eye))
"""
We can then define a general function to build T-net layers.
"""
def tnet(inputs, num_features):
    # Initialise the bias as the identity matrix
bias = keras.initializers.Constant(np.eye(num_features).flatten())
reg = OrthogonalRegularizer(num_features)
x = conv_bn(inputs, 32)
x = conv_bn(x, 64)
x = conv_bn(x, 512)
x = layers.GlobalMaxPooling1D()(x)
x = dense_bn(x, 256)
x = dense_bn(x, 128)
x = layers.Dense(
num_features * num_features,
kernel_initializer="zeros",
bias_initializer=bias,
activity_regularizer=reg,
)(x)
feat_T = layers.Reshape((num_features, num_features))(x)
# Apply affine transformation to input features
return layers.Dot(axes=(2, 1))([inputs, feat_T])
"""
The main network can then be implemented in the same manner, where the T-net mini models
can be dropped in as layers in the graph. Here we replicate the network architecture
published in the original paper but with half the number of weights at each layer as we
are using the smaller 10 class ModelNet dataset.
"""
inputs = keras.Input(shape=(NUM_POINTS, 3))
x = tnet(inputs, 3)
x = conv_bn(x, 32)
x = conv_bn(x, 32)
x = tnet(x, 32)
x = conv_bn(x, 32)
x = conv_bn(x, 64)
x = conv_bn(x, 512)
x = layers.GlobalMaxPooling1D()(x)
x = dense_bn(x, 256)
x = layers.Dropout(0.3)(x)
x = dense_bn(x, 128)
x = layers.Dropout(0.3)(x)
outputs = layers.Dense(NUM_CLASSES, activation="softmax")(x)
model = keras.Model(inputs=inputs, outputs=outputs, name="pointnet")
model.summary()
"""
### Train model
Once the model is defined it can be trained like any other standard classification model
using `.compile()` and `.fit()`.
"""
model.compile(
loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.Adam(learning_rate=0.001),
metrics=["sparse_categorical_accuracy"],
)
model.fit(train_dataset, epochs=20, validation_data=test_dataset)
"""
## Visualize predictions
We can use matplotlib to visualize our trained model performance.
"""
data = test_dataset.take(1)
points, labels = list(data)[0]
points = points[:8, ...]
labels = labels[:8, ...]
# run test data through model
preds = model.predict(points)
preds = tf.math.argmax(preds, -1)
points = points.numpy()
# plot points with predicted class and label
fig = plt.figure(figsize=(15, 10))
for i in range(8):
ax = fig.add_subplot(2, 4, i + 1, projection="3d")
ax.scatter(points[i, :, 0], points[i, :, 1], points[i, :, 2])
ax.set_title(
"pred: {:}, label: {:}".format(
CLASS_MAP[preds[i].numpy()], CLASS_MAP[labels.numpy()[i]]
)
)
ax.set_axis_off()
plt.show()
| apache-2.0 |
liangz0707/scikit-learn | sklearn/utils/tests/test_fixes.py | 281 | 1829 | # Authors: Gael Varoquaux <[email protected]>
# Justin Vincent
# Lars Buitinck
# License: BSD 3 clause
import numpy as np
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_true
from numpy.testing import (assert_almost_equal,
assert_array_almost_equal)
from sklearn.utils.fixes import divide, expit
from sklearn.utils.fixes import astype
def test_expit():
# Check numerical stability of expit (logistic function).
# Simulate our previous Cython implementation, based on
#http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression
assert_almost_equal(expit(1000.), 1. / (1. + np.exp(-1000.)), decimal=16)
assert_almost_equal(expit(-1000.), np.exp(-1000.) / (1. + np.exp(-1000.)),
decimal=16)
x = np.arange(10)
out = np.zeros_like(x, dtype=np.float32)
assert_array_almost_equal(expit(x), expit(x, out=out))
def test_divide():
assert_equal(divide(.6, 1), .600000000000)
def test_astype_copy_memory():
a_int32 = np.ones(3, np.int32)
# Check that dtype conversion works
b_float32 = astype(a_int32, dtype=np.float32, copy=False)
assert_equal(b_float32.dtype, np.float32)
# Changing dtype forces a copy even if copy=False
assert_false(np.may_share_memory(b_float32, a_int32))
# Check that copy can be skipped if requested dtype match
c_int32 = astype(a_int32, dtype=np.int32, copy=False)
assert_true(c_int32 is a_int32)
# Check that copy can be forced, and is the case by default:
d_int32 = astype(a_int32, dtype=np.int32, copy=True)
assert_false(np.may_share_memory(d_int32, a_int32))
e_int32 = astype(a_int32, dtype=np.int32)
assert_false(np.may_share_memory(e_int32, a_int32))
| bsd-3-clause |
tpoisot/qmod | qmod.py | 1 | 3613 | #!/usr/bin/python2
import networkx as nx
import scipy as sp
import numpy as np
import random
import matplotlib.pyplot as plt
import sys
import json
## Data
def prepareData(fname):
mat = np.abs(np.loadtxt(fname))
G = nx.from_numpy_matrix(mat, create_using = nx.DiGraph())
return G
## Delta function
def delta(a, b):
if a == b:
return 1
return 0
## Weighted modularity
def Qq(G):
Qq = 0.0
sum_of_links = np.sum([e[2]['weight'] for e in G.edges(data=True)])
marginals = []
for l in G.edges(data=True):
n1 = G.node[l[0]]
n2 = G.node[l[1]]
if delta(n1['label'], n2['label']) == 0:
marginals.append(0)
else :
w_l = l[2]['weight']
suc = G.successors(l[0])
pre = G.predecessors(l[1])
w_i = np.sum([G[l[0]][s]['weight'] for s in suc])
w_j = np.sum([G[p][l[1]]['weight'] for p in pre])
            marginals.append(w_l/float(sum_of_links) - (w_i*w_j)/float(sum_of_links**2.0))
return np.sum(marginals)
## weighted pick function
def wsample(d):
# need a dict {'value': weight}
r = random.uniform(0, sum(d.itervalues()))
s = 0.0
for k, w in d.iteritems():
s+= w
if r < s : return k
return k
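## Illustrative usage (not part of the original script): wsample expects a dict
## mapping candidate values to weights and draws a key with probability
## proportional to its weight, e.g.
# draws = [wsample({'a': 1.0, 'b': 3.0}) for _ in xrange(10000)]
# print draws.count('b') / float(draws.count('a')) # roughly 3.0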
## label propagation function
def qlp(G,steps=100):
optim = {}
# We first initialize the labels
labid = {}
for n in G:
labid[n] = n
nx.set_node_attributes(G, 'label', labid)
    # We store the initial modularity value
optim["0"]={'Q':Qq(G),'labels':{str(n):G.node[n]['label'] for n in G}}
# Now we can start a number of iterations
for i in xrange(steps):
# The nodes propagate their labels in a random order
updateorder = range(G.number_of_nodes())
random.shuffle(updateorder)
for updated in updateorder:
# First we pick the outgoing edges
out_edges = G.edges([updated], data=True)
if len(out_edges) > 0:
pick_prob = {}
for out_e in out_edges:
pick_prob[out_e[1]] = out_e[2]['weight']
# We pick at random according to the weight
receiver = wsample(pick_prob)
G.node[receiver]['label'] = G.node[updated]['label']
optim[str(i)]={'Q':Qq(G),'labels':{str(n):G.node[n]['label'] for n in G}}
return optim
def pickBestPartition(run):
# Returns the best partition (i.e. higher modularity score)
best_partition = {}
best_q = 0.0
for k in run.keys():
        if run[k]['Q'] > best_q:
            best_q = run[k]['Q']
            best_partition = run[k]
return best_partition
def speciesImpactByRemoval(G, steps):
Impact = {}
for n in G.nodes():
tG = G.copy()
tG.remove_node(n)
Out = qlp(tG, steps)
Impact[str(n)] = pickBestPartition(Out)
return Impact
def analyzeFile(fname, steps):
G = prepareData(fname)
mod = qlp(G, steps)
## Analyse modularity
out = open(fname+'.json', 'w')
    out.write(json.dumps(mod, sort_keys=True))
out.close()
best_partition = pickBestPartition(mod)
out = open(fname+'.best.json', 'w')
    out.write(json.dumps(best_partition, sort_keys=True))
out.close()
## Test species impact by removal
sp_imp_rem = speciesImpactByRemoval(G, steps)
out = open(fname+'.rem.json', 'w')
    out.write(json.dumps(sp_imp_rem, sort_keys=True))
out.close()
return 0
if __name__ == "__main__":
    #TODO Need to add a help message in case the args are not O.K.
# Read arguments
prefix = str(sys.argv[1])
steps = int(sys.argv[2])
# Read binary file
analyzeFile(prefix+'.bnr', steps)
# Read quantitative prefix
analyzeFile(prefix+'.qnt', steps)
| gpl-2.0 |
sh4wn/vispy | vispy/mpl_plot/_mpl_to_vispy.py | 17 | 6911 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
import numpy as np
import base64
import warnings
try:
import matplotlib.pyplot as plt
from ..ext.mplexporter import Exporter, Renderer
except ImportError as exp:
Exporter = None
Renderer = object
has_mplexporter = False
why_not = str(exp)
else:
has_mplexporter = True
why_not = None
from ..ext.six import BytesIO
from ..color import Color
from ..io import read_png
from ..scene.visuals import Line, Markers, Text, Image
from ..scene.widgets import ViewBox
from ..visuals.transforms import STTransform
from ..scene import SceneCanvas, PanZoomCamera
from ..testing import has_matplotlib
def _check_coords(coords, valid):
if coords not in valid:
raise RuntimeError('Coords must be %s, not %s' % (valid, coords))
class VispyRenderer(Renderer):
def __init__(self, *args, **kwargs):
self._line_count = 0
self._axs = {}
Renderer.__init__(self, *args, **kwargs)
def open_figure(self, fig, props):
self._dpi = props['dpi']
size = (props['figwidth'] * self._dpi,
props['figheight'] * self._dpi)
self.canvas = SceneCanvas(size=size, show=True, keys='interactive',
bgcolor='lightgray')
@self.canvas.events.resize.connect
def on_resize(event):
self._resize(*event.size)
self.canvas.events.resize.connect(on_resize)
def close_figure(self, fig):
# self.canvas.close()
pass # don't do this, it closes when done rendering
def open_axes(self, ax, props):
bounds = np.array(props['bounds'])
bounds[1] = 1. - bounds[1] - bounds[3]
xlim = props['xlim']
ylim = props['ylim']
# for a in props['axes']:
# a['position'] # add borders
vb = ViewBox(parent=self.canvas.scene, border_color='black',
bgcolor=props['axesbg'])
vb.camera = PanZoomCamera()
vb.camera.set_range(xlim, ylim, margin=0)
ax_dict = dict(ax=ax, bounds=bounds, vb=vb, lims=xlim+ylim)
self._axs[ax] = ax_dict
self._resize(*self.canvas.size)
def _resize(self, w, h):
for ax in self._axs.values():
ax['vb'].pos = (w * ax['bounds'][0], h * ax['bounds'][1])
ax['vb'].size = (w * ax['bounds'][2], h * ax['bounds'][3])
def close_axes(self, ax):
# self._axs.pop(ax)['vb'].parent = []
pass # don't do anything, or all plots get closed (!)
def open_legend(self, legend, props):
raise NotImplementedError('Legends not supported yet')
def close_legend(self, legend):
pass
def draw_image(self, imdata, extent, coordinates, style, mplobj=None):
_check_coords(coordinates, 'data')
imdata = read_png(BytesIO(base64.b64decode(imdata.encode('utf-8'))))
assert imdata.ndim == 3 and imdata.shape[2] == 4
imdata[:, :, 3] = (imdata[:, :, 3] *
(style['alpha'] if style['alpha'] is not None
else 1.)).astype(np.uint8)
img = Image(imdata)
vb = self._mpl_ax_to(mplobj)
img.transform = STTransform.from_mapping([[0, 0], img.size],
[[extent[0], extent[3]],
[extent[1], extent[2]]])
img.parent = vb.scene
def draw_text(self, text, position, coordinates, style,
text_type=None, mplobj=None):
_check_coords(coordinates, 'data')
color = Color(style['color'])
color.alpha = style['alpha']
color = color.rgba
text = Text(text, color=color, pos=position,
font_size=style['fontsize'], rotation=style['rotation'],
anchor_x=style['halign'], anchor_y=style['valign'])
text.parent = self._mpl_ax_to(mplobj).scene
def draw_markers(self, data, coordinates, style, label, mplobj=None):
_check_coords(coordinates, 'data')
edge_color = Color(style['edgecolor'])
edge_color.alpha = style['alpha']
face_color = Color(style['facecolor'])
face_color.alpha = style['alpha']
markers = Markers()
markers.set_data(data, face_color=face_color, edge_color=edge_color,
size=style['markersize'], symbol=style['marker'])
markers.parent = self._mpl_ax_to(mplobj).scene
def draw_path(self, data, coordinates, pathcodes, style,
offset=None, offset_coordinates="data", mplobj=None):
_check_coords(coordinates, 'data')
if offset is not None:
raise NotImplementedError('cannot handle offset')
_check_coords(offset_coordinates, 'data')
# TODO --, :, etc.
color = Color(style['edgecolor'])
color.alpha = style['alpha']
line = Line(data, color=color, width=style['edgewidth'],
method='gl') # XXX Looks bad with agg :(
line.parent = self._mpl_ax_to(mplobj).scene
def _mpl_ax_to(self, mplobj, output='vb'):
"""Helper to get the parent axes of a given mplobj"""
for ax in self._axs.values():
if ax['ax'] is mplobj.axes:
return ax[output]
raise RuntimeError('Parent axes could not be found!')
def _vispy_done(self):
"""Things to do once all objects have been collected"""
self._resize(*self.canvas.size)
# def draw_path_collection(...) TODO add this for efficiency
# https://github.com/mpld3/mplexporter/blob/master/
# mplexporter/renderers/base.py
def _mpl_to_vispy(fig):
"""Convert a given matplotlib figure to vispy
This function is experimental and subject to change!
Requires matplotlib and mplexporter.
Parameters
----------
fig : instance of matplotlib Figure
The populated figure to display.
Returns
-------
canvas : instance of Canvas
The resulting vispy Canvas.
"""
renderer = VispyRenderer()
exporter = Exporter(renderer)
with warnings.catch_warnings(record=True): # py3k mpl warning
exporter.run(fig)
renderer._vispy_done()
return renderer.canvas
def show(block=False):
"""Show current figures using vispy
Parameters
----------
block : bool
If True, blocking mode will be used. If False, then non-blocking
/ interactive mode will be used.
Returns
-------
canvases : list
List of the vispy canvases that were created.
"""
if not has_matplotlib():
raise ImportError('Requires matplotlib version >= 1.2')
cs = [_mpl_to_vispy(plt.figure(ii)) for ii in plt.get_fignums()]
if block and len(cs) > 0:
cs[0].app.run()
return cs
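# Minimal usage sketch (not part of the upstream module): assuming matplotlib and
# mplexporter are installed, a pyplot figure can be handed over to vispy like this.
if __name__ == '__main__':
    plt.figure()
    plt.plot([0, 1, 2], [0, 1, 0], 'o-')
    show(block=True)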
| bsd-3-clause |
pranavtbhat/EE219 | project4/part2.py | 1 | 1053 | from sklearn import metrics
from sklearn.cluster import KMeans
import part1
def print_confusion_matrix(actual, predicted):
print "Confusion Matrix is ", metrics.confusion_matrix(actual, predicted)
def print_scores(actual_labels, predicted_labels):
print("Homogeneity: %0.3f" % metrics.homogeneity_score(actual_labels, predicted_labels))
print("Completeness: %0.3f" % metrics.completeness_score(actual_labels, predicted_labels))
print("Adjusted Rand-Index: %.3f" % metrics.adjusted_rand_score(actual_labels, predicted_labels))
print("Adjusted Mutual info score: %.3f" % metrics.adjusted_mutual_info_score(actual_labels, predicted_labels))
if __name__ == "__main__":
categories = part1.fetch_categories()
data = part1.fetch_all(categories)
data_idf = part1.get_data_idf()
labels = data.target//4 #Since we want to cluster to 2 classes, and the input has 8 classes (0-7)
kmeans = KMeans(n_clusters=2).fit(data_idf)
print_confusion_matrix(labels, kmeans.labels_)
print_scores(labels, kmeans.labels_)
| unlicense |
RobertABT/heightmap | build/matplotlib/examples/event_handling/legend_picking.py | 8 | 1292 | """
Enable picking on the legend to toggle the legended line on and off
"""
import numpy as np
import matplotlib.pyplot as plt
t = np.arange(0.0, 0.2, 0.1)
y1 = 2*np.sin(2*np.pi*t)
y2 = 4*np.sin(2*np.pi*2*t)
fig, ax = plt.subplots()
ax.set_title('Click on legend line to toggle line on/off')
line1, = ax.plot(t, y1, lw=2, color='red', label='1 HZ')
line2, = ax.plot(t, y2, lw=2, color='blue', label='2 HZ')
leg = ax.legend(loc='upper left', fancybox=True, shadow=True)
leg.get_frame().set_alpha(0.4)
# we will set up a dict mapping legend line to orig line, and enable
# picking on the legend line
lines = [line1, line2]
lined = dict()
for legline, origline in zip(leg.get_lines(), lines):
legline.set_picker(5) # 5 pts tolerance
lined[legline] = origline
def onpick(event):
# on the pick event, find the orig line corresponding to the
# legend proxy line, and toggle the visibility
legline = event.artist
origline = lined[legline]
vis = not origline.get_visible()
origline.set_visible(vis)
# Change the alpha on the line in the legend so we can see what lines
# have been toggled
if vis:
legline.set_alpha(1.0)
else:
legline.set_alpha(0.2)
fig.canvas.draw()
fig.canvas.mpl_connect('pick_event', onpick)
plt.show()
| mit |
msrconsulting/atm-py | atmPy/atmos/vertical_profile.py | 6 | 1500 | import pandas as pd
import pylab as plt
from atmPy.atmos import timeseries
from atmPy.tools import plt_tools
class VerticalProfile(object):
def __init__(self, data):
self.data = data
def plot(self, ax=False, color=False):
if not ax:
f, a = plt.subplots()
else:
a = ax
# f = a.get_figure()
if type(color) == bool:
if not color:
color = plt_tools.color_cycle[0]
a.plot(self.data.values, self.data.index, color=color, linewidth=2)
# print(plt_tools.color_cycle[0])
a.set_ylabel('Altitude (m)')
a.set_ylim((self.data.index.min(), self.data.index.max()))
return a
def save(self, fname):
self.data.to_csv(fname)
def convert2timeseries(self, ts):
"""merges a vertical profile with a timeseries that contains height data
and returns the a time series where the data of the vertical profile is interpolated
along the time of the timeseries.
Arguments
---------
ts: timeseries"""
hk_tmp = ts.convert2verticalprofile()
data = hk_tmp.data[['TimeUTC']]
cat_sort_int = pd.concat([data, self.data]).sort_index().interpolate()
cat_sort_int = cat_sort_int.dropna()
cat_sort_int.index = cat_sort_int.TimeUTC
cat_sort_int = cat_sort_int.drop('TimeUTC', axis=1)
return timeseries.TimeSeries(cat_sort_int)
| mit |
255BITS/hyperchamber | examples/shared/cifar_utils.py | 1 | 3459 | import numpy as np
import tensorflow as tf
import tarfile
import os
import sys
from six.moves import xrange
from six.moves import urllib
import matplotlib
import matplotlib.pyplot as plt
from scipy.misc import imsave
from tensorflow.models.image.cifar10 import cifar10_input
def inputs(eval_data, data_dir, batch_size):
"""Construct input for CIFAR evaluation using the Reader ops.
Args:
eval_data: bool, indicating if one should use the train or eval data set.
data_dir: Path to the CIFAR-10 data directory.
batch_size: Number of images per batch.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
"""
if not eval_data:
filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i)
for i in xrange(1, 6)]
num_examples_per_epoch = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
else:
filenames = [os.path.join(data_dir, 'test_batch.bin')]
num_examples_per_epoch = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
for f in filenames:
if not tf.gfile.Exists(f):
raise ValueError('Failed to find file: ' + f)
# Create a queue that produces the filenames to read.
filename_queue = tf.train.string_input_producer(filenames)
# Read examples from files in the filename queue.
read_input = cifar10_input.read_cifar10(filename_queue)
reshaped_image = tf.cast(read_input.uint8image, tf.float32)
IMAGE_SIZE=32
height = IMAGE_SIZE
width = IMAGE_SIZE
# Image processing for evaluation.
# Crop the central [height, width] of the image.
resized_image = tf.image.resize_image_with_crop_or_pad(reshaped_image,
width, height)
# Subtract off the mean and divide by the variance of the pixels.
#float_image = tf.image.per_image_whitening(resized_image)
float_image = resized_image / 127.5 - 1.
# Ensure that the random shuffling has good mixing properties.
min_fraction_of_examples_in_queue = 0.4
min_queue_examples = int(num_examples_per_epoch *
min_fraction_of_examples_in_queue)
# Generate a batch of images and labels by building up a queue of examples.
return cifar10_input._generate_image_and_label_batch(float_image, read_input.label,
min_queue_examples, batch_size,
shuffle=False)
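# Illustrative usage sketch (not part of the original module): the tarball below
# extracts into a "cifar-10-batches-bin" subdirectory, so a training input
# pipeline could be wired up roughly as follows.
# maybe_download_and_extract()
# images, labels = inputs(eval_data=False,
#                         data_dir="/tmp/cifar/cifar-10-batches-bin",
#                         batch_size=64)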
DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
def maybe_download_and_extract():
"""Download and extract the tarball from Alex's website."""
dest_directory = "/tmp/cifar"
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
def plot(config, image, file):
""" Plot a single CIFAR image."""
image = np.squeeze(image)
print(file, image.shape)
imsave(file, image)
| mit |
qxcv/comp2550 | project/seminar/plot_map.py | 1 | 1686 | #!/usr/bin/env python2
"""Plot a nice image of map likelihood"""
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # noqa
import numpy as np
from os.path import abspath
from sys import path
path.append(abspath('../'))
from map import Map
from observation import coordinate_projector
from settings import KARLSRUHE_CENTER
# Samples per metre
SAMPLE_DENSITY = 1.0
EXTENTS = [612728, 613269, 4.11345e6, 4.11388e6]
EXTENTS = [612728, 613069, 4.11345e6, 4.11368e6]
MAP_PATH = '../data/kitti/00.osm.bz2'
if __name__ == '__main__':
# Get map
print("Loading map")
proj = coordinate_projector(KARLSRUHE_CENTER)
m = Map(MAP_PATH, proj)
# Produce grid
print("Producing grid")
x_samples = int(EXTENTS[1] - EXTENTS[0] / SAMPLE_DENSITY)
y_samples = int(EXTENTS[3] - EXTENTS[2] / SAMPLE_DENSITY)
x_range = np.linspace(EXTENTS[0], EXTENTS[1], x_samples)
y_range = np.linspace(EXTENTS[2], EXTENTS[3], y_samples)
X, Y = np.meshgrid(x_range, y_range)
# Take samples
print("Sampling")
dists = np.zeros((y_samples, x_samples))
for i in xrange(x_samples):
for j in xrange(y_samples):
pos = (X[j, i], Y[j, i])
dists[j, i] = m.nearest_lane_dist(pos)
results = np.log(1.0 / (1 + dists**2))
# Plot
print("Plotting")
# ax = plt.figure().gca(projection='3d')
# ax.plot_surface(X, Y, results, cmap='coolwarm')
plt.imshow(results, extent=EXTENTS).set_cmap('gist_heat')
plt.colorbar()
plt.xlabel('$x$-coordinate (m)')
plt.ylabel('$y$-coordinate (m)')
plt.title(r'Log-likelihood')
plt.tight_layout()
plt.savefig('images/map-likelihood.png')
| apache-2.0 |
cbmoore/statsmodels | statsmodels/sandbox/tsa/try_arma_more.py | 34 | 3744 | # -*- coding: utf-8 -*-
"""Periodograms for ARMA and time series
theoretical periodogram of an ARMA process and different versions
of periodogram estimation
uses scikits.talkbox and matplotlib
Created on Wed Oct 14 23:02:19 2009
Author: josef-pktd
"""
from __future__ import print_function
import numpy as np
from scipy import signal, ndimage
import matplotlib.mlab as mlb
import matplotlib.pyplot as plt
from statsmodels.tsa.arima_process import arma_generate_sample, arma_periodogram
from statsmodels.tsa.stattools import acovf
hastalkbox = False
try:
import scikits.talkbox as stb
import scikits.talkbox.spectral.basic as stbs
except:
hastalkbox = False
ar = [1., -0.7]#[1,0,0,0,0,0,0,-0.7]
ma = [1., 0.3]
ar = np.convolve([1.]+[0]*50 +[-0.6], ar)
ar = np.convolve([1., -0.5]+[0]*49 +[-0.3], ar)
n_startup = 1000
nobs = 1000
# throwing away samples at beginning makes sample more "stationary"
xo = arma_generate_sample(ar,ma,n_startup+nobs)
x = xo[n_startup:]
#moved to tsa.arima_process
#def arma_periodogram(ar, ma, **kwds):
# '''periodogram for ARMA process given by lag-polynomials ar and ma
#
# Parameters
# ----------
# ar : array_like
# autoregressive lag-polynomial with leading 1 and lhs sign
# ma : array_like
# moving average lag-polynomial with leading 1
# kwds : options
# options for scipy.signal.freqz
# default: worN=None, whole=0
#
# Returns
# -------
# w : array
# frequencies
# sd : array
# periodogram, spectral density
#
# Notes
# -----
# Normalization ?
#
# '''
# w, h = signal.freqz(ma, ar, **kwds)
# sd = np.abs(h)**2/np.sqrt(2*np.pi)
# if np.sum(np.isnan(h)) > 0:
# # this happens with unit root or seasonal unit root'
# print 'Warning: nan in frequency response h'
# return w, sd
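# Since the function now lives in statsmodels.tsa.arima_process (imported above),
# the same spectral density can be obtained directly; an illustrative, equivalent call:
# wd, sdd = arma_periodogram(ar, ma, worN=None, whole=0)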
plt.figure()
plt.plot(x)
rescale = 0
w, h = signal.freqz(ma, ar)
sd = np.abs(h)**2/np.sqrt(2*np.pi)
if np.sum(np.isnan(h)) > 0:
# this happens with unit root or seasonal unit root'
print('Warning: nan in frequency response h')
h[np.isnan(h)] = 1.
rescale = 0
#replace with signal.order_filter ?
pm = ndimage.filters.maximum_filter(sd, footprint=np.ones(5))
maxind = np.nonzero(pm == sd)
print('local maxima frequencies')
wmax = w[maxind]
sdmax = sd[maxind]
plt.figure()
plt.subplot(2,3,1)
if rescale:
plt.plot(w, sd/sd[0], '-', wmax, sdmax/sd[0], 'o')
# plt.plot(w, sd/sd[0], '-')
# plt.hold()
# plt.plot(wmax, sdmax/sd[0], 'o')
else:
plt.plot(w, sd, '-', wmax, sdmax, 'o')
# plt.hold()
# plt.plot(wmax, sdmax, 'o')
plt.title('DGP')
sdm, wm = mlb.psd(x)
sdm = sdm.ravel()
pm = ndimage.filters.maximum_filter(sdm, footprint=np.ones(5))
maxind = np.nonzero(pm == sdm)
plt.subplot(2,3,2)
if rescale:
plt.plot(wm,sdm/sdm[0], '-', wm[maxind], sdm[maxind]/sdm[0], 'o')
else:
plt.plot(wm, sdm, '-', wm[maxind], sdm[maxind], 'o')
plt.title('matplotlib')
if hastalkbox:
sdp, wp = stbs.periodogram(x)
plt.subplot(2,3,3)
if rescale:
plt.plot(wp,sdp/sdp[0])
else:
plt.plot(wp, sdp)
plt.title('stbs.periodogram')
xacov = acovf(x, unbiased=False)
plt.subplot(2,3,4)
plt.plot(xacov)
plt.title('autocovariance')
nr = len(x)#*2/3
#xacovfft = np.fft.fft(xacov[:nr], 2*nr-1)
xacovfft = np.fft.fft(np.correlate(x,x,'full'))
#abs(xacovfft)**2 or equivalently
xacovfft = xacovfft * xacovfft.conj()
plt.subplot(2,3,5)
if rescale:
plt.plot(xacovfft[:nr]/xacovfft[0])
else:
plt.plot(xacovfft[:nr])
plt.title('fft')
if hastalkbox:
sdpa, wpa = stbs.arspec(x, 50)
plt.subplot(2,3,6)
if rescale:
plt.plot(wpa,sdpa/sdpa[0])
else:
plt.plot(wpa, sdpa)
plt.title('stbs.arspec')
#plt.show()
| bsd-3-clause |
OwaJawa/kaggle-galaxies | try_convnet_cc_multirotflip_3x69r45_maxout2048_extradense_big256.py | 7 | 17443 | import numpy as np
# import pandas as pd
import theano
import theano.tensor as T
import layers
import cc_layers
import custom
import load_data
import realtime_augmentation as ra
import time
import csv
import os
import cPickle as pickle
from datetime import datetime, timedelta
# import matplotlib.pyplot as plt
# plt.ion()
# import utils
BATCH_SIZE = 16
NUM_INPUT_FEATURES = 3
LEARNING_RATE_SCHEDULE = {
0: 0.04,
1800: 0.004,
2300: 0.0004,
}
MOMENTUM = 0.9
WEIGHT_DECAY = 0.0
CHUNK_SIZE = 10000 # 30000 # this should be a multiple of the batch size, ideally.
NUM_CHUNKS = 2500 # 3000 # 1500 # 600 # 600 # 600 # 500
VALIDATE_EVERY = 20 # 12 # 6 # 6 # 6 # 5 # validate only every 5 chunks. MUST BE A DIVISOR OF NUM_CHUNKS!!!
# else computing the analysis data does not work correctly, since it assumes that the validation set is still loaded.
NUM_CHUNKS_NONORM = 1 # train without normalisation for this many chunks, to get the weights in the right 'zone'.
# this should be only a few, just 1 hopefully suffices.
GEN_BUFFER_SIZE = 1
# # need to load the full training data anyway to extract the validation set from it.
# # alternatively we could create separate validation set files.
# DATA_TRAIN_PATH = "data/images_train_color_cropped33_singletf.npy.gz"
# DATA2_TRAIN_PATH = "data/images_train_color_8x_singletf.npy.gz"
# DATA_VALIDONLY_PATH = "data/images_validonly_color_cropped33_singletf.npy.gz"
# DATA2_VALIDONLY_PATH = "data/images_validonly_color_8x_singletf.npy.gz"
# DATA_TEST_PATH = "data/images_test_color_cropped33_singletf.npy.gz"
# DATA2_TEST_PATH = "data/images_test_color_8x_singletf.npy.gz"
TARGET_PATH = "predictions/final/try_convnet_cc_multirotflip_3x69r45_maxout2048_extradense_big256.csv"
ANALYSIS_PATH = "analysis/final/try_convnet_cc_multirotflip_3x69r45_maxout2048_extradense_big256.pkl"
# FEATURES_PATTERN = "features/try_convnet_chunked_ra_b3sched.%s.npy"
print "Set up data loading"
# TODO: adapt this so it loads the validation data from JPEGs and does the processing realtime
input_sizes = [(69, 69), (69, 69)]
ds_transforms = [
ra.build_ds_transform(3.0, target_size=input_sizes[0]),
ra.build_ds_transform(3.0, target_size=input_sizes[1]) + ra.build_augmentation_transform(rotation=45)
]
num_input_representations = len(ds_transforms)
augmentation_params = {
'zoom_range': (1.0 / 1.3, 1.3),
'rotation_range': (0, 360),
'shear_range': (0, 0),
'translation_range': (-4, 4),
'do_flip': True,
}
augmented_data_gen = ra.realtime_augmented_data_gen(num_chunks=NUM_CHUNKS, chunk_size=CHUNK_SIZE,
augmentation_params=augmentation_params, ds_transforms=ds_transforms,
target_sizes=input_sizes)
post_augmented_data_gen = ra.post_augment_brightness_gen(augmented_data_gen, std=0.5)
train_gen = load_data.buffered_gen_mp(post_augmented_data_gen, buffer_size=GEN_BUFFER_SIZE)
y_train = np.load("data/solutions_train.npy")
train_ids = load_data.train_ids
test_ids = load_data.test_ids
# split training data into training + a small validation set
num_train = len(train_ids)
num_test = len(test_ids)
num_valid = num_train // 10 # integer division
num_train -= num_valid
y_valid = y_train[num_train:]
y_train = y_train[:num_train]
valid_ids = train_ids[num_train:]
train_ids = train_ids[:num_train]
train_indices = np.arange(num_train)
valid_indices = np.arange(num_train, num_train + num_valid)
test_indices = np.arange(num_test)
def create_train_gen():
"""
this generates the training data in order, for postprocessing. Do not use this for actual training.
"""
data_gen_train = ra.realtime_fixed_augmented_data_gen(train_indices, 'train',
ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes)
return load_data.buffered_gen_mp(data_gen_train, buffer_size=GEN_BUFFER_SIZE)
def create_valid_gen():
data_gen_valid = ra.realtime_fixed_augmented_data_gen(valid_indices, 'train',
ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes)
return load_data.buffered_gen_mp(data_gen_valid, buffer_size=GEN_BUFFER_SIZE)
def create_test_gen():
data_gen_test = ra.realtime_fixed_augmented_data_gen(test_indices, 'test',
ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes)
return load_data.buffered_gen_mp(data_gen_test, buffer_size=GEN_BUFFER_SIZE)
print "Preprocess validation data upfront"
start_time = time.time()
xs_valid = [[] for _ in xrange(num_input_representations)]
for data, length in create_valid_gen():
for x_valid_list, x_chunk in zip(xs_valid, data):
x_valid_list.append(x_chunk[:length])
xs_valid = [np.vstack(x_valid) for x_valid in xs_valid]
xs_valid = [x_valid.transpose(0, 3, 1, 2) for x_valid in xs_valid] # move the colour dimension up
print " took %.2f seconds" % (time.time() - start_time)
print "Build model"
l0 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[0][0], input_sizes[0][1])
l0_45 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[1][0], input_sizes[1][1])
l0r = layers.MultiRotSliceLayer([l0, l0_45], part_size=45, include_flip=True)
l0s = cc_layers.ShuffleBC01ToC01BLayer(l0r)
l1a = cc_layers.CudaConvnetConv2DLayer(l0s, n_filters=32, filter_size=6, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l1 = cc_layers.CudaConvnetPooling2DLayer(l1a, pool_size=2)
l2a = cc_layers.CudaConvnetConv2DLayer(l1, n_filters=64, filter_size=5, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l2 = cc_layers.CudaConvnetPooling2DLayer(l2a, pool_size=2)
l3a = cc_layers.CudaConvnetConv2DLayer(l2, n_filters=128, filter_size=3, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3b = cc_layers.CudaConvnetConv2DLayer(l3a, n_filters=256, filter_size=3, pad=0, weights_std=0.1, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3 = cc_layers.CudaConvnetPooling2DLayer(l3b, pool_size=2)
l3s = cc_layers.ShuffleC01BToBC01Layer(l3)
j3 = layers.MultiRotMergeLayer(l3s, num_views=4) # 2) # merge convolutional parts
l4a = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4b = layers.FeatureMaxPoolingLayer(l4a, pool_size=2, feature_dim=1, implementation='reshape')
l4c = layers.DenseLayer(l4b, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4 = layers.FeatureMaxPoolingLayer(l4c, pool_size=2, feature_dim=1, implementation='reshape')
# l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.0, dropout=0.5, nonlinearity=custom.clip_01) # nonlinearity=layers.identity)
l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.1, dropout=0.5, nonlinearity=layers.identity)
# l6 = layers.OutputLayer(l5, error_measure='mse')
l6 = custom.OptimisedDivGalaxyOutputLayer(l5) # this incorporates the constraints on the output (probabilities sum to one, weighting, etc.)
train_loss_nonorm = l6.error(normalisation=False)
train_loss = l6.error() # but compute and print this!
valid_loss = l6.error(dropout_active=False)
all_parameters = layers.all_parameters(l6)
all_bias_parameters = layers.all_bias_parameters(l6)
xs_shared = [theano.shared(np.zeros((1,1,1,1), dtype=theano.config.floatX)) for _ in xrange(num_input_representations)]
y_shared = theano.shared(np.zeros((1,1), dtype=theano.config.floatX))
learning_rate = theano.shared(np.array(LEARNING_RATE_SCHEDULE[0], dtype=theano.config.floatX))
idx = T.lscalar('idx')
givens = {
l0.input_var: xs_shared[0][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
l0_45.input_var: xs_shared[1][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
l6.target_var: y_shared[idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
}
# updates = layers.gen_updates(train_loss, all_parameters, learning_rate=LEARNING_RATE, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
updates_nonorm = layers.gen_updates_nesterov_momentum_no_bias_decay(train_loss_nonorm, all_parameters, all_bias_parameters, learning_rate=learning_rate, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
updates = layers.gen_updates_nesterov_momentum_no_bias_decay(train_loss, all_parameters, all_bias_parameters, learning_rate=learning_rate, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
train_nonorm = theano.function([idx], train_loss_nonorm, givens=givens, updates=updates_nonorm)
train_norm = theano.function([idx], train_loss, givens=givens, updates=updates)
compute_loss = theano.function([idx], valid_loss, givens=givens) # dropout_active=False
compute_output = theano.function([idx], l6.predictions(dropout_active=False), givens=givens, on_unused_input='ignore') # not using the labels, so theano complains
compute_features = theano.function([idx], l4.output(dropout_active=False), givens=givens, on_unused_input='ignore')
print "Train model"
start_time = time.time()
prev_time = start_time
num_batches_valid = x_valid.shape[0] // BATCH_SIZE
losses_train = []
losses_valid = []
param_stds = []
for e in xrange(NUM_CHUNKS):
print "Chunk %d/%d" % (e + 1, NUM_CHUNKS)
chunk_data, chunk_length = train_gen.next()
y_chunk = chunk_data.pop() # last element is labels.
xs_chunk = chunk_data
# need to transpose the chunks to move the 'channels' dimension up
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
if e in LEARNING_RATE_SCHEDULE:
current_lr = LEARNING_RATE_SCHEDULE[e]
learning_rate.set_value(LEARNING_RATE_SCHEDULE[e])
print " setting learning rate to %.6f" % current_lr
# train without normalisation for the first # chunks.
if e >= NUM_CHUNKS_NONORM:
train = train_norm
else:
train = train_nonorm
print " load training data onto GPU"
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
y_shared.set_value(y_chunk)
num_batches_chunk = x_chunk.shape[0] // BATCH_SIZE
# import pdb; pdb.set_trace()
print " batch SGD"
losses = []
for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
loss = train(b)
losses.append(loss)
# print " loss: %.6f" % loss
mean_train_loss = np.sqrt(np.mean(losses))
print " mean training loss (RMSE):\t\t%.6f" % mean_train_loss
losses_train.append(mean_train_loss)
# store param stds during training
param_stds.append([p.std() for p in layers.get_param_values(l6)])
if ((e + 1) % VALIDATE_EVERY) == 0:
print
print "VALIDATING"
print " load validation data onto GPU"
for x_shared, x_valid in zip(xs_shared, xs_valid):
x_shared.set_value(x_valid)
y_shared.set_value(y_valid)
print " compute losses"
losses = []
for b in xrange(num_batches_valid):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_valid)
loss = compute_loss(b)
losses.append(loss)
mean_valid_loss = np.sqrt(np.mean(losses))
print " mean validation loss (RMSE):\t\t%.6f" % mean_valid_loss
losses_valid.append(mean_valid_loss)
layers.dump_params(l6, e=e)
now = time.time()
time_since_start = now - start_time
time_since_prev = now - prev_time
prev_time = now
est_time_left = time_since_start * (float(NUM_CHUNKS - (e + 1)) / float(e + 1))
eta = datetime.now() + timedelta(seconds=est_time_left)
eta_str = eta.strftime("%c")
print " %s since start (%.2f s)" % (load_data.hms(time_since_start), time_since_prev)
print " estimated %s to go (ETA: %s)" % (load_data.hms(est_time_left), eta_str)
print
del chunk_data, xs_chunk, x_chunk, y_chunk, xs_valid, x_valid # memory cleanup
print "Compute predictions on validation set for analysis in batches"
predictions_list = []
for b in xrange(num_batches_valid):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_valid)
predictions = compute_output(b)
predictions_list.append(predictions)
all_predictions = np.vstack(predictions_list)
# postprocessing: clip all predictions to 0-1
all_predictions[all_predictions > 1] = 1.0
all_predictions[all_predictions < 0] = 0.0
print "Write validation set predictions to %s" % ANALYSIS_PATH
with open(ANALYSIS_PATH, 'w') as f:
pickle.dump({
'ids': valid_ids[:num_batches_valid * BATCH_SIZE], # note that we need to truncate the ids to a multiple of the batch size.
'predictions': all_predictions,
'targets': y_valid,
'mean_train_loss': mean_train_loss,
'mean_valid_loss': mean_valid_loss,
'time_since_start': time_since_start,
'losses_train': losses_train,
'losses_valid': losses_valid,
'param_values': layers.get_param_values(l6),
'param_stds': param_stds,
}, f, pickle.HIGHEST_PROTOCOL)
del predictions_list, all_predictions # memory cleanup
# print "Loading test data"
# x_test = load_data.load_gz(DATA_TEST_PATH)
# x2_test = load_data.load_gz(DATA2_TEST_PATH)
# test_ids = np.load("data/test_ids.npy")
# num_test = x_test.shape[0]
# x_test = x_test.transpose(0, 3, 1, 2) # move the colour dimension up.
# x2_test = x2_test.transpose(0, 3, 1, 2)
# create_test_gen = lambda: load_data.array_chunker_gen([x_test, x2_test], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
print "Computing predictions on test data"
predictions_list = []
for e, (xs_chunk, chunk_length) in enumerate(create_test_gen()):
print "Chunk %d" % (e + 1)
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk] # move the colour dimension up.
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE))) # need to round UP this time to account for all data
    # make predictions for testset, don't forget to cut off the zeros at the end
for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
predictions = compute_output(b)
predictions_list.append(predictions)
all_predictions = np.vstack(predictions_list)
all_predictions = all_predictions[:num_test] # truncate back to the correct length
# postprocessing: clip all predictions to 0-1
all_predictions[all_predictions > 1] = 1.0
all_predictions[all_predictions < 0] = 0.0
print "Write predictions to %s" % TARGET_PATH
# test_ids = np.load("data/test_ids.npy")
with open(TARGET_PATH, 'wb') as csvfile:
writer = csv.writer(csvfile) # , delimiter=',', quoting=csv.QUOTE_MINIMAL)
# write header
writer.writerow(['GalaxyID', 'Class1.1', 'Class1.2', 'Class1.3', 'Class2.1', 'Class2.2', 'Class3.1', 'Class3.2', 'Class4.1', 'Class4.2', 'Class5.1', 'Class5.2', 'Class5.3', 'Class5.4', 'Class6.1', 'Class6.2', 'Class7.1', 'Class7.2', 'Class7.3', 'Class8.1', 'Class8.2', 'Class8.3', 'Class8.4', 'Class8.5', 'Class8.6', 'Class8.7', 'Class9.1', 'Class9.2', 'Class9.3', 'Class10.1', 'Class10.2', 'Class10.3', 'Class11.1', 'Class11.2', 'Class11.3', 'Class11.4', 'Class11.5', 'Class11.6'])
# write data
for k in xrange(test_ids.shape[0]):
row = [test_ids[k]] + all_predictions[k].tolist()
writer.writerow(row)
print "Gzipping..."
os.system("gzip -c %s > %s.gz" % (TARGET_PATH, TARGET_PATH))
del all_predictions, predictions_list, xs_chunk, x_chunk # memory cleanup
# # need to reload training data because it has been split and shuffled.
# # don't need to reload test data
# x_train = load_data.load_gz(DATA_TRAIN_PATH)
# x2_train = load_data.load_gz(DATA2_TRAIN_PATH)
# x_train = x_train.transpose(0, 3, 1, 2) # move the colour dimension up
# x2_train = x2_train.transpose(0, 3, 1, 2)
# train_gen_features = load_data.array_chunker_gen([x_train, x2_train], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
# test_gen_features = load_data.array_chunker_gen([x_test, x2_test], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
# for name, gen, num in zip(['train', 'test'], [train_gen_features, test_gen_features], [x_train.shape[0], x_test.shape[0]]):
# print "Extracting feature representations for all galaxies: %s" % name
# features_list = []
# for e, (xs_chunk, chunk_length) in enumerate(gen):
# print "Chunk %d" % (e + 1)
# x_chunk, x2_chunk = xs_chunk
# x_shared.set_value(x_chunk)
# x2_shared.set_value(x2_chunk)
# num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE))) # need to round UP this time to account for all data
# # compute features for set, don't forget to cute off the zeros at the end
# for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
# features = compute_features(b)
# features_list.append(features)
# all_features = np.vstack(features_list)
# all_features = all_features[:num] # truncate back to the correct length
# features_path = FEATURES_PATTERN % name
# print " write features to %s" % features_path
# np.save(features_path, all_features)
print "Done!"
| bsd-3-clause |
pieleric/odemis | src/odemis/model/_metadata.py | 2 | 13576 | # -*- coding: utf-8 -*-
'''
Created on 2 Apr 2012
@author: Éric Piel
Copyright © 2012-2014 Éric Piel, Delmic
This file is part of Odemis.
Odemis is free software: you can redistribute it and/or modify it under the terms
of the GNU General Public License version 2 as published by the Free Software
Foundation.
Odemis is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
Odemis. If not, see http://www.gnu.org/licenses/.
These are the conventional metadata available in a DataArray.
'''
# This list of constants are used as key for the metadata
MD_EXP_TIME = "Exposure time" # s
MD_ACQ_DATE = "Acquisition date" # s since epoch
MD_AD_LIST = "Acquisition dates" # s since epoch for each element in dimension T
# distance between two points on the sample that are seen at the centre of two
# adjacent pixels considering that these two points are in focus
MD_PIXEL_SIZE = "Pixel size" # (m, m) or (m, m, m) if the data has XY or XYZ dimensions
MD_SHEAR = "Shear" # float, vertical shear (0, means no shearing)
MD_FLIP = "Flip"
MD_BINNING = "Binning" # (px, px), number of pixels acquired as one big pixel, in each dimension
MD_INTEGRATION_COUNT = "Integration Count" # number of samples/images acquired/integrated; default: 1
MD_HW_VERSION = "Hardware version" # str
MD_SW_VERSION = "Software version" # str
MD_HW_NAME = "Hardware name" # str, product name of the hardware component (and s/n)
MD_GAIN = "Gain" # no unit (ratio) voltage multiplication provided by the gain (for CCD/CMOS)
MD_BPP = "Bits per pixel" # bit
MD_DIMS = "Dimension names" # str, name of each dimension in the order of the shape. The default is CTZYX (with the first dimensions dropped if shape is smaller). YXC is useful for RGB(A) data
MD_BASELINE = "Baseline value" # ADU, int or float (same as image data) representing the average value when no signal is received (default is the lowest representable number or 0 for floats)
MD_READOUT_TIME = "Pixel readout time" # s, time to read one pixel (on a CCD/CMOS)
MD_SENSOR_PIXEL_SIZE = "Sensor pixel size" # (m, m), distance between the centre of 2 pixels on the detector sensor
MD_SENSOR_SIZE = "Sensor size" # px, px, maximum resolution that can be acquire by the detector
MD_SENSOR_TEMP = "Sensor temperature" # C
MD_POS = "Centre position" # (m, m) or (m, m, m) if the data has XY or XYZ dimensions.
# It's the location of the picture *centre*. X goes "right" (ie, pixel index increases),
# Y goes "up" (ie, pixel index decreases), and Z goes "top" (ie, pixel index increases).
# Note that for angular resolved acquisitions, MD_POS corresponds to the position of the e-beam on the sample
MD_ROTATION = "Rotation" # radians (0<=float<2*PI) rotation applied to the image (from its center) counter-clockwise
# Note that the following two might be a set of ranges
MD_PIVOT_POS = "Pivot position for controllers with rotational axes" # (dict str->float) axis -> pos:
# Used in SmarAct motion controllers
MD_IN_WL = "Input wavelength range" # (m, m) or (m, m, m, m, m), lower and upper range of the wavelength input
MD_OUT_WL = "Output wavelength range" # (m, m) or (m, m, m, m, m), lower and upper range of the filtered wavelength before the camera
MD_LIGHT_POWER = "Light power" # W, power of the emitting light
MD_LENS_NAME = "Lens name" # str, product name of the lens
MD_LENS_MAG = "Lens magnification" # float (ratio), magnification factor
MD_LENS_NA = "Lens numerical aperture" # float (ratio), numerical aperture
MD_LENS_RI = "Lens refractive index" # float (ratio), refractive index
MD_FILTER_NAME = "Filter name" # str, product name of the light filter
# TODO: might need to merge DWELL_TIME and EXP_TIME into INTEGRATION_TIME: the time each pixel receive energy
# + SCANNED_DIMENSIONS: list of dimensions which were scanned instead of being acquired simultaneously
MD_DWELL_TIME = "Pixel dwell time" # s (float), time the electron beam spends per pixel
MD_EBEAM_VOLTAGE = "Electron beam acceleration voltage" # V (float), voltage used to accelerate the electron beam
MD_EBEAM_CURRENT = "Electron beam emission current" # A (float), emission current of the electron beam (typically, the probe current is a bit smaller and the spot diameter is linearly proportional)
MD_EBEAM_SPOT_DIAM = "Electron beam spot diameter" # m (float), approximate diameter of the electron beam spot (typically function of the current)
MD_STREAK_TIMERANGE = "Streak Time Range" # (s) Time range for one streak/sweep
MD_STREAK_MCPGAIN = "Streak MCP Gain" # (int) Multiplying gain for microchannel plate
MD_STREAK_MODE = "Streak Mode" # (bool) Mode of streak camera (Focus (Off) or Operate (On))
MD_TRIGGER_DELAY = "Streak Trigger Delay" # (float) Delay A between ext. trigger and starting of the streak/sweeping
MD_TRIGGER_RATE = "Streak Repetition Rate" # (Hz) Repetition Rate of the trigger signal
# This one is a kind of a hack, to store the evolution of the current over the time
# of an acquisition.
# tuple of (float, float) -> s since epoch, A
# The entries should be ordered by time (the earliest the first)
MD_EBEAM_CURRENT_TIME = "Electron beam emission current over time"
MD_WL_LIST = "Wavelength list" # m... (list of float), wavelength for each pixel. The list is the same length as the C dimension
MD_TIME_LIST = "Time list" # sec (array) containing the corrections for the timestamp corresponding to each px
# Deprecated: use MD_TIME_LIST
MD_PIXEL_DUR = "Pixel duration" # Time duration of a 'pixel' along the time dimension
MD_TIME_OFFSET = "Time offset" # Time of the first 'pixel' in the time dimension (added to ACQ_DATE), default is 0
MD_ACQ_TYPE = "Acquisition type" # the type of acquisition contained in the DataArray
# The following tags are to be used as the values of MD_ACQ_TYPE
MD_AT_SPECTRUM = "Spectrum"
MD_AT_AR = "Angle-resolved"
MD_AT_EM = "Electron microscope"
MD_AT_FLUO = "Fluorescence"
MD_AT_ANCHOR = "Anchor region"
MD_AT_CL = "Cathodoluminescence"
MD_AT_OVV_FULL = "Full overview"
MD_AT_OVV_TILES = "Built-up overview"
MD_AT_HISTORY = "History"
MD_AT_TEMPSPECTRUM = "Temporal Spectrum"
MD_AT_TEMPORAL = "Temporal"
MD_AT_SLIT = "Slit view" # View of the spectrograph slit for SPARCv2 alignment
MD_AR_POLE = "Angular resolved pole position" # px, px (tuple of float), position of pole (aka hole center) in raw acquisition of SPARC AR
MD_AR_XMAX = "Polar xmax" # m, the distance between the parabola origin and the cutoff position
MD_AR_HOLE_DIAMETER = "Hole diameter" # m, diameter the hole in the mirror
MD_AR_FOCUS_DISTANCE = "Focus distance" # m, the vertical mirror cutoff, iow the min distance between the mirror and the sample
MD_AR_PARABOLA_F = "Parabola parameter" # m, parabola_parameter=1/4f
MD_POL_MODE = "Polarization" # (string), position of the polarization analyzer (see POL_POSITIONS in _base.py)
MD_POL_POS_QWP = "Position quarter wave plate" # rad, position of the quarter wave plate
MD_POL_POS_LINPOL = "Position linear polarizer" # rad, position of the linear polarizer
# MD_POL_MODE values
MD_POL_NONE = "pass-through" # (str) no (specific) polarization
MD_POL_HORIZONTAL = "horizontal" # (str) polarization analyzer position
MD_POL_VERTICAL = "vertical" # (str) polarization analyzer position
MD_POL_POSDIAG = "posdiag" # (str) polarization analyzer position
MD_POL_NEGDIAG = "negdiag" # (str) polarization analyzer position
MD_POL_RHC = "rhc" # (str) polarization analyzer position
MD_POL_LHC = "lhc" # (str) polarization analyzer position
MD_POL_S0 = "S0" # (str) Stokes parameter sample plane S0
MD_POL_S1 = "S1" # (str) Stokes parameter sample plane S1
MD_POL_S2 = "S2" # (str) Stokes parameter sample plane S2
MD_POL_S3 = "S3" # (str) Stokes parameter sample plane S3
MD_POL_S1N = "S1N" # (str) Stokes parameter sample plane S1 normalized by S0
MD_POL_S2N = "S2N" # (str) Stokes parameter sample plane S2 normalized by S0
MD_POL_S3N = "S3N" # (st) Stokes parameter sample plane S3 normalized by S0
MD_POL_DS0 = "DS0" # (string) Stokes parameter detector plane DS0
MD_POL_DS1 = "DS1" # (str) Stokes parameter detector plane DS1
MD_POL_DS2 = "DS2" # (str) Stokes parameter detector plane DS2
MD_POL_DS3 = "DS3" # (str) Stokes parameter detector plane DS3
MD_POL_DS1N = "DS1N" # (str) Stokes parameter detector plane DS1 normalized by DS0
MD_POL_DS2N = "DS2N" # (str) Stokes parameter detector plane DS2 normalized by DS0
MD_POL_DS3N = "DS3N" # (str) Stokes parameter detector plane DS3 normalized by DS0
MD_POL_EPHI = "Ephi" # (str) Electrical field amplitude Ephi
MD_POL_ETHETA = "Etheta" # (str) Electrical field amplitude Etheta
MD_POL_EX = "Ex" # (str) Electrical field amplitude Ex
MD_POL_EY = "Ey" # (str) Electrical field amplitude Ey
MD_POL_EZ = "Ez" # (str) Electrical field amplitude Ez
MD_POL_DOP = "DOP" # (str) Degree of polarization DOP
MD_POL_DOLP = "DOLP" # (str) Degree of linear polarization DOLP
MD_POL_DOCP = "DOCP" # (str) Degree of circular polarization DOCP
MD_POL_UP = "UP" # (str) Degree of unpolarized light UP
MD_DET_TYPE = "Detector type"
# The following tags are to be used as the values of MD_DET_TYPE
MD_DT_NORMAL = "Detector normal" # The detector sends the same level of signal independent of the acq duration (eg, ETD)
MD_DT_INTEGRATING = "Detector integrating" # The detector level is proportional to the acq duration (eg, CCD)
# The following tags are not to be filled at acquisition, but by the user interface
MD_DESCRIPTION = "Description" # (string) User-friendly name that describes what this acquisition is
MD_USER_NOTE = "User note" # (string) Whatever comment the user has added to the image
MD_USER_TINT = "Display tint" # Either RGB (3-tuple of 0<int<255): colour to display the (greyscale) image or a matplotlib.colors.Colormap name
MD_HW_NOTE = "Hardware note" # (string) "Free" description of the hardware status and settings.
# The following metadata is the correction metadata generated by
# find_overlay.FindOverlay and passed to find_overlay.mergeMetadata
MD_ROTATION_COR = "Rotation cor" # radians, to be subtracted from MD_ROTATION
MD_PIXEL_SIZE_COR = "Pixel size cor" # (m, m), to be multiplied with MD_PIXEL_SIZE
MD_POS_COR = "Centre position cor" # (m, m), to be subtracted from MD_POS
MD_SHEAR_COR = "Shear cor" # float, vertical shear to be subtracted from MD_SHEAR
MD_BASELINE_COR = "Baseline cor" # value, to be added to MD_BASELINE
# The following metadata is the correction metadata for the Phenom image and
# spot shift as calculated by delphi.DelphiCalibration.
MD_RESOLUTION_SLOPE = "Resolution slope" # (float, float) resolution related SEM image shift, slope of linear fit
MD_RESOLUTION_INTERCEPT = "Resolution intercept" # (float, float) resolution related SEM image shift, intercept of linear fit
MD_HFW_SLOPE = "HFW slope" # (float, float) HFW related SEM image shift, slope of linear fit
MD_SPOT_SHIFT = "Spot shift" # (float, float), SEM spot shift in percentage of HFW
MD_TIME_RANGE_TO_DELAY = "Streak time range to trigger delay" # (dict) mapping time range to trigger delay in streak camera
# The following metadata is for correction on the Nikon Confocal
# dict (int (resolution X) -> dict (float (dwell time) -> tuple of 4 floats (correction factors)))
MD_SHIFT_LOOKUP = "Pixel shift compensation table"
MD_CALIB = "Calibration parameters" # (list of list of float) Calibration parameters for the correct axes mapping
# The following metadata is used to store specific known positions for the
# actuators.
MD_FAV_POS_ACTIVE = "Favourite position active" # dict of str -> float representing a good position for being "active" (eg, mirror engaged, lens in use)
MD_FAV_POS_DEACTIVE = "Favourite position deactive" # dict of str -> float representing a good position for being "deactive" (eg, mirror parked, lens not in use)
MD_FAV_POS_COATING = "Favourite position coating" # dict of str -> float representing a good position for GIS coating
MD_POS_ACTIVE_RANGE = "Range for active position" # dict str → (float, float): axis name → (min,max): the range of the axes within which can be used during imaging
MD_OVERVIEW_RANGE = "Range for overview map" # dict str → (float, float): axis name → (min,max): the range of the axes within which overview map is acquired
MD_ION_BEAM_TO_SAMPLE_ANGLE = "Ion beam to sample angle" # (float) angle between ion beam and sample stage
MD_SAFE_REL_RANGE = "Safe relative range" # (float, float) +/- safe range relative to a value
MD_SAFE_SPEED_RANGE = "Safe speed range" # (float, float) min, max of the safe speed range
# The following metadata is used to store the destination components of the
# specific known positions for the actuators.
MD_FAV_POS_ACTIVE_DEST = "Favourite position active destination" # list or set of str
MD_FAV_POS_DEACTIVE_DEST = "Favourite position deactive destination" # list or set of str
MD_AXES_ORDER_REF = "Axes order for referencing" # list of str
# The following metadata is used for the PID controller on the Focus Tracker.
MD_GAIN_P = "Proportional gain" # float
MD_GAIN_I = "Integral gain" # float
MD_GAIN_D = "Derivative gain" # float
# The following is a string containing a dict encoded in JSON, which represents all the known states
# of all the hardware used during an acquisition.
MD_EXTRA_SETTINGS = "Extra settings"
# Constant for TINT
TINT_FIT_TO_RGB = "fitrgb"
TINT_RGB_AS_IS = "rgbasis"
| gpl-2.0 |
jlegendary/scikit-learn | sklearn/neural_network/tests/test_rbm.py | 142 | 6276 | import sys
import re
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from sklearn.utils.testing import (assert_almost_equal, assert_array_equal,
assert_true)
from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
np.seterr(all='warn')
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, n_iter=7, random_state=9)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_partial_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=20, random_state=9)
n_samples = X.shape[0]
n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
batch_slices = np.array_split(X, n_batches)
for i in range(7):
for batch in batch_slices:
rbm.partial_fit(batch)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
assert_array_equal(X, Xdigits)
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
def test_small_sparse():
# BernoulliRBM should work on small sparse matrices.
X = csr_matrix(Xdigits[:4])
BernoulliRBM().fit(X) # no exception
def test_small_sparse_partial_fit():
for sparse in [csc_matrix, csr_matrix]:
X_sparse = sparse(Xdigits[:100])
X = Xdigits[:100].copy()
rbm1 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm2 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm1.partial_fit(X_sparse)
rbm2.partial_fit(X)
assert_almost_equal(rbm1.score_samples(X).mean(),
rbm2.score_samples(X).mean(),
decimal=0)
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
def test_fit_gibbs():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
# from the same input
rng = np.random.RandomState(42)
X = np.array([[0.], [1.]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
# you need that much iters
rbm1.fit(X)
assert_almost_equal(rbm1.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm1.gibbs(X), X)
return rbm1
def test_fit_gibbs_sparse():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
# the same input even when the input is sparse, and test against non-sparse
rbm1 = test_fit_gibbs()
rng = np.random.RandomState(42)
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
rbm2.fit(X)
assert_almost_equal(rbm2.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
# Check if we don't get NaNs sampling the full digits dataset.
# Also check that sampling again will yield different results.
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=40,
n_iter=20, random_state=42)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
X_sampled2 = rbm1.gibbs(X)
assert_true(np.all((X_sampled != X_sampled2).max(axis=1)))
def test_score_samples():
# Test score_samples (pseudo-likelihood) method.
# Assert that pseudo-likelihood is computed without clipping.
# See Fabian's blog, http://bit.ly/1iYefRk
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2,
n_iter=10, random_state=rng)
rbm1.fit(X)
assert_true((rbm1.score_samples(X) < -300).all())
# Sparse vs. dense should not affect the output. Also test sparse input
# validation.
rbm1.random_state = 42
d_score = rbm1.score_samples(X)
rbm1.random_state = 42
s_score = rbm1.score_samples(lil_matrix(X))
assert_almost_equal(d_score, s_score)
# Test numerical stability (#2785): would previously generate infinities
# and crash with an exception.
with np.errstate(under='ignore'):
rbm1.score_samples(np.arange(1000) * 100)
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
def test_sparse_and_verbose():
# Make sure RBM works with sparse input when verbose=True
old_stdout = sys.stdout
sys.stdout = StringIO()
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
random_state=42, verbose=True)
try:
rbm.fit(X)
s = sys.stdout.getvalue()
# make sure output is sound
assert_true(re.match(r"\[BernoulliRBM\] Iteration 1,"
r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
r" time = (\d|\.)+s",
s))
finally:
sys.stdout = old_stdout
| bsd-3-clause |
passoir/trading-with-python | nautilus/nautilus.py | 77 | 5403 | '''
Created on 26 dec. 2011
Copyright: Jev Kuznetsov
License: BSD
'''
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from ib.ext.Contract import Contract
from ib.opt import ibConnection
from ib.ext.Order import Order
import tradingWithPython.lib.logger as logger
from tradingWithPython.lib.eventSystem import Sender, ExampleListener
import tradingWithPython.lib.qtpandas as qtpandas
import numpy as np
import pandas
priceTicks = {1:'bid',2:'ask',4:'last',6:'high',7:'low',9:'close', 14:'open'}
class PriceListener(qtpandas.DataFrameModel):
def __init__(self):
super(PriceListener,self).__init__()
self._header = ['position','bid','ask','last']
def addSymbol(self,symbol):
data = dict(zip(self._header,[0,np.nan,np.nan,np.nan]))
row = pandas.DataFrame(data, index = pandas.Index([symbol]))
self.df = self.df.append(row[self._header]) # append data and set correct column order
def priceHandler(self,sender,event,msg=None):
if msg['symbol'] not in self.df.index:
self.addSymbol(msg['symbol'])
if msg['type'] in self._header:
self.df.ix[msg['symbol'],msg['type']] = msg['price']
self.signalUpdate()
#print self.df
class Broker(Sender):
def __init__(self, name = "broker"):
super(Broker,self).__init__()
self.name = name
self.log = logger.getLogger(self.name)
self.log.debug('Initializing broker. Pandas version={0}'.format(pandas.__version__))
self.contracts = {} # a dict to keep track of subscribed contracts
self._id2symbol = {} # id-> symbol dict
self.tws = None
self._nextId = 1 # tws subscription id
self.nextValidOrderId = None
def connect(self):
""" connect to tws """
self.tws = ibConnection() # tws interface
self.tws.registerAll(self._defaultHandler)
self.tws.register(self._nextValidIdHandler,'NextValidId')
self.log.debug('Connecting to tws')
self.tws.connect()
self.tws.reqAccountUpdates(True,'')
self.tws.register(self._priceHandler,'TickPrice')
def subscribeStk(self,symbol, secType='STK', exchange='SMART',currency='USD'):
''' subscribe to stock data '''
self.log.debug('Subscribing to '+symbol)
c = Contract()
c.m_symbol = symbol
c.m_secType = secType
c.m_exchange = exchange
c.m_currency = currency
subId = self._nextId
self._nextId += 1
self.tws.reqMktData(subId,c,'',False)
self._id2symbol[subId] = c.m_symbol
self.contracts[symbol]=c
def disconnect(self):
self.tws.disconnect()
#------event handlers--------------------
def _defaultHandler(self,msg):
''' default message handler '''
#print msg.typeName
if msg.typeName == 'Error':
self.log.error(msg)
def _nextValidIdHandler(self,msg):
self.nextValidOrderId = msg.orderId
self.log.debug( 'Next valid order id:{0}'.format(self.nextValidOrderId))
def _priceHandler(self,msg):
#translate to meaningful messages
message = {'symbol':self._id2symbol[msg.tickerId],
'price':msg.price,
'type':priceTicks[msg.field]}
self.dispatch('price',message)
#-----------------GUI elements-------------------------
class TableView(QTableView):
""" extended table view """
def __init__(self,name='TableView1', parent=None):
super(TableView,self).__init__(parent)
self.name = name
self.setSelectionBehavior(QAbstractItemView.SelectRows)
def contextMenuEvent(self, event):
menu = QMenu(self)
Action = menu.addAction("print selected rows")
Action.triggered.connect(self.printName)
menu.exec_(event.globalPos())
def printName(self):
print "Action triggered from " + self.name
print 'Selected :'
for idx in self.selectionModel().selectedRows():
print self.model().df.ix[idx.row(),:]
class Form(QDialog):
def __init__(self,parent=None):
super(Form,self).__init__(parent)
self.broker = Broker()
self.price = PriceListener()
self.broker.connect()
symbols = ['SPY','XLE','QQQ','VXX','XIV']
for symbol in symbols:
self.broker.subscribeStk(symbol)
self.broker.register(self.price.priceHandler, 'price')
widget = TableView(parent=self)
widget.setModel(self.price)
widget.horizontalHeader().setResizeMode(QHeaderView.Stretch)
layout = QVBoxLayout()
layout.addWidget(widget)
self.setLayout(layout)
def __del__(self):
print 'Disconnecting.'
self.broker.disconnect()
if __name__=="__main__":
print "Running nautilus"
import sys
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_()
print "All done." | bsd-3-clause |
hlin117/statsmodels | examples/python/predict.py | 33 | 1580 |
## Prediction (out of sample)
from __future__ import print_function
import numpy as np
import statsmodels.api as sm
# ## Artificial data
nsample = 50
sig = 0.25
x1 = np.linspace(0, 20, nsample)
X = np.column_stack((x1, np.sin(x1), (x1-5)**2))
X = sm.add_constant(X)
beta = [5., 0.5, 0.5, -0.02]
y_true = np.dot(X, beta)
y = y_true + sig * np.random.normal(size=nsample)
# ## Estimation
olsmod = sm.OLS(y, X)
olsres = olsmod.fit()
print(olsres.summary())
# ## In-sample prediction
ypred = olsres.predict(X)
print(ypred)
# ## Create a new sample of explanatory variables Xnew, predict and plot
x1n = np.linspace(20.5,25, 10)
Xnew = np.column_stack((x1n, np.sin(x1n), (x1n-5)**2))
Xnew = sm.add_constant(Xnew)
ynewpred = olsres.predict(Xnew) # predict out of sample
print(ynewpred)
# ## Plot comparison
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.plot(x1, y, 'o', label="Data")
ax.plot(x1, y_true, 'b-', label="True")
ax.plot(np.hstack((x1, x1n)), np.hstack((ypred, ynewpred)), 'r', label="OLS prediction")
ax.legend(loc="best");
### Predicting with Formulas
# Using formulas can make both estimation and prediction a lot easier
from statsmodels.formula.api import ols
data = {"x1" : x1, "y" : y}
res = ols("y ~ x1 + np.sin(x1) + I((x1-5)**2)", data=data).fit()
# We use the `I` to indicate use of the Identity transform. Ie., we don't want any expansion magic from using `**2`
res.params
# Now we only have to pass the single variable and we get the transformed right-hand side variables automatically
res.predict(exog=dict(x1=x1n))
| bsd-3-clause |
yaukwankiu/armor | patternMatching/mark4.py | 1 | 12523 | """
mark4.py
switched to wepsFolder - the folder containing all forecasts made at different startTimes
fixed wrfPathList problem
ALGORITHM:
moment-normalised correlation
USE:
cd [.. FILL IN YOUR ROOT DIRECTORY HERE ..]/ARMOR/python/
python
from armor.patternMatching import mark4
reload(mark4)
    x=mark4.main(verbose=True, saveImage=False, display=False) #<-- set saveImage=True to save the comparison charts; False saves space
    x=mark4.main(verbose=True, saveImage=False, key2="e03", display=False) #<-- set saveImage=True to save the comparison charts; False saves space
"""
# 0. imports
# 1. defining the parameters
# 2. reading the data
# 3. processing
# 4. output
# 0. imports
import time, datetime, os, re
import numpy as np
import matplotlib.pyplot as plt
from armor import defaultParameters as dp
from armor import pattern
from armor.geometry import transformedCorrelations as tr
# 1. defining the parameters
root = dp.defaultRootFolder
radarFolder = root + 'data/1may2014/RADARCV/'
radarPath = root + "data/1may2014/RADARCV/COMPREF.20140501.1200.0p03.bin"
wepsFolder = root + "data/1may2014/WEPS/" # folder for all forecasts made at various times
wrfFolder = root + "data/1may2014/WEPS/201405010000/"
#outputFolder = root + "data/1may2014/"
outputFolder = root+ "labLogs2/patternMatching/"
numberOfFramesPerModel = 25
wrfHeight = 201
wrfWidth = 183
lowerLeft = (20.5, 118.0)
upperRight= (26.5, 123.46)
# 1a. setting up
if not os.path.exists(outputFolder):
os.makedirs(outputFolder)
wrfPathList = os.listdir(wrfFolder)
wrfPathList = [wrfFolder+v+"/" for v in wrfPathList if ".dat" in v and "wrf" in v] #trimming
dbz = pattern.DBZ
# 1a1.do this once only
"""
from armor.taiwanReliefData import convertToGrid as cg
y=cg.main(files=['100','1000','2000','3000', 'Coast'], width=wrfWidth-1, height=wrfHeight-1,
lowerLeft=(lowerLeft[1], lowerLeft[0]), # some peculiarities in early codes
upperRight=(upperRight[1], upperRight[0]),
folder=root+"python/armor/taiwanReliefData/",
suffix=".DAT",
#suffix=".txt",
outputFolder=radarFolder, dilation=0)
y2=cg.main(files=['100','1000','2000','3000', 'Coast'], width=wrfWidth-1, height=wrfHeight-1,
lowerLeft=(lowerLeft[1], lowerLeft[0]), # some peculiarities in early codes
upperRight=(upperRight[1], upperRight[0]),
folder=root+"python/armor/taiwanReliefData/",
suffix=".DAT",
#suffix=".txt",
outputFolder=wrfFolder, dilation=0)
"""
# 1b. test
a0 = dbz(dataPath=radarPath,
lowerLeftCornerLatitudeLongitude =lowerLeft,
upperRightCornerLatitudeLongitude =upperRight,
coastDataPath=radarFolder+"taiwanCoast.dat", )
a0.name = a0.dataPath.split('/')[-1][8:21]
dT = re.findall(r'\d\d\d\d', radarPath.split('/')[-1])
a0.dataTime = dT[0] + dT[1] + '.' + dT[2]
## defining the functions
# get all wrf folders
def getRadarDBZ(radarPath=radarPath):
aName = radarPath.split('/')[-1][8:21]
dT = re.findall(r'\d\d\d\d', radarPath.split('/')[-1])
dT = dT[0] + dT[1] + '.' + dT[2]
a = dbz(name=aName,
dataPath=radarPath,
dataTime=dT,
lowerLeftCornerLatitudeLongitude =lowerLeft,
upperRightCornerLatitudeLongitude =upperRight,
coastDataPath=radarFolder+"taiwanCoast.dat", )
return a
def getWrfFolders(wepsFolder=wepsFolder, a="", key1="", maxTimeDiff=6., reportLength=72):
wrfFolders = os.listdir(wepsFolder)
if a=="":
maxDataTime="99999999.9999"
minDataTime="00000000.0000"
else:
maxDataTime = a.getDataTime(a.datetime()-datetime.timedelta(1.*maxTimeDiff/24))
maxDataTime = maxDataTime[:8] + maxDataTime[9:]
minDataTime = a.getDataTime(a.datetime()-datetime.timedelta(1.*reportLength/24)) #2014-06-04
minDataTime = minDataTime[:8] + minDataTime[9:]
wrfFolders = [wepsFolder+v+"/" for v in wrfFolders if v<maxDataTime and v>=minDataTime]
wrfFolders = [v for v in wrfFolders if key1 in v]
return wrfFolders
def getWrfFrameIndices(a=a0, wrfFolder=wrfFolder, maxTimeDiff=6, timeInterval=3, verbose=False):
# 2014-06-01
# to get the frame indices in a list from a.dataTime and startTime as recorded in the wrfFolder
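    # Worked example: for a radar frame at 20140501.1200 and a WRF run started at
    # 201405010000, hoursDiff = 12; with maxTimeDiff=6 and timeInterval=3 this
    # gives startIndex = ceil((12-6)/3) = 2 and endIndex = floor((12+6)/3) = 6,
    # i.e. the 3-hourly frames covering forecast hours 6 to 18.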
minDataTime = a.datetime(dh=-maxTimeDiff)
maxDataTime = a.datetime(dh= maxTimeDiff)
wrfStartTime = wrfFolder.split('/')[-2]
wrfStartTime = wrfStartTime[:8] + '.' + wrfStartTime[8:]
wrfStartTime = a.datetime(wrfStartTime)
timeDiff = a.datetime() - wrfStartTime
hoursDiff = (timeDiff.days * 86400 + timeDiff.seconds)/3600
#return hoursDiff
startIndex = int(np.ceil (1.*(hoursDiff-maxTimeDiff)/timeInterval))
endIndex = int(np.floor(1.*(hoursDiff+maxTimeDiff)/timeInterval))
if verbose:
print "wrfStartTime, timeDiff, hoursDiff"#debug
print wrfStartTime #debug
print timeDiff
print hoursDiff#debug
print "startIndex, endIndex"
print startIndex, endIndex
return startIndex, endIndex
# reading the data
def read1Wrf(wrfPath=wrfPathList[0], rawReturn=False):
wrfData = dbz(dataPath=wrfPath)
wrfData.load(height=wrfHeight*numberOfFramesPerModel*2, width=wrfWidth)
if rawReturn:
return wrfData
modelLabel = wrfData.dataPath[-6:-4]
wrfFrames = []
for i in range(numberOfFramesPerModel):
#for i in range(numberOfFramesPerModel*2):
w = dbz(name="WRF"+ modelLabel + "_T" + str(i), # model count starts from 1
dataTime="NoneGiven",
outputPath ="",imagePath="",
coordinateOrigin="default",
coastDataPath=wrfFolder+"taiwanCoast.dat",
lowerLeftCornerLatitudeLongitude =lowerLeft,
upperRightCornerLatitudeLongitude =upperRight,
)
w.matrix = wrfData.matrix[(i*2)*wrfHeight:(i*2+1)*wrfHeight, :]
wrfFrames.append(w)
return wrfFrames
# scoring key lines
def getScore(a, b):
#just a wrapper
return a.gaussianCorr(b, sigma=20, thres=0, showImage=False, saveImage=False, outputFolder='')
# 3. processing
# c. compare the matching and record
def matching(a=a0, wepsFolder=wepsFolder, thres=0, maxTimeDiff=6, timeInterval=3, key1="", key2="",
verbose=False, display=False, saveImage=False):
count = 0
a.load()
a.truncate(thres, newObject=False)
if verbose:
print "================================="
print 'name, dataTime:'
print a.name
print a.dataTime
# debug ################################################################
if display:
#a.show()
#time.sleep(1)
tr.showArrayWithAxes(a)
if saveImage:
plt.close()
tr.showArrayWithAxes(a, display=False, outputPath=outputFolder+ str(int(time.time()))+a.name+ ".png")
# end debug ############################################################
scores = []
wrfFolders = getWrfFolders(wepsFolder, a=a, maxTimeDiff=maxTimeDiff, key1=key1)
for wrfFolder in wrfFolders:
wrfPathList=os.listdir(wrfFolder)
wrfPathList = [wrfFolder+v for v in wrfPathList if ".dat" in v and "wrf" in v] #trimming
wrfPathList = [v for v in wrfPathList if key2 in v] #trimming
wrfPathList.sort()
if verbose:
print "key2:", key2
print "wrfPathList:"
print '\n'.join([str(v) for v in wrfPathList])
if wrfPathList == []:
continue
for wrfPath in wrfPathList:
# read the data one-by-one + split the data files
wrfFrames = read1Wrf(wrfPath=wrfPath)
startIndex, endIndex = getWrfFrameIndices(a=a, wrfFolder=wrfFolder,
maxTimeDiff=maxTimeDiff, timeInterval=timeInterval,
verbose=verbose)
startIndex = max(0, startIndex)
endIndex = min(numberOfFramesPerModel-1, endIndex) # fixed 2014-06-11
if verbose:
print "================================="
print wrfPath, "start and end indices:", startIndex, endIndex
for w in wrfFrames[startIndex: endIndex+1]:
w.truncate(thres, newObject=False)
# debug ################################################################
if saveImage:
plt.close()
tr.showArrayWithAxes(w, display=False,outputPath=outputFolder+ str(int(time.time()))+w.name+ ".png")
if display:
#w.show()
#time.sleep(1)
plt.close()
tr.showArrayWithAxes(w)
# end debug ############################################################
if verbose:
print count, a.name, "v", w.name, ":",
count +=1
score = getScore(a, w) # key line
if verbose:
print score
scores.append({'radar':a.name,
'score': score,
'wrfFolder': wrfFolder.split('/')[-2],
'wrf': w.name,
})
#ordering the results
scores.sort(key=lambda v:v['score'], reverse=True)
return scores
# result checking
def get1frame(model="02",T=6, wrfFolder=wrfFolder):
T = int(T)
if isinstance(model, int):
model = ("0"+str(model))[-2:]
fileName = [v for v in os.listdir(wrfFolder) if 'e'+model in v][0]
wrfFrames = read1Wrf(wrfPath=fileName)
w = wrfFrames[T]
return w
def getOutputStrings(scores, timeInterval=3, verbose=True):
# timeInterval = time interval between successive times of forecast within a WRF
outputStrings = ["# model no., time forecast made, time of forecast, delta time, score\n"]
for scoreRecord in scores:
# from
# {'wrf': 'WRF02_T6', 'radar': '20140501.1200', 'score': 0.50716670859396218, 'wrfFolder': '201405010000'} ,
# to
# # model no., time forecast made, time of forecast, delta time, score
#' 22 20140312_1200 20140312_2100 9 0.9698248506'
# 1. get the info
modelNo, deltaTime = re.findall(r'\d+', scoreRecord['wrf'])
timeForecastMade = scoreRecord['wrfFolder']
score = scoreRecord['score']
# 2. convert the format
modelNo = (" "+modelNo)[-4:]
deltaTime = timeInterval * int(deltaTime)
timeOfForecast = a0.datetime(timeForecastMade) + datetime.timedelta(1./24*deltaTime) # a0 = global object
timeOfForecast = " " +str(timeOfForecast.year) + ('0'+str(timeOfForecast.month))[-2:] +\
('0'+str(timeOfForecast.day ))[-2:] +\
"_" +('0'+str(timeOfForecast.hour))[-2:] + ('0'+str(timeOfForecast.minute))[-2:]
timeForecastMade= " " + timeForecastMade.replace(".","_")
deltaTime = (" "+str(deltaTime))[-4:]
score = " " + str(score)
outputLine = modelNo + timeForecastMade + timeOfForecast + deltaTime + score
outputStrings.append(outputLine)
if verbose:
print '\n'.join(outputStrings[:10])
return outputStrings
# 4. output the final result
# test run
def main(radarPath=radarPath, wepsFolder=wepsFolder, key1="", key2="", **kwargs):
time0=time.time()
a = getRadarDBZ(radarPath)
print "\n==============================================================="
print "comparing", a.name, a.dataTime
print "to", wepsFolder
scores = matching(a, wepsFolder,key1=key1, key2=key2, **kwargs)
outputPath = outputFolder + str(int(time.time())) + "matchingOutput_" + a.name + ".txt"
outputStrings = getOutputStrings(scores, timeInterval=3)
print "\n========\nTop 10 matches"
print "\n".join([str(v) for v in outputStrings[:10]])
print "writing to file: ", outputPath
open(outputPath,'w').write("\n".join([str(v) for v in outputStrings]))
print '\nTime spent:', time.time()-time0, 'seconds'
return scores
| cc0-1.0 |
salilab/evaluation | backend/evaluation/score_modeller.py | 1 | 4686 | from argparse import ArgumentParser
import sys
import itertools
from modeller import log, Environ, Selection
from modeller.scripts import complete_pdb
import matplotlib
# Force matplotlib (and pylab) to not use any X backend
# (we don't have a display).
matplotlib.use('Agg')
import pylab # noqa: E402
def get_profile(profile_file):
"""Read `profile_file` into a Python array."""
vals = []
with open(profile_file) as f:
for line in f:
if not line.startswith('#') and len(line) > 10:
spl = line.split()
vals.append(float(spl[-1]))
return vals
"""Runs all ModPipe Model Evaluation routines (dope,zdope,ga341)
"""
def get_options():
"""Parse command-line options"""
p = ArgumentParser(description="""
This script runs Modeller to retrieve z-dope, and ga341.
Run `%(prog)s -h` for help information
""")
p.add_argument("--model", type=str, metavar="FILE", required=True,
help="""Path and Filename of models file (PDB format)""")
p.add_argument("--seq_ident", type=float, default=None, metavar='PCT',
help="""Sequence Identity to Template PDB File.
If no sequence identity is given, either here or in the model file,
only the z-dope score will be computed.""")
return p.parse_args()
def main():
opts = get_options()
fh = open("modeller.results", "w")
fhxml = open("modeller.results.xml", "w")
print("modelfile "+str(opts))
log.minimal()
env = Environ()
env.libs.topology.read(file='$(LIB)/top_heav.lib')
env.libs.parameters.read(file='$(LIB)/par.lib')
try:
mdl = complete_pdb(env, opts.model, transfer_res_num=True)
except Exception:
print("Error in Modelfile: Not a valid PDB file\n", file=fh)
print(" <modeller_results>\n"
" <type>Error in Modelfile: Not a valid PDB file</type>\n"
" </modeller_results>", file=fhxml)
sys.exit("Error in Modelfile: Not a valid PDB file")
colors = ["green", "red", "blue", "purple"]
for c, color in zip(mdl.chains, itertools.cycle(colors)):
(c.name, len(c.residues))
selected_chain = complete_pdb(
env, opts.model, model_segment=('FIRST:'+c.name, 'LAST:'+c.name))
if not c.name:
c.name = "A"
z_dope_score = selected_chain.assess_normalized_dope()
s = Selection(selected_chain)
s.assess_dope(output='ENERGY_PROFILE NO_REPORT',
file='input.profile_'+c.name,
normalize_profile=True, smoothing_window=15)
profile = get_profile('input.profile_'+c.name)
try:
pylab.figure(1, figsize=(10, 6))
pylab.xlabel('Alignment position')
pylab.ylabel('DOPE per-residue score')
pylab.plot(profile, color=color, linewidth=2,
label='Chain '+c.name)
pylab.legend()
pylab.savefig('dope_profile.png', dpi=65)
pylab.savefig('dope_profile.svg')
except Exception:
pass
try:
(ga341, compactness, e_native_pair, e_native_surf, e_native_comb,
z_pair, z_surf, z_comb) = selected_chain.assess_ga341()
except ValueError:
# Provide sequence identity if GA341 needs it
if opts.seq_ident is None:
continue
selected_chain.seq_id = opts.seq_ident
(ga341, compactness, e_native_pair, e_native_surf, e_native_comb,
z_pair, z_surf, z_comb) = selected_chain.assess_ga341()
print("%s SeqIdent %f\n\n%s ZDOPE %f\n\n%s GA341 %f\n%s Z-PAIR %f\n"
"%s Z-SURF %f\n%s Z-COMBI %f\n%s Compactness %f\n"
% (c.name, selected_chain.seq_id, c.name, z_dope_score, c.name,
ga341, c.name, z_pair, c.name, z_surf, c.name, z_comb, c.name,
compactness), file=fh)
print(" <modeller_results>\n"
" <model>%s</model>\n"
" <chain>%s</chain>\n"
" <sequence_identity>%f</sequence_identity>\n"
" <zdope>%f</zdope>\n"
" <ga341>%f</ga341>\n"
" <z_pair>%f</z_pair>\n"
" <z_surf>%f</z_surf>\n"
" <z_comb>%f</z_comb>\n"
" <compactness>%f</compactness>\n"
" </modeller_results>\n"
% (opts.model, c.name, selected_chain.seq_id, z_dope_score,
ga341, z_pair, z_surf, z_comb, compactness), file=fhxml)
fh.close()
fhxml.close()
if __name__ == '__main__':
main()
| lgpl-2.1 |
anne-urai/RT_RDK | graphicalModels/examples/galex.py | 7 | 2540 | """
The GALEX Photon Catalog
========================
This is the Hogg \& Schiminovich model for how photons turn into
counts in the GALEX satellite data stream. Note the use of relative
positioning.
"""
from matplotlib import rc
rc("font", family="serif", size=12)
rc("text", usetex=True)
import daft
pgm = daft.PGM([5.4, 5.4], origin=[1.2, 1.2])
wide = 1.5
verywide = 1.5 * wide
dy = 0.75
# electrons
el_x, el_y = 2., 2.
pgm.add_plate(daft.Plate([el_x - 0.6, el_y - 0.6, 2.2, 2 * dy + 0.3], label="electrons $i$"))
pgm.add_node(daft.Node("xabc", r"xa$_i$,xabc$_i$,ya$_i$,\textit{etc}", el_x + 0.5, el_y + 0 * dy, aspect=2.3 * wide, observed=True))
pgm.add_node(daft.Node("xyti", r"$x_i,y_i,t_i$", el_x + 1., el_y + 1 * dy, aspect=wide))
pgm.add_edge("xyti", "xabc")
# intensity fields
ph_x, ph_y = el_x + 2.5, el_y + 3 * dy
pgm.add_node(daft.Node("Ixyt", r"$I_{\nu}(x,y,t)$", ph_x, ph_y, aspect=verywide))
pgm.add_edge("Ixyt", "xyti")
pgm.add_node(daft.Node("Ixnt", r"$I_{\nu}(\xi,\eta,t)$", ph_x, ph_y + 1 * dy, aspect=verywide))
pgm.add_edge("Ixnt", "Ixyt")
pgm.add_node(daft.Node("Iadt", r"$I_{\nu}(\alpha,\delta,t)$", ph_x, ph_y + 2 * dy, aspect=verywide))
pgm.add_edge("Iadt", "Ixnt")
# s/c
sc_x, sc_y = ph_x + 1.5, ph_y - 1.5 * dy
pgm.add_node(daft.Node("dark", r"dark", sc_x, sc_y - 1 * dy, aspect=wide))
pgm.add_edge("dark", "xyti")
pgm.add_node(daft.Node("flat", r"flat", sc_x, sc_y, aspect=wide))
pgm.add_edge("flat", "xyti")
pgm.add_node(daft.Node("att", r"att", sc_x, sc_y + 3 * dy))
pgm.add_edge("att", "Ixnt")
pgm.add_node(daft.Node("optics", r"optics", sc_x, sc_y + 2 * dy, aspect=wide))
pgm.add_edge("optics", "Ixyt")
pgm.add_node(daft.Node("psf", r"psf", sc_x, sc_y + 1 * dy))
pgm.add_edge("psf", "xyti")
pgm.add_node(daft.Node("fee", r"f.e.e.", sc_x, sc_y - 2 * dy, aspect=wide))
pgm.add_edge("fee", "xabc")
# sky
pgm.add_node(daft.Node("sky", r"sky", sc_x, sc_y + 4 * dy))
pgm.add_edge("sky", "Iadt")
# stars
star_x, star_y = el_x, el_y + 4 * dy
pgm.add_plate(daft.Plate([star_x - 0.6, star_y - 0.6, 2.2, 2 * dy + 0.3], label="stars $n$"))
pgm.add_node(daft.Node("star adt", r"$I_{\nu,n}(\alpha,\delta,t)$", star_x + 0.5, star_y + 1 * dy, aspect=verywide))
pgm.add_edge("star adt", "Iadt")
pgm.add_node(daft.Node("star L", r"$L_{\nu,n}(t)$", star_x + 1, star_y, aspect=wide))
pgm.add_edge("star L", "star adt")
pgm.add_node(daft.Node("star pos", r"$\vec{x_n}$", star_x, star_y))
pgm.add_edge("star pos", "star adt")
# done
pgm.render()
pgm.figure.savefig("galex.pdf")
pgm.figure.savefig("galex.png", dpi=150)
| mit |
bfalacerda/strands_executive | task_executor/src/task_executor/routine_analyser.py | 2 | 15847 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import rospy
import mongodb_store_msgs.srv as dc_srv
import mongodb_store.util as dc_util
from mongodb_store.message_store import MessageStoreProxy
from strands_executive_msgs.msg import Task, TaskEvent
from datetime import datetime, timedelta, time, date
from task_executor import task_routine, task_query
from task_executor.utils import rostime_to_python, python_to_rostime
from task_executor.task_query import task_groups_in_window, daily_windows_in_range
import pytz
from dateutil.relativedelta import *
import matplotlib.patches as mpatches
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib as mpl
import argparse
import cmd
class RoutineAnalyser(cmd.Cmd):
def __init__(self, msg_store, routine_pairs, tz, daily_start = None, daily_end = None, days_off = []):
cmd.Cmd.__init__(self)
# super(RoutineAnalyser, self).__init__()
self.routine_pairs = routine_pairs
self.msg_store = msg_store
self.daily_start = daily_start if daily_start is not None else time(0,0)
self.daily_end = daily_end if daily_end is not None else time(23,59)
# hand code for now
self.days_off = days_off
self.tz = tz
self.colour_mappings = dict()
def check_idx(self, idx):
if idx < 0 or idx >= len(self.routine_pairs):
            print 'idx must be at least 0 and less than %s' % len(self.routine_pairs)
return False
else:
return True
def do_merge(self, idx):
try:
if idx == 'all':
self.routine_pairs = [(self.routine_pairs[0][0], self.routine_pairs[-1][1])]
else:
idx = int(idx)
if idx >= 0 and idx < len(self.routine_pairs) - 1:
print 'merging %s into %s' % (idx, idx+1)
self.routine_pairs[idx+1] = (self.routine_pairs[idx][0], self.routine_pairs[idx+1][1])
del self.routine_pairs[idx]
else:
print 'invalid routine index, valid range from 0 to %s' % (len(self.routine_pairs) - 2)
except ValueError, e:
print 'provided argument was not an int: %s' % idx
def do_print(self, line):
for i in range(len(self.routine_pairs)):
start = rostime_to_python(self.routine_pairs[i][0].time, self.tz)
end = rostime_to_python(self.routine_pairs[i][1].time, self.tz)
results = task_query.query_tasks(self.msg_store,
start_date=start,
end_date=end,
event=[TaskEvent.TASK_STARTED]
)
print 'routine %s: %s to %s, duration: %s, tasks: %s' % (i, start, end, end-start, len(results))
def do_summarise(self, idx):
try:
idx = int(idx)
if not self.check_idx(idx):
return
window_start = rostime_to_python(self.routine_pairs[idx][0].time, self.tz)
window_end = rostime_to_python(self.routine_pairs[idx][1].time, self.tz)
results = task_query.query_tasks(self.msg_store,
event=range(TaskEvent.TASK_STARTED, TaskEvent.ROUTINE_STARTED),
start_date=window_start,
end_date=window_end,
)
task_query.aggregate(results)
except ValueError, e:
print 'provided argument was not an int: %s' % idx
def all_task_events_in_window(self, window_start, window_end, action = None):
return task_query.query_tasks(self.msg_store,
event=range(TaskEvent.TASK_STARTED, TaskEvent.ROUTINE_STARTED),
action=action,
start_date=window_start,
end_date=window_end,
)
def autonomy_durations(self, daily_start, daily_end):
autonomy_duration = task_query.autonomy_time(daily_start, daily_end, self.msg_store)
day_duration = daily_end - daily_start
return autonomy_duration, day_duration
def autonomy_day(self, daily_start, daily_end):
day = daily_start.strftime("%A")
date = daily_start.date()
autonomy_duration, day_duration = self.autonomy_durations(daily_start, daily_end)
autonomy_percentage = (autonomy_duration.total_seconds() / day_duration.total_seconds()) * 100.0
print '%s: %s from %s -> %s' % (daily_start.date(), autonomy_duration, day_duration, autonomy_percentage)
return autonomy_duration, day_duration
def to_hours_minutes_seconds(self, s):
hours, remainder = divmod(s, 3600)
minutes, seconds = divmod(remainder, 60)
return hours, minutes, seconds
def do_autonomy(self, idx):
try:
idx = int(idx)
if not self.check_idx(idx):
return
window_start = rostime_to_python(self.routine_pairs[idx][0].time, self.tz)
window_end = rostime_to_python(self.routine_pairs[idx][1].time, self.tz)
total_autonomy_duration = timedelta(seconds=0)
total_life_duration = timedelta(seconds=1)
for daily_start, daily_end in daily_windows_in_range(self.daily_start, self.daily_end, window_start, window_end, self.days_off):
autonomy_duration, day_duration = self.autonomy_day(daily_start, daily_end)
total_autonomy_duration += autonomy_duration
total_life_duration += day_duration
daily_start = datetime.combine((daily_start + timedelta(days=1)).date(), self.daily_start)
daily_end = datetime.combine((daily_end + timedelta(days=1)).date(), self.daily_end)
total_autonomy_percentage = (total_autonomy_duration.total_seconds() / total_life_duration.total_seconds()) * 100.0
tl_h, tl_m, tl_s = self.to_hours_minutes_seconds(total_life_duration.total_seconds())
ta_h, ta_m, ta_s = self.to_hours_minutes_seconds(total_autonomy_duration.total_seconds())
print 'Total life time: %s hours, %s minutes' % (tl_h, tl_m)
print 'Total autonomy: %s hours, %s minutes' % (ta_h, ta_m)
print 'A: %s' % total_autonomy_percentage
except ValueError, e:
print 'provided argument was not an int: %s' % idx
def do_timeplot(self, line):
try:
tokens = line.split(' ')
idx = tokens[0]
filename = tokens[1]
idx = int(idx)
if not self.check_idx(idx):
return
window_start = rostime_to_python(self.routine_pairs[idx][0].time, self.tz)
window_end = rostime_to_python(self.routine_pairs[idx][1].time, self.tz)
# get all the task starts
results = task_query.query_tasks(self.msg_store,
event=TaskEvent.TASK_STARTED,
start_date=window_start,
end_date=window_end,
)
# convert to an array of times
dates = [rostime_to_python(event[0].time, self.tz) for event in results]
with PdfPages('{0}_time_plot.pdf'.format(filename)) as pdf:
n, bins, patches = plt.hist([date.hour + date.minute/60.0 for date in dates], bins = 24*60/15)
# plt.show()
pdf.savefig()
plt.close()
except ValueError, e:
print 'provided argument was not an int: %s' % idx
def do_lateness(self, idx):
""" Not recommended for use...
"""
try:
idx = int(idx)
if not self.check_idx(idx):
return
window_start = rostime_to_python(self.routine_pairs[idx][0].time, self.tz)
window_end = rostime_to_python(self.routine_pairs[idx][1].time, self.tz)
means = []
stddev = []
count = []
for daily_start, daily_end in daily_windows_in_range(self.daily_start, self.daily_end, window_start, window_end):
day_errors = np.array([((task_group[2].task.execution_time - task_group[2].time).to_sec()/(task_group[2].time - task_group[1].time).to_sec()) for task_group in task_groups_in_window(daily_start, daily_end, self.msg_store, event=[TaskEvent.ADDED, TaskEvent.NAVIGATION_STARTED, TaskEvent.NAVIGATION_SUCCEEDED])])
if len(day_errors) > 5:
means.append(day_errors.mean())
stddev.append(day_errors.std())
count.append(len(means))
plt.errorbar(count, means, stddev)
plt.show()
except ValueError, e:
print 'provided argument was not an int: %s' % idx
def draw_task(self, y, action, start_time, end_time):
        # start and end times are ros times; convert them to seconds since that day's midnight for plotting
start_midnight = datetime.fromordinal(rostime_to_python(start_time, self.tz).date().toordinal())
start_midnight = start_midnight.replace(tzinfo=self.tz)
start = (rostime_to_python(start_time, self.tz) - start_midnight).total_seconds()
end = (rostime_to_python(end_time, self.tz) - start_midnight).total_seconds()
label = None
if action not in self.colour_mappings:
colour_map = plt.get_cmap('Paired')
self.colour_mappings[action] = colour_map(len(self.colour_mappings) * 30)
label = action
plt.hlines(y, start, end, self.colour_mappings[action], lw=6, label=label)
def do_days(self, idx):
try:
idx = int(idx)
if not self.check_idx(idx):
return
window_start = rostime_to_python(self.routine_pairs[idx][0].time, self.tz)
window_end = rostime_to_python(self.routine_pairs[idx][1].time, self.tz)
working_day_count = 0
for daily_start, daily_end in daily_windows_in_range(self.daily_start, self.daily_end, window_start, window_end, self.days_off):
working_day_count += 1
all_day_count = 0
for daily_start, daily_end in daily_windows_in_range(self.daily_start, self.daily_end, window_start, window_end):
all_day_count += 1
print 'This routine covered {0} working days from a total of {1}'.format(working_day_count, all_day_count)
except ValueError, e:
print 'provided argument was not an int: %s' % idx
def do_taskplot(self, line):
try:
tokens = line.split(' ')
idx = tokens[0]
filename = tokens[1]
idx = int(idx)
if not self.check_idx(idx):
return
window_start = rostime_to_python(self.routine_pairs[idx][0].time, self.tz)
window_end = rostime_to_python(self.routine_pairs[idx][1].time, self.tz)
daily_tasks = []
for daily_start, daily_end in daily_windows_in_range(self.daily_start, self.daily_end, window_start, window_end, self.days_off):
succeeded_tasks = np.array([(task_group[0].task.action, task_group[0].time, task_group[1].time) for task_group in task_groups_in_window(daily_start, daily_end, self.msg_store, event=[TaskEvent.TASK_STARTED, TaskEvent.TASK_SUCCEEDED])], dtype=object)
failed_tasks = np.array([(task_group[0].task.action, task_group[0].time, task_group[1].time) for task_group in task_groups_in_window(daily_start, daily_end, self.msg_store, event=[TaskEvent.TASK_STARTED, TaskEvent.TASK_FAILED])], dtype=object)
# print len(succeeded_tasks)
# print len(failed_tasks)
# can't concatenate 0 length np.arrays
if len(succeeded_tasks) == 0:
all_tasks = failed_tasks
elif len(failed_tasks) == 0:
# print succeeded_tasks
all_tasks = succeeded_tasks
else:
all_tasks = np.concatenate((succeeded_tasks,failed_tasks), axis=0)
print daily_start.date(), len(all_tasks)
daily_tasks.append([daily_start.date(), all_tasks])
with PdfPages('{0}_task_plot.pdf'.format(filename)) as pdf:
y_sep = 6
y = 0
mpl.rcParams['font.size'] = 6
#
y_label_points = []
y_labels = []
for task_date, task_times in reversed(daily_tasks):
y += y_sep
y_label_points.append(y)
y_labels.append(task_date.strftime('%A, %B %d %Y'))
for task_time in task_times:
self.draw_task(y, task_time[0], task_time[1], task_time[2])
plt.ylim(0, y + y_sep)
x_label_points = []
x_labels = []
for hour in range(self.daily_start.hour-1, self.daily_end.hour+1):
seconds_per_hour = 60 * 60
x_label_points.append(hour * seconds_per_hour)
x_labels.append('%s:00' % hour)
plt.xticks(x_label_points, x_labels, rotation='vertical')
plt.yticks(y_label_points, y_labels, rotation='horizontal')
lgd = plt.legend(loc='lower right', bbox_to_anchor=(1,1), ncol=2, prop={'size': 8})
# plt.gcf().tight_layout()
pdf.savefig(bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.close()
except ValueError, e:
print 'provided argument was not an int: %s' % idx
def do_executions(self, line):
try:
tokens = line.split(' ')
idx = tokens[0]
if len(tokens) > 1 and len(tokens[1]) > 0:
action = tokens[1]
print 'only showing action: %s' % action
else:
action = None
idx = int(idx)
if not self.check_idx(idx):
return
print 'executions in routine %s' % idx
window_start = rostime_to_python(self.routine_pairs[idx][0].time, self.tz)
window_end = rostime_to_python(self.routine_pairs[idx][1].time, self.tz)
results = task_query.query_tasks(self.msg_store,
event=range(TaskEvent.TASK_STARTED, TaskEvent.ROUTINE_STARTED),
action=action,
start_date=window_start,
end_date=window_end,
)
task_query.executions(results)
except ValueError, e:
print 'provided argument was not an int: %s' % idx
print e
def help_print(self):
print '\n'.join([ 'print', 'Print the available routines'])
def help_merge(self):
print '\n'.join([ 'merge [idx | all]', 'Merge routine idx (int) into the routine after it. If idx is the string all, then merge all routines together.'])
def help_executions(self):
print '\n'.join([ 'executions [idx]', 'Show all task executions in routine idx (int).'])
def help_summarise(self):
print '\n'.join([ 'summarise [idx]', 'Summarise task executions in routine idx (int).'])
def help_autonomy(self):
print '\n'.join([ 'autonomy [idx]', 'Print autonomy percentage for routine idx (int).'])
def do_EOF(self, line):
return True
def help_EOF(self):
print 'Exit (CTRL-D)'
| mit |
JohannesUIBK/oggm | oggm/sandbox/itmix/itmix.py | 2 | 25792 | from __future__ import absolute_import, division
import glob
import logging
# Built ins
import os
from shutil import copyfile
import matplotlib.pyplot as plt
import netCDF4
import numpy as np
import pandas as pd
import rasterio
# External libs
import salem
import shapely.geometry as shpg
from osgeo import gdal
from salem.datasets import EsriITMIX
from scipy import optimize as optimization
from scipy.interpolate import griddata
import oggm.cfg as cfg
# Locals
from oggm import entity_task
from oggm import utils
from oggm.core.preprocessing.gis import gaussian_blur, _mask_per_divide
from oggm.core.preprocessing.inversion import mass_conservation_inversion
from oggm.sandbox.itmix.itmix_cfg import DATA_DIR, ITMIX_ODIR
# Module logger
log = logging.getLogger(__name__)
# Globals
SEARCHD = os.path.join(DATA_DIR, 'itmix', 'glaciers_sorted')
LABEL_STRUCT = np.ones((3, 3))
def find_path(start_dir, pattern, allow_more=False):
"""Find a file in a dir and subdir"""
files = []
for dir, _, _ in os.walk(start_dir):
files.extend(glob.glob(os.path.join(dir,pattern)))
if allow_more:
assert len(files) > 0
return files
else:
assert len(files) == 1
return files[0]
def get_rgi_df(reset=False):
"""This function prepares a kind of `fake` RGI file, with the updated
geometries for ITMIX.
"""
# This makes an RGI dataframe with all ITMIX + WGMS + GTD glaciers
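    # In outline: (1) ITMIX glaciers, with the RGI outlines replaced by the ITMIX
    # shapefiles where available and ice-cap parts merged into one entity (the
    # parts are kept separately as 'divides'); (2) WGMS glaciers not already
    # covered; (3) GlaThiDa glaciers not already covered. The Name field is
    # prefixed with I:/W:/G: (plus W-/G- markers) to record which data sources
    # are available for each glacier.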
RGI_DIR = utils.get_rgi_dir()
df_rgi_file = os.path.join(DATA_DIR, 'itmix', 'itmix_rgi_shp.pkl')
if os.path.exists(df_rgi_file) and not reset:
rgidf = pd.read_pickle(df_rgi_file)
else:
linkf = os.path.join(DATA_DIR, 'itmix', 'itmix_rgi_links.pkl')
df_itmix = pd.read_pickle(linkf)
f, d = utils.get_wgms_files()
wgms_df = pd.read_csv(f)
f = utils.get_glathida_file()
gtd_df = pd.read_csv(f)
divides = []
rgidf = []
_rgi_ids_for_overwrite = []
for i, row in df_itmix.iterrows():
log.info('Prepare RGI df for ' + row.name)
# read the rgi region
rgi_shp = find_path(RGI_DIR, row['rgi_reg'] + '_rgi50_*.shp')
rgi_df = salem.read_shapefile(rgi_shp, cached=True)
rgi_parts = row.T['rgi_parts_ids']
sel = rgi_df.loc[rgi_df.RGIId.isin(rgi_parts)].copy()
# use the ITMIX shape where possible
if row.name in ['Hellstugubreen', 'Freya', 'Aqqutikitsoq',
'Brewster', 'Kesselwandferner', 'NorthGlacier',
'SouthGlacier', 'Tasman', 'Unteraar',
'Washmawapta', 'Columbia']:
shf = find_path(SEARCHD, '*_' + row.name + '*.shp')
shp = salem.read_shapefile(shf)
if row.name == 'Unteraar':
shp = shp.iloc[[-1]]
if 'LineString' == shp.iloc[0].geometry.type:
shp.loc[shp.index[0], 'geometry'] = shpg.Polygon(shp.iloc[0].geometry)
if shp.iloc[0].geometry.type == 'MultiLineString':
# Columbia
geometry = shp.iloc[0].geometry
parts = list(geometry)
for p in parts:
assert p.type == 'LineString'
exterior = shpg.Polygon(parts[0])
# let's assume that all other polygons are in fact interiors
interiors = []
for p in parts[1:]:
assert exterior.contains(p)
interiors.append(p)
geometry = shpg.Polygon(parts[0], interiors)
assert 'Polygon' in geometry.type
shp.loc[shp.index[0], 'geometry'] = geometry
assert len(shp) == 1
area_km2 = shp.iloc[0].geometry.area * 1e-6
shp = salem.gis.transform_geopandas(shp)
shp = shp.iloc[0].geometry
sel = sel.iloc[[0]]
sel.loc[sel.index[0], 'geometry'] = shp
sel.loc[sel.index[0], 'Area'] = area_km2
elif row.name == 'Urumqi':
# ITMIX Urumqi is in fact two glaciers
shf = find_path(SEARCHD, '*_' + row.name + '*.shp')
shp2 = salem.read_shapefile(shf)
assert len(shp2) == 2
for k in [0, 1]:
shp = shp2.iloc[[k]].copy()
area_km2 = shp.iloc[0].geometry.area * 1e-6
shp = salem.gis.transform_geopandas(shp)
shp = shp.iloc[0].geometry
assert sel.loc[sel.index[k], 'geometry'].contains(shp.centroid)
sel.loc[sel.index[k], 'geometry'] = shp
sel.loc[sel.index[k], 'Area'] = area_km2
assert len(sel) == 2
elif len(rgi_parts) > 1:
# Ice-caps. Make divides
# First we gather all the parts:
sel = rgi_df.loc[rgi_df.RGIId.isin(rgi_parts)].copy()
# Make the multipolygon for the record
multi = shpg.MultiPolygon([g for g in sel.geometry])
# update the RGI attributes. We take a dummy rgi ID
new_area = np.sum(sel.Area)
found = False
for i in range(len(sel)):
tsel = sel.iloc[[i]].copy()
if 'Multi' in tsel.loc[tsel.index[0], 'geometry'].type:
continue
else:
found = True
sel = tsel
break
if not found:
raise RuntimeError()
inif = 0.
add = 1e-5
if row.name == 'Devon':
inif = 0.001
add = 1e-4
while True:
buff = multi.buffer(inif)
if 'Multi' in buff.type:
inif += add
else:
break
x, y = multi.centroid.xy
if 'Multi' in buff.type:
raise RuntimeError
sel.loc[sel.index[0], 'geometry'] = buff
sel.loc[sel.index[0], 'Area'] = new_area
sel.loc[sel.index[0], 'CenLon'] = np.asarray(x)[0]
sel.loc[sel.index[0], 'CenLat'] = np.asarray(y)[0]
# Divides db
div_sel = dict()
for k, v in sel.iloc[0].iteritems():
if k == 'geometry':
div_sel[k] = multi
elif k == 'RGIId':
div_sel['RGIID'] = v
else:
div_sel[k] = v
divides.append(div_sel)
else:
pass
# add glacier name to the entity
name = ['I:' + row.name] * len(sel)
add_n = sel.RGIId.isin(wgms_df.RGI_ID.values)
for z, it in enumerate(add_n.values):
if it:
name[z] = 'W-' + name[z]
add_n = sel.RGIId.isin(gtd_df.RGI_ID.values)
for z, it in enumerate(add_n.values):
if it:
name[z] = 'G-' + name[z]
sel.loc[:, 'Name'] = name
rgidf.append(sel)
# Add divides to the original one
adf = pd.DataFrame(divides)
adf.to_pickle(cfg.PATHS['itmix_divs'])
log.info('N glaciers ITMIX: {}'.format(len(rgidf)))
# WGMS glaciers which are not already there
# Actually we should remove the data of those 7 to be honest...
f, d = utils.get_wgms_files()
wgms_df = pd.read_csv(f)
wgms_df = wgms_df.loc[~ wgms_df.RGI_ID.isin(_rgi_ids_for_overwrite)]
log.info('N glaciers WGMS: {}'.format(len(wgms_df)))
for i, row in wgms_df.iterrows():
rid = row.RGI_ID
reg = rid.split('-')[1].split('.')[0]
# read the rgi region
rgi_shp = find_path(RGI_DIR, reg + '_rgi50_*.shp')
rgi_df = salem.read_shapefile(rgi_shp, cached=True)
sel = rgi_df.loc[rgi_df.RGIId.isin([rid])].copy()
assert len(sel) == 1
# add glacier name to the entity
_cor = row.NAME.replace('/', 'or').replace('.', '').replace(' ', '-')
name = ['W:' + _cor] * len(sel)
add_n = sel.RGIId.isin(gtd_df.RGI_ID.values)
for z, it in enumerate(add_n.values):
if it:
name[z] = 'G-' + name[z]
for n in name:
if len(n) > 48:
raise
sel.loc[:, 'Name'] = name
rgidf.append(sel)
_rgi_ids_for_overwrite.extend(wgms_df.RGI_ID.values)
# GTD glaciers which are not already there
# Actually we should remove the data of those 2 to be honest...
gtd_df = gtd_df.loc[~ gtd_df.RGI_ID.isin(_rgi_ids_for_overwrite)]
log.info('N glaciers GTD: {}'.format(len(gtd_df)))
for i, row in gtd_df.iterrows():
rid = row.RGI_ID
reg = rid.split('-')[1].split('.')[0]
# read the rgi region
rgi_shp = find_path(RGI_DIR, reg + '_rgi50_*.shp')
rgi_df = salem.read_shapefile(rgi_shp, cached=True)
sel = rgi_df.loc[rgi_df.RGIId.isin([rid])].copy()
assert len(sel) == 1
# add glacier name to the entity
_corname = row.NAME.replace('/', 'or').replace('.', '').replace(' ', '-')
name = ['G:' + _corname] * len(sel)
for n in name:
if len(n) > 48:
raise
sel.loc[:, 'Name'] = name
rgidf.append(sel)
# Save for not computing each time
rgidf = pd.concat(rgidf)
rgidf.to_pickle(df_rgi_file)
return rgidf
@entity_task(log, writes=['gridded_data', 'geometries'])
def glacier_masks_itmix(gdir):
"""Converts the glacier vector geometries to grids.
Uses where possible the ITMIX DEM
Parameters
----------
gdir : oggm.GlacierDirectory
"""
# open srtm tif-file:
dem_ds = gdal.Open(gdir.get_filepath('dem'))
dem = dem_ds.ReadAsArray().astype(float)
# Correct the DEM (ASTER...)
# Currently we just do a linear interp -- ASTER is totally shit anyway
min_z = -999.
if np.min(dem) <= min_z:
xx, yy = gdir.grid.ij_coordinates
pnan = np.nonzero(dem <= min_z)
pok = np.nonzero(dem > min_z)
if len(pok[0]) > 0:
points = np.array((np.ravel(yy[pok]), np.ravel(xx[pok]))).T
inter = np.array((np.ravel(yy[pnan]), np.ravel(xx[pnan]))).T
dem[pnan] = griddata(points, np.ravel(dem[pok]), inter)
msg = gdir.rgi_id + ': DEM needed interpolation'
msg += '({:.1f}% missing).'.format(len(pnan[0])/len(dem.flatten())*100)
log.warning(msg)
else:
dem = dem*np.NaN
# Replace DEM values with ITMIX ones where possible
# Open DEM
dem_f = None
n_g = gdir.name.split(':')[-1]
searchf = os.path.join(DATA_DIR, 'itmix', 'glaciers_sorted', '*')
searchf = os.path.join(searchf, '02_surface_' + n_g + '_*.asc')
for dem_f in glob.glob(searchf):
pass
if dem_f is None:
# try synth
n_g = gdir.rgi_id
searchf = os.path.join(DATA_DIR, 'itmix', 'glaciers_synth', '*')
searchf = os.path.join(searchf, '02_surface_' + n_g + '*.asc')
for dem_f in glob.glob(searchf):
pass
if dem_f is not None:
log.info('%s: ITMIX DEM file: %s', gdir.rgi_id, dem_f)
it_dem_ds = EsriITMIX(dem_f)
it_dem = it_dem_ds.get_vardata()
it_dem = np.where(it_dem < -999., np.NaN, it_dem)
# for some glaciers, trick
if n_g in ['Academy', 'Devon']:
it_dem = np.where(it_dem <= 0, np.NaN, it_dem)
it_dem = np.where(np.isfinite(it_dem), it_dem, np.nanmin(it_dem))
if n_g in ['Brewster', 'Austfonna']:
it_dem = np.where(it_dem <= 0, np.NaN, it_dem)
# Transform to local grid
it_dem = gdir.grid.map_gridded_data(it_dem, it_dem_ds.grid,
interp='linear')
# And update values where possible
if n_g in ['Synthetic2', 'Synthetic1']:
dem = np.where(~ it_dem.mask, it_dem, np.nanmin(it_dem))
else:
dem = np.where(~ it_dem.mask, it_dem, dem)
else:
if 'Devon' in n_g:
raise RuntimeError('Should have found DEM for Devon')
# Disallow negative
dem = dem.clip(0)
# Grid
nx = dem_ds.RasterXSize
ny = dem_ds.RasterYSize
assert nx == gdir.grid.nx
assert ny == gdir.grid.ny
# Proj
geot = dem_ds.GetGeoTransform()
x0 = geot[0] # UL corner
y0 = geot[3] # UL corner
dx = geot[1]
dy = geot[5] # Negative
assert dx == -dy
assert dx == gdir.grid.dx
assert y0 == gdir.grid.corner_grid.y0
assert x0 == gdir.grid.corner_grid.x0
dem_ds = None # to be sure...
# Smooth SRTM?
if cfg.PARAMS['smooth_window'] > 0.:
gsize = np.rint(cfg.PARAMS['smooth_window'] / dx)
smoothed_dem = gaussian_blur(dem, np.int(gsize))
else:
smoothed_dem = dem.copy()
# Make entity masks
log.debug('%s: glacier mask, divide %d', gdir.rgi_id, 0)
_mask_per_divide(gdir, 0, dem, smoothed_dem)
# Glacier divides
nd = gdir.n_divides
if nd == 1:
# Optim: just make links
linkname = gdir.get_filepath('gridded_data', div_id=1)
sourcename = gdir.get_filepath('gridded_data')
# overwrite as default
if os.path.exists(linkname):
os.remove(linkname)
# TODO: temporary suboptimal solution
try:
# we are on UNIX
os.link(sourcename, linkname)
except AttributeError:
# we are on windows
copyfile(sourcename, linkname)
linkname = gdir.get_filepath('geometries', div_id=1)
sourcename = gdir.get_filepath('geometries')
# overwrite as default
if os.path.exists(linkname):
os.remove(linkname)
# TODO: temporary suboptimal solution
try:
# we are on UNIX
os.link(sourcename, linkname)
except AttributeError:
# we are on windows
copyfile(sourcename, linkname)
else:
# Loop over divides
for i in gdir.divide_ids:
log.debug('%s: glacier mask, divide %d', gdir.rgi_id, i)
_mask_per_divide(gdir, i, dem, smoothed_dem)
def _prepare_inv(gdirs):
# Get test glaciers (all glaciers with thickness data)
fpath = utils.get_glathida_file()
try:
gtd_df = pd.read_csv(fpath).sort_values(by=['RGI_ID'])
except AttributeError:
gtd_df = pd.read_csv(fpath).sort(columns=['RGI_ID'])
dfids = gtd_df['RGI_ID'].values
print('GTD Glac before', len(dfids))
ref_gdirs = []
for gdir in gdirs:
if gdir.rgi_id not in dfids:
continue
if gdir.glacier_type == 'Ice cap':
continue
if gdir.terminus_type in ['Marine-terminating', 'Lake-terminating',
'Dry calving', 'Regenerated',
'Shelf-terminating']:
continue
ref_gdirs.append(gdir)
print('GTD Glac after', len(ref_gdirs))
ref_rgiids = [gdir.rgi_id for gdir in ref_gdirs]
gtd_df = gtd_df.set_index('RGI_ID').loc[ref_rgiids]
# Account for area differences between glathida and rgi
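    # GlaThiDa mean thickness is first turned into a volume at the GlaThiDa area,
    # a per-glacier scaling constant c = V / A**1.375 is derived (volume-area
    # scaling), and volume/thickness are then re-evaluated at the RGI area so
    # that the reference values refer to the same outline as the model.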
ref_area_km2 = np.asarray([gdir.rgi_area_km2 for gdir in ref_gdirs])
gtd_df.VOLUME = gtd_df.MEAN_THICKNESS * gtd_df.GTD_AREA * 1e-3
ref_cs = gtd_df.VOLUME.values / (gtd_df.GTD_AREA.values**1.375)
ref_volume_km3 = ref_cs * ref_area_km2**1.375
ref_thickness_m = ref_volume_km3 / ref_area_km2 * 1000.
gtd_df['ref_area_km2'] = ref_area_km2
gtd_df['ref_volume_km3'] = ref_volume_km3
gtd_df['ref_thickness_m'] = ref_thickness_m
gtd_df['ref_gdirs'] = ref_gdirs
return gtd_df
def optimize_thick(gdirs):
"""Optimizes fd based on GlaThiDa thicknesses.
We use the glacier averaged thicknesses provided by GlaThiDa and correct
them for differences in area with RGI, using a glacier specific volume-area
scaling formula.
Parameters
----------
gdirs: list of oggm.GlacierDirectory objects
"""
gtd_df = _prepare_inv(gdirs)
ref_gdirs = gtd_df['ref_gdirs']
ref_volume_km3 = gtd_df['ref_volume_km3']
ref_area_km2 = gtd_df['ref_area_km2']
ref_thickness_m = gtd_df['ref_thickness_m']
# Optimize without sliding
log.info('Compute the inversion parameter.')
def to_optimize(x):
tmp_ = np.zeros(len(ref_gdirs))
glen_a = cfg.A * x[0]
for i, gdir in enumerate(ref_gdirs):
v, a = mass_conservation_inversion(gdir, glen_a=glen_a,
fs=0., write=False)
tmp_[i] = v / a
return utils.rmsd(tmp_, ref_thickness_m)
opti = optimization.minimize(to_optimize, [1.],
bounds=((0.01, 10), ),
tol=1.e-4)
# Check results and save.
glen_a = cfg.A * opti['x'][0]
fs = 0.
# This is for the stats
oggm_volume_m3 = np.zeros(len(ref_gdirs))
rgi_area_m2 = np.zeros(len(ref_gdirs))
for i, gdir in enumerate(ref_gdirs):
v, a = mass_conservation_inversion(gdir, glen_a=glen_a, fs=fs,
write=False)
oggm_volume_m3[i] = v
rgi_area_m2[i] = a
assert np.allclose(rgi_area_m2 * 1e-6, ref_area_km2)
# This is for each glacier
out = dict()
out['glen_a'] = glen_a
out['fs'] = fs
out['factor_glen_a'] = opti['x'][0]
try:
out['factor_fs'] = opti['x'][1]
except IndexError:
out['factor_fs'] = 0.
for gdir in gdirs:
gdir.write_pickle(out, 'inversion_params')
# This is for the working dir
# Simple stats
out['vol_rmsd'] = utils.rmsd(oggm_volume_m3 * 1e-9, ref_volume_km3)
out['thick_rmsd'] = utils.rmsd(oggm_volume_m3 / (ref_area_km2 * 1e6),
ref_thickness_m)
log.info('Optimized glen_a and fs with a factor {factor_glen_a:.2f} and '
'{factor_fs:.2f} for a thick RMSD of {thick_rmsd:.3f}'.format(
**out))
df = pd.DataFrame(out, index=[0])
fpath = os.path.join(cfg.PATHS['working_dir'],
'inversion_optim_params.csv')
df.to_csv(fpath)
# All results
df = utils.glacier_characteristics(ref_gdirs)
df['ref_area_km2'] = ref_area_km2
df['ref_volume_km3'] = ref_volume_km3
df['ref_thickness_m'] = ref_thickness_m
df['oggm_volume_km3'] = oggm_volume_m3 * 1e-9
df['oggm_thickness_m'] = oggm_volume_m3 / (ref_area_km2 * 1e6)
df['vas_volume_km3'] = 0.034*(df['ref_area_km2']**1.375)
df['vas_thickness_m'] = df['vas_volume_km3'] / ref_area_km2 * 1000
rgi_id = [gdir.rgi_id for gdir in ref_gdirs]
df = pd.DataFrame(df, index=rgi_id)
fpath = os.path.join(cfg.PATHS['working_dir'],
'inversion_optim_results.csv')
df.to_csv(fpath)
# return value for tests
return out
def synth_apparent_mb(gdir, tstar=None, bias=None):
"""Compute local mustar and apparent mb from tstar.
Parameters
----------
gdir : oggm.GlacierDirectory
tstar: int
the year where the glacier should be equilibrium
bias: int
the associated reference bias
"""
# Ok. Looping over divides
for div_id in list(gdir.divide_ids):
log.info('%s: apparent mb synth')
# For each flowline compute the apparent MB
fls = gdir.read_pickle('inversion_flowlines', div_id=div_id)
# Reset flux
for fl in fls:
fl.flux = np.zeros(len(fl.surface_h))
n_g = gdir.rgi_id
searchf = os.path.join(DATA_DIR, 'itmix', 'glaciers_synth', '*')
searchf = os.path.join(searchf, '04_mb_' + n_g + '*.asc')
for dem_f in glob.glob(searchf):
pass
ds_mb = salem.EsriITMIX(dem_f)
mb = ds_mb.get_vardata() * 1000.
mb = np.where(mb < -9998, np.NaN, mb)
f = os.path.join(DATA_DIR, 'itmix', 'glaciers_synth',
'01_margin_'+ n_g +'_0000_UTM00.shp')
ds_mb.set_roi(f)
mb = np.where(ds_mb.roi, mb, np.NaN)
searchf = os.path.join(DATA_DIR, 'itmix', 'glaciers_synth', '*')
searchf = os.path.join(searchf, '02_*_' + n_g + '*.asc')
for dem_f in glob.glob(searchf):
pass
ds_dem = salem.EsriITMIX(dem_f)
dem = ds_dem.get_vardata()
from scipy import stats
pok = np.where(np.isfinite(mb) & (mb > 0))
slope_p, _, _, _, _ = stats.linregress(dem[pok], mb[pok])
pok = np.where(np.isfinite(mb) & (mb < 0))
slope_m, _, _, _, _ = stats.linregress(dem[pok], mb[pok])
def ela_mb_grad(ela_h, h):
return np.where(h < ela_h, slope_m * (h - ela_h),
slope_p * (h - ela_h))
# Get all my hs
hs = []
ws = []
for fl in fls:
hs = np.append(hs, fl.surface_h)
ws = np.append(ws, fl.widths)
# find ela for zero mb
def to_optim(x):
tot_mb = np.average(ela_mb_grad(x[0], hs), weights=ws)
return tot_mb**2
opti = optimization.minimize(to_optim, [1000.],
bounds=((0., 10000),),
tol=1e-6)
# Check results and save.
final_elah = opti['x'][0]
# print(final_elah)
# pok = np.where(np.isfinite(mb))
# plt.plot(dem[pok], mb[pok], 'o')
# plt.plot(hs, ela_mb_grad(final_elah, hs), 'o')
# plt.show()
# Flowlines in order to be sure
# TODO: here it would be possible to test for a minimum mb gradient
# and change prcp factor if judged useful
for fl in fls:
mb_on_h = ela_mb_grad(final_elah, fl.surface_h)
fl.set_apparent_mb(mb_on_h)
# Check
if div_id >= 1:
if not np.allclose(fls[-1].flux[-1], 0., atol=0.01):
log.warning('%s: flux should be zero, but is: %.2f',
gdir.rgi_id,
fls[-1].flux[-1])
# Overwrite
gdir.write_pickle(fls, 'inversion_flowlines', div_id=div_id)
def write_itmix_ascii(gdir, version):
"""Write the results"""
gname = gdir.name.replace('I:', '')
real = gname
gname = gname.replace('_A', '')
gname = gname.replace('_B', '')
log.info('Write ITMIX ' + real)
# Get the data
grids_file = gdir.get_filepath('gridded_data', div_id=0)
with netCDF4.Dataset(grids_file) as nc:
thick = nc.variables['thickness'][:]
vol = np.nansum(thick * gdir.grid.dx**2)
# Transform to output grid
try:
ifile = find_path(os.path.join(DATA_DIR, 'itmix', 'glaciers_sorted'),
'02_surface_' + gname + '*.asc')
except AssertionError:
gname = gdir.rgi_id
searchf = os.path.join(DATA_DIR, 'itmix', 'glaciers_synth')
ifile = find_path(searchf, '02_surface_' + gname + '*.asc')
itmix = salem.EsriITMIX(ifile)
thick = itmix.grid.map_gridded_data(thick, gdir.grid, interp='linear')
# Mask out
itmix.set_roi(shape=gdir.get_filepath('outlines'))
omask = itmix.roi
thick[np.nonzero(omask==0)] = np.nan
# Output path
bname = os.path.basename(ifile).split('.')[0]
pok = bname.find('UTM')
zone = bname[pok:]
ofile = os.path.join(ITMIX_ODIR, gname)
if not os.path.exists(ofile):
os.mkdir(ofile)
fname = 'Maussion_' + real + '_bedrock_v{}_'.format(version) + zone +'.asc'
ofile = os.path.join(ofile, fname)
# Write out
with rasterio.drivers():
with rasterio.open(ifile) as src:
topo = src.read(1).astype(np.float)
topo = np.where(topo < -999., np.NaN, topo)
# Also for ours
thick = np.where(topo < -999., np.NaN, thick)
# Be sure volume is conserved
thick *= vol / np.nansum(thick*itmix.grid.dx**2)
assert np.isclose(np.nansum(thick*itmix.grid.dx**2), vol)
# convert
topo -= thick
with rasterio.open(ofile, 'w',
driver=src.driver,
width=src.width,
height=src.height,
transform=src.transform,
count=1,
dtype=np.float,
nodata=np.NaN) as dst:
dst.write_band(1, topo)
# Check
with rasterio.open(ifile) as src:
topo = src.read(1).astype(np.float)
topo = np.where(topo < -9999., np.NaN, topo)
with rasterio.open(ofile) as src:
mtopo = src.read(1).astype(np.float)
mtopo = np.where(mtopo < -9999., np.NaN, mtopo)
if not np.allclose(np.nanmax(topo - mtopo), np.nanmax(thick), atol=5):
print(np.nanmax(topo - mtopo), np.nanmax(thick))
f, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4)
ax1.imshow(topo - mtopo)
ax2.imshow(thick)
ax3.imshow(topo - mtopo - thick)
ax4.imshow(~np.isclose(topo - mtopo, thick, atol=1, equal_nan=True))
plt.show() | gpl-3.0 |
rmcgibbo/hyperopt-mixtape | fip35/experiment.py | 1 | 4295 | import os
import sys
import time
import argparse
import numpy as np
import mdtraj as md
from hyperopt import hp, tpe, fmin, STATUS_OK
from hyperopt.pyll import scope
from hyperopt.mongoexp import MongoTrials
from hpmixtape import modelFactory, pipelineFactory
from mixtape.pca import PCA
from mixtape.tica import tICA
from mixtape.featurizer import DihedralFeaturizer
from mixtape.markovstatemodel import MarkovStateModel
from mixtape.cluster import MiniBatchKMeans, KCenters, GMM
from mixtape.datasets import fetch_fs_peptide
from sklearn.cross_validation import KFold
from sklearn.externals.joblib import Memory
TRAJECTORIES = None
def load_trajectories():
pdb = md.load('/home/rmcgibbo/datasets/Fip35-WW/structures/ww_native.pdb')
heavy = pdb.top.select_atom_indices('heavy')
pdb = pdb.atom_slice(heavy)
trajectories = [md.load('/home/rmcgibbo/datasets/Fip35-WW/trj0.lh5', stride=50, atom_indices=heavy),
md.load('/home/rmcgibbo/datasets/Fip35-WW/trj1.lh5', stride=50, atom_indices=heavy)]
# split each trajectory into 3 chunks
out = []
for t in trajectories:
t.top = pdb.top
n = len(t) / 3
for i in range(0, len(t), n):
chunk = t[i:i+n]
if len(chunk) > 1:
out.append(chunk)
print([len(t) for t in out])
return out
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--mongo', required=True)
parser.add_argument('--exp-key', required=True)
args = parser.parse_args()
# from hyperopt import Trials
# trials = Trials()
trials = MongoTrials(args.mongo, exp_key=args.exp_key)
best = fmin(fit_and_score, modelspace, trials=trials, algo=tpe.suggest, max_evals=500)
modelspace = {'_factory': pipelineFactory,
'steps': [
hp.choice('featurization', [
{'_class': DihedralFeaturizer,
'_factory': modelFactory,
'types' : ['phi', 'psi'],
'sincos': True},
{'_class': DihedralFeaturizer,
'_factory': modelFactory,
'types': ['phi', 'psi', 'chi1'],
'sincos': True},
{'_class': DihedralFeaturizer,
'_factory': modelFactory,
'types': ['phi', 'psi', 'chi1', 'chi2'],
'sincos': True},
]),
hp.choice('preprocessing', [
{'_class': PCA,
'_factory': modelFactory,
'n_components': scope.int(hp.quniform('pca_n_components', 2, 20, 1)),
'copy': False},
{'_class': tICA,
'_factory': modelFactory,
'n_components': scope.int(hp.quniform('tica_n_components', 2, 20, 1)),
'gamma': hp.choice('tica_gamma', [0, 1e-7, 1e-5, 1e-3, 1e-1]),
'weighted_transform': hp.choice('tica_weighted_transform', [True, False])
}
]),
hp.choice('cluster', [
{'_class': MiniBatchKMeans,
'_factory': modelFactory,
'n_clusters': scope.int(hp.quniform('kmeans_n_clusters', 10, 1000, 10)),
'batch_size': 10000,
'n_init': 1,
},
]),
{'_class': MarkovStateModel,
'_factory': modelFactory,
'verbose': False,
'n_timescales': 3,
'reversible_type': 'transpose'
},
]}
def fit_and_score(model_spec):
global TRAJECTORIES
if TRAJECTORIES is None:
TRAJECTORIES = load_trajectories()
model = model_spec['_factory'](model_spec)
parameters = {k:v for k, v in model.get_params().items() if '__' in k}
train_scores, test_scores, fit_times = [], [], []
cv = KFold(len(TRAJECTORIES), n_folds=3)
for fold, (train_index, test_index) in enumerate(cv):
train_data = [TRAJECTORIES[i] for i in train_index]
test_data = [TRAJECTORIES[i] for i in test_index]
start = time.time()
try:
model.fit(train_data)
except:
print(model)
raise
fit_times.append(time.time()-start)
train_scores.append(model.score(train_data))
test_scores.append(model.score(test_data))
result = {'loss': -np.mean(test_scores),
'status': STATUS_OK,
'train_scores': train_scores,
'test_scores': test_scores,
'parameters': parameters,
'fit_times': fit_times}
print(result)
return result
| lgpl-2.1 |
Elixeus/NLP | sequence_models/HMM/viterbi_pos.py | 1 | 4810 | import numpy as np
import pandas as pd
def viterbi(A, B, observ):
"""Hidden Markov models need to solve 3 fundamental problems:
1. For a given HMM model M and an observation sequence O, what is the likelihood of P(O|M);
    2. For a given HMM model M and an observation sequence O, what is the best hidden state sequence Q (think of O as words, and Q as syntactic categories);
3. For an observation sequence O, and the set of hidden states, learn the HMM parameters A and B;
    The Viterbi algorithm aims at solving the second problem: for a given model and observation sequence, find out the most likely hidden state sequence.
    The Viterbi algorithm is very similar to the forward algorithm, except that within the inner for loop, it looks for the maximum value of the trellis instead of the sum of all trellises. It also keeps track of the path.
    There are different ways to keep track of the best path. The one I use here looks for the argmax of each column, keeps the x and y args in a tuple, and appends this tuple to a list.
----------------------------
params:
A: the transition probability matrix; probability to change from one syntactic category to the next
A type: pandas dataframe
B: the emission probability matrix; probability to emit a symbol given a syntactic category
B type: pandas dataframe
observ: the observed sequence of words
observ type: tuple of strings"""
# initialization
T = len(observ)
N = B.shape[0]
backpointer = [(0, 0)]
vtb = pd.DataFrame(np.zeros((N+2, T+1)),
columns=('start',)+observ,
index=('start',)+tuple(B.index)+('end',))
print vtb
# viterbi algorithm
for s in vtb.index[1:-1]:
vtb.loc[s, observ[0]] = A.loc['start', s] * B.loc[s, observ[0]]
for t in xrange(2, T+1):
for s in xrange(1, N+1):
vtb.iloc[s, t] = max(vtb.iloc[s_p, t-1] * A.iloc[s_p, s] * B.iloc[s-1, t-1]
for s_p in xrange(1, N+1))
# find the argmax of each column and create a path
pointers = [(vtb.loc[:, i].argmax(), i) for i in observ]
backpointer.extend(pointers)
vtb.iloc[-1, T] = max(vtb.iloc[s, T]* A.iloc[s, -1] for s in xrange(1, N+1))
print vtb
return backpointer
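# Illustrative sketch, not part of the original module: the same max-product
# recurrence on a small hypothetical two-state HMM written with plain numpy
# arrays. The forward algorithm would replace np.max with a sum over the
# previous states; the matrices, symbols and helper name below are made up
# for the example only.
def _toy_viterbi_scores(observations=(0, 1, 1)):
    A_toy = np.array([[0.7, 0.3],   # transition probabilities, 2 states
                      [0.4, 0.6]])
    B_toy = np.array([[0.5, 0.5],   # emission probabilities, 2 symbols
                      [0.1, 0.9]])
    pi = np.array([0.6, 0.4])       # initial state distribution
    v = pi * B_toy[:, observations[0]]  # initialization step
    for symbol in observations[1:]:
        # max over the previous state s_p instead of the forward sum
        v = np.max(v[:, None] * A_toy, axis=0) * B_toy[:, symbol]
    return v  # v.argmax() is the most likely final state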
if __name__ == '__main__':
# A = np.matrix(((0, 0.2, 0.8, 0),
# (0, 0.5, 0.4, 0.1),
# (0, 0.3, 0.6, 0.1),
# (0, 0, 0, 0)))
# B = {2 : {1:0.2, 2:0.4, 3:0.4},
# 1 : {1:0.5, 2:0.4, 3:0.1}}
A_matrix = np.matrix(((0, 0.2767, 0.0006, 0.0031, 0.0453, 0.0449, 0.0510, 0.2026),
(0, 0.3777, 0.0110, 0.0009, 0.0084, 0.0584, 0.0090, 0.0025),
(0, 0.0008, 0.0002, 0.7968, 0.0005, 0.0008, 0.1698, 0.0041),
(0, 0.0322, 0.0005, 0.0050, 0.0837, 0.0615, 0.0514, 0.2231),
(0, 0.0366, 0.0004, 0.0001, 0.0733, 0.4509, 0.0036, 0.0036),
(0, 0.0096, 0.0176, 0.0014, 0.0086, 0.1216, 0.0177, 0.0068),
(0, 0.0068, 0.0102, 0.1011, 0.1012, 0.0120, 0.0728, 0.0479),
(0, 0.1147, 0.0021, 0.0002, 0.2157, 0.4744, 0.0102, 0.0017))
)
A = pd.DataFrame(A_matrix,
columns=('start','NNP', 'MD', 'VB', 'JJ',
'NN', 'RB', 'DT'),
index=('start', 'NNP', 'MD', 'VB', 'JJ',
'NN', 'RB', 'DT'))
B_matrix = {'NNP': {'Janet': 0.000032, 'will': 0, 'back': 0,
'the': 0.000048, 'bill': 0},
'MD': {'Janet': 0, 'will': 0.308431, 'back': 0,
'the': 0, 'bill': 0},
'VB': {'Janet': 0, 'will': 0.000028, 'back': 0.000672,
'the':0, 'bill':0.000028},
'JJ': {'Janet': 0, 'will': 0, 'back': 0.000340,
'the': 0.000097, 'bill': 0},
'NN': {'Janet': 0, 'will': 0.0002, 'back': 0.000223,
'the': 0.000006, 'bill': 0.002337},
'RB': {'Janet': 0, 'will': 0, 'back': 0.010446,
'the': 0, 'bill': 0},
'DT': {'Janet': 0, 'will': 0, 'back': 0,
'the': 0.506099, 'bill': 0}
}
B = pd.DataFrame(B_matrix).T.reindex(A.index[1:],
columns=['Janet', 'will', 'back',
'the', 'bill'])
# OBS1 = (3, 3, 1, 1, 2, 2, 3, 1, 3)
# OBS2 = (3, 3, 1, 1, 2, 3, 3, 1, 2)
# RESULT1 = viterbi(A, B, OBS1)
# RESULT2 = viterbi(A, B, OBS2)
# print RESULT1
# print RESULT2
#OBS3 = (3, 1, 3)
#RESULT3 = viterbi(A, B, OBS3)
#print RESULT3
OBS4 = ('Janet', 'will', 'back', 'the', 'bill')
RESULT4 = viterbi(A, B, OBS4)
print RESULT4
| mit |
fabianp/scikit-learn | sklearn/cluster/setup.py | 263 | 1449 | # Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
cblas_libs, blas_info = get_blas_info()
libraries = []
if os.name == 'posix':
cblas_libs.append('m')
libraries.append('m')
config = Configuration('cluster', parent_package, top_path)
config.add_extension('_dbscan_inner',
sources=['_dbscan_inner.cpp'],
include_dirs=[numpy.get_include()],
language="c++")
config.add_extension('_hierarchical',
sources=['_hierarchical.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension(
'_k_means',
libraries=cblas_libs,
sources=['_k_means.c'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args', []),
**blas_info
)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
wllmtrng/ggplot | ggplot/stats/stat_bar.py | 12 | 1322 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import pandas as pd
from .stat import stat
_MSG_LABELS = """There are more than 30 unique values mapped to x.
If you want a histogram instead, use 'geom_histogram()'.
"""
class stat_bar(stat):
REQUIRED_AES = {'x', 'y'}
DEFAULT_PARAMS = {'geom': 'bar', 'position': 'stack',
'width': 0.9, 'drop': False,
'origin': None, 'labels': None}
def _calculate(self, data):
# reorder x according to the labels
new_data = pd.DataFrame()
new_data["x"] = self.labels
for column in set(data.columns) - set('x'):
column_dict = dict(zip(data["x"],data[column]))
default = 0 if column == "y" else data[column].values[0]
new_data[column] = [column_dict.get(val, default)
for val in self.labels]
return new_data
def _calculate_global(self, data):
labels = self.params['labels']
if labels == None:
labels = sorted(set(data['x'].values))
# For a lot of labels, put out a warning
if len(labels) > 30:
self._print_warning(_MSG_LABELS)
# Check if there is a mapping
self.labels = labels
| bsd-2-clause |
RachitKansal/scikit-learn | examples/decomposition/plot_image_denoising.py | 181 | 5819 | """
=========================================
Image denoising using dictionary learning
=========================================
An example comparing the effect of reconstructing noisy fragments
of the Lena image using online :ref:`DictionaryLearning` and
various transform methods.
The dictionary is fitted on the distorted left half of the image, and
subsequently used to reconstruct the right half. Note that even better
performance could be achieved by fitting to an undistorted (i.e.
noiseless) image, but here we start from the assumption that it is not
available.
A common practice for evaluating the results of image denoising is by looking
at the difference between the reconstruction and the original image. If the
reconstruction is perfect this will look like Gaussian noise.
It can be seen from the plots that the results of :ref:`omp` with two
non-zero coefficients is a bit less biased than when keeping only one
(the edges look less prominent). It is in addition closer to the ground
truth in Frobenius norm.
The result of :ref:`least_angle_regression` is much more strongly biased: the
difference is reminiscent of the local intensity value of the original image.
Thresholding is clearly not useful for denoising, but it is here to show that
it can produce a suggestive output with very high speed, and thus be useful
for other tasks such as object classification, where performance is not
necessarily related to visualisation.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
import numpy as np
from scipy.misc import lena
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.feature_extraction.image import reconstruct_from_patches_2d
###############################################################################
# Load Lena image and extract patches
lena = lena() / 256.0
# downsample for higher speed
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena /= 4.0
height, width = lena.shape
# Distort the right half of the image
print('Distorting image...')
distorted = lena.copy()
distorted[:, height // 2:] += 0.075 * np.random.randn(width, height // 2)
# Extract all reference patches from the left half of the image
print('Extracting reference patches...')
t0 = time()
patch_size = (7, 7)
data = extract_patches_2d(distorted[:, :height // 2], patch_size)
data = data.reshape(data.shape[0], -1)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
print('done in %.2fs.' % (time() - t0))
###############################################################################
# Learn the dictionary from reference patches
print('Learning the dictionary...')
t0 = time()
dico = MiniBatchDictionaryLearning(n_components=100, alpha=1, n_iter=500)
V = dico.fit(data).components_
dt = time() - t0
print('done in %.2fs.' % dt)
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(V[:100]):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape(patch_size), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Dictionary learned from Lena patches\n' +
'Train time %.1fs on %d patches' % (dt, len(data)),
fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
###############################################################################
# Display the distorted image
def show_with_diff(image, reference, title):
"""Helper function to display denoising"""
plt.figure(figsize=(5, 3.3))
plt.subplot(1, 2, 1)
plt.title('Image')
plt.imshow(image, vmin=0, vmax=1, cmap=plt.cm.gray, interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.subplot(1, 2, 2)
difference = image - reference
plt.title('Difference (norm: %.2f)' % np.sqrt(np.sum(difference ** 2)))
plt.imshow(difference, vmin=-0.5, vmax=0.5, cmap=plt.cm.PuOr,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle(title, size=16)
plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)
show_with_diff(distorted, lena, 'Distorted image')
###############################################################################
# Extract noisy patches and reconstruct them using the dictionary
print('Extracting noisy patches... ')
t0 = time()
data = extract_patches_2d(distorted[:, height // 2:], patch_size)
data = data.reshape(data.shape[0], -1)
intercept = np.mean(data, axis=0)
data -= intercept
print('done in %.2fs.' % (time() - t0))
transform_algorithms = [
('Orthogonal Matching Pursuit\n1 atom', 'omp',
{'transform_n_nonzero_coefs': 1}),
('Orthogonal Matching Pursuit\n2 atoms', 'omp',
{'transform_n_nonzero_coefs': 2}),
('Least-angle regression\n5 atoms', 'lars',
{'transform_n_nonzero_coefs': 5}),
('Thresholding\n alpha=0.1', 'threshold', {'transform_alpha': .1})]
reconstructions = {}
for title, transform_algorithm, kwargs in transform_algorithms:
print(title + '...')
reconstructions[title] = lena.copy()
t0 = time()
dico.set_params(transform_algorithm=transform_algorithm, **kwargs)
code = dico.transform(data)
patches = np.dot(code, V)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
patches += intercept
patches = patches.reshape(len(data), *patch_size)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
reconstructions[title][:, height // 2:] = reconstruct_from_patches_2d(
patches, (width, height // 2))
dt = time() - t0
print('done in %.2fs.' % dt)
show_with_diff(reconstructions[title], lena,
title + ' (time: %.1fs)' % dt)
plt.show()
| bsd-3-clause |
NeuroVault/NeuroVault | neurovault/apps/statmaps/tests/test_comparison.py | 4 | 11915 | import nibabel
import numpy
import os
import shutil
import tempfile
from django.test import TestCase
from numpy.testing import assert_almost_equal, assert_equal
from neurovault.apps.statmaps.models import Comparison, Similarity, User, Collection, Image
from neurovault.apps.statmaps.tasks import save_voxelwise_pearson_similarity, get_images_by_ordered_id, save_resampled_transformation_single
from neurovault.apps.statmaps.tests.utils import clearDB, save_statmap_form
from neurovault.apps.statmaps.utils import split_4D_to_3D, get_similar_images
class ComparisonTestCase(TestCase):
pk1 = None
pk1_copy = None
pk2 = None
pk3 = None
pearson_metric = None
pknan = None
def setUp(self):
print "Preparing to test image comparison..."
self.tmpdir = tempfile.mkdtemp()
app_path = os.path.abspath(os.path.dirname(__file__))
self.u1 = User.objects.create(username='neurovault')
self.comparisonCollection1 = Collection(name='comparisonCollection1', owner=self.u1,
DOI='10.3389/fninf.2015.00008')
self.comparisonCollection1.save()
self.comparisonCollection2 = Collection(name='comparisonCollection2', owner=self.u1,
DOI='10.3389/fninf.2015.00009')
self.comparisonCollection2.save()
self.comparisonCollection3 = Collection(name='comparisonCollection3', owner=self.u1,
DOI='10.3389/fninf.2015.00010')
self.comparisonCollection3.save()
self.comparisonCollection4 = Collection(name='comparisonCollection4', owner=self.u1,
DOI='10.3389/fninf.2015.00011')
self.comparisonCollection4.save()
self.comparisonCollection5 = Collection(name='comparisonCollection5', owner=self.u1,
DOI='10.3389/fninf.2015.00012')
self.comparisonCollection5.save()
image1 = save_statmap_form(image_path=os.path.join(app_path,'test_data/api/VentralFrontal_thr75_summaryimage_2mm.nii.gz'),
collection=self.comparisonCollection1,
image_name = "image1",
ignore_file_warning=True)
self.pk1 = image1.id
# Image 2 is equivalent to 1, so pearson should be 1.0
image2 = save_statmap_form(image_path=os.path.join(app_path,'test_data/api/VentralFrontal_thr75_summaryimage_2mm.nii.gz'),
collection=self.comparisonCollection2,
image_name = "image1_copy",
ignore_file_warning=True)
self.pk1_copy = image2.id
# "Bricks" images
bricks = split_4D_to_3D(nibabel.load(os.path.join(app_path,'test_data/TTatlas.nii.gz')),tmp_dir=self.tmpdir)
image3 = save_statmap_form(image_path=bricks[0][1],collection=self.comparisonCollection3,image_name="image2",ignore_file_warning=True)
self.pk2 = image3.id
image4 = save_statmap_form(image_path=bricks[1][1],collection=self.comparisonCollection4,image_name="image3",ignore_file_warning=True)
self.pk3 = image4.id
# This last image is a statmap with NaNs to test that transformation doesn't eliminate them
image_nan = save_statmap_form(image_path=os.path.join(app_path,'test_data/statmaps/motor_lips_nan.nii.gz'),
collection=self.comparisonCollection5,
image_name = "image_nan",
ignore_file_warning=True)
self.pknan = image_nan.id
Similarity.objects.update_or_create(similarity_metric="pearson product-moment correlation coefficient",
transformation="voxelwise",
metric_ontology_iri="http://webprotege.stanford.edu/RCS8W76v1MfdvskPLiOdPaA",
transformation_ontology_iri="http://webprotege.stanford.edu/R87C6eFjEftkceScn1GblDL")
self.pearson_metric = Similarity.objects.filter(similarity_metric="pearson product-moment correlation coefficient",
transformation="voxelwise",
metric_ontology_iri="http://webprotege.stanford.edu/RCS8W76v1MfdvskPLiOdPaA",
transformation_ontology_iri="http://webprotege.stanford.edu/R87C6eFjEftkceScn1GblDL")
def tearDown(self):
shutil.rmtree(self.tmpdir)
clearDB()
# When generating transformations for comparison, NaNs should be maintained in the map
    # (and not replaced with zero / interpolated to "almost zero" values).
def test_interpolated_transform_zeros(self):
img = save_resampled_transformation_single(self.pknan, resample_dim=[4, 4, 4])
data = numpy.load(img.reduced_representation.file)
print "Does transformation calculation maintain NaN values?: %s" %(numpy.isnan(data).any())
assert_equal(numpy.isnan(data).any(),True)
def test_save_pearson_similarity(self):
# Should be 1
print "Testing %s vs. %s: same images, different ids" %(self.pk1,self.pk1_copy)
save_voxelwise_pearson_similarity(self.pk1,self.pk1_copy)
# Should not be saved
with self.assertRaises(Exception):
print "Testing %s vs. %s: same pks, success is raising exception" %(self.pk1,self.pk1)
save_voxelwise_pearson_similarity(self.pk1,self.pk1)
print "Testing %s vs. %s, different image set 1" %(self.pk1,self.pk2)
save_voxelwise_pearson_similarity(self.pk1,self.pk2)
print "Testing %s vs. %s, different image set 2" %(self.pk2,self.pk3)
save_voxelwise_pearson_similarity(self.pk2,self.pk3)
# Should not exist
print "Success for this test means there are no comparisons returned."
image1, image1_copy = get_images_by_ordered_id(self.pk1, self.pk1)
comparison = Comparison.objects.filter(image1=image1,image2=image1_copy,similarity_metric=self.pearson_metric)
self.assertEqual(len(comparison), 0)
# Should be 1
print "Success for this test means a score of 1.0"
image1, image2 = get_images_by_ordered_id(self.pk1, self.pk1_copy)
comparison = Comparison.objects.filter(image1=image1,image2=image2,similarity_metric=self.pearson_metric)
self.assertEqual(len(comparison), 1)
self.assertAlmostEqual(comparison[0].similarity_score, 1.0)
print "Success for the remaining tests means a specific comparison score."
image1, image2 = get_images_by_ordered_id(self.pk1, self.pk2)
comparison = Comparison.objects.filter(image1=image1,image2=image2,similarity_metric=self.pearson_metric)
self.assertEqual(len(comparison), 1)
print comparison[0].similarity_score
assert_almost_equal(comparison[0].similarity_score, 0.214495998015581,decimal=5)
image2, image3 = get_images_by_ordered_id(self.pk3, self.pk2)
comparison = Comparison.objects.filter(image1=image2,image2=image3,similarity_metric=self.pearson_metric)
self.assertEqual(len(comparison), 1)
print comparison[0].similarity_score
assert_almost_equal(comparison[0].similarity_score, 0.312548260435768,decimal=5)
def test_private_to_public_switch(self):
private_collection1 = Collection(name='privateCollection1',owner=self.u1, private=True,
DOI='10.3389/fninf.2015.00099')
private_collection1.save()
private_collection2 = Collection(name='privateCollection2',owner=self.u1, private=True,
DOI='10.3389/fninf.2015.00089')
private_collection2.save()
app_path = os.path.abspath(os.path.dirname(__file__))
private_image1 = save_statmap_form(image_path=os.path.join(app_path,'test_data/statmaps/all.nii.gz'),
collection=private_collection1,
image_name = "image1")
private_image2 = save_statmap_form(image_path=os.path.join(app_path,'test_data/statmaps/motor_lips.nii.gz'),
collection=private_collection2,
image_name = "image2")
comparison = Comparison.objects.filter(image1=private_image1,image2=private_image2)
self.assertEqual(len(comparison), 0)
print "before private: %s"%Comparison.objects.all().count()
private_collection1 = Collection.objects.get(pk=private_collection1.pk)
private_collection1.private = False
private_collection1.save()
private_collection2 = Collection.objects.get(pk=private_collection2.pk)
private_collection2.private = False
private_collection2.save()
print "after private: %s"%Comparison.objects.all().count()
print private_collection1.basecollectionitem_set.instance_of(Image).all()
comparison = Comparison.objects.filter(image1=private_image1,image2=private_image2)
self.assertEqual(len(comparison), 1)
def test_add_DOI(self):
collection1 = Collection(name='Collection1', owner=self.u1, private=False)
collection1.save()
collection2 = Collection(name='Collection2', owner=self.u1, private=False)
collection2.save()
app_path = os.path.abspath(os.path.dirname(__file__))
image1 = save_statmap_form(image_path=os.path.join(app_path,'test_data/statmaps/all.nii.gz'),
collection=collection1,
image_name = "image1")
image2 = save_statmap_form(image_path=os.path.join(app_path,'test_data/statmaps/motor_lips.nii.gz'),
collection=collection2,
image_name = "image2")
comparison = Comparison.objects.filter(image1=image1,image2=image2)
self.assertEqual(len(comparison), 0)
print "without DOI: %s"%Comparison.objects.all().count()
collection = Collection.objects.get(pk=collection1.pk)
collection.DOI = '10.3389/fninf.2015.00020'
collection.save()
print "with DOI: %s"%Comparison.objects.all().count()
print collection.basecollectionitem_set.instance_of(Image).all()
comparison = Comparison.objects.filter(image1=image1,image2=image2)
self.assertEqual(len(comparison), 1)
def test_get_similar_images(self):
collection1 = Collection(name='Collection1', owner=self.u1,
DOI='10.3389/fninf.2015.00099')
collection1.save()
collection2 = Collection(name='Collection2', owner=self.u1,
DOI='10.3389/fninf.2015.00089')
collection2.save()
app_path = os.path.abspath(os.path.dirname(__file__))
image1 = save_statmap_form(image_path=os.path.join(app_path, 'test_data/statmaps/all.nii.gz'),
collection=collection1,
image_name="image1")
image2 = save_statmap_form(image_path=os.path.join(app_path, 'test_data/statmaps/all.nii.gz'),
collection=collection2,
image_name="image2")
similar_images = get_similar_images(int(image1.pk))
print "Success for this test means the pandas DataFrame shows the copy in first position with score of 1"
self.assertEqual(similar_images['image_id'][0], int(image2.pk))
self.assertEqual(similar_images['score'][0], 1)
| mit |
koverholt/ibis | ibis/impala/tests/test_client.py | 2 | 10737 | # Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
from ibis.compat import unittest
from ibis.impala.tests.common import IbisTestEnv, ImpalaE2E, connect_test
from ibis.tests.util import assert_equal
import ibis
import ibis.common as com
import ibis.config as config
import ibis.expr.types as ir
import ibis.util as util
def approx_equal(a, b, eps):
assert abs(a - b) < eps
ENV = IbisTestEnv()
class TestImpalaClient(ImpalaE2E, unittest.TestCase):
def test_execute_exprs_default_backend(self):
cases = [
(ibis.literal(2), 2)
]
ibis.options.default_backend = None
client = connect_test(ENV, with_hdfs=False)
assert ibis.options.default_backend is client
for expr, expected in cases:
result = expr.execute()
assert result == expected
def test_raise_ibis_error_no_hdfs(self):
# #299
client = connect_test(ENV, with_hdfs=False)
self.assertRaises(com.IbisError, getattr, client, 'hdfs')
def test_get_table_ref(self):
table = self.db.functional_alltypes
assert isinstance(table, ir.TableExpr)
table = self.db['functional_alltypes']
assert isinstance(table, ir.TableExpr)
def test_run_sql(self):
query = """SELECT li.*
FROM {0}.tpch_lineitem li
""".format(self.test_data_db)
table = self.con.sql(query)
li = self.con.table('tpch_lineitem')
assert isinstance(table, ir.TableExpr)
assert_equal(table.schema(), li.schema())
expr = table.limit(10)
result = expr.execute()
assert len(result) == 10
def test_sql_with_limit(self):
query = """\
SELECT *
FROM functional_alltypes
LIMIT 10"""
table = self.con.sql(query)
ex_schema = self.con.get_schema('functional_alltypes')
assert_equal(table.schema(), ex_schema)
def test_raw_sql(self):
query = 'SELECT * from functional_alltypes limit 10'
cur = self.con.raw_sql(query, results=True)
rows = cur.fetchall()
cur.release()
assert len(rows) == 10
def test_explain(self):
t = self.con.table('functional_alltypes')
expr = t.group_by('string_col').size()
result = self.con.explain(expr)
assert isinstance(result, str)
def test_get_schema(self):
t = self.con.table('tpch_lineitem')
schema = self.con.get_schema('tpch_lineitem',
database=self.test_data_db)
assert_equal(t.schema(), schema)
def test_result_as_dataframe(self):
expr = self.alltypes.limit(10)
ex_names = expr.schema().names
result = self.con.execute(expr)
assert isinstance(result, pd.DataFrame)
assert list(result.columns) == ex_names
assert len(result) == 10
def test_adapt_scalar_array_results(self):
table = self.alltypes
expr = table.double_col.sum()
result = self.con.execute(expr)
assert isinstance(result, float)
with config.option_context('interactive', True):
result2 = expr.execute()
assert isinstance(result2, float)
expr = (table.group_by('string_col')
.aggregate([table.count().name('count')])
.string_col)
result = self.con.execute(expr)
assert isinstance(result, pd.Series)
def test_interactive_repr_call_failure(self):
t = self.con.table('tpch_lineitem').limit(100000)
t = t[t, t.l_receiptdate.cast('timestamp').name('date')]
keys = [t.date.year().name('year'), 'l_linestatus']
filt = t.l_linestatus.isin(['F'])
expr = (t[filt]
.group_by(keys)
.aggregate(t.l_extendedprice.mean().name('avg_px')))
w2 = ibis.trailing_window(9, group_by=expr.l_linestatus,
order_by=expr.year)
metric = expr['avg_px'].mean().over(w2)
enriched = expr[expr, metric]
with config.option_context('interactive', True):
repr(enriched)
def test_array_default_limit(self):
t = self.alltypes
result = self.con.execute(t.float_col, limit=100)
assert len(result) == 100
def test_limit_overrides_expr(self):
# #418
t = self.alltypes
result = self.con.execute(t.limit(10), limit=5)
assert len(result) == 5
def test_limit_equals_none_no_limit(self):
t = self.alltypes
with config.option_context('sql.default_limit', 10):
result = t.execute(limit=None)
assert len(result) > 10
def test_verbose_log_queries(self):
queries = []
def logger(x):
queries.append(x)
with config.option_context('verbose', True):
with config.option_context('verbose_log', logger):
self.con.table('tpch_orders', database=self.test_data_db)
assert len(queries) == 1
expected = 'DESCRIBE {0}.`tpch_orders`'.format(self.test_data_db)
assert queries[0] == expected
def test_sql_query_limits(self):
table = self.con.table('tpch_nation', database=self.test_data_db)
with config.option_context('sql.default_limit', 100000):
# table has 25 rows
assert len(table.execute()) == 25
# comply with limit arg for TableExpr
assert len(table.execute(limit=10)) == 10
# state hasn't changed
assert len(table.execute()) == 25
# non-TableExpr ignores default_limit
assert table.count().execute() == 25
# non-TableExpr doesn't observe limit arg
assert table.count().execute(limit=10) == 25
with config.option_context('sql.default_limit', 20):
# TableExpr observes default limit setting
assert len(table.execute()) == 20
# explicit limit= overrides default
assert len(table.execute(limit=15)) == 15
assert len(table.execute(limit=23)) == 23
# non-TableExpr ignores default_limit
assert table.count().execute() == 25
# non-TableExpr doesn't observe limit arg
assert table.count().execute(limit=10) == 25
# eliminating default_limit doesn't break anything
with config.option_context('sql.default_limit', None):
assert len(table.execute()) == 25
assert len(table.execute(limit=15)) == 15
assert len(table.execute(limit=10000)) == 25
assert table.count().execute() == 25
assert table.count().execute(limit=10) == 25
def test_expr_compile_verify(self):
table = self.db.functional_alltypes
expr = table.double_col.sum()
assert isinstance(expr.compile(), str)
assert expr.verify()
def test_api_compile_verify(self):
t = self.db.functional_alltypes
s = t.string_col
supported = s.lower()
unsupported = s.replace('foo', 'bar')
assert ibis.impala.verify(supported)
assert not ibis.impala.verify(unsupported)
def test_database_repr(self):
assert self.test_data_db in repr(self.db)
def test_database_drop(self):
tmp_name = '__ibis_test_{0}'.format(util.guid())
self.con.create_database(tmp_name)
db = self.con.database(tmp_name)
self.temp_databases.append(tmp_name)
db.drop()
assert not self.con.exists_database(tmp_name)
def test_database_default_current_database(self):
db = self.con.database()
assert db.name == self.con.current_database
def test_namespace(self):
ns = self.db.namespace('tpch_')
assert 'tpch_' in repr(ns)
table = ns.lineitem
expected = self.db.tpch_lineitem
attrs = dir(ns)
assert 'lineitem' in attrs
assert 'functional_alltypes' not in attrs
assert_equal(table, expected)
def test_close_drops_temp_tables(self):
from posixpath import join as pjoin
hdfs_path = pjoin(self.test_data_dir, 'parquet/tpch_region')
client = connect_test(ENV)
table = client.parquet_file(hdfs_path)
name = table.op().name
assert self.con.exists_table(name) is True
client.close()
assert not self.con.exists_table(name)
def test_execute_async_simple(self):
t = self.db.functional_alltypes
expr = t.double_col.sum()
q = expr.execute(async=True)
result = q.get_result()
expected = expr.execute()
assert result == expected
def test_query_cancel(self):
import time
t = self.db.functional_alltypes
t2 = t.union(t).union(t)
# WM: this query takes about 90 seconds to execute for me locally, so
# I'm eyeballing an acceptable time frame for the cancel to work
expr = t2.join(t2).count()
start = time.clock()
q = expr.execute(async=True)
q.cancel()
end = time.clock()
elapsed = end - start
assert elapsed < 5
assert q.is_finished()
def test_set_compression_codec(self):
old_opts = self.con.get_options()
assert old_opts['COMPRESSION_CODEC'].upper() == 'NONE'
self.con.set_compression_codec('snappy')
opts = self.con.get_options()
assert opts['COMPRESSION_CODEC'].upper() == 'SNAPPY'
self.con.set_compression_codec(None)
opts = self.con.get_options()
assert opts['COMPRESSION_CODEC'].upper() == 'NONE'
def test_disable_codegen(self):
self.con.disable_codegen(False)
opts = self.con.get_options()
assert opts['DISABLE_CODEGEN'] == '0'
self.con.disable_codegen()
opts = self.con.get_options()
assert opts['DISABLE_CODEGEN'] == '1'
impala_con = self.con.con
cur1 = impala_con.execute('SET')
cur2 = impala_con.execute('SET')
opts1 = dict(cur1.fetchall())
cur1.release()
opts2 = dict(cur2.fetchall())
cur2.release()
assert opts1['DISABLE_CODEGEN'] == '1'
assert opts2['DISABLE_CODEGEN'] == '1'
| apache-2.0 |
jpautom/scikit-learn | sklearn/metrics/pairwise.py | 9 | 45248 | # -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# Andreas Mueller <[email protected]>
# Philippe Gervais <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y, precomputed=False):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal, or the equivalent
check for a precomputed distance matrix.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
precomputed : bool
True if X is to be treated as precomputed distances to the samples in
Y.
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype = _return_float_dtype(X, Y)
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype)
Y = check_array(Y, accept_sparse='csr', dtype=dtype)
if precomputed:
if X.shape[1] != Y.shape[0]:
raise ValueError("Precomputed metric requires shape "
"(n_queries, n_indexed). Got (%d, %d) "
"for %d indexed." %
(X.shape[0], X.shape[1], Y.shape[0]))
elif X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False,
X_norm_squared=None):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if one argument varies but the other remains unchanged, then
`dot(x, x)` and/or `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
X_norm_squared : array-like, shape = [n_samples_1], optional
Pre-computed dot-products of vectors in X (e.g.,
``(X**2).sum(axis=1)``)
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
paired_distances : distances betweens pairs of elements of X and Y.
"""
X, Y = check_pairwise_arrays(X, Y)
if X_norm_squared is not None:
XX = check_array(X_norm_squared)
if XX.shape == (1, X.shape[0]):
XX = XX.T
elif XX.shape != (X.shape[0], 1):
raise ValueError(
"Incompatible dimensions for X and X_norm_squared")
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
YY = XX.T
elif Y_norm_squared is not None:
YY = np.atleast_2d(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
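# Hedged illustration, not part of scikit-learn: a minimal check that the
# dot-product expansion described above agrees with a naive pairwise
# computation. The underscore-prefixed helper name is hypothetical and
# exists only for this sketch.
def _naive_euclidean_check():
    rng = np.random.RandomState(0)
    X = rng.rand(5, 3)
    Y = rng.rand(4, 3)
    # direct |x - y| per pair, without the dot-product trick
    naive = np.sqrt(((X[:, np.newaxis, :] - Y[np.newaxis, :, :]) ** 2).sum(-1))
    return np.allclose(euclidean_distances(X, Y), naive)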
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable, default 'euclidean'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
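# Hedged illustration, not part of scikit-learn: the "mostly equivalent"
# claim from the docstring above, spelled out on a small random input.
# The helper name is hypothetical.
def _argmin_min_equivalence_check():
    rng = np.random.RandomState(0)
    X = rng.rand(6, 3)
    Y = rng.rand(8, 3)
    D = euclidean_distances(X, Y)
    indices, values = pairwise_distances_argmin_min(X, Y)
    return (np.array_equal(indices, D.argmin(axis=1)) and
            np.allclose(values, D.min(axis=1)))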
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
X : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
Y : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances([[3]], [[3]])#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances([[3]], [[2]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[2]], [[3]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
"""Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
return S
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
------
    The cosine distance is equivalent to half the squared
euclidean distance if each sample is normalized to unit norm
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
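# Hedged illustration, not part of scikit-learn: a numerical check of the
# note above -- on unit-normalized rows the cosine distance equals half the
# squared euclidean distance. The helper name is hypothetical.
def _paired_cosine_identity_check():
    rng = np.random.RandomState(0)
    X = normalize(rng.rand(5, 4))
    Y = normalize(rng.rand(5, 4))
    return np.allclose(paired_cosine_distances(X, Y),
                       .5 * paired_euclidean_distances(X, Y) ** 2)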
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
coef0 : int, default 1
degree : int, default 3
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
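# Hedged illustration, not part of scikit-learn: the polynomial formula
# above, (gamma * <x, y> + coef0) ** degree, evaluated directly for a single
# pair of rows. The helper name is hypothetical.
def _polynomial_single_pair_check(gamma=0.1, coef0=1, degree=3):
    x = np.array([[1.0, 2.0]])
    y = np.array([[3.0, 4.0]])
    manual = (gamma * np.dot(x, y.T) + coef0) ** degree
    return np.allclose(polynomial_kernel(x, y, degree=degree, gamma=gamma,
                                         coef0=coef0), manual)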
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
coef0 : int, default 1
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
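# Hedged illustration, not part of scikit-learn: the rbf formula above,
# K(x, y) = exp(-gamma * ||x - y||^2), evaluated directly for a single pair
# of rows. The helper name is hypothetical.
def _rbf_single_pair_check(gamma=0.5):
    rng = np.random.RandomState(0)
    x = rng.rand(1, 3)
    y = rng.rand(1, 3)
    manual = np.exp(-gamma * np.sum((x - y) ** 2))
    return np.allclose(rbf_kernel(x, y, gamma=gamma), manual)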
def laplacian_kernel(X, Y=None, gamma=None):
"""Compute the laplacian kernel between X and Y.
The laplacian kernel is defined as::
K(x, y) = exp(-gamma ||x-y||_1)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <laplacian_kernel>`.
.. versionadded:: 0.17
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = -gamma * manhattan_distances(X, Y)
np.exp(K, K) # exponentiate K in-place
return K
def cosine_similarity(X, Y=None, dense_output=True):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Read more in the :ref:`User Guide <cosine_similarity>`.
Parameters
----------
X : ndarray or sparse array, shape: (n_samples_X, n_features)
Input data.
Y : ndarray or sparse array, shape: (n_samples_Y, n_features)
Input data. If ``None``, the output will be the pairwise
similarities between all samples in ``X``.
dense_output : boolean (optional), default True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
.. versionadded:: 0.17
parameter *dense_output* for sparse output.
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output)
return K
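# Hedged illustration, not part of scikit-learn: the statement above that
# cosine similarity reduces to the linear kernel on L2-normalized data,
# checked on random input. The helper name is hypothetical.
def _cosine_linear_equivalence_check():
    rng = np.random.RandomState(0)
    X = normalize(rng.rand(4, 6))
    Y = normalize(rng.rand(3, 6))
    return np.allclose(cosine_similarity(X, Y), linear_kernel(X, Y))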
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
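Examples
--------
A small worked example with illustrative non-negative "histogram" rows:
>>> import numpy as np
>>> from sklearn.metrics.pairwise import additive_chi2_kernel
>>> X = np.array([[1., 2.], [2., 1.]])  # illustrative non-negative input
>>> K = additive_chi2_kernel(X)
>>> float(np.round(K[0, 1], 4))  # -(1/3 + 1/3)
-0.6667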
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
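Examples
--------
A small worked example with illustrative non-negative rows and gamma=1:
>>> import numpy as np
>>> from sklearn.metrics.pairwise import chi2_kernel
>>> X = np.array([[1., 2.], [2., 1.]])  # illustrative non-negative input
>>> K = chi2_kernel(X, gamma=1.)
>>> float(np.round(K[0, 1], 4))  # exp(-1 * 2/3)
0.5134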
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances,
'precomputed': None, # HACK: precomputed is always allowed, never called
}
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
Read more in the :ref:`User Guide <metrics>`.
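Examples
--------
A lookup sketch showing how a metric string maps to its implementation:
>>> from sklearn.metrics.pairwise import distance_metrics
>>> distance_metrics()['manhattan'].__name__
'manhattan_distances'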
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
if n_jobs == 1:
# Special case to avoid picklability checks in delayed
return func(X, Y, **kwds)
# TODO: in some cases, backend='threading' may be appropriate
fd = delayed(func)
ret = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
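# Illustrative note: a user-supplied callable reaches _pairwise_callable
# through the public API, e.g.
#     pairwise_distances(X, metric=lambda a, b: np.abs(a - b).max())
# computes the Chebyshev distance one pair of rows at a time.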
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
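Examples
--------
A minimal sketch; the two points are chosen so the distance is easy to verify
by hand (a 3-4-5 triangle):
>>> import numpy as np
>>> from sklearn.metrics.pairwise import pairwise_distances
>>> X = np.array([[0., 0.], [3., 4.]])  # illustrative two-sample input
>>> D = pairwise_distances(X, metric='euclidean')
>>> float(D[0, 1])
5.0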
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
# If updating this dictionary, update the doc in both kernel_metrics()
# and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'laplacian': laplacian_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
This function simply returns the valid pairwise kernel metrics.
It exists, however, to allow for a verbose description of the mapping for
each of the valid strings.
The valid kernel metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'laplacian' sklearn.pairwise.laplacian_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
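Examples
--------
A lookup sketch showing how a kernel string maps to its implementation:
>>> from sklearn.metrics.pairwise import kernel_metrics, rbf_kernel
>>> kernel_metrics()['rbf'] is rbf_kernel
True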
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"laplacian": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['additive_chi2', 'chi2', 'linear', 'poly', 'polynomial', 'rbf',
 'laplacian', 'sigmoid', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
filter_params : boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
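Examples
--------
A minimal sketch using the string API; gamma is an illustrative value that is
forwarded to rbf_kernel through ``**kwds``:
>>> import numpy as np
>>> from sklearn.metrics.pairwise import pairwise_kernels
>>> X = np.array([[0., 0.], [1., 1.]])  # illustrative two-sample input
>>> K = pairwise_kernels(X, metric='rbf', gamma=0.5)
>>> float(np.round(K[0, 1], 4))  # exp(-0.5 * ||x0 - x1||^2)
0.3679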
"""
# import GPKernel locally to prevent circular imports
from ..gaussian_process.kernels import Kernel as GPKernel
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif isinstance(metric, GPKernel):
func = metric.__call__
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
| bsd-3-clause |
mattilyra/scikit-learn | examples/manifold/plot_compare_methods.py | 39 | 4036 | """
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`example_manifold_plot_manifold_sphere.py`
Note that the purpose of MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space. Unlike other
manifold-learning algorithms, it does not seek an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <[email protected]>
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
# Next line to silence pyflakes. This import is needed.
Axes3D
n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
             % (n_points, n_neighbors), fontsize=14)
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(251, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.view_init(4, -72)
except Exception:  # fall back to a 2D scatter for very old matplotlib
ax = fig.add_subplot(251, projection='3d')
plt.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,
eigen_solver='auto',
method=method).fit_transform(X)
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)
t1 = time()
print("Isomap: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("Isomap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
mds = manifold.MDS(n_components, max_iter=100, n_init=1)
Y = mds.fit_transform(X)
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
se = manifold.SpectralEmbedding(n_components=n_components,
n_neighbors=n_neighbors)
Y = se.fit_transform(X)
t1 = time()
print("SpectralEmbedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("SpectralEmbedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
tsne = manifold.TSNE(n_components=n_components, init='pca', random_state=0)
Y = tsne.fit_transform(X)
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |